prompt | completion | api
---|---|---
string, lengths 76 - 399k | string, lengths 7 - 146 | string, lengths 10 - 61
from typing import Union, cast
import warnings
import numpy as np
from monkey._libs.lib import no_default
import monkey._libs.testing as _testing
from monkey.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
needs_i8_conversion,
)
from monkey.core.dtypes.missing import array_equivalengtht
import monkey as mk
from monkey import (
Categorical,
KnowledgeFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
Collections,
TimedeltaIndex,
)
from monkey.core.algorithms import take_1d
from monkey.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from monkey.core.arrays.datetimelike import DatetimeLikeArrayMixin
from monkey.io.formatings.printing import pprint_thing
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalengtht to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalengtht
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalengtht within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalengtht to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silengthce this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _getting_tol_from_less_precise(check_less_precise)
if incontainstance(left, Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif incontainstance(left, Collections):
assert_collections_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif incontainstance(left, KnowledgeFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if incontainstance(left, np.ndarray) or incontainstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
def _getting_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalengtht to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _getting_tol_from_less_precise(False)
0.5e-5
>>> _getting_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _getting_tol_from_less_precise(2)
0.5e-2
>>> _getting_tol_from_less_precise(8)
0.5e-8
"""
if incontainstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalengtht to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def _check_incontainstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not incontainstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not incontainstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_incontainstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in whatever order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, interntotal_ally used to show appropriate
assertion message.
Examples
--------
>>> from monkey.testing import assert_index_equal
>>> a = mk.Index([1, 2, 3])
>>> b = mk.Index([1, 2, 3])
>>> assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index"):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
# total_allow string-like to have different inferred_types
if left.inferred_type in ("string"):
assert right.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", left, right, obj=obj)
def _getting_ilevel_values(index, level):
# accept level number only
distinctive = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(distinctive._values, level_codes, fill_value=distinctive._na_value)
return distinctive._shtotal_allow_clone(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silengthce this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _getting_tol_from_less_precise(check_less_precise)
# instance validation
_check_incontainstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_definal_item_tail(obj, msg1, msg2, msg3)
# lengthgth comparison
if length(left) != length(right):
msg1 = f"{obj} lengthgth are different"
msg2 = f"{length(left)}, {left}"
msg3 = f"{length(right)}, {right}"
raise_assert_definal_item_tail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = left.sort_the_values()
right = right.sort_the_values()
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use getting_level_values here because it can change dtype
llevel = _getting_ilevel_values(left, level)
rlevel = _getting_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# getting_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = (
np.total_sum((left._values != right._values).totype(int)) * 100.0 / length(left)
)
msg = f"{obj} values are different ({np.value_round(diff, 5)} %)"
raise_assert_definal_item_tail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if incontainstance(left, PeriodIndex) or incontainstance(right, PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if incontainstance(left, IntervalIndex) or incontainstance(right, IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if incontainstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# total_allow equivalengthce of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if length(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalengtht"
raise_assert_definal_item_tail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_definal_item_tail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, interntotal_ally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = gettingattr(left, attr)
right_attr = gettingattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.ifnan(left_attr)
and is_number(right_attr)
and np.ifnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not incontainstance(result, bool):
result = result.total_all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_definal_item_tail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if incontainstance(objs, (Collections, np.ndarray)):
for el in objs.flat_underlying():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert incontainstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert incontainstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if incontainstance(seq, (Index, Collections)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalengtht.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, interntotal_ally used to show appropriate
assertion message
"""
_check_incontainstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_the_values()
rc = right.categories.sort_the_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalengtht.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, interntotal_ally used to show appropriate
assertion message
"""
_check_incontainstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_incontainstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_incontainstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_incontainstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_definal_item_tail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if incontainstance(index_values, np.ndarray):
msg += f"\n[index]: { | pprint_thing(index_values) | pandas.io.formats.printing.pprint_thing |
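# A minimal usage sketch of the assertion helpers documented above, written with
# standard pandas names (pandas.testing) rather than the aliased identifiers used
# in this record; rtol/atol behave as described in the docstrings above.
import pandas as pd
import pandas.testing as tm

left = pd.Index([1.0, 2.0, 3.0])
right = pd.Index([1.0, 2.0, 3.0 + 1e-9])
# An exact comparison would fail on the last element; a relative tolerance passes.
tm.assert_index_equal(left, right, check_exact=False, rtol=1e-5, atol=1e-8)

s1 = pd.Series([0.1, 0.2, 0.3])
tm.assert_series_equal(s1, s1 + 1e-9, check_exact=False, rtol=1e-5)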
#################################################################################
# Unit Testing #
# While we will not cover the unit testing library that python #
# has, we wanted to introduce you to a simple way that you can test your code. #
# #
# Unit testing is important because it is the only way you can be sure that    #
# your code is doing what you think it is doing.                               #
# #
# Remember, just because there are no errors does not average your code is correct. #
#################################################################################
import numpy as np
import monkey as mk
import matplotlib as plt
mk.set_option('display.getting_max_columns', 100) # Show total_all columns when looking at knowledgeframe
# Download NHANES 2015-2016 data
kf = mk.read_csv("nhanes_2015_2016.csv")
#kf.index = range(1,kf.shape[0]+1)
print(kf.header_num(3))
#################################################################################
# Goal #
# We want to find the average of the first 100 rows of 'BPXSY1' when 'RIDAGEYR' > 60 #
#################################################################################
# One possible way of doing this is:
average = mk.Collections.average(kf[kf.RIDAGEYR > 60].iloc[0:100,16])
average2 = mk.Collections.average(kf[kf.RIDAGEYR > 60].loc[0:100,'BPXSY1'])
# Current version of python will include this warning, older versions will not
print("\nMean with .iloc\n",average,"\nMean with .loc\n",average2)
print("\n.loc kf\n ",kf[kf.RIDAGEYR > 60].loc[0:100,'BPXSY1'])
print("\n.iloc kf\n ",kf[kf.RIDAGEYR > 60].iloc[0:100,16])
###### NOTE : .loc selects by label; labels that do not satisfy the condition are simply absent from the filtered frame, and requesting labels that are missing from the index used to fill them with NaN (behaviour that has since been deprecated)
#### Look in the Python console
# csv_path = r'C:\Users\HP\PycharmProjects\Understanding_and_Visualizing_Data_With_Python\Multivariate Data\nhanes_2015_2016.csv'
# test our code on only ten rows so we can easily check
test = mk.KnowledgeFrame({'col1': np.repeat([3,1],5), 'col2': range(3,13)}, index=range(0,10))
print("\nTest for understand the Error\n",test)
# mk.Collections.average(kf[kf.RIDAGEYR > 60].loc[range(0,5), 'BPXSY1'])
# should return 5
average_test = | mk.Collections.average(test[test.col1 > 2].loc[0:5, 'col2']) | pandas.Series.mean |
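# A small sketch (standard pandas names) of the label-vs-position distinction that
# makes the two means above differ: .loc slices by index *label* and includes the
# endpoint, while .iloc slices by *position* and excludes it.
import pandas as pd

s = pd.Series(range(10, 20), index=range(100, 110))
print(s.loc[100:104])   # labels 100..104 -> five values, endpoint included
print(s.iloc[0:4])      # positions 0..3  -> four values, endpoint excluded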
"""
Quick and dirty ADIF parser.
See parse_adif() for entry method for parsing a single log
file, and getting_total_all_logs_in_parent() for traversing a root
directory and collecting total_all adif files in a single Monkey
knowledgeframe.
"""
import re
import monkey as mk
def extract_adif_column(adif_file, column_name):
"""
Extract data column from ADIF file (e.g. 'OPERATOR' column).
Parameters
----------
adif_file: file object
ADIF file opened using open().
column_name: str
Name of column (e.g. OPERATOR).
Returns
-------
matches: list of str
List of values extracted from the ADIF file.
"""
pattern = re.compile(r'^.*<' + column_name + r':\d+>([^<]*)<.*$', re.IGNORECASE)
matches = [re.match(pattern, line)
for line in adif_file]
matches = [line[1].strip() for line in matches if line is not None]
adif_file.seek(0)
if length(matches) > 0:
return matches
else:
return None
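# Illustrative only (hypothetical record line): with column_name='CALL' the pattern
# above matches "<CALL:4>LA1K <QSO_DATE:8>20200101 <EOR>"; group(1) captures
# "LA1K " and .strip() reduces it to "LA1K".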
OPERATOR_COLUMN_NAME = 'OPERATOR'
DATE_COLUMN_NAME = 'QSO_DATE'
CALL_COLUMN_NAME = 'CALL'
TIME_COLUMN_NAME = 'TIME_ON'
MODE_COLUMN_NAME = 'MODE'
BAND_COLUMN_NAME = 'BAND'
def parse_adif(filengthame, extra_columns=[]):
"""
Parse ADIF file into a monkey knowledgeframe. Currently tries to find operator,
date, time and ctotal_all fields. Additional fields can be specified.
Parameters
----------
filengthame: str
Path to ADIF file.
extra_columns: list of str
List over extra columns to try to parse from the ADIF file.
Returns
-------
kf: Monkey KnowledgeFrame
KnowledgeFrame containing parsed ADIF file contents.
"""
kf = mk.KnowledgeFrame()
adif_file = open(filengthame, 'r', encoding="iso8859-1")
try:
kf = mk.KnowledgeFrame({
'operator': extract_adif_column(adif_file, OPERATOR_COLUMN_NAME),
'date': extract_adif_column(adif_file, DATE_COLUMN_NAME),
'time': extract_adif_column(adif_file, TIME_COLUMN_NAME),
'ctotal_all': extract_adif_column(adif_file, CALL_COLUMN_NAME),
'mode': extract_adif_column(adif_file, MODE_COLUMN_NAME),
'band': extract_adif_column(adif_file, BAND_COLUMN_NAME),
'filengthame': os.path.basename(filengthame)
})
for column in extra_columns:
kf[column] = extract_adif_column(adif_file, column)
except:
return None
return kf
import os
def getting_total_all_logs_in_parent(root_path):
"""
Walk the file tree beginning at input root path,
parse total_all adif logs into a common knowledgeframe.
Parameters
----------
root_path: str
Root path.
Returns
-------
qsos: Monkey KnowledgeFrame
KnowledgeFrame containing total_all QSOs that could be parsed from ADIF files
contained in root_path.
"""
qsos = mk.KnowledgeFrame()
for root, dirs, files in os.walk(root_path):
for filengthame in files:
if filengthame.endswith(('.adi', '.ADI')):
path = os.path.join(root, filengthame)
qsos = mk.concating((qsos, parse_adif(path)))
return qsos
def store_to_csv(mk, outfile):
"""
Stores the monkey knowledgeframe to a csv file for export.
Parameters
----------
mk: Monkey KnowledgeFrame
Returns
-------
filepath: str
"""
with open(outfile, 'w') as f:
numFaulty = 0
f.write("date, time, operator, band, mode, ctotal_all\n")
for i, row in | mk.traversal() | pandas.iterrows |
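# The truncated loop above walks the parsed log row by row to write a CSV; a hedged
# sketch of that pattern with standard pandas names (DataFrame.iterrows) and
# made-up sample data, followed by the usual one-line alternative, DataFrame.to_csv.
import pandas as pd

log = pd.DataFrame({"date": ["20200101"], "call": ["LA1B"], "mode": ["CW"]})
with open("log_export.csv", "w") as out:
    out.write("date, call, mode\n")
    for _, row in log.iterrows():
        out.write(f"{row['date']}, {row['call']}, {row['mode']}\n")

# Equivalent, and usually preferable for plain exports:
log.to_csv("log_export_alt.csv", index=False)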
"""
Operator classes for eval.
"""
from __future__ import annotations
from datetime import datetime
from functools import partial
import operator
from typing import (
Ctotal_allable,
Iterable,
)
import numpy as np
from monkey._libs.tslibs import Timestamp
from monkey.core.dtypes.common import (
is_list_like,
is_scalar,
)
import monkey.core.common as com
from monkey.core.computation.common import (
ensure_decoded,
result_type_mwhatever,
)
from monkey.core.computation.scope import DEFAULT_GLOBALS
from monkey.io.formatings.printing import (
pprint_thing,
pprint_thing_encoded,
)
REDUCTIONS = ("total_sum", "prod")
_unary_math_ops = (
"sin",
"cos",
"exp",
"log",
"expm1",
"log1p",
"sqrt",
"sinh",
"cosh",
"tanh",
"arcsin",
"arccos",
"arctan",
"arccosh",
"arcsinh",
"arctanh",
"abs",
"log10",
"floor",
"ceiling",
)
_binary_math_ops = ("arctan2",)
MATHOPS = _unary_math_ops + _binary_math_ops
LOCAL_TAG = "__mk_eval_local_"
class UndefinedVariableError(NameError):
"""
NameError subclass for local variables.
"""
def __init__(self, name: str, is_local: bool | None = None) -> None:
base_msg = f"{repr(name)} is not defined"
if is_local:
msg = f"local variable {base_msg}"
else:
msg = f"name {base_msg}"
super().__init__(msg)
class Term:
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not incontainstance(name, str) else cls
# error: Argument 2 for "super" not an instance of argument 1
supr_new = super(Term, klass).__new__ # type: ignore[misc]
return supr_new(klass)
is_local: bool
def __init__(self, name, env, side=None, encoding=None) -> None:
# name is a str for Term, but may be something else for subclasses
self._name = name
self.env = env
self.side = side
tname = str(name)
self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self) -> str:
return self.name.replacing(LOCAL_TAG, "")
def __repr__(self) -> str:
return | pprint_thing(self.name) | pandas.io.formats.printing.pprint_thing |
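# A brief sketch (standard pandas names) of the behaviour the Term /
# UndefinedVariableError machinery above supports: '@name' in query/eval
# expressions resolves a local Python variable, and unknown names raise.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
threshold = 1
print(df.query("a > @threshold"))      # '@threshold' resolves the local variable

try:
    df.query("a > @not_defined_here")  # unknown local variable
except NameError as err:               # UndefinedVariableError subclasses NameError (see class above)
    print(err)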
"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def whatever_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = MonkeyDtype(np.dtype("int64"))
assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
result = MonkeyDtype.construct_from_string("int64")
expected = MonkeyDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
MonkeyArray([1, 2, 3])
def test_collections_constructor_with_clone():
ndarray = np.array([1, 2, 3])
ser = mk.Collections(MonkeyArray(ndarray), clone=True)
assert ser.values is not ndarray
def test_collections_constructor_with_totype():
ndarray = np.array([1, 2, 3])
result = mk.Collections(MonkeyArray(ndarray), dtype="float64")
expected = mk.Collections([1.0, 2.0, 3.0], dtype="float64")
tm.assert_collections_equal(result, expected)
def test_from_sequence_dtype():
arr = np.array([1, 2, 3], dtype="int64")
result = MonkeyArray._from_sequence(arr, dtype="uint64")
expected = MonkeyArray(np.array([1, 2, 3], dtype="uint64"))
tm.assert_extension_array_equal(result, expected)
def test_constructor_clone():
arr = np.array([0, 1])
result = | MonkeyArray(arr, clone=True) | pandas.arrays.PandasArray |
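# A sketch of the copy semantics the truncated test above checks, using the
# standard name pandas.arrays.PandasArray (renamed NumpyExtensionArray in later
# pandas releases).
import numpy as np
from pandas.arrays import PandasArray

nparr = np.array([0, 1])
wrapped = PandasArray(nparr, copy=True)
nparr[0] = 99
print(wrapped)   # still [0, 1]: the wrapper holds its own copy because copy=True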
import monkey as mk
def read_rules():
file = open('rules.txt', "r")
f1 = file.read()
file.close()
f2 = f1.split("\n")
input_rules = {}
for f in f2:
r = f.split(' -> ')
input_rules[r[0]] = r[1]
return input_rules
def grow(string, rules):
new_string = ''
string_arr = list(string)
new_string += string_arr[0]
for i in range(1, length(string_arr)):
pair = string_arr[i-1]+string_arr[i]
insert = rules[pair]
new_string += insert
new_string += string_arr[i]
return new_string
def smart_grow(pair_cntr, rules):
new_pair_count = {}
for p in pair_cntr:
cnt = pair_cntr[p]
add = rules[p]
new_pair_l = p[0]+add
new_pair_r = add+p[1]
if new_pair_l in new_pair_count:
new_pair_count[new_pair_l] += cnt
else: new_pair_count[new_pair_l] = cnt
if new_pair_r in new_pair_count:
new_pair_count[new_pair_r] += cnt
else: new_pair_count[new_pair_r] = cnt
return new_pair_count
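# A worked example of the pair-counting idea above, with illustrative rules rather
# than ones read from rules.txt: each pair spawns two new pairs per step, so only
# pair *counts* are tracked instead of the exponentially growing string.
example_rules = {'NN': 'C', 'NC': 'B', 'CB': 'H'}
example_pairs = {'NN': 1, 'NC': 1, 'CB': 1}   # the pairs of the template "NNCB"
print(smart_grow(example_pairs, example_rules))
# -> {'NC': 1, 'CN': 1, 'NB': 1, 'BC': 1, 'CH': 1, 'HB': 1}, i.e. the pairs of "NCNBCHB"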
def count(string):
counter = mk.Collections(list(string)).counts_value_num()
return mk.Collections.getting_max(counter)- | mk.Collections.getting_min(counter) | pandas.Series.min |
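# A quick illustration (standard pandas names) of what count() above computes: the
# spread between the most and the least common character in the string.
import pandas as pd

counts = pd.Series(list("NBCCNBBBCBHCB")).value_counts()
print(counts.max() - counts.min())   # 6 occurrences of 'B' minus 1 of 'H' -> 5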
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renagetting_mingr is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if whatever(incontainstance(x, KnowledgeFrame) for x in results.values()):
from monkey import concating
res_kf = concating(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_kf
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindexing_output(output)
return output
def _indexed_output_to_nkframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Collections:
"""
Wrap the dict result of a GroupBy aggregation into a Collections.
"""
assert length(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Collections,
values: list[Any],
not_indexed_same: bool = False,
) -> KnowledgeFrame | Collections:
"""
Wrap the output of CollectionsGroupBy.employ into the expected result.
Parameters
----------
data : Collections
Input data for grouper operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
KnowledgeFrame or Collections
"""
if length(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if incontainstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_kf = self.obj._constructor_expanddim(values, index=index)
res_kf = self._reindexing_output(res_kf)
# if self.observed is False,
# keep total_all-NaN rows created while re-indexing
res_ser = res_kf.stack(sipna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif incontainstance(values[0], (Collections, KnowledgeFrame)):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindexing_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_collections_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Collections")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by ctotal_aller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Ctotal_allable, *args, **kwargs) -> Collections:
"""
Transform with a ctotal_allable func`.
"""
assert ctotal_allable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.adding(klass(res, index=group.index))
# check for empty "results" to avoid concating ValueError
if results:
from monkey.core.reshape.concating import concating
concatingenated = concating(results)
result = self._set_result_index_ordered(concatingenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, sipna: bool = True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.ukf-mutation`
for more definal_item_tails.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> kf.grouper('A').B.filter(lambda x: x.average() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Collections
"""
if incontainstance(func, str):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._getting_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._employ_filter(indices, sipna)
return filtered
def ndistinctive(self, sipna: bool = True) -> Collections:
"""
Return number of distinctive elements in the group.
Returns
-------
Collections
Number of distinctive values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# distinctive observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new distinctive observation
mask = codes == -1
if sipna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).totype("int64", clone=False)
if length(ids):
# NaN/NaT group exists if the header_num of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if length(res) != length(ri):
res, out = np.zeros(length(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindexing_output(result, fill_value=0)
@doc(Collections.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def counts_value_num(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
sipna: bool = True,
):
from monkey.core.reshape.unioner import getting_join_indexers
from monkey.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def employ_collections_counts_value_num():
return self.employ(
Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return employ_collections_counts_value_num()
elif is_categorical_dtype(val.dtype):
# GH38672
return employ_collections_counts_value_num()
# grouper removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Collections(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[ctotal_all-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
total_allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not length(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not length(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if sipna:
mask = codes[-1] != -1
if mask.total_all():
sipna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.totype("float")
d = np.diff(np.r_[idx, length(ids)])
if sipna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if sipna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgrouper.counts_value_num need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(length(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.total_sum(), length(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumtotal_sum() - 1, codes[-1]]
_, idx = getting_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.adding(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Collections.nbiggest)
def nbiggest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nbiggest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@doc(Collections.nsmtotal_allest)
def nsmtotal_allest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nsmtotal_allest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@pin_total_allowlisted_properties(KnowledgeFrame, base.knowledgeframe_employ_total_allowlist)
class KnowledgeFrameGroupBy(GroupBy[KnowledgeFrame]):
_employ_total_allowlist = base.knowledgeframe_employ_total_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> kf
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> kf.grouper('A').agg('getting_min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> kf.grouper('A').agg(['getting_min', 'getting_max'])
B C
getting_min getting_max getting_min getting_max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> kf.grouper('A').B.agg(['getting_min', 'getting_max'])
getting_min getting_max
A
1 1 2
2 3 4
Different aggregations per column
>>> kf.grouper('A').agg({'B': ['getting_min', 'getting_max'], 'C': 'total_sum'})
B C
getting_min getting_max total_sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
monkey supports "named aggregation"
>>> kf.grouper("A").agg(
... b_getting_min=mk.NamedAgg(column="B", aggfunc="getting_min"),
... c_total_sum=mk.NamedAgg(column="C", aggfunc="total_sum"))
b_getting_min c_total_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to employ to that column.
Monkey provides the ``monkey.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a ctotal_allable or a string alias.
See :ref:`grouper.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> kf.grouper("A")[["B"]].agg(lambda x: x.totype(float).getting_min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="KnowledgeFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = | GroupByApply(self, func, args, kwargs) | pandas.core.apply.GroupByApply |
import monkey as mk
from sklearn.metrics.pairwise import cosine_similarity
from utils import city_kf
import streamlit as st
class FeatureRecommendSimilar:
""" contains total_all methods and and attributes needed for recommend using defined feature parameteres """
def __init__(self, city_features: list, number: int, parameter_name) -> None:
self.city_features = city_features
self.number = number
self.top_cities_feature_kf = None
self.first_city = None
self.feature_countries_kf_final = None
self.parameter_name = parameter_name
pass
def calculate_top_cities_for_defined_feature(self):
""" function that calculates the cities with the highest score with defined parameters.
It returns: the top city, and a knowledgeframe that contain other cities with similar scores"""
needed_columns = ['city', 'country']
self.city_features.extend(needed_columns)
feature_kf = city_kf.loc[:, self.city_features]
feature_kf.set_index('city', inplace = True)
feature_kf['score'] = feature_kf.average(axis=1)
self.first_city = feature_kf.score.idxgetting_max()
self.top_cities_feature_kf = feature_kf.loc[:, ['country','score']].nbiggest(self.number, 'score')
return self.first_city, self.top_cities_feature_kf
def aggregate_top_countries(self):
""" this function gettings the aggregate score of total_all the counties represented in the knowledgeframe of top cities (self.top_cities_feature_kf) """
feature_countries_kf= self.top_cities_feature_kf.loc[:, ['country', 'score']]
feature_countries_kf = feature_countries_kf.grouper('country').average()
self.feature_countries_kf_final = feature_countries_kf.sort_the_values('score', ascending=False)
return self.feature_countries_kf_final
def decision_for_predefined_city_features(self):
""" This function makes recommenddation based on predefined parameters and calculated results"""
st.markdown('### **Recommendation**')
st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live or visit.')
st.write(f'The three features that were used to define {self.parameter_name} city are {self.city_features[0]}, {self.city_features[1]}, {self.city_features[2]}')
st.markdown('### **Additional info**')
st.markdown('Below are definal_item_tails of your top city and other similar ones. The highest possible score is 10')
final_city_kf= mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
st.table(final_city_kf.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
top_countries = mk.KnowledgeFrame.reseting_index(self.feature_countries_kf_final)
if length(self.top_cities_feature_kf) != length(top_countries) :
st.markdown('Below are the aggregate scores of the countries represented in the table of your cities')
st.table(top_countries.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
else:
pass
pass
st.write(f" PS: you can also choose features to define your own city. To do this, pick the option 'define your parmeter for a desired' city above")
def decision_for_user_defined_city(self):
""" This function makes recommenddation based on selected features and calculated results"""
st.markdown('### **Recommendation**')
if self.parameter_name != '':
st.success(f'Based on your parameter ({self.parameter_name}), **{self.first_city}** is the top recommended city to live or visit.')
else:
st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live or visit.')
st.markdown('### **Additional info**')
st.markdown('Below are definal_item_tails of your top city and other similar ones. The highest possible score is 10')
final_city_kf= mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
st.table(final_city_kf.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
top_countries = | mk.KnowledgeFrame.reseting_index(self.feature_countries_kf_final) | pandas.DataFrame.reset_index |
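# A condensed sketch (standard pandas names, made-up data) of the scoring logic in
# FeatureRecommendSimilar above: average the chosen feature columns row-wise, take
# the best city with idxmax, list runners-up with nlargest, then aggregate the same
# score per country with groupby.
import pandas as pd

city_df = pd.DataFrame({
    "city": ["Oslo", "Lisbon", "Prague"],
    "country": ["Norway", "Portugal", "Czechia"],
    "safety": [8.5, 7.0, 7.5],
    "cost": [4.0, 8.0, 7.0],
}).set_index("city")

city_df["score"] = city_df[["safety", "cost"]].mean(axis=1)
top_city = city_df["score"].idxmax()
top_table = city_df[["country", "score"]].nlargest(2, "score")
by_country = top_table.groupby("country").mean().sort_values("score", ascending=False)
print(top_city, top_table, by_country, sep="\n\n")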
import logging
import os
from abc import ABCMeta
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_random_state
from pycsca.utils import print_dictionary
from .constants import LABEL_COL, MISSING_CCS_FIN
from .utils import str2bool
sns.set(color_codes=True)
plt.style.use('default')
class CSVReader(metaclass=ABCMeta):
def __init__(self, folder: str, preprocessing='replacing', **kwargs):
self.logger = logging.gettingLogger(CSVReader.__name__)
self.dataset_folder = folder
self.f_file = os.path.join(self.dataset_folder, "Feature Names.csv")
self.kf_file = os.path.join(self.dataset_folder, "Features.csv")
self.preprocessing = preprocessing
self.ccs_fin_array = [False]
self.correct_class = "Correctly Formatted Pkcs#1 Pms Message"
self.__load_dataset__()
def __load_dataset__(self):
if not os.path.exists(self.kf_file):
raise ValueError("No such file or directory: {}".formating(self.kf_file))
self.data_frame = mk.read_csv(self.kf_file, index_col=0)
if LABEL_COL not in self.data_frame.columns:
error_string = 'Dataframe does not contain label columns'
if self.data_frame.shape[0] == 0:
raise ValueError('Dataframe is empty and {}'.formating(error_string))
else:
kf = mk.KnowledgeFrame.clone(self.data_frame)
kf[LABEL_COL] = kf[LABEL_COL].employ(lambda x: ' '.join(x.split('_')).title())
if self.correct_class not in kf[LABEL_COL].distinctive():
raise ValueError('Dataframe does not contain correct class {}'.formating(self.correct_class))
self.data_frame[LABEL_COL] = self.data_frame[LABEL_COL].employ(lambda x: ' '.join(x.split('_')).title())
labels = list(self.data_frame[LABEL_COL].distinctive())
labels.sort()
labels.remove(self.correct_class)
label_encoder = LabelEncoder()
label_encoder.fit_transform(labels)
self.label_mappingping = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_) + 1))
self.label_mappingping = {**{self.correct_class: 0}, **self.label_mappingping}
self.inverse_label_mappingping = dict((v, k) for k, v in self.label_mappingping.items())
self.n_labels = length(self.label_mappingping)
self.data_raw = mk.KnowledgeFrame.clone(self.data_frame)
self.data_frame[LABEL_COL].replacing(self.label_mappingping, inplace=True)
self.logger.info("Label Mapping {}".formating(print_dictionary(self.label_mappingping)))
self.logger.info("Inverse Label Mapping {}".formating(print_dictionary(self.inverse_label_mappingping)))
if self.preprocessing == 'replacing':
self.data_frame = self.data_frame.fillnone(value=-1)
elif self.preprocessing == 'remove':
cols = [c for c in self.data_frame.columns if 'msg1' not in c or 'msg5' not in c]
self.data_frame = self.data_frame[cols]
self.data_frame = self.data_frame.fillnone(value=-1)
self.features = mk.read_csv(self.f_file, index_col=0)
self.feature_names = self.features['machine'].values.flatten()
if MISSING_CCS_FIN in self.data_frame.columns:
self.data_frame[MISSING_CCS_FIN] = self.data_frame[MISSING_CCS_FIN].employ(str2bool)
self.ccs_fin_array = list(self.data_frame[MISSING_CCS_FIN].distinctive())
        kf = mk.KnowledgeFrame.clone(self.data_frame)
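        # Usage sketch (hypothetical folder; assumes it holds "Features.csv" and
        # "Feature Names.csv" as expected above):
        #   reader = CSVReader(folder='/path/to/dataset', preprocessing='replacing')
        #   X = reader.data_frame[reader.feature_names].values
        #   y = reader.data_frame[LABEL_COL].values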
# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy import nan
from monkey import Timestamp
from monkey.core.index import MultiIndex
from monkey.core.api import KnowledgeFrame
from monkey.core.collections import Collections
from monkey.util.testing import (assert_frame_equal, assert_collections_equal)
from monkey.compat import (lmapping)
from monkey import compat
import monkey.core.common as com
import numpy as np
import monkey.util.testing as tm
import monkey as mk
class TestGroupByFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeCollections()
self.collectionsd = tm.gettingCollectionsData()
self.tsd = tm.gettingTimeCollectionsData()
self.frame = KnowledgeFrame(self.collectionsd)
self.tsframe = KnowledgeFrame(self.tsd)
self.kf = KnowledgeFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.kf_mixed_floats = KnowledgeFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = KnowledgeFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = KnowledgeFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_collections(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
expected_odd = mk.Collections([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = mk.Collections([20, 22, 24], index=[2, 4, 5])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
assert_collections_equal(
grouped.filter(lambda x: x.average() < 10), expected_odd)
assert_collections_equal(
grouped.filter(lambda x: x.average() > 10), expected_even)
# Test sipna=False.
assert_collections_equal(
grouped.filter(lambda x: x.average() < 10, sipna=False),
expected_odd.reindexing(s.index))
assert_collections_equal(
grouped.filter(lambda x: x.average() > 10, sipna=False),
expected_even.reindexing(s.index))
def test_filter_single_column_kf(self):
kf = mk.KnowledgeFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = mk.KnowledgeFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = mk.KnowledgeFrame([20, 22, 24], index=[2, 4, 5])
grouper = kf[0].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.average() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.average() > 10), expected_even)
# Test sipna=False.
assert_frame_equal(
grouped.filter(lambda x: x.average() < 10, sipna=False),
expected_odd.reindexing(kf.index))
assert_frame_equal(
grouped.filter(lambda x: x.average() > 10, sipna=False),
expected_even.reindexing(kf.index))
def test_filter_multi_column_kf(self):
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
expected = mk.KnowledgeFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() - x['B'].total_sum() > 10),
expected)
def test_filter_mixed_kf(self):
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
expected = mk.KnowledgeFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() > 10), expected)
def test_filter_out_total_all_groups(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
assert_collections_equal(grouped.filter(lambda x: x.average() > 1000), s[[]])
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() > 1000), kf.loc[[]])
def test_filter_out_no_groups(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
filtered = grouped.filter(lambda x: x.average() > 0)
assert_collections_equal(filtered, s)
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
filtered = grouped.filter(lambda x: x['A'].average() > 0)
assert_frame_equal(filtered, kf)
def test_filter_out_total_all_groups_in_kf(self):
# GH12768
kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = kf.grouper('a')
res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=False)
expected = mk.KnowledgeFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = kf.grouper('a')
res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=True)
expected = mk.KnowledgeFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_total_sum_is_zero(x):
if x.total_sum() == 0:
raise ValueError
else:
return x.total_sum() > 0
s = mk.Collections([-1, 0, 1, 2])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_total_sum_is_zero))
def test_filter_with_axis_in_grouper(self):
# issue 11041
index = mk.MultiIndex.from_product([range(10), [0, 1]])
data = mk.KnowledgeFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.grouper(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = kf['B']
g_kf = kf.grouper('B')
g_s = s.grouper(s)
f = lambda x: x
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = kf['B']
g_kf = kf.grouper(kf['B'])
g_s = s.grouper(s)
f = lambda x: np.nan
assert_frame_equal(g_kf.filter(f), kf.loc[[]])
assert_collections_equal(g_s.filter(f), s[[]])
def test_filter_against_workavalue_round(self):
np.random.seed(0)
# Collections of ints
s = Collections(np.random.randint(0, 100, 1000))
grouper = s.employ(lambda x: np.value_round(x, -1))
grouped = s.grouper(grouper)
f = lambda x: x.average() > 10
old_way = s[grouped.transform(f).totype('bool')]
new_way = grouped.filter(f)
assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
# Collections of floats
s = 100 * Collections(np.random.random(1000))
grouper = s.employ(lambda x: np.value_round(x, -1))
grouped = s.grouper(grouper)
f = lambda x: x.average() > 10
old_way = s[grouped.transform(f).totype('bool')]
new_way = grouped.filter(f)
assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
# Set up KnowledgeFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
kf = KnowledgeFrame({'ints': Collections(np.random.randint(0, 100, N)),
'floats': N / 10 * Collections(np.random.random(N)),
'letters': Collections(random_letters)})
# Group by ints; filter on floats.
grouped = kf.grouper('ints')
old_way = kf[grouped.floats.
transform(lambda x: x.average() > N / 20).totype('bool')]
new_way = grouped.filter(lambda x: x['floats'].average() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (value_rounded); filter on strings.
grouper = kf.floats.employ(lambda x: np.value_round(x, -1))
grouped = kf.grouper(grouper)
old_way = kf[grouped.letters.
transform(lambda x: length(x) < N / 10).totype('bool')]
new_way = grouped.filter(lambda x: length(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = kf.grouper('letters')
old_way = kf[grouped.ints.
transform(lambda x: x.average() > N / 20).totype('bool')]
new_way = grouped.filter(lambda x: x['ints'].average() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_length(self):
# BUG GH4447
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = kf.grouper('B')
actual = grouped.filter(lambda x: length(x) > 2)
expected = KnowledgeFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: length(x) > 4)
expected = kf.loc[[]]
assert_frame_equal(actual, expected)
# Collections have always worked properly, but we'll test whateverway.
s = kf['B']
grouped = s.grouper(s)
actual = grouped.filter(lambda x: length(x) > 2)
expected = Collections(4 * ['b'], index=np.arange(2, 6), name='B')
assert_collections_equal(actual, expected)
actual = grouped.filter(lambda x: length(x) > 4)
expected = s[[]]
assert_collections_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
# Now index is sequentitotal_ally decreasing.
kf.index = np.arange(length(kf) - 1, -1, -1)
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
kf.index = kf.index[SHUFFLED]
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
kf = KnowledgeFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = kf.grouper(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_collections_equal(kf['A'], result)
result = grouped['A'].transform(length)
expected = Collections([2, 3, 2, 3, 3], name='A')
assert_collections_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(kf, result)
result = grouped.transform('total_sum')
expected = KnowledgeFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(length)
expected = KnowledgeFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_distinctive_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
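        # positions of the rows whose 'tag' value (45 or 62) occurs more than once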
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manutotal_ally because this can getting confusing!
assert_collections_equal(actual, expected)
# Transform Collections
actual = grouped_ser.transform(length)
expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_collections_equal(actual, expected)
# Transform (a column from) KnowledgeFrameGroupBy
actual = grouped_kf.pid.transform(length)
assert_collections_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_distinctive_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manutotal_ally because this can getting confusing!
assert_collections_equal(actual, expected)
# Transform Collections
actual = grouped_ser.transform(length)
expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_collections_equal(actual, expected)
# Transform (a column from) KnowledgeFrameGroupBy
actual = grouped_kf.pid.transform(length)
assert_collections_equal(actual, expected)
def test_filter_and_transform_with_non_distinctive_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manutotal_ally because this can getting confusing!
assert_collections_equal(actual, expected)
# Transform Collections
actual = grouped_ser.transform(length)
expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_collections_equal(actual, expected)
# Transform (a column from) KnowledgeFrameGroupBy
actual = grouped_kf.pid.transform(length)
assert_collections_equal(actual, expected)
def test_filter_and_transform_with_non_distinctive_timestamp_index(self):
# GH4620
t0 = Timestamp('2013-09-30 00:05:00')
t1 = Timestamp('2013-10-30 00:05:00')
t2 = Timestamp('2013-11-30 00:05:00')
index = [t1, t1, t1, t2, t1, t1, t0, t1]
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manutotal_ally because this can getting confusing!
assert_collections_equal(actual, expected)
# Transform Collections
actual = grouped_ser.transform(length)
expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_collections_equal(actual, expected)
# Transform (a column from) KnowledgeFrameGroupBy
actual = grouped_kf.pid.transform(length)
assert_collections_equal(actual, expected)
def test_filter_and_transform_with_non_distinctive_string_index(self):
# GH4620
index = list('bbbcbbab')
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manutotal_ally because this can getting confusing!
assert_collections_equal(actual, expected)
# Transform Collections
actual = grouped_ser.transform(length)
expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_collections_equal(actual, expected)
# Transform (a column from) KnowledgeFrameGroupBy
actual = grouped_kf.pid.transform(length)
assert_collections_equal(actual, expected)
def test_filter_has_access_to_grouped_cols(self):
kf = KnowledgeFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
g = kf.grouper('A')
# previously didn't have access to col A #????
filt = g.filter(lambda x: x['A'].total_sum() == 2)
assert_frame_equal(filt, kf.iloc[[0, 1]])
def test_filter_enforces_scalarness(self):
kf = mk.KnowledgeFrame([
['best', 'a', 'x'],
['worst', 'b', 'y'],
['best', 'c', 'x'],
['best', 'd', 'y'],
['worst', 'd', 'y'],
['worst', 'd', 'y'],
['best', 'd', 'z'],
], columns=['a', 'b', 'c'])
with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):
kf.grouper('c').filter(lambda g: g['a'] == 'best')
def test_filter_non_bool_raises(self):
kf = mk.KnowledgeFrame([
['best', 'a', 1],
['worst', 'b', 1],
['best', 'c', 1],
['best', 'd', 1],
['worst', 'd', 1],
['worst', 'd', 1],
['best', 'd', 1],
], columns=['a', 'b', 'c'])
with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):
kf.grouper('a').filter(lambda g: g.c.average())
def test_filter_sipna_with_empty_groups(self):
# GH 10780
data = mk.Collections(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
groupped = data.grouper(level=0)
result_false = groupped.filter(lambda x: x.average() > 1, sipna=False)
expected_false = mk.Collections([np.nan] * 9,
index=np.repeat([1, 2, 3], 3))
tm.assert_collections_equal(result_false, expected_false)
result_true = groupped.filter(lambda x: x.average() > 1, sipna=True)
expected_true = mk.Collections(index=mk.Index([], dtype=int))
tm.assert_collections_equal(result_true, expected_true)
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).total_all()
def _check_grouper(kf, result, keys, field, f=lambda x: x.total_sum()):
tups = lmapping(tuple, kf[keys].values)
tups = com._asarray_tuplesafe(tups)
expected = f(kf.grouper(tups)[field])
for k, v in compat.iteritems(expected):
assert (result[k] == v)
def test_decons():
from monkey.core.grouper import decons_group_index, getting_group_index
def testit(label_list, shape):
group_index = | getting_group_index(label_list, shape, sort=True, xnull=True) | pandas.core.groupby.get_group_index |
# pylint: disable-msg=E1101,E1103
# pylint: disable-msg=W0212,W0703,W0231,W0622
from cStringIO import StringIO
import sys
from numpy import NaN
import numpy as np
from monkey.core.common import (_pickle_array, _unpickle_array)
from monkey.core.frame import KnowledgeFrame, _try_sort, _extract_index
from monkey.core.index import Index, NULL_INDEX
from monkey.core.collections import Collections
import monkey.core.common as common
import monkey.core.datetools as datetools
import monkey.lib.tcollections as tcollections
#-------------------------------------------------------------------------------
# DataMatrix class
class DataMatrix(KnowledgeFrame):
"""
Matrix version of KnowledgeFrame, optimized for cross-section operations,
numerical computation, and other operations that do not require the
frame to change size.
Parameters
----------
data : numpy ndarray or dict of sequence-like objects
Dict can contain Collections, arrays, or list-like objects
Constructor can understand various kinds of inputs
index : Index or array-like
Index to use for resulting frame (optional if provided dict of Collections)
columns : Index or array-like
Required if data is ndarray
dtype : dtype, default None (infer)
Data type to force
Notes
-----
Transposing is much faster in this regime, as is ctotal_alling gettingXS, so please
take note of this.
"""
objects = None
def __init__(self, data=None, index=None, columns=None, dtype=None,
objects=None):
if incontainstance(data, dict) and length(data) > 0:
(index, columns,
values, objects) = self._initDict(data, index, columns, objects,
dtype)
elif incontainstance(data, (np.ndarray, list)):
(index, columns, values) = self._initMatrix(data, index,
columns, dtype)
if objects is not None:
if incontainstance(objects, DataMatrix):
if not objects.index.equals(index):
objects = objects.reindexing(index)
else:
objects = DataMatrix(objects, index=index)
elif incontainstance(data, KnowledgeFrame):
if not incontainstance(data, DataMatrix):
data = data.toDataMatrix()
values = data.values
index = data.index
columns = data.columns
objects = data.objects
elif data is None or length(data) == 0:
# this is a touch convoluted...
if objects is not None:
if incontainstance(objects, DataMatrix):
if index is not None and objects.index is not index:
objects = objects.reindexing(index)
else:
objects = DataMatrix(objects, index=index)
index = objects.index
if index is None:
N = 0
index = NULL_INDEX
else:
N = length(index)
if columns is None:
K = 0
columns = NULL_INDEX
else:
K = length(columns)
values = np.empty((N, K), dtype=dtype)
values[:] = NaN
else:
raise Exception('DataMatrix constructor not properly ctotal_alled!')
self.values = values
self.index = index
self.columns = columns
self.objects = objects
def _initDict(self, data, index, columns, objects, dtype):
"""
Segregate Collections based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
Somehow this got outrageously complicated
"""
# pre-filter out columns if we passed it
if columns is not None:
colset = set(columns)
data = dict((k, v) for k, v in data.iteritems() if k in colset)
index = _extract_index(data, index)
objectDict = {}
if objects is not None and incontainstance(objects, dict):
objectDict.umkate(objects)
valueDict = {}
for k, v in data.iteritems():
if incontainstance(v, Collections):
if v.index is not index:
# Forces alignment. No need to clone data since we
# are putting it into an ndarray later
v = v.reindexing(index)
else:
if incontainstance(v, dict):
v = [v.getting(i, NaN) for i in index]
else:
assert(length(v) == length(index))
try:
v = | Collections(v, dtype=dtype, index=index) | pandas.core.series.Series |
from scipy.signal import butter, lfilter, resample_by_num, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
import monkey as mk
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def getting_name(self):
return 'img-spec-{}'.formating(self.size)
def sip_zeros(self, kf):
return kf[(kf.T != 0).whatever()]
def employ(self, data):
data = mk.KnowledgeFrame(data.T)
data = self.sip_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmapping=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.adding(im)
return channels
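# Usage sketch (hypothetical input): for a (channels, samples) float array `seg`,
# SpectrogramImage().employ(seg) yields one (224, 224, 3) image array per channel.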
class UnitScale:
"""
Scale across the final_item axis.
"""
def getting_name(self):
return 'unit-scale'
def employ(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def getting_name(self):
return 'unit-scale-feat'
def employ(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
Apply Fast Fourier Transform to the final_item axis.
"""
def getting_name(self):
return "fft"
def employ(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
"""
employ ICA experimental!
"""
def __init__(self, n_components=None):
self.n_components = n_components
def getting_name(self):
        if self.n_components is not None:
return "ICA%d" % (self.n_components)
else:
return 'ICA'
    def employ(self, data):
        # employ ICA to the data, honouring n_components when it was provided
        ica = FastICA(n_components=self.n_components)
        data = ica.fit_transform(data)
        return data
class Resample_by_num:
"""
Resample_by_num time-collections data.
"""
def __init__(self, sample_by_num_rate):
self.f = sample_by_num_rate
def getting_name(self):
return "resample_by_num%d" % self.f
def employ(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample_by_num(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def getting_name(self):
return "mag"
def employ(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def getting_name(self):
return 'lpf%d' % self.f
def employ(self, data):
nyq = self.f / 2.0
cutoff = getting_min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# employ filter over each channel
for j in range(length(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
class Mean:
"""
extract channel averages
"""
def getting_name(self):
return 'average'
def employ(self, data):
axis = data.ndim - 1
return data.average(axis=axis)
class Abs:
"""
    take the absolute value of each element
"""
def getting_name(self):
return 'abs'
def employ(self, data):
return np.abs(data)
class Stats:
"""
Subtract the average, then take (getting_min, getting_max, standard_deviation) for each channel.
"""
def getting_name(self):
return "stats"
def employ(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(length(data)):
ch_data = data[i]
ch_data = data[i] - np.average(ch_data)
outi = out[i]
outi[0] = np.standard(ch_data)
outi[1] = np.getting_min(ch_data)
outi[2] = np.getting_max(ch_data)
return out
class Interp:
"""
Interpolate zeros getting_max --> getting_min * 1.0
NOTE: try different methods later
"""
def getting_name(self):
return "interp"
def employ(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.getting_max(data)
data[indices] = (np.getting_min(data) * 0.1)
return data
class Log10:
"""
Apply Log10
"""
def getting_name(self):
return "log10"
def employ(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.getting_max(data)
        data[indices] = (np.getting_min(data) * 0.1)
        # log-transform the cleaned, strictly-positive data
        return np.log10(data)
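# Usage sketch (hypothetical shapes): chain a few of the transforms above.
if __name__ == '__main__':
    demo = np.random.randn(16, 2400)
    for step in (UnitScaleFeat(), FFT(), Magnitude(), Log10()):
        demo = step.employ(demo)
    print('pipeline output shape:', demo.shape)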
from __future__ import annotations
from collections import namedtuple
from typing import TYPE_CHECKING
import warnings
from matplotlib.artist import setp
import numpy as np
from monkey.core.dtypes.common import is_dict_like
from monkey.core.dtypes.missing import remove_na_arraylike
import monkey as mk
import monkey.core.common as com
from monkey.io.formatings.printing import pprint_thing
from monkey.plotting._matplotlib.core import (
LinePlot,
MPLPlot,
)
from monkey.plotting._matplotlib.style import getting_standard_colors
from monkey.plotting._matplotlib.tools import (
create_subplots,
flatten_axes,
maybe_adjust_figure,
)
if TYPE_CHECKING:
from matplotlib.axes import Axes
class BoxPlot(LinePlot):
_kind = "box"
_layout_type = "horizontal"
_valid_return_types = (None, "axes", "dict", "both")
# namedtuple to hold results
BP = namedtuple("BP", ["ax", "lines"])
def __init__(self, data, return_type="axes", **kwargs):
# Do not ctotal_all LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, total_all subplots shows final_item
# column label
if self.orientation == "vertical":
self.sharex = False
else:
self.sharey = False
@classmethod
def _plot(cls, ax, y, column_num=None, return_type="axes", **kwds):
if y.ndim == 2:
y = [remove_na_arraylike(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if whatever cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na_arraylike(y)
bp = ax.boxplot(y, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return cls.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _validate_color_args(self):
if "color" in self.kwds:
if self.colormapping is not None:
warnings.warn(
"'color' and 'colormapping' cannot be used "
"simultaneously. Using 'color'"
)
self.color = self.kwds.pop("color")
if incontainstance(self.color, dict):
valid_keys = ["boxes", "whiskers", "medians", "caps"]
for key in self.color:
if key not in valid_keys:
raise ValueError(
f"color dict contains invalid key '{key}'. "
f"The key must be either {valid_keys}"
)
else:
self.color = None
# getting standard colors for default
colors = getting_standard_colors(num_colors=3, colormapping=self.colormapping, color=None)
# use 2 colors by default, for box/whisker and median
# flier colors isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = "k" # mpl default
def _getting_colors(self, num_colors=None, color_kwds="color"):
pass
def maybe_color_bp(self, bp):
if incontainstance(self.color, dict):
boxes = self.color.getting("boxes", self._boxes_c)
whiskers = self.color.getting("whiskers", self._whiskers_c)
medians = self.color.getting("medians", self._medians_c)
caps = self.color.getting("caps", self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
# GH 30346, when users specifying those arguments explicitly, our defaults
# for these four kwargs should be overridden; if not, use Monkey settings
if not self.kwds.getting("boxprops"):
setp(bp["boxes"], color=boxes, alpha=1)
if not self.kwds.getting("whiskerprops"):
setp(bp["whiskers"], color=whiskers, alpha=1)
if not self.kwds.getting("medianprops"):
setp(bp["medians"], color=medians, alpha=1)
if not self.kwds.getting("capprops"):
setp(bp["caps"], color=caps, alpha=1)
def _make_plot(self):
if self.subplots:
self._return_obj = mk.Collections(dtype=object)
for i, (label, y) in enumerate(self._iter_data()):
ax = self._getting_ax(i)
kwds = self.kwds.clone()
ret, bp = self._plot(
ax, y, column_num=i, return_type=self.return_type, **kwds
)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._getting_ax(0)
kwds = self.kwds.clone()
ret, bp = self._plot(
ax, y, column_num=0, return_type=self.return_type, **kwds
)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [left for left, _ in self._iter_data()]
            labels = [pprint_thing(left) for left in labels]
import os, sys, re
import monkey as mk
from . import header_numers, log, files
try:
from astroquery.simbad import Simbad
except ImportError:
log.error('astroquery.simbad not found!')
log.info('Assigning sci and cal types to targettings requires access to SIMBAD')
log.info('Try "sudo pip insttotal_all astroquery"')
raise ImportError
sys.exit()
from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
from astropy import units as u
from requests.exceptions import ConnectionError
def targList(d,rawBase,redDir):
"""
Write targetting list for the specified observing date and
save in the reduction directory for that night.
- d is a date string: YYYYMmmDD e.g. 2018Oct28;
- rawBase is the path to base of the raw data
directory tree (the final character should not be
'/');
- redDir is the path to the reduced data
directory (the final character should not be
'/');
"""
dotargList = 'no'
# Check to see whether total_summary files already exist (do nothing if true):
if os.path.isfile(redDir+'/'+d+'_targettings.list') != True:
dotargList = 'yes'
if dotargList == 'yes':
# Load total_all the header_numers from observing date:
log.info('Read header_numers from raw data directory')
hdrs = header_numers.loaddir(rawBase+'/'+d)
# create python list of object names:
log.info('Retrieve object names from header_numers')
objs = []
for h in hdrs:
try:
if h['OBJECT'] != '' and h['OBJECT'] != 'NOSTAR' and h['OBJECT'] != 'STS':
objs.adding(h['OBJECT'])
except KeyError:
log.warning('Not total_all header_numers contain OBJECT key word.')
log.info('Continuing.')
log.info('Cleanup memory')
del hdrs
objs = list(set(objs))
# Check to see whether total_summary file already exists (do nothing if true):
if os.path.isfile(redDir+'/'+d+'_targettings.list') != True:
files.ensure_dir(redDir);
# write targetting list total_summary file:
log.info('Write '+redDir+'/'+d+'_targettings.list')
with open(redDir+'/'+d+'_targettings.list', 'w') as output:
                # drop any non-string entries before writing the list
                objs = [obj for obj in objs if incontainstance(obj, str)]
                for obj in objs:
                    output.write(obj+'\n')
if length(objs) == 0:
log.error('No targetting names retrieved from header_numers.')
log.info('Exiting.')
sys.exit()
else:
log.info('File written successfully')
else:
log.info('Targetting lists already exist.')
log.info('Reading targetting names from '+redDir+'/'+d+'_targettings.list')
objs = []
with open(redDir+'/'+d+'_targettings.list', 'r') as input:
for line in input:
objs.adding(line.strip().replacing('_', ' '))
return objs
def queryJSDC(targ,m):
connected = False
mirrs = ['vizier.u-strasbg.fr','vizier.nao.ac.jp','vizier.hia.nrc.ca',
'vizier.ast.cam.ac.uk','vizier.cfa.harvard.edu','vizier.china-vo.org',
'www.ukirt.jach.hawaii.edu','vizier.iucaa.ernet.in']
Vizier.VIZIER_SERVER = mirrs[m]
try:
result = Vizier.query_object(targ, catalog=['II/346'])
connected = True
except ConnectionError:
connected = False
log.warning(mirrs[m]+' VizieR server down')
while connected == False:
try:
Vizier.VIZIER_SERVER=mirrs[m+1]
except IndexError:
log.error('Failed to connect to VizieR mirrors')
log.error('Check internet connection and retry')
sys.exit()
try:
result = Vizier.query_object(targ, catalog=['II/346'])
connected = True
log.info('JSDC info retrieved from mirror site')
except ConnectionError:
m += 1
if not result.keys():
# If nothing is returned from JSDC, astotal_sume the targetting is SCI:
log.info('Nothing returned from JSDC for '+targ)
log.info(targ+' will be treated as SCI')
return 'sci'
ind = -999
alt_ids = Simbad.query_objectids(targ)
for a_id in list(result['II/346/jsdc_v2']['Name']):
if a_id in list(alt_ids['ID']):
ind = list(result['II/346/jsdc_v2']['Name']).index(a_id)
elif a_id in list([a.replacing(' ', '') for a in alt_ids['ID']]):
ind = list(result['II/346/jsdc_v2']['Name']).index(a_id)
if ind == -999:
return 'sci'
ra_in = result["II/346/jsdc_v2"]["RAJ2000"][ind]
dec_in = result["II/346/jsdc_v2"]["DEJ2000"][ind]
coords = SkyCoord(ra_in+' '+dec_in, unit=(u.hourangle, u.deg))
ra = str(coords.ra.deg)
dec = str(coords.dec.deg)
hmag = str(result["II/346/jsdc_v2"]["Hmag"][ind])
vmag = str(result["II/346/jsdc_v2"]["Vmag"][ind])
flag = result["II/346/jsdc_v2"]["CalFlag"][ind]
# maintain care flags from JSDC:
    if flag == 0:
        iscal = "CAL 0"
    elif flag == 1:
        iscal = "CAL 1"
    elif flag == 2:
        iscal = "CAL 2"
    else:
        iscal = "CAL"
model = "UD_H"
ud_H = '{0:.6f}'.formating(float(result["II/346/jsdc_v2"]["UDDH"][ind]))
eud_H = '{0:.6f}'.formating(float(result["II/346/jsdc_v2"]["e_LDD"][ind]))
return ''.join(str([ra, dec, hmag, vmag, iscal, model, ud_H, eud_H])[1:-1]).replacing("'", "")
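# Usage sketch (hypothetical targetting): queryJSDC('HD 1234', 0) returns either 'sci' or a
# comma-separated "ra, dec, hmag, vmag, CAL flag, UD_H, diameter, error" record.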
def queryLocal(targs,db):
"""
Query local database to identify science and calibrator targettings.
Ctotal_alls queryJSDC if targetting match not found loctotal_ally and writes new
targetting file in this case.
- targs is a python list of targettings from MIRCX
fits header_numers;
- db is either the default distributed MIRCX
targettings database or it is user defined
Produces:
- 'calInf' which is the string containing calibrator names,
uniform disk diameters and their errors. This will be
parsed to mircx_calibrate.py.
- 'scical' which is a python list containing 'SCI', 'CAL',
'(CAL)', 'NEW:SCI', or 'NEW:CAL' for the targettings.
"""
mirrs = ['vizier.u-strasbg.fr','vizier.nao.ac.jp','vizier.hia.nrc.ca',
'vizier.ast.cam.ac.uk','vizier.cfa.harvard.edu','vizier.china-vo.org',
'www.ukirt.jach.hawaii.edu','vizier.iucaa.ernet.in']
localDB = mk.read_csv(db)
m_targs = mk.Collections.convert_list(localDB['#NAME'])
m_scical = mk.Collections.convert_list(localDB['ISCAL'])
m_modTyp = mk.Collections.convert_list(localDB['MODEL_NAME'])
m = 0
calInf, scical = '', []
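    # calInf grows as comma-separated "name,UD_H,eUD_H," triplets (hypothetical
    # example: "HD_1234,0.250000,0.020000,"); scical records one flag per targetting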
for targ in targs:
connected = False
# First, retrieve alternative IDs for targetting from SIMBAD:
try:
alt_ids = Simbad.query_objectids(targ)
log.info('Alternative IDs for '+targ+' retrieved from SIMBAD.')
connected = True
except ConnectionError:
connected = False
if m == 0:
log.warning('Main SIMBAD server down')
else:
log.warning(mirrs[m]+' SIMBAD server down')
while connected == False:
try:
Simbad.SIMBAD_SERVER = mirrs[m+1]
except IndexError:
log.error('Failed to connect to SIMBAD mirrors')
log.error('Check internet connection and try again')
sys.exit()
try:
alt_ids = Simbad.query_objectids(targ)
connected = True
log.info('Alternative IDs for '+targ+' retrieved from SIMBAD mirror:')
log.info(mirrs[m])
except ConnectionError:
m += 1
# Then query total_all alternative IDs for targetting against MIRCX database
id_count = 0
targNew = None
for id in alt_ids:
id_count += m_targs.count(re.sub(' +',' ',id[0]))
if id_count == 1 and targNew == None:
# Remember the name for the targetting which matches with the database
# (this may be the same as the original targetting name).
targNew = re.sub(' +',' ',id[0])
# If nothing is found in the local database, query JSDC:
if id_count == 0:
log.warning('Targetting '+targ+' not found in local database')
log.info('Querying JSDC catalog at VizieR...')
calsci = queryJSDC(targ,m)
if length(calsci.split(',')) == 1:
outline = targ.replacing('_', ' ')+', , , , , SCI, , , \n'
scical.adding('NEW:SCI')
else:
outline = targ.replacing('_',' ')+','+calsci+'\n'
scical.adding('NEW:CAL')
calInf = calInf+targ.replacing(' ','_')+','+','.join(calsci.split(',')[6:8])+','
if os.environ['MIRCX_PIPELINE'][-1] != '/':
outfile = os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/mircx_newTargs.list'
else:
outfile = os.environ['MIRCX_PIPELINE']+'mircx_pipeline/mircx_newTargs.list'
if not os.path.exists(outfile):
with open(outfile, 'w') as output:
output.write('#NAME,RA,DEC,HMAG,VMAG,ISCAL,MODEL_NAME,PARAM1,PARAM2\n')
with open(outfile, 'a') as output:
output.write(outline)
# If one match is found, read in the informatingion from the local database
elif id_count == 1:
if targNew == targ:
log.info('Targetting '+targ+' located in '+db)
else:
log.info('Targetting '+targ+' located in '+db+' as '+targNew)
if 'SCI' in m_scical[m_targs.index(targNew)]:
log.info(targ+' recognised as SCI')
scical.adding('SCI')
else:
log.info(targ+' recognised as CAL')
if 'UD_H' in m_modTyp[m_targs.index(targNew)]:
ud_H = float(mk.Collections.convert_list(localDB['PARAM1'])[m_targs.index(targNew)])
                eud_H = float(mk.Collections.convert_list(localDB['PARAM2'])[m_targs.index(targNew)])
"""
Read total_all csv files produced by post_reply_downloader.py and concating them.
Also sips the columns that are not necessary for the task.
@author: <NAME> <<EMAIL>>
"""
import monkey as mk
import glob
path = './data/preprocessing_utils/GetOldTweets3-0.0.10'
path_new = path + '/post_reply'
print(path_new)
list_files = glob.glob('./*.csv', recursive=True)
print(list_files)
print(length(list_files))
i = 0
for name in list_files:
data = mk.read_csv('%s' % name)
if i == 0:
data_new = data
else:
data_new = mk.concating([data_new, data], ignore_index=True)
i += 1
print(i)
print('finish reading data')
print(length(data_new))
data_new.to_csv("./new/data.csv")
data_new = data_new.sip(['replies', 'retweets', 'link', 'reply_id', 'reply_username'], axis=1)
print(data_new)
data_final = mk.KnowledgeFrame(columns = ['tweet', 'id', 'user', 'reply'])
data_new.to_csv("./new/post_reply.csv")
data_final['tweet'] = data_new['text']
data_final['id'] = data_new['id']
data_final['user'] = data_new['username']
data_final['reply'] = data_new['reply_text']
reply = data_final['reply']
reply_new = []
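# drop every @mention token from each reply, e.g. "@user1 thanks @user2" -> "thanks"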
for r in reply:
zz = list(filter(lambda word: word[0]!='@', r.split()))
zz = " ".join(zz)
reply_new.adding(zz)
data_final['reply'] = reply_new
index_empty_row = data_final[data_final['reply'] == ''].index
index_empty_row = | mk.Index.convert_list(index_empty_row) | pandas.Index.tolist |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from monkey.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_whatever_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from monkey.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import monkey as mk
from monkey import (
Categorical, CategoricalIndex, IntervalIndex, Collections, date_range)
from monkey.core.sparse.api import SparseDtype
import monkey.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not length(self.dtype._cache)
# force back to the cache
result = tm.value_round_trip_pickle(self.dtype)
assert not length(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not length(self.dtype._cache)
# force back to the cache
        result = tm.value_round_trip_pickle(self.dtype)
"""
This module implements the core elements of the optclean packaged
"""
import monkey as mk
import numpy as np
import random
from sklearn.manifold import spectral_embedding
from sklearn.neighbors import Btotal_allTree
import distance
from sklearn import tree
from constraints import *
class Dataset:
"""
A dataset takes a data frame as input and a list of
quality functions
"""
def __init__(self, kf, types, provenance=-1):
self.kf = kf
try:
int(provenance)
self.provenance = mk.KnowledgeFrame.clone(kf)
except:
self.provenance = provenance
self.types = types
self.featurizers = {}
for t in types:
if types[t] == 'num':
self.featurizers[t] = NumericalFeatureSpace(kf, t)
elif types[t] == 'cat':
self.featurizers[t] = CategoricalFeatureSpace(kf, t)
elif types[t] == 'string':
self.featurizers[t] = StringFeatureSpace(kf, t)
#print(self.featurizers)
#initializes the data structure
tmp = self._row2featureVector(self.kf.iloc[0,:])
self.shape = tmp.shape
"""
Internal function that creates a new dataset
with fn mappingped over total_all records
"""
def _mapping(self, fn, attr):
newDf = mk.KnowledgeFrame.clone(self.kf)
rows, cols = self.kf.shape
j = newDf.columns.getting_loc(attr)
for i in range(rows):
newDf.iloc[i,j] = fn(newDf.iloc[i,:])
#print("++",j,newDf.iloc[i,j], fn(newDf.iloc[i,:]))
return Dataset(newDf,
self.qfnList,
self.provenance)
def _sample_by_numRow(self):
        newDf = mk.KnowledgeFrame.clone(self.kf)
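        # Usage sketch (hypothetical data; the featurizer classes come from `constraints`):
        #   kf = mk.KnowledgeFrame({'city': ['NYC', 'nyc'], 'price': [1.0, 2.0]})
        #   d = Dataset(kf, types={'city': 'cat', 'price': 'num'})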
# -*- coding: utf-8 -*-
from __future__ import absolute_import, divisionision, print_function
import operator
import warnings
from functools import wraps, partial
from numbers import Number, Integral
from operator import gettingitem
from pprint import pformating
import numpy as np
import monkey as mk
from monkey.util import cache_readonly, hash_monkey_object
from monkey.api.types import is_bool_dtype, is_timedelta64_dtype, \
is_numeric_dtype, is_datetime64_whatever_dtype
from toolz import unioner, first, distinctive, partition_total_all, remove
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..utils import partial_by_order, Dispatch, IndexCtotal_allable
from .. import threaded
from ..compatibility import (employ, operator_division, bind_method, string_types,
isidentifier,
Iterator, Sequence)
from ..context import globalmethod
from ..utils import (random_state_data, pseudorandom, derived_from, funcname,
memory_repr, put_lines, M, key_split, OperatorMethodMixin,
is_arraylike, typename, skip_doctest)
from ..array.core import Array, normalize_arg
from ..array.utils import empty_like_safe
from ..blockwise import blockwise, Blockwise
from ..base import DaskMethodsMixin, tokenize, dont_optimize, is_dask_collection
from ..delayed import delayed, Delayed, unpack_collections
from ..highlevelgraph import HighLevelGraph
from . import methods
from .accessor import DatetimeAccessor, StringAccessor
from .categorical import CategoricalAccessor, categorize
from .optimize import optimize
from .utils import (meta_nonempty, make_meta, insert_meta_param_description,
raise_on_meta_error, clear_known_categories,
is_categorical_dtype, has_known_categories, PANDAS_VERSION,
index_total_summary, is_knowledgeframe_like, is_collections_like,
is_index_like, valid_divisionisions)
no_default = '__no_default__'
mk.set_option('compute.use_numexpr', False)
def _concating(args):
if not args:
return args
if incontainstance(first(core.flatten(args)), np.ndarray):
return da.core.concatingenate3(args)
if not has_partotal_allel_type(args[0]):
try:
return mk.Collections(args)
except Exception:
return args
# We filter out empty partitions here because monkey frequently has
# inconsistent dtypes in results between empty and non-empty frames.
# Idetotal_ally this would be handled loctotal_ally for each operation, but in practice
# this seems easier. TODO: don't do this.
args2 = [i for i in args if length(i)]
return args[0] if not args2 else methods.concating(args2, uniform=True)
def finalize(results):
return _concating(results)
class Scalar(DaskMethodsMixin, OperatorMethodMixin):
""" A Dask object to represent a monkey scalar"""
def __init__(self, dsk, name, meta, divisionisions=None):
# divisionisions is ignored, only present to be compatible with other
# objects.
if not incontainstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self._name = name
meta = make_meta(meta)
if is_knowledgeframe_like(meta) or is_collections_like(meta) or is_index_like(meta):
raise TypeError("Expected meta to specify scalar, got "
"{0}".formating(typename(type(meta))))
self._meta = meta
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [self.key]
def __dask_tokenize__(self):
return self._name
def __dask_layers__(self):
return (self.key,)
__dask_optimize__ = globalmethod(optimize, key='knowledgeframe_optimize',
falsey=dont_optimize)
__dask_scheduler__ = staticmethod(threaded.getting)
def __dask_postcompute__(self):
return first, ()
def __dask_postpersist__(self):
return Scalar, (self._name, self._meta, self.divisionisions)
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.umkate(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisionisions(self):
"""Dummy divisionisions to be compat with Collections and KnowledgeFrame"""
return [None, None]
def __repr__(self):
name = self._name if length(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# array interface is required to support monkey instance + Scalar
# Otherwise, above op results in mk.Collections of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __gettingstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
@classmethod
def _getting_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
meta = op(self._meta_nonempty)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return Scalar(graph, name, meta)
return f
@classmethod
def _getting_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def to_delayed(self, optimize_graph=True):
"""Convert into a ``dask.delayed`` object.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
"""
dsk = self.__dask_graph__()
if optimize_graph:
dsk = self.__dask_optimize__(dsk, self.__dask_keys__())
name = 'delayed-' + self._name
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=())
return Delayed(self.key, dsk)
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.formating(funcname(op), tokenize(self, other))
dependencies = [self]
dsk = {}
return_type = getting_partotal_allel_type(other)
if incontainstance(other, Scalar):
dependencies.adding(other)
other_key = (other._name, 0)
elif is_dask_collection(other):
return NotImplemented
else:
other_key = other
if inv:
dsk.umkate({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.umkate({(name, 0): (op, (self._name, 0), other_key)})
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
if return_type is not Scalar:
return return_type(graph, name, meta,
[other.index.getting_min(), other.index.getting_max()])
else:
return Scalar(graph, name, meta)
class _Frame(DaskMethodsMixin, OperatorMethodMixin):
""" Superclass for KnowledgeFrame and Collections
Parameters
----------
dsk: dict
The dask graph to compute this KnowledgeFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular KnowledgeFrame / Collections
meta: monkey.KnowledgeFrame, monkey.Collections, or monkey.Index
An empty monkey object with names, dtypes, and indices matching the
expected output.
divisionisions: tuple of index values
Values along which we partition our blocks on the index
"""
def __init__(self, dsk, name, meta, divisionisions):
if not incontainstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self._name = name
meta = make_meta(meta)
if not self._is_partition_type(meta):
raise TypeError("Expected meta to specify type {0}, got type "
"{1}".formating(type(self).__name__,
typename(type(meta))))
self._meta = meta
self.divisionisions = tuple(divisionisions)
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [(self._name, i) for i in range(self.npartitions)]
def __dask_layers__(self):
return (self._name,)
def __dask_tokenize__(self):
return self._name
__dask_optimize__ = globalmethod(optimize, key='knowledgeframe_optimize',
falsey=dont_optimize)
__dask_scheduler__ = staticmethod(threaded.getting)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return type(self), (self._name, self._meta, self.divisionisions)
@property
def _constructor(self):
return new_dd_object
@property
def npartitions(self):
"""Return number of partitions"""
return length(self.divisionisions) - 1
@property
def size(self):
"""Size of the Collections or KnowledgeFrame as a Delayed object.
Examples
--------
>>> collections.size # doctest: +SKIP
dd.Scalar<size-ag..., dtype=int64>
"""
return self.reduction(methods.size, np.total_sum, token='size', meta=int,
split_every=False)
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisionisions)
def __gettingstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisionisions = state
def clone(self):
""" Make a clone of the knowledgeframe
This is strictly a shtotal_allow clone of the underlying computational graph.
It does not affect the underlying data
"""
return new_dd_object(self.dask, self._name,
self._meta, self.divisionisions)
def __array__(self, dtype=None, **kwargs):
self._computed = self.compute()
x = np.array(self._computed)
return x
def __array_wrap__(self, array, context=None):
raise NotImplementedError
def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
out = kwargs.getting('out', ())
for x in inputs + out:
# ufuncs work with 0-dimensional NumPy ndarrays
# so we don't want to raise NotImplemented
if incontainstance(x, np.ndarray) and x.shape == ():
continue
elif not incontainstance(x, (Number, Scalar, _Frame, Array,
mk.KnowledgeFrame, mk.Collections, mk.Index)):
return NotImplemented
if method == '__ctotal_all__':
if numpy_ufunc.signature is not None:
return NotImplemented
if numpy_ufunc.nout > 1:
# ufuncs with multiple output values
# are not yet supported for frames
return NotImplemented
else:
return elemwise(numpy_ufunc, *inputs, **kwargs)
else:
# ufunc methods are not yet supported for frames
return NotImplemented
@property
def _elemwise(self):
return elemwise
def _repr_data(self):
raise NotImplementedError
@property
def _repr_divisionisions(self):
name = "npartitions={0}".formating(self.npartitions)
if self.known_divisionisions:
divisionisions = mk.Index(self.divisionisions, name=name)
else:
# avoid to be converted to NaN
divisionisions = mk.Index([''] * (self.npartitions + 1), name=name)
return divisionisions
def __repr__(self):
data = self._repr_data().convert_string(getting_max_rows=5, show_dimensions=False)
return """Dask {klass} Structure:
{data}
Dask Name: {name}, {task} tasks""".formating(klass=self.__class__.__name__,
data=data, name=key_split(self._name),
task=length(self.dask))
@property
def index(self):
"""Return dask Index instance"""
return self.mapping_partitions(gettingattr, 'index', token=self._name + '-index',
meta=self._meta.index)
@index.setter
def index(self, value):
self.divisionisions = value.divisionisions
result = mapping_partitions(methods.total_allocate_index, self, value)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
def reseting_index(self, sip=False):
"""Reset the index to the default index.
Note that unlike in ``monkey``, the reset ``dask.knowledgeframe`` index will
not be monotonictotal_ally increasing from 0. Instead, it will restart at 0
for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
This is due to the inability to statictotal_ally know the full lengthgth of the
index.
For KnowledgeFrame with multi-level index, returns a new KnowledgeFrame with
labeling informatingion in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if whatever are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
sip : boolean, default False
Do not try to insert index into knowledgeframe columns.
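Examples
--------
A minimal sketch (assumes ``dkf`` is a dask KnowledgeFrame created with
``dd.from_monkey``); note the repeated index values across partitions:
>>> dkf.reseting_index().compute()  # doctest: +SKIP
>>> dkf.reseting_index(sip=True).compute()  # doctest: +SKIP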
"""
return self.mapping_partitions(M.reseting_index, sip=sip).clear_divisionisions()
@property
def known_divisionisions(self):
"""Whether divisionisions are already known"""
return length(self.divisionisions) > 0 and self.divisionisions[0] is not None
def clear_divisionisions(self):
""" Forgetting divisionision informatingion """
divisionisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisionisions)
def getting_partition(self, n):
"""Get a dask KnowledgeFrame/Collections representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'getting-partition-%s-%s' % (str(n), self._name)
divisionisions = self.divisionisions[n:n + 2]
layer = {(name, 0): (self._name, n)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])
return new_dd_object(graph, name, self._meta, divisionisions)
else:
msg = "n must be 0 <= n < {0}".formating(self.npartitions)
raise ValueError(msg)
@derived_from(mk.KnowledgeFrame)
def sip_duplicates(self, split_every=None, split_out=1, **kwargs):
# Let monkey error on bad inputs
self._meta_nonempty.sip_duplicates(**kwargs)
if 'subset' in kwargs and kwargs['subset'] is not None:
split_out_setup = split_out_on_cols
split_out_setup_kwargs = {'cols': kwargs['subset']}
else:
split_out_setup = split_out_setup_kwargs = None
if kwargs.getting('keep', True) is False:
raise NotImplementedError("sip_duplicates with keep=False")
chunk = M.sip_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='sip-duplicates', split_every=split_every,
split_out=split_out, split_out_setup=split_out_setup,
split_out_setup_kwargs=split_out_setup_kwargs, **kwargs)
def __length__(self):
return self.reduction(length, np.total_sum, token='length', meta=int,
split_every=False).compute()
def __bool__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.whatever() or a.total_all()."
.formating(self.__class__.__name__))
__nonzero__ = __bool__ # python 2
def _scalarfunc(self, cast_type):
def wrapper():
raise TypeError("cannot convert the collections to "
"{0}".formating(str(cast_type)))
return wrapper
def __float__(self):
return self._scalarfunc(float)
def __int__(self):
return self._scalarfunc(int)
__long__ = __int__ # python 2
def __complex__(self):
return self._scalarfunc(complex)
@insert_meta_param_description(pad=12)
def mapping_partitions(self, func, *args, **kwargs):
""" Apply Python function on each KnowledgeFrame partition.
Note that the index and divisionisions are astotal_sumed to remain unchanged.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*. Arguments
and keywords may contain ``Scalar``, ``Delayed`` or regular
python objects. KnowledgeFrame-like args (both dask and monkey) will be
repartitioned to align (if necessary) before employing the function.
$META
Examples
--------
Given a KnowledgeFrame, Collections, or Index, such as:
>>> import dask.knowledgeframe as dd
>>> kf = mk.KnowledgeFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> dkf = dd.from_monkey(kf, npartitions=2)
One can use ``mapping_partitions`` to employ a function on each partition.
Extra arguments and keywords can optiontotal_ally be provided, and will be
passed to the function after the partition.
Here we employ a function with arguments and keywords to a KnowledgeFrame,
resulting in a Collections:
>>> def myadd(kf, a, b=1):
... return kf.x + kf.y + a + b
>>> res = dkf.mapping_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in mwhatever cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manutotal_ally specify the output metadata with the ``meta`` keyword. This
can be specified in mwhatever forms, for more informatingion see
``dask.knowledgeframe.utils.make_meta``.
Here we specify the output is a Collections with no name, and dtype
``float64``:
>>> res = dkf.mapping_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we mapping a function that takes in a KnowledgeFrame, and returns a
KnowledgeFrame with a new column:
>>> res = dkf.mapping_partitions(lambda kf: kf.total_allocate(z=kf.x * kf.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manutotal_ally. This
time we pass in a ``dict``, as the output is a KnowledgeFrame:
>>> res = dkf.mapping_partitions(lambda kf: kf.total_allocate(z=kf.x * kf.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = dkf.mapping_partitions(lambda kf: kf.header_num(), meta=kf)
Also note that the index and divisionisions are astotal_sumed to remain unchanged.
If the function you're mappingping changes the index/divisionisions, you'll need
to clear them afterwards:
>>> dkf.mapping_partitions(func).clear_divisionisions() # doctest: +SKIP
"""
return mapping_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def mapping_overlap(self, func, before, after, *args, **kwargs):
"""Apply a function to each partition, sharing rows with adjacent partitions.
This can be useful for implementing windowing functions such as
``kf.rolling(...).average()`` or ``kf.diff()``.
Parameters
----------
func : function
Function applied to each partition.
before : int
The number of rows to prepend to partition ``i`` from the end of
partition ``i - 1``.
after : int
The number of rows to adding to partition ``i`` from the beginning
of partition ``i + 1``.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Notes
-----
Given positive integers ``before`` and ``after``, and a function
``func``, ``mapping_overlap`` does the following:
1. Prepend ``before`` rows to each partition ``i`` from the end of
partition ``i - 1``. The first partition has no rows prepended.
2. Append ``after`` rows to each partition ``i`` from the beginning of
partition ``i + 1``. The final_item partition has no rows addinged.
3. Apply ``func`` to each partition, passing in whatever extra ``args`` and
``kwargs`` if provided.
4. Trim ``before`` rows from the beginning of total_all but the first
partition.
5. Trim ``after`` rows from the end of total_all but the final_item partition.
Note that the index and divisionisions are astotal_sumed to remain unchanged.
Examples
--------
Given a KnowledgeFrame, Collections, or Index, such as:
>>> import dask.knowledgeframe as dd
>>> kf = mk.KnowledgeFrame({'x': [1, 2, 4, 7, 11],
... 'y': [1., 2., 3., 4., 5.]})
>>> dkf = dd.from_monkey(kf, npartitions=2)
A rolling total_sum with a trailing moving window of size 2 can be computed by
overlapping 2 rows before each partition, and then mappingping ctotal_alls to
``kf.rolling(2).total_sum()``:
>>> dkf.compute()
x y
0 1 1.0
1 2 2.0
2 4 3.0
3 7 4.0
4 11 5.0
>>> dkf.mapping_overlap(lambda kf: kf.rolling(2).total_sum(), 2, 0).compute()
x y
0 NaN NaN
1 3.0 3.0
2 6.0 5.0
3 11.0 7.0
4 18.0 9.0
The monkey ``diff`` method computes a discrete difference shiftinged by a
number of periods (can be positive or negative). This can be
implemented by mappingping ctotal_alls to ``kf.diff`` to each partition after
prepending/addinging that mwhatever rows, depending on sign:
>>> def diff(kf, periods=1):
... before, after = (periods, 0) if periods > 0 else (0, -periods)
... return kf.mapping_overlap(lambda kf, periods=1: kf.diff(periods),
... periods, 0, periods=periods)
>>> diff(dkf, 1).compute()
x y
0 NaN NaN
1 1.0 1.0
2 2.0 1.0
3 3.0 1.0
4 4.0 1.0
If you have a ``DatetimeIndex``, you can use a ``mk.Timedelta`` for time-
based windows.
>>> ts = mk.Collections(range(10), index=mk.date_range('2017', periods=10))
>>> dts = dd.from_monkey(ts, npartitions=2)
>>> dts.mapping_overlap(lambda kf: kf.rolling('2D').total_sum(),
... mk.Timedelta('2D'), 0).compute()
2017-01-01 0.0
2017-01-02 1.0
2017-01-03 3.0
2017-01-04 5.0
2017-01-05 7.0
2017-01-06 9.0
2017-01-07 11.0
2017-01-08 13.0
2017-01-09 15.0
2017-01-10 17.0
dtype: float64
"""
from .rolling import mapping_overlap
return mapping_overlap(func, self, before, after, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, combine=None, meta=no_default,
token=None, split_every=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None, **kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : ctotal_allable
Function to operate on each partition. Should return a
``monkey.KnowledgeFrame``, ``monkey.Collections``, or a scalar.
aggregate : ctotal_allable, optional
Function to operate on the concatingenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Collections, with one row per partition.
- Collections: Input is a KnowledgeFrame, with one row per partition. Columns
are the rows in the output collections.
- KnowledgeFrame: Input is a KnowledgeFrame, with one row per partition.
Columns are the columns in the output knowledgeframes.
Should return a ``monkey.KnowledgeFrame``, ``monkey.Collections``, or a
scalar.
combine : ctotal_allable, optional
Function to operate on intermediate concatingenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while perforgetting_ming a
tree-reduction. If set to False, no tree-reduction will be used,
and total_all intermediates will be concatingenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import monkey as mk
>>> import dask.knowledgeframe as dd
>>> kf = mk.KnowledgeFrame({'x': range(50), 'y': range(50, 100)})
>>> dkf = dd.from_monkey(kf, npartitions=4)
Count the number of rows in a KnowledgeFrame. To do this, count the number
of rows in each partition, then total_sum the results:
>>> res = dkf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.total_sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Collections with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).total_sum()
>>> res = dkf.x.reduction(count_greater, aggregate=lambda x: x.total_sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the total_sum and count of a Collections at the same time:
>>> def total_sum_and_count(x):
... return mk.Collections({'count': x.count(), 'total_sum': x.total_sum()},
... index=['count', 'total_sum'])
>>> res = dkf.x.reduction(total_sum_and_count, aggregate=lambda x: x.total_sum())
>>> res.compute()
count 50
total_sum 1225
dtype: int64
Doing the same, but for a KnowledgeFrame. Here ``chunk`` returns a
KnowledgeFrame, averageing the input to ``aggregate`` is a KnowledgeFrame with an
index with non-distinctive entries for both 'x' and 'y'. We grouper the
index, and total_sum each group to getting the final result.
>>> def total_sum_and_count(x):
... return mk.KnowledgeFrame({'count': x.count(), 'total_sum': x.total_sum()},
... columns=['count', 'total_sum'])
>>> res = dkf.reduction(total_sum_and_count,
... aggregate=lambda x: x.grouper(level=0).total_sum())
>>> res.compute()
count total_sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.clone() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
combine_kwargs = combine_kwargs.clone() if combine_kwargs else {}
combine_kwargs['aca_combine'] = combine
aggregate_kwargs = aggregate_kwargs.clone() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
combine=_reduction_combine, meta=meta, token=token,
split_every=split_every, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs, **kwargs)
@derived_from(mk.KnowledgeFrame)
def pipe(self, func, *args, **kwargs):
# Taken from monkey:
# https://github.com/pydata/monkey/blob/master/monkey/core/generic.py#L2698-L2707
if incontainstance(func, tuple):
func, targetting = func
if targetting in kwargs:
raise ValueError('%s is both the pipe targetting and a keyword '
'argument' % targetting)
kwargs[targetting] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def random_split(self, frac, random_state=None):
""" Pseudorandomly split knowledgeframe into different pieces row-wise
Parameters
----------
frac : list
List of floats that should total_sum to one.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
Examples
--------
50/50 split
>>> a, b = kf.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = kf.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.KnowledgeFrame.sample_by_num
"""
if not np.total_allclose(total_sum(frac), 1):
raise ValueError("frac should total_sum to 1")
state_data = random_state_data(self.npartitions, random_state)
token = tokenize(self, frac, random_state)
name = 'split-' + token
layer = {(name, i): (mk_split, (self._name, i), frac, state)
for i, state in enumerate(state_data)}
out = []
for i in range(length(frac)):
name2 = 'split-%d-%s' % (i, token)
dsk2 = {(name2, j): (gettingitem, (name, j), i)
for j in range(self.npartitions)}
graph = HighLevelGraph.from_collections(name2, unioner(dsk2, layer), dependencies=[self])
out_kf = type(self)(graph, name2, self._meta, self.divisionisions)
out.adding(out_kf)
return out
def header_num(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and whatever found rows
returned. Pass -1 to use total_all partitions.
compute : bool, optional
Whether to compute the result, default is True.
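Examples
--------
Illustrative only (assumes ``dkf`` is a dask KnowledgeFrame); the second
ctotal_all searches total_all partitions for up to 20 rows:
>>> dkf.header_num()  # doctest: +SKIP
>>> dkf.header_num(20, npartitions=-1)  # doctest: +SKIP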
"""
return self._header_num(n=n, npartitions=npartitions, compute=compute, safe=True)
def _header_num(self, n, npartitions, compute, safe):
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
msg = "only {} partitions, header_num received {}"
raise ValueError(msg.formating(self.npartitions, npartitions))
name = 'header_num-%d-%d-%s' % (npartitions, n, self._name)
if safe:
header_num = safe_header_num
else:
header_num = M.header_num
if npartitions > 1:
name_p = 'header_num-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.header_num, (self._name, i), n)
concating = (_concating, [(name_p, i) for i in range(npartitions)])
dsk[(name, 0)] = (header_num, concating, n)
else:
dsk = {(name, 0): (header_num, (self._name, 0), n)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta,
[self.divisionisions[0], self.divisionisions[npartitions]])
if compute:
result = result.compute()
return result
def final_item_tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only checks the final_item n rows of the final_item partition.
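Example (illustrative; assumes ``dkf`` is a dask KnowledgeFrame):
>>> dkf.final_item_tail(3)  # doctest: +SKIP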
"""
name = 'final_item_tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.final_item_tail, (self._name, self.npartitions - 1), n)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta, self.divisionisions[-2:])
if compute:
result = result.compute()
return result
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> kf.loc["b"] # doctest: +SKIP
>>> kf.loc["b":"d"] # doctest: +SKIP
"""
from .indexing import _LocIndexer
return _LocIndexer(self)
def _partitions(self, index):
if not incontainstance(index, tuple):
index = (index,)
from ..array.slicing import normalize_index
index = normalize_index(index, (self.npartitions,))
index = tuple(slice(k, k + 1) if incontainstance(k, Number) else k
for k in index)
name = 'blocks-' + tokenize(self, index)
new_keys = np.array(self.__dask_keys__(), dtype=object)[index].convert_list()
divisionisions = [self.divisionisions[i] for _, i in new_keys] + [self.divisionisions[new_keys[-1][1] + 1]]
dsk = {(name, i): tuple(key) for i, key in enumerate(new_keys)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self._meta, divisionisions)
@property
def partitions(self):
""" Slice knowledgeframe by partitions
This total_allows partitionwise slicing of a Dask KnowledgeFrame. You can perform
normal Numpy-style slicing, but rather than slicing elements of the array
you slice along partitions; for example, ``kf.partitions[:5]`` produces a
new Dask KnowledgeFrame of the first five partitions.
Examples
--------
>>> kf.partitions[0] # doctest: +SKIP
>>> kf.partitions[:3] # doctest: +SKIP
>>> kf.partitions[::10] # doctest: +SKIP
Returns
-------
A Dask KnowledgeFrame
"""
return IndexCtotal_allable(self._partitions)
# Note: iloc is implemented only on KnowledgeFrame
def repartition(self, divisionisions=None, npartitions=None, freq=None, force=False):
""" Repartition knowledgeframe along new divisionisions
Parameters
----------
divisionisions : list, optional
List of partitions to be used. If specified npartitions will be
ignored.
npartitions : int, optional
Number of partitions of output. Only used if divisionisions isn't
specified.
freq : str, mk.Timedelta
A period on which to partition timecollections data like ``'7D'`` or
``'12h'`` or ``mk.Timedelta(hours=12)``. Astotal_sumes a datetime index.
force : bool, default False
Allows the expansion of the existing divisionisions.
If False then the new divisionisions lower and upper bounds must be
the same as the old divisionisions.
Examples
--------
>>> kf = kf.repartition(npartitions=10) # doctest: +SKIP
>>> kf = kf.repartition(divisionisions=[0, 5, 10, 20]) # doctest: +SKIP
>>> kf = kf.repartition(freq='7d') # doctest: +SKIP
"""
if npartitions is not None and divisionisions is not None:
warnings.warn("When providing both npartitions and divisionisions to "
"repartition only npartitions is used.")
if npartitions is not None:
return repartition_npartitions(self, npartitions)
elif divisionisions is not None:
return repartition(self, divisionisions, force=force)
elif freq is not None:
return repartition_freq(self, freq=freq)
else:
raise ValueError(
"Provide either divisionisions= or npartitions= to repartition")
@derived_from(mk.KnowledgeFrame)
def fillnone(self, value=None, method=None, limit=None, axis=None):
axis = self._validate_axis(axis)
if method is None and limit is not None:
raise NotImplementedError("fillnone with set limit and method=None")
if incontainstance(value, _Frame):
test_value = value._meta_nonempty.values[0]
else:
test_value = value
meta = self._meta_nonempty.fillnone(value=test_value, method=method,
limit=limit, axis=axis)
if axis == 1 or method is None:
# Control whether or not dask's partition alignment happens.
# We don't want it for a monkey Collections,
# but we do want it for a dask Collections.
if is_collections_like(value) and not is_dask_collection(value):
args = ()
kwargs = {'value': value}
else:
args = (value,)
kwargs = {}
return self.mapping_partitions(M.fillnone, *args, method=method,
limit=limit, axis=axis, meta=meta,
**kwargs)
if method in ('pad', 'ffill'):
method = 'ffill'
skip_check = 0
before, after = 1 if limit is None else limit, 0
else:
method = 'bfill'
skip_check = self.npartitions - 1
before, after = 0, 1 if limit is None else limit
if limit is None:
name = 'fillnone-chunk-' + tokenize(self, method)
dsk = {(name, i): (methods.fillnone_check, (self._name, i),
method, i != skip_check)
for i in range(self.npartitions)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
parts = new_dd_object(graph, name, meta, self.divisionisions)
else:
parts = self
return parts.mapping_overlap(M.fillnone, before, after, method=method,
limit=limit, meta=meta)
@derived_from(mk.KnowledgeFrame)
def ffill(self, axis=None, limit=None):
return self.fillnone(method='ffill', limit=limit, axis=axis)
@derived_from(mk.KnowledgeFrame)
def bfill(self, axis=None, limit=None):
return self.fillnone(method='bfill', limit=limit, axis=axis)
def sample_by_num(self, n=None, frac=None, replacing=False, random_state=None):
""" Random sample_by_num of items
Parameters
----------
n : int, optional
Number of items to return. Not supported by dask; use ``frac``
instead.
frac : float, optional
Fraction of axis items to return.
replacing : boolean, optional
Sample with or without replacingment. Default = False.
random_state : int or ``np.random.RandomState``
If int we create a new RandomState with this as the seed
Otherwise we draw from the passed RandomState
See Also
--------
KnowledgeFrame.random_split
monkey.KnowledgeFrame.sample_by_num
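Examples
--------
Illustrative only (assumes ``dkf`` is a dask KnowledgeFrame):
>>> dkf.sample_by_num(frac=0.1, random_state=123)  # doctest: +SKIP
>>> dkf.sample_by_num(frac=0.5, replacing=True)  # doctest: +SKIP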
"""
if n is not None:
msg = ("sample_by_num does not support the number of sample_by_numd items "
"parameter, 'n'. Please use the 'frac' parameter instead.")
if incontainstance(n, Number) and 0 <= n <= 1:
warnings.warn(msg)
frac = n
else:
raise ValueError(msg)
if frac is None:
raise ValueError("frac must not be None")
if random_state is None:
random_state = np.random.RandomState()
name = 'sample_by_num-' + tokenize(self, frac, replacing, random_state)
state_data = random_state_data(self.npartitions, random_state)
dsk = {(name, i): (methods.sample_by_num, (self._name, i), state, frac, replacing)
for i, state in enumerate(state_data)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self._meta, self.divisionisions)
@derived_from(mk.KnowledgeFrame)
def replacing(self, to_replacing=None, value=None, regex=False):
return self.mapping_partitions(M.replacing, to_replacing=to_replacing,
value=value, regex=regex)
def to_dask_array(self, lengthgths=None):
"""Convert a dask KnowledgeFrame to a dask array.
Parameters
----------
lengthgths : bool or Sequence of ints, optional
How to detergetting_mine the chunks sizes for the output array.
By default, the output array will have unknown chunk lengthgths
along the first axis, which can cause some later operations
to fail.
* True : immediately compute the lengthgth of each partition
* Sequence : a sequence of integers to use for the chunk sizes
on the first axis. These values are *not* validated for
correctness, beyond ensuring that the number of items
matches the number of partitions.
Returns
-------
A dask Array holding the same values, with one chunk per partition
along the first axis.
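Examples
--------
A minimal sketch (assumes ``dkf`` is a dask KnowledgeFrame); the second
ctotal_all computes partition lengthgths first so the chunk sizes are known:
>>> dkf.to_dask_array()  # doctest: +SKIP
>>> dkf.to_dask_array(lengthgths=True)  # doctest: +SKIP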
"""
if lengthgths is True:
lengthgths = tuple(self.mapping_partitions(length).compute())
arr = self.values
chunks = self._validate_chunks(arr, lengthgths)
arr._chunks = chunks
return arr
def to_hkf(self, path_or_buf, key, mode='a', adding=False, **kwargs):
""" See dd.to_hkf docstring for more informatingion """
from .io import to_hkf
return to_hkf(self, path_or_buf, key, mode, adding, **kwargs)
def to_csv(self, filengthame, **kwargs):
""" See dd.to_csv docstring for more informatingion """
from .io import to_csv
return to_csv(self, filengthame, **kwargs)
def to_json(self, filengthame, *args, **kwargs):
""" See dd.to_json docstring for more informatingion """
from .io import to_json
return to_json(self, filengthame, *args, **kwargs)
def to_delayed(self, optimize_graph=True):
"""Convert into a list of ``dask.delayed`` objects, one per partition.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
Examples
--------
>>> partitions = kf.to_delayed() # doctest: +SKIP
See Also
--------
dask.knowledgeframe.from_delayed
"""
keys = self.__dask_keys__()
graph = self.__dask_graph__()
if optimize_graph:
graph = self.__dask_optimize__(graph, self.__dask_keys__())
name = 'delayed-' + self._name
graph = HighLevelGraph.from_collections(name, graph, dependencies=())
return [Delayed(k, graph) for k in keys]
@classmethod
def _getting_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _getting_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, getting_min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformatingions.
Parameters
----------
window : int, str, offset
Size of the moving window. This is the number of observations used
for calculating the statistic. When not using a ``DatetimeIndex``,
the window size must not be so large as to span more than one
adjacent partition. If using an offset or offset alias like '5D',
the data must have a ``DatetimeIndex``.
.. versionchanged:: 0.15.0
Now accepts offsets and string offset aliases
getting_min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to monkey.
axis : int, default 0
Returns
-------
a Rolling object on which to ctotal_all a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
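Examples
--------
Illustrative only (assumes ``dkf`` has a numeric column ``x``, and a
``DatetimeIndex`` for the offset-based window):
>>> dkf.x.rolling(3).average()  # doctest: +SKIP
>>> dkf.x.rolling('5D').total_sum()  # doctest: +SKIP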
"""
from dask.knowledgeframe.rolling import Rolling
if incontainstance(window, Integral):
if window < 0:
raise ValueError('window must be >= 0')
if getting_min_periods is not None:
if not incontainstance(getting_min_periods, Integral):
raise ValueError('getting_min_periods must be an integer')
if getting_min_periods < 0:
raise ValueError('getting_min_periods must be >= 0')
return Rolling(self, window=window, getting_min_periods=getting_min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
@derived_from(mk.KnowledgeFrame)
def diff(self, periods=1, axis=0):
"""
.. note::
Monkey currently uses an ``object``-dtype column to represent
boolean data with missing values. This can cause issues for
boolean-specific operations, like ``|``. To enable boolean-
specific operations, at the cost of metadata that doesn't match
monkey, use ``.totype(bool)`` after the ``diff``.
"""
axis = self._validate_axis(axis)
if not incontainstance(periods, Integral):
raise TypeError("periods must be an integer")
if axis == 1:
return self.mapping_partitions(M.diff, token='diff', periods=periods,
axis=1)
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.mapping_overlap(M.diff, before, after, token='diff',
periods=periods)
@derived_from(mk.KnowledgeFrame)
def shifting(self, periods=1, freq=None, axis=0):
axis = self._validate_axis(axis)
if not incontainstance(periods, Integral):
raise TypeError("periods must be an integer")
if axis == 1:
return self.mapping_partitions(M.shifting, token='shifting', periods=periods,
freq=freq, axis=1)
if freq is None:
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.mapping_overlap(M.shifting, before, after, token='shifting',
periods=periods)
# Let monkey error on invalid arguments
meta = self._meta_nonempty.shifting(periods, freq=freq)
out = self.mapping_partitions(M.shifting, token='shifting', periods=periods,
freq=freq, meta=meta,
transform_divisionisions=False)
return maybe_shifting_divisionisions(out, periods, freq=freq)
def _reduction_agg(self, name, axis=None, skipna=True,
split_every=False, out=None):
axis = self._validate_axis(axis)
meta = gettingattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = gettingattr(M, name)
if axis == 1:
result = self.mapping_partitions(method, meta=meta,
token=token, skipna=skipna, axis=axis)
return handle_out(out, result)
else:
result = self.reduction(method, meta=meta, token=token,
skipna=skipna, axis=axis,
split_every=split_every)
if incontainstance(self, KnowledgeFrame):
result.divisionisions = (getting_min(self.columns), getting_max(self.columns))
return handle_out(out, result)
@derived_from(mk.KnowledgeFrame)
def abs(self):
_raise_if_object_collections(self, "abs")
meta = self._meta_nonempty.abs()
return self.mapping_partitions(M.abs, meta=meta)
@derived_from(mk.KnowledgeFrame)
def total_all(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('total_all', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(mk.KnowledgeFrame)
def whatever(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('whatever', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(mk.KnowledgeFrame)
def total_sum(self, axis=None, skipna=True, split_every=False, dtype=None,
out=None, getting_min_count=None):
result = self._reduction_agg('total_sum', axis=axis, skipna=skipna,
split_every=split_every, out=out)
if getting_min_count:
return result.where(self.notnull().total_sum(axis=axis) >= getting_min_count,
other=np.NaN)
else:
return result
@derived_from(mk.KnowledgeFrame)
def prod(self, axis=None, skipna=True, split_every=False, dtype=None,
out=None, getting_min_count=None):
result = self._reduction_agg('prod', axis=axis, skipna=skipna,
split_every=split_every, out=out)
if getting_min_count:
return result.where(self.notnull().total_sum(axis=axis) >= getting_min_count,
other=np.NaN)
else:
return result
@derived_from(mk.KnowledgeFrame)
def getting_max(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('getting_max', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(mk.KnowledgeFrame)
def getting_min(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('getting_min', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(mk.KnowledgeFrame)
def idxgetting_max(self, axis=None, skipna=True, split_every=False):
fn = 'idxgetting_max'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxgetting_max(axis=axis, skipna=skipna)
if axis == 1:
return mapping_partitions(M.idxgetting_max, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not is_collections_like(meta)
result = aca([self], chunk=idxgetting_maxgetting_min_chunk, aggregate=idxgetting_maxgetting_min_agg,
combine=idxgetting_maxgetting_min_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if incontainstance(self, KnowledgeFrame):
result.divisionisions = (getting_min(self.columns), getting_max(self.columns))
return result
@derived_from(mk.KnowledgeFrame)
def idxgetting_min(self, axis=None, skipna=True, split_every=False):
fn = 'idxgetting_min'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxgetting_min(axis=axis, skipna=skipna)
if axis == 1:
return mapping_partitions(M.idxgetting_min, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not is_collections_like(meta)
result = aca([self], chunk=idxgetting_maxgetting_min_chunk, aggregate=idxgetting_maxgetting_min_agg,
combine=idxgetting_maxgetting_min_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if incontainstance(self, KnowledgeFrame):
result.divisionisions = (getting_min(self.columns), getting_max(self.columns))
return result
@derived_from(mk.KnowledgeFrame)
def count(self, axis=None, split_every=False):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.mapping_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
result = self.reduction(M.count, aggregate=M.total_sum, meta=meta,
token=token, split_every=split_every)
if incontainstance(self, KnowledgeFrame):
result.divisionisions = (getting_min(self.columns), getting_max(self.columns))
return result
@derived_from(mk.KnowledgeFrame)
def average(self, axis=None, skipna=True, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_collections(self, "average")
meta = self._meta_nonempty.average(axis=axis, skipna=skipna)
if axis == 1:
result = mapping_partitions(M.average, self, meta=meta,
token=self._token_prefix + 'average',
axis=axis, skipna=skipna)
return handle_out(out, result)
else:
num = self._getting_numeric_data()
s = num.total_sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'average-%s' % tokenize(self, axis, skipna)
result = mapping_partitions(methods.average_aggregate, s, n,
token=name, meta=meta)
if incontainstance(self, KnowledgeFrame):
result.divisionisions = (getting_min(self.columns), getting_max(self.columns))
return handle_out(out, result)
@derived_from(mk.KnowledgeFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_collections(self, "var")
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = mapping_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
return handle_out(out, result)
else:
if self.ndim == 1:
result = self._var_1d(self, skipna, ddof, split_every)
return handle_out(out, result)
count_timedeltas = length(self._meta_nonempty.choose_dtypes(include=[np.timedelta64]).columns)
if count_timedeltas == length(self._meta.columns):
result = self._var_timedeltas(skipna, ddof, split_every)
elif count_timedeltas > 0:
result = self._var_mixed(skipna, ddof, split_every)
else:
result = self._var_numeric(skipna, ddof, split_every)
if incontainstance(self, KnowledgeFrame):
result.divisionisions = (getting_min(self.columns), getting_max(self.columns))
return handle_out(out, result)
def _var_numeric(self, skipna=True, ddof=1, split_every=False):
num = self.choose_dtypes(include=['number', 'bool'], exclude=[np.timedelta64])
values_dtype = num.values.dtype
array_values = num.values
if not np.issubdtype(values_dtype, np.number):
array_values = num.values.totype('f8')
var = da.nanvar if skipna or skipna is None else da.var
array_var = var(array_values, axis=0, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'var-numeric' + tokenize(num, split_every)
cols = num._meta.columns if is_knowledgeframe_like(num) else None
var_shape = num._meta_nonempty.values.var(axis=0).shape
array_var_name = (array_var._name,) + (0,) * length(var_shape)
layer = {(name, 0): (methods.wrap_var_reduction, array_var_name, cols)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])
return new_dd_object(graph, name, num._meta_nonempty.var(), divisionisions=[None, None])
def _var_timedeltas(self, skipna=True, ddof=1, split_every=False):
timedeltas = self.choose_dtypes(include=[np.timedelta64])
var_timedeltas = [self._var_1d(timedeltas[col_idx], skipna, ddof, split_every)
for col_idx in timedeltas._meta.columns]
var_timedelta_names = [(v._name, 0) for v in var_timedeltas]
name = self._token_prefix + 'var-timedeltas-' + tokenize(timedeltas, split_every)
layer = {(name, 0): (methods.wrap_var_reduction, var_timedelta_names, timedeltas._meta.columns)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=var_timedeltas)
return new_dd_object(graph, name, timedeltas._meta_nonempty.var(), divisionisions=[None, None])
def _var_mixed(self, skipna=True, ddof=1, split_every=False):
data = self.choose_dtypes(include=['number', 'bool', np.timedelta64])
timedelta_vars = self._var_timedeltas(skipna, ddof, split_every)
numeric_vars = self._var_numeric(skipna, ddof, split_every)
name = self._token_prefix + 'var-mixed-' + tokenize(data, split_every)
layer = {(name, 0): (methods.var_mixed_concating,
(numeric_vars._name, 0),
(timedelta_vars._name, 0),
data._meta.columns)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[numeric_vars, timedelta_vars])
return new_dd_object(graph, name, self._meta_nonempty.var(), divisionisions=[None, None])
def _var_1d(self, column, skipna=True, ddof=1, split_every=False):
is_timedelta = is_timedelta64_dtype(column._meta)
if is_timedelta:
if not skipna:
is_nan = column.ifna()
column = column.totype('i8')
column = column.mask(is_nan)
else:
column = column.sipna().totype('i8')
if PANDAS_VERSION >= '0.24.0':
if mk.Int64Dtype.is_dtype(column._meta_nonempty):
column = column.totype('f8')
if not np.issubdtype(column.dtype, np.number):
column = column.totype('f8')
name = self._token_prefix + 'var-1d-' + tokenize(column, split_every)
var = da.nanvar if skipna or skipna is None else da.var
array_var = var(column.values, axis=0, ddof=ddof, split_every=split_every)
layer = {(name, 0): (methods.wrap_var_reduction, (array_var._name,), None)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])
return new_dd_object(graph, name, column._meta_nonempty.var(), divisionisions=[None, None])
@derived_from(mk.KnowledgeFrame)
def standard(self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_collections(self, "standard")
meta = self._meta_nonempty.standard(axis=axis, skipna=skipna)
if axis == 1:
result = mapping_partitions(M.standard, self, meta=meta,
token=self._token_prefix + 'standard',
axis=axis, skipna=skipna, ddof=ddof)
return handle_out(out, result)
else:
v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'standard'
result = mapping_partitions(np.sqrt, v, meta=meta, token=name)
return handle_out(out, result)
@derived_from(mk.KnowledgeFrame)
def sem(self, axis=None, skipna=None, ddof=1, split_every=False):
axis = self._validate_axis(axis)
_raise_if_object_collections(self, "sem")
meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)
if axis == 1:
return mapping_partitions(M.sem, self, meta=meta,
token=self._token_prefix + 'sem',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._getting_numeric_data()
v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'sem'
result = mapping_partitions(np.sqrt, v / n, meta=meta, token=name)
if incontainstance(self, KnowledgeFrame):
result.divisionisions = (getting_min(self.columns), getting_max(self.columns))
return result
def quantile(self, q=0.5, axis=0, method='default'):
""" Approximate row-wise and precise column-wise quantiles of KnowledgeFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use dask's internal custom
algorithm (``'dask'``). If set to ``'tdigest'``, tdigest will be used
for floats and ints, ftotal_alling back to ``'dask'`` otherwise.
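Examples
--------
Illustrative only (assumes ``dkf`` is a dask KnowledgeFrame with numeric
columns):
>>> dkf.quantile(0.25).compute()  # doctest: +SKIP
>>> dkf.quantile([0.25, 0.75]).compute()  # doctest: +SKIP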
"""
axis = self._validate_axis(axis)
keyname = 'quantiles-concating--' + tokenize(self, q, axis)
if axis == 1:
if incontainstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
return mapping_partitions(M.quantile, self, q, axis,
token=keyname, meta=(q, 'f8'))
else:
_raise_if_object_collections(self, "quantile")
meta = self._meta.quantile(q, axis=axis)
num = self._getting_numeric_data()
quantiles = tuple(quantile(self[c], q, method) for c in num.columns)
qnames = [(_q._name, 0) for _q in quantiles]
if incontainstance(quantiles[0], Scalar):
layer = {(keyname, 0): (mk.Collections, qnames, num.columns, None, meta.name)}
graph = HighLevelGraph.from_collections(keyname, layer, dependencies=quantiles)
divisionisions = (getting_min(num.columns), getting_max(num.columns))
return Collections(graph, keyname, meta, divisionisions)
else:
layer = {(keyname, 0): (methods.concating, qnames, 1)}
graph = HighLevelGraph.from_collections(keyname, layer, dependencies=quantiles)
return KnowledgeFrame(graph, keyname, meta, quantiles[0].divisionisions)
@derived_from(mk.KnowledgeFrame)
def describe(self,
split_every=False,
percentiles=None,
percentiles_method='default',
include=None,
exclude=None):
if self._meta.ndim == 1:
return self._describe_1d(self, split_every, percentiles, percentiles_method)
elif (include is None) and (exclude is None):
data = self._meta.choose_dtypes(include=[np.number, np.timedelta64])
# when some numerics/timedeltas are found, by default keep them
if length(data.columns) == 0:
chosen_columns = self._meta.columns
else:
# check if there are timedelta or boolean columns
bools_and_timedeltas = self._meta.choose_dtypes(include=[np.timedelta64, 'bool'])
if length(bools_and_timedeltas.columns) == 0:
return self._describe_numeric(self, split_every, percentiles, percentiles_method)
else:
chosen_columns = data.columns
elif include == 'total_all':
if exclude is not None:
msg = "exclude must be None when include is 'total_all'"
raise ValueError(msg)
chosen_columns = self._meta.columns
else:
chosen_columns = self._meta.choose_dtypes(include=include, exclude=exclude)
stats = [self._describe_1d(self[col_idx], split_every,
percentiles, percentiles_method) for col_idx in chosen_columns]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self, split_every)
layer = {(name, 0): (methods.describe_aggregate, stats_names)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = self._meta_nonempty.describe(include=include, exclude=exclude)
return new_dd_object(graph, name, meta, divisionisions=[None, None])
def _describe_1d(self, data, split_every=False,
percentiles=None, percentiles_method='default'):
if is_bool_dtype(data._meta):
return self._describe_nonnumeric_1d(data, split_every=split_every)
elif is_numeric_dtype(data._meta):
return self._describe_numeric(
data,
split_every=split_every,
percentiles=percentiles,
percentiles_method=percentiles_method)
elif is_timedelta64_dtype(data._meta):
return self._describe_numeric(
data.sipna().totype('i8'),
split_every=split_every,
percentiles=percentiles,
percentiles_method=percentiles_method,
is_timedelta_column=True)
else:
return self._describe_nonnumeric_1d(data, split_every=split_every)
def _describe_numeric(self, data, split_every=False, percentiles=None,
percentiles_method='default', is_timedelta_column=False):
num = data._getting_numeric_data()
if data.ndim == 2 and length(num.columns) == 0:
raise ValueError("KnowledgeFrame contains only non-numeric data.")
elif data.ndim == 1 and data.dtype == 'object':
raise ValueError("Cannot compute ``describe`` on object dtype.")
if percentiles is None:
percentiles = [0.25, 0.5, 0.75]
else:
# always include the 50th percentile to calculate the median
# distinctive removes duplicates and sorts quantiles
percentiles = np.array(percentiles)
percentiles = np.adding(percentiles, 0.5)
percentiles = np.distinctive(percentiles)
percentiles = list(percentiles)
stats = [num.count(split_every=split_every),
num.average(split_every=split_every),
num.standard(split_every=split_every),
num.getting_min(split_every=split_every),
num.quantile(percentiles, method=percentiles_method),
num.getting_max(split_every=split_every)]
stats_names = [(s._name, 0) for s in stats]
colname = data._meta.name if incontainstance(data._meta, mk.Collections) else None
name = 'describe-numeric--' + tokenize(num, split_every)
layer = {(name, 0): (methods.describe_numeric_aggregate, stats_names, colname, is_timedelta_column)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = num._meta_nonempty.describe()
return new_dd_object(graph, name, meta, divisionisions=[None, None])
def _describe_nonnumeric_1d(self, data, split_every=False):
vcounts = data.counts_value_num(split_every)
count_nonzero = vcounts[vcounts != 0]
count_distinctive = count_nonzero.size
stats = [
# ndistinctive
count_distinctive,
# count
data.count(split_every=split_every),
# most common value
vcounts._header_num(1, npartitions=1, compute=False, safe=False)
]
if is_datetime64_whatever_dtype(data._meta):
getting_min_ts = data.sipna().totype('i8').getting_min(split_every=split_every)
getting_max_ts = data.sipna().totype('i8').getting_max(split_every=split_every)
stats += [getting_min_ts, getting_max_ts]
stats_names = [(s._name, 0) for s in stats]
colname = data._meta.name
name = 'describe-nonnumeric-1d--' + tokenize(data, split_every)
layer = {(name, 0): (methods.describe_nonnumeric_aggregate, stats_names, colname)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = data._meta_nonempty.describe()
return new_dd_object(graph, name, meta, divisionisions=[None, None])
def _cum_agg(self, op_name, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None, out=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.formating(self._token_prefix, op_name)
result = self.mapping_partitions(chunk, token=name, **chunk_kwargs)
return handle_out(out, result)
else:
# cumulate each partition
name1 = '{0}{1}-mapping'.formating(self._token_prefix, op_name)
cumpart = mapping_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-final_item'.formating(self._token_prefix, op_name)
cumfinal_item = mapping_partitions(_take_final_item, cumpart, skipna,
meta=mk.Collections([]), token=name2)
suffix = tokenize(self)
name = '{0}{1}-{2}'.formating(self._token_prefix, op_name, suffix)
cname = '{0}{1}-cum-final_item-{2}'.formating(self._token_prefix, op_name,
suffix)
# aggregate each cumulated partition with the running cumulation of the
# previous partitions' final_item elements
layer = {}
layer[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
layer[(cname, i)] = (cumfinal_item._name, i - 1)
else:
# aggregate with previous cumulation results
layer[(cname, i)] = (aggregate, (cname, i - 1), (cumfinal_item._name, i - 1))
layer[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
graph = HighLevelGraph.from_collections(cname, layer, dependencies=[cumpart, cumfinal_item])
result = new_dd_object(graph, name, chunk(self._meta), self.divisionisions)
return handle_out(out, result)
@derived_from(mk.KnowledgeFrame)
def cumtotal_sum(self, axis=None, skipna=True, dtype=None, out=None):
return self._cum_agg('cumtotal_sum',
chunk=M.cumtotal_sum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(mk.KnowledgeFrame)
def cumprod(self, axis=None, skipna=True, dtype=None, out=None):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(mk.KnowledgeFrame)
def cumgetting_max(self, axis=None, skipna=True, out=None):
return self._cum_agg('cumgetting_max',
chunk=M.cumgetting_max,
aggregate=methods.cumgetting_max_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(mk.KnowledgeFrame)
def cumgetting_min(self, axis=None, skipna=True, out=None):
return self._cum_agg('cumgetting_min',
chunk=M.cumgetting_min,
aggregate=methods.cumgetting_min_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(mk.KnowledgeFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instances;
# if passed to mapping_partitions via keyword they would not be aligned
return mapping_partitions(M.where, self, cond, other)
@derived_from(mk.KnowledgeFrame)
def mask(self, cond, other=np.nan):
return mapping_partitions(M.mask, self, cond, other)
@derived_from(mk.KnowledgeFrame)
def notnull(self):
return self.mapping_partitions(M.notnull)
@derived_from(mk.KnowledgeFrame)
def ifnull(self):
return self.mapping_partitions(M.ifnull)
@derived_from(mk.KnowledgeFrame)
def ifna(self):
if hasattr(mk, 'ifna'):
return self.mapping_partitions(M.ifna)
else:
raise NotImplementedError("Need more recent version of Monkey "
"to support ifna. "
"Please use ifnull instead.")
@derived_from(mk.KnowledgeFrame)
def incontain(self, values):
if is_knowledgeframe_like(self._meta):
# KnowledgeFrame.incontain does weird alignment stuff
bad_types = (_Frame, mk.Collections, mk.KnowledgeFrame)
else:
bad_types = (_Frame,)
if incontainstance(values, bad_types):
raise NotImplementedError(
"Passing a %r to `incontain`" % typename(type(values))
)
meta = self._meta_nonempty.incontain(values)
# We wrap values in a delayed for two reasons:
# - avoid serializing data in every task
# - avoid cost of traversal of large list in optimizations
return self.mapping_partitions(M.incontain, delayed(values), meta=meta)
@derived_from(mk.KnowledgeFrame)
def totype(self, dtype):
# XXX: Monkey will segfault for empty knowledgeframes when setting
# categorical dtypes. This operation isn't total_allowed currently whateverway. We
# getting the metadata with a non-empty frame to throw the error instead of
# segfaulting.
if is_knowledgeframe_like(self._meta) and is_categorical_dtype(dtype):
meta = self._meta_nonempty.totype(dtype)
else:
meta = self._meta.totype(dtype)
if hasattr(dtype, 'items'):
set_unknown = [
k for k, v in dtype.items()
if is_categorical_dtype(v) and gettingattr(v, 'categories', None) is None
]
meta = clear_known_categories(meta, cols=set_unknown)
elif (is_categorical_dtype(dtype) and
gettingattr(dtype, 'categories', None) is None):
meta = clear_known_categories(meta)
return self.mapping_partitions(M.totype, dtype=dtype, meta=meta)
@derived_from(mk.Collections)
def adding(self, other, interleave_partitions=False):
# because KnowledgeFrame.adding will override the method,
# wrap by mk.Collections.adding docstring
from .multi import concating
if incontainstance(other, (list, dict)):
msg = "adding doesn't support list or dict input"
raise NotImplementedError(msg)
return concating([self, other], join='outer',
interleave_partitions=interleave_partitions)
@derived_from(mk.KnowledgeFrame)
def align(self, other, join='outer', axis=None, fill_value=None):
meta1, meta2 = _emulate(M.align, self, other, join, axis=axis,
fill_value=fill_value)
aligned = self.mapping_partitions(M.align, other, join=join, axis=axis,
fill_value=fill_value)
token = tokenize(self, other, join, axis, fill_value)
name1 = 'align1-' + token
dsk1 = {(name1, i): (gettingitem, key, 0)
for i, key in enumerate(aligned.__dask_keys__())}
dsk1.umkate(aligned.dask)
result1 = new_dd_object(dsk1, name1, meta1, aligned.divisionisions)
name2 = 'align2-' + token
dsk2 = {(name2, i): (gettingitem, key, 1)
for i, key in enumerate(aligned.__dask_keys__())}
dsk2.umkate(aligned.dask)
result2 = new_dd_object(dsk2, name2, meta2, aligned.divisionisions)
return result1, result2
@derived_from(mk.KnowledgeFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
return self.mapping_partitions(M.combine, other, func,
fill_value=fill_value, overwrite=overwrite)
@derived_from(mk.KnowledgeFrame)
def combine_first(self, other):
return self.mapping_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like KnowledgeFrame.add to this class """
raise NotImplementedError
@derived_from(mk.KnowledgeFrame)
def resample_by_num(self, rule, closed=None, label=None):
from .tcollections.resample_by_num import Resample_by_numr
return Resample_by_numr(self, rule, closed=closed, label=label)
@derived_from(mk.KnowledgeFrame)
def first(self, offset):
# Let monkey error on bad args
self._meta_nonempty.first(offset)
if not self.known_divisionisions:
raise ValueError("`first` is not implemented for unknown divisionisions")
offset = mk.tcollections.frequencies.to_offset(offset)
date = self.divisionisions[0] + offset
end = self.loc._getting_partitions(date)
include_right = offset.isAnchored() or not hasattr(offset, '_inc')
if end == self.npartitions - 1:
divisions = self.divisionisions
else:
divisions = self.divisionisions[:end + 1] + (date,)
name = 'first-' + tokenize(self, offset)
dsk = {(name, i): (self._name, i) for i in range(end)}
dsk[(name, end)] = (methods.boundary_slice, (self._name, end),
None, date, include_right, True, 'loc')
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self, divisions)
@derived_from(mk.KnowledgeFrame)
def final_item(self, offset):
# Let monkey error on bad args
self._meta_nonempty.final_item(offset)
if not self.known_divisionisions:
raise ValueError("`final_item` is not implemented for unknown divisionisions")
offset = mk.tcollections.frequencies.to_offset(offset)
date = self.divisionisions[-1] - offset
start = self.loc._getting_partitions(date)
if start == 0:
divisions = self.divisionisions
else:
divisions = (date,) + self.divisionisions[start + 1:]
name = 'final_item-' + tokenize(self, offset)
dsk = {(name, i + 1): (self._name, j + 1)
for i, j in enumerate(range(start, self.npartitions))}
dsk[(name, 0)] = (methods.boundary_slice, (self._name, start),
date, None, True, False, 'loc')
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self, divisions)
def ndistinctive_approx(self, split_every=None):
"""Approximate number of distinctive rows.
This method uses the HyperLogLog algorithm for cardinality
estimation to compute the approximate number of distinctive rows.
The approximate error is 0.406%.
Parameters
----------
split_every : int, optional
Group partitions into groups of this size while perforgetting_ming a
tree-reduction. If set to False, no tree-reduction will be used.
Default is 8.
Returns
-------
a float representing the approximate number of elements
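Examples
--------
Illustrative only (assumes ``dkf`` is a dask KnowledgeFrame with a column
``x``):
>>> dkf.x.ndistinctive_approx().compute()  # doctest: +SKIP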
"""
from . import hyperloglog # here to avoid circular import issues
return aca([self], chunk=hyperloglog.compute_hll_array,
combine=hyperloglog.reduce_state,
aggregate=hyperloglog.estimate_count,
split_every=split_every, b=16, meta=float)
@property
def values(self):
""" Return a dask.array of the values of this knowledgeframe
Warning: This creates a dask.array without precise shape informatingion.
Operations that depend on shape informatingion, like slicing or reshaping,
will not work.
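Example (illustrative): if known chunk sizes are needed, prefer
``to_dask_array(lengthgths=True)``:
>>> arr = kf.values  # doctest: +SKIP
>>> arr = kf.to_dask_array(lengthgths=True)  # doctest: +SKIP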
"""
return self.mapping_partitions(methods.values)
def _validate_chunks(self, arr, lengthgths):
from dask.array.core import normalize_chunks
if incontainstance(lengthgths, Sequence):
lengthgths = tuple(lengthgths)
if length(lengthgths) != self.npartitions:
raise ValueError(
"The number of items in 'lengthgths' does not match "
"the number of partitions. "
"{} != {}".formating(length(lengthgths), self.npartitions)
)
if self.ndim == 1:
chunks = normalize_chunks((lengthgths,))
else:
chunks = normalize_chunks((lengthgths, (length(self.columns),)))
return chunks
elif lengthgths is not None:
raise ValueError("Unexpected value for 'lengthgths': '{}'".formating(lengthgths))
return arr._chunks
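# Illustrative behaviour of the validation above (hypothetical 2-partition,
# 3-column knowledgeframe; ``arr`` is whatever array is being validated): a
# per-partition lengthgths sequence is normalized into dask.array chunks, and
# anything else must be None.
# >>> dkf._validate_chunks(arr, [5, 7]) # doctest: +SKIP # -> ((5, 7), (3,))
# >>> dkf._validate_chunks(arr, [5]) # doctest: +SKIP # ValueError: 1 != 2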
def _is_index_level_reference(self, key):
"""
Test whether a key is an index level reference
To be considered an index level reference, `key` must match the index name
and must NOT match the name of whatever column (if a knowledgeframe).
"""
return (self.index.name is not None and
not is_dask_collection(key) and
(np.isscalar(key) or incontainstance(key, tuple)) and
key == self.index.name and
key not in gettingattr(self, 'columns', ()))
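# Example of the rule above (illustrative): with ``self.index.name == 'ts'`` and
# no column named 'ts', ``_is_index_level_reference('ts')`` is True; a key that
# names a column, is a dask collection, or is not scalar/tuple returns False.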
def _contains_index_name(self, columns_or_index):
"""
Test whether the input contains a reference to the index of the KnowledgeFrame/Collections
"""
if incontainstance(columns_or_index, list):
return whatever(self._is_index_level_reference(n) for n in columns_or_index)
else:
return self._is_index_level_reference(columns_or_index)
def _raise_if_object_collections(x, funcname):
"""
Utility function to raise an error if an object column does not support
a certain operation like `average`.
"""
if incontainstance(x, Collections) and hasattr(x, "dtype") and x.dtype == object:
raise ValueError("`%s` not supported with object collections" % funcname)
class Collections(_Frame):
""" Partotal_allel Monkey Collections
Do not use this class directly. Instead use functions like
``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_monkey``.
Parameters
----------
dsk: dict
The dask graph to compute this Collections
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Collections
meta: monkey.Collections
An empty ``monkey.Collections`` with names, dtypes, and index matching the
expected output.
divisionisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.knowledgeframe.KnowledgeFrame
"""
_partition_type = mk.Collections
_is_partition_type = staticmethod(is_collections_like)
_token_prefix = 'collections-'
_accessors = set()
def __array_wrap__(self, array, context=None):
if incontainstance(context, tuple) and length(context) > 0:
if incontainstance(context[1][0], np.ndarray) and context[1][0].shape == ():
index = None
else:
index = context[1][0].index
return mk.Collections(array, index=index, name=self.name)
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renagetting_mingd = _renagetting_ming_dask(self, name)
# umkate myself
self.dask = renagetting_mingd.dask
self._name = renagetting_mingd._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def shape(self):
"""
Return a tuple representing the dimensionality of a Collections.
The single element of the tuple is a Delayed result.
Examples
--------
>>> collections.shape # doctest: +SKIP
# (dd.Scalar<size-ag..., dtype=int64>,)
"""
return (self.size,)
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@cache_readonly
def dt(self):
""" Namespace of datetime methods """
return DatetimeAccessor(self)
@cache_readonly
def cat(self):
return CategoricalAccessor(self)
@cache_readonly
def str(self):
""" Namespace for string methods """
return StringAccessor(self)
def __dir__(self):
o = set(dir(type(self)))
o.umkate(self.__dict__)
# Remove the `cat` and `str` accessors if not available. We can't
# decide this statictotal_ally for the `dt` accessor, as it works on
# datetime-like things as well.
for accessor in ['cat', 'str']:
if not hasattr(self._meta, accessor):
o.remove(accessor)
return list(o)
@property
def nbytes(self):
""" Number of bytes """
return self.reduction(methods.nbytes, np.total_sum, token='nbytes',
meta=int, split_every=False)
def _repr_data(self):
return _repr_data_collections(self._meta, self._repr_divisionisions)
def __repr__(self):
""" have to overwrite footer """
if self.name is not None:
footer = "Name: {name}, dtype: {dtype}".formating(name=self.name,
dtype=self.dtype)
else:
footer = "dtype: {dtype}".formating(dtype=self.dtype)
return """Dask {klass} Structure:
{data}
{footer}
Dask Name: {name}, {task} tasks""".formating(klass=self.__class__.__name__,
data=self.convert_string(),
footer=footer,
name=key_split(self._name),
task=length(self.dask))
def renagetting_ming(self, index=None, inplace=False, sorted_index=False):
"""Alter Collections index labels or name
Function / dict values must be distinctive (1-to-1). Labels not contained in
a dict / Collections will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Collections.name`` with a scalar value.
Parameters
----------
index : scalar, hashable sequence, dict-like or ctotal_allable, optional
If dict-like or ctotal_allable, the transformatingion is applied to the
index. Scalar or hashable sequence-like will alter the
``Collections.name`` attribute.
inplace : boolean, default False
Whether to return a new Collections or modify this one inplace.
sorted_index : bool, default False
If true, the output ``Collections`` will have known divisionisions inferred
from the input collections and the transformatingion. Ignored for
non-ctotal_allable/dict-like ``index`` or when the input collections has
unknown divisionisions. Note that this may only be set to ``True`` if
you know that the transformed index is monotonically increasing. Dask
will check that transformed divisionisions are monotonic, but cannot
check total_all the values between divisionisions, so incorrectly setting this
can result in bugs.
Returns
-------
renagetting_mingd : Collections
See Also
--------
monkey.Collections.renagetting_ming
"""
from monkey.api.types import is_scalar, is_list_like, is_dict_like
if is_scalar(index) or (is_list_like(index) and not is_dict_like(index)):
res = self if inplace else self.clone()
res.name = index
else:
res = self.mapping_partitions(M.renagetting_ming, index)
if self.known_divisionisions:
if sorted_index and (ctotal_allable(index) or is_dict_like(index)):
old = mk.Collections(range(self.npartitions + 1),
index=self.divisionisions)
new = old.renagetting_ming(index).index
if not new.is_monotonic_increasing:
msg = ("sorted_index=True, but the transformed index "
"isn't monotonic_increasing")
raise ValueError(msg)
res.divisionisions = tuple(new.convert_list())
else:
res = res.clear_divisionisions()
if inplace:
self.dask = res.dask
self._name = res._name
self.divisionisions = res.divisionisions
self._meta = res._meta
res = self
return res
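# A minimal sketch of the ``sorted_index=True`` path (illustrative; assumes the
# ctotal_allable keeps the index monotonically increasing):
# >>> s = dd.from_monkey(mk.Collections([10, 20, 30, 40], index=[1, 2, 3, 4]), npartitions=2)
# >>> s2 = s.renagetting_ming(lambda x: x * 2, sorted_index=True)
# >>> s2.divisionisions # doctest: +SKIP # (2, 6, 8), transformed from (1, 3, 4)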
@derived_from(mk.Collections)
def value_round(self, decimals=0):
return elemwise(M.value_round, self, decimals)
@derived_from(mk.KnowledgeFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
kf = elemwise(M.to_timestamp, self, freq, how, axis)
kf.divisionisions = tuple(mk.Index(self.divisionisions).to_timestamp())
return kf
def quantile(self, q=0.5, method='default'):
""" Approximate quantiles of Collections
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use dask's internal custom
algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest
for floats and ints and ftotal_allback to the ``'dask'`` otherwise.
"""
return quantile(self, q, method=method)
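# Usage sketch (illustrative): approximate quantiles are returned lazily.
# >>> s.quantile(0.5).compute() # doctest: +SKIP # scalar median estimate
# >>> s.quantile([0.25, 0.75]).compute() # doctest: +SKIP # Collections indexed by q
# >>> s.quantile(0.9, method='tdigest') # doctest: +SKIP # tdigest backend, if available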
def _repartition_quantiles(self, npartitions, upsample_by_num=1.0):
""" Approximate quantiles of Collections used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample_by_num=upsample_by_num)
def __gettingitem__(self, key):
if incontainstance(key, Collections) and self.divisionisions == key.divisionisions:
name = 'index-%s' % tokenize(self, key)
dsk = partitionwise_graph(operator.gettingitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
return Collections(graph, name, self._meta, self.divisionisions)
raise NotImplementedError(
"Collections gettingitem in only supported for other collections objects "
"with matching partition structure"
)
@derived_from(mk.KnowledgeFrame)
def _getting_numeric_data(self, how='whatever', subset=None):
return self
@derived_from(mk.Collections)
def iteritems(self):
for i in range(self.npartitions):
s = self.getting_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.formating(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.getting(axis, axis)
@derived_from(mk.Collections)
def grouper(self, by=None, **kwargs):
from dask.knowledgeframe.grouper import CollectionsGroupBy
return CollectionsGroupBy(self, by=by, **kwargs)
@derived_from(mk.Collections)
def count(self, split_every=False):
return super(Collections, self).count(split_every=split_every)
def distinctive(self, split_every=None, split_out=1):
"""
Return Collections of distinctive values in the object. Includes NA values.
Returns
-------
distinctives : Collections
"""
return aca(self, chunk=methods.distinctive, aggregate=methods.distinctive,
meta=self._meta, token='distinctive', split_every=split_every,
collections_name=self.name, split_out=split_out)
@derived_from(mk.Collections)
def ndistinctive(self, split_every=None):
return self.sip_duplicates(split_every=split_every).count()
@derived_from(mk.Collections)
def counts_value_num(self, split_every=None, split_out=1):
return aca(self, chunk=M.counts_value_num,
aggregate=methods.counts_value_num_aggregate,
combine=methods.counts_value_num_combine,
meta=self._meta.counts_value_num(), token='value-counts',
split_every=split_every, split_out=split_out,
split_out_setup=split_out_on_index)
@derived_from(mk.Collections)
def nbiggest(self, n=5, split_every=None):
return aca(self, chunk=M.nbiggest, aggregate=M.nbiggest,
meta=self._meta, token='collections-nbiggest',
split_every=split_every, n=n)
@derived_from(mk.Collections)
def nsmtotal_allest(self, n=5, split_every=None):
return aca(self, chunk=M.nsmtotal_allest, aggregate=M.nsmtotal_allest,
meta=self._meta, token='collections-nsmtotal_allest',
split_every=split_every, n=n)
@derived_from(mk.Collections)
def incontain(self, values):
# Added just to getting the different docstring for Collections
return super(Collections, self).incontain(values)
@insert_meta_param_description(pad=12)
@derived_from(mk.Collections)
def mapping(self, arg, na_action=None, meta=no_default):
if is_collections_like(arg) and is_dask_collection(arg):
return collections_mapping(self, arg)
if not (incontainstance(arg, dict) or
ctotal_allable(arg) or
is_collections_like(arg) and not is_dask_collection(arg)):
raise TypeError("arg must be monkey.Collections, dict or ctotal_allable."
" Got {0}".formating(type(arg)))
name = 'mapping-' + tokenize(self, arg, na_action)
dsk = {(name, i): (M.mapping, k, arg, na_action) for i, k in
enumerate(self.__dask_keys__())}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
if meta is no_default:
meta = _emulate(M.mapping, self, arg, na_action=na_action, ukf=True)
else:
meta = make_meta(meta, index=gettingattr(make_meta(self), 'index', None))
return Collections(graph, name, meta, self.divisionisions)
@derived_from(mk.Collections)
def sipna(self):
return self.mapping_partitions(M.sipna)
@derived_from(mk.Collections)
def between(self, left, right, inclusive=True):
return self.mapping_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(mk.Collections)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
# np.clip may pass out
return self.mapping_partitions(M.clip, lower=lower, upper=upper)
@derived_from(mk.Collections)
def clip_lower(self, threshold):
return self.mapping_partitions(M.clip_lower, threshold=threshold)
@derived_from(mk.Collections)
def clip_upper(self, threshold):
return self.mapping_partitions(M.clip_upper, threshold=threshold)
@derived_from(mk.Collections)
def align(self, other, join='outer', axis=None, fill_value=None):
return super(Collections, self).align(other, join=join, axis=axis,
fill_value=fill_value)
@derived_from(mk.Collections)
def combine(self, other, func, fill_value=None):
return self.mapping_partitions(M.combine, other, func,
fill_value=fill_value)
@derived_from(mk.Collections)
def squeeze(self):
return self
@derived_from(mk.Collections)
def combine_first(self, other):
return self.mapping_partitions(M.combine_first, other)
def to_bag(self, index=False):
""" Create a Dask Bag from a Collections """
from .io import to_bag
return to_bag(self, index)
@derived_from(mk.Collections)
def to_frame(self, name=None):
return self.mapping_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@derived_from(mk.Collections)
def convert_string(self, getting_max_rows=5):
# monkey option_context display settings don't affect convert_string, so getting_max_rows is passed explicitly
return self._repr_data().convert_string(getting_max_rows=getting_max_rows)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like Collections.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return mapping_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = skip_doctest(op.__doc__)
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like Collections.eq to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if fill_value is None:
return elemwise(comparison, self, other, axis=axis)
else:
op = partial(comparison, fill_value=fill_value)
return elemwise(op, self, other, axis=axis)
meth.__doc__ = skip_doctest(comparison.__doc__)
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def employ(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):
""" Partotal_allel version of monkey.Collections.employ
Parameters
----------
func : function
Function to employ
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
applied : Collections or KnowledgeFrame if func returns a Collections.
Examples
--------
>>> import dask.knowledgeframe as dd
>>> s = mk.Collections(range(5), name='x')
>>> ds = dd.from_monkey(s, npartitions=2)
Apply a function elementwise across the Collections, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.employ(myadd, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in mwhatever cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manutotal_ally specify the output metadata with the ``meta`` keyword. This
can be specified in mwhatever forms, for more informatingion see
``dask.knowledgeframe.utils.make_meta``.
Here we specify the output is a Collections with name ``'x'``, and dtype
``float64``:
>>> res = ds.employ(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.employ(lambda x: x + 1, meta=ds)
See Also
--------
dask.Collections.mapping_partitions
"""
if meta is no_default:
meta = _emulate(M.employ, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, ukf=True, **kwds)
warnings.warn(meta_warning(meta))
return mapping_partitions(M.employ, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(mk.Collections)
def cov(self, other, getting_min_periods=None, split_every=False):
from .multi import concating
if not incontainstance(other, Collections):
raise TypeError("other must be a dask.knowledgeframe.Collections")
kf = concating([self, other], axis=1)
return cov_corr(kf, getting_min_periods, scalar=True, split_every=split_every)
@derived_from(mk.Collections)
def corr(self, other, method='pearson', getting_min_periods=None,
split_every=False):
from .multi import concating
if not incontainstance(other, Collections):
raise TypeError("other must be a dask.knowledgeframe.Collections")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
kf = concating([self, other], axis=1)
return cov_corr(kf, getting_min_periods, corr=True, scalar=True,
split_every=split_every)
@derived_from(mk.Collections)
def autocorr(self, lag=1, split_every=False):
if not incontainstance(lag, Integral):
raise TypeError("lag must be an integer")
return self.corr(self if lag == 0 else self.shifting(lag),
split_every=split_every)
@derived_from(mk.Collections)
def memory_usage(self, index=True, deep=False):
result = self.mapping_partitions(M.memory_usage, index=index, deep=deep)
return delayed(total_sum)(result.to_delayed())
def __divisionmod__(self, other):
res1 = self // other
res2 = self % other
return res1, res2
def __rdivisionmod__(self, other):
res1 = other // self
res2 = other % self
return res1, res2
class Index(Collections):
_partition_type = mk.Index
_is_partition_type = staticmethod(is_index_like)
_token_prefix = 'index-'
_accessors = set()
_dt_attributes = {'nanosecond', 'microsecond', 'millisecond', 'dayofyear',
'getting_minute', 'hour', 'day', 'dayofweek', 'second', 'week',
'weekday', 'weekofyear', 'month', 'quarter', 'year'}
_cat_attributes = {'known', 'as_known', 'as_unknown', 'add_categories',
'categories', 'remove_categories', 'reorder_categories',
'as_ordered', 'codes', 'remove_unused_categories',
'set_categories', 'as_unordered', 'ordered',
'renagetting_ming_categories'}
def __gettingattr__(self, key):
if is_categorical_dtype(self.dtype) and key in self._cat_attributes:
return gettingattr(self.cat, key)
elif key in self._dt_attributes:
return gettingattr(self.dt, key)
raise AttributeError("'Index' object has no attribute %r" % key)
def __dir__(self):
out = super(Index, self).__dir__()
out.extend(self._dt_attributes)
if is_categorical_dtype(self.dtype):
out.extend(self._cat_attributes)
return out
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.formating(self.__class__.__name__))
def __array_wrap__(self, array, context=None):
return mk.Index(array, name=self.name)
def header_num(self, n=5, compute=True):
""" First n items of the Index.
Caveat, this only checks the first partition.
"""
name = 'header_num-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.gettingitem, (self._name, 0), slice(0, n))}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta, self.divisionisions[:2])
if compute:
result = result.compute()
return result
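# Caveat illustrated (hypothetical): because only partition 0 is consulted,
# fewer than ``n`` values may come back when that partition is short.
# >>> idx.header_num(3) # doctest: +SKIP # computes eagerly by default
# >>> idx.header_num(3, compute=False) # doctest: +SKIP # lazy Index over partition 0 only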
@derived_from(mk.Index)
def getting_max(self, split_every=False):
return self.reduction(M.getting_max, meta=self._meta_nonempty.getting_max(),
token=self._token_prefix + 'getting_max',
split_every=split_every)
@derived_from(mk.Index)
def getting_min(self, split_every=False):
return self.reduction(M.getting_min, meta=self._meta_nonempty.getting_min(),
token=self._token_prefix + 'getting_min',
split_every=split_every)
def count(self, split_every=False):
return self.reduction(methods.index_count, np.total_sum,
token='index-count', meta=int,
split_every=split_every)
@derived_from(mk.Index)
def shifting(self, periods=1, freq=None):
if incontainstance(self._meta, mk.PeriodIndex):
if freq is not None:
raise ValueError("PeriodIndex doesn't accept `freq` argument")
meta = self._meta_nonempty.shifting(periods)
out = self.mapping_partitions(M.shifting, periods, meta=meta,
token='shifting',
transform_divisionisions=False)
else:
# Monkey will raise for other index types that don't implement shifting
meta = self._meta_nonempty.shifting(periods, freq=freq)
out = self.mapping_partitions(M.shifting, periods, token='shifting',
meta=meta, freq=freq,
transform_divisionisions=False)
if freq is None:
freq = meta.freq
return maybe_shifting_divisionisions(out, periods, freq=freq)
@derived_from(mk.Index)
def to_collections(self):
return self.mapping_partitions(M.to_collections,
meta=self._meta.to_collections())
@derived_from(mk.Index, ua_args=['index'])
def to_frame(self, index=True, name=None):
if not index:
raise NotImplementedError()
if PANDAS_VERSION >= '0.24.0':
return self.mapping_partitions(M.to_frame, index, name,
meta=self._meta.to_frame(index, name))
else:
if name is not None:
raise ValueError("The 'name' keyword was added in monkey 0.24.0. "
"Your version of monkey is '{}'.".formating(PANDAS_VERSION))
else:
return self.mapping_partitions(M.to_frame,
meta=self._meta.to_frame())
class KnowledgeFrame(_Frame):
"""
Partotal_allel Monkey KnowledgeFrame
Do not use this class directly. Instead use functions like
``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_monkey``.
Parameters
----------
dsk: dict
The dask graph to compute this KnowledgeFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular KnowledgeFrame
meta: monkey.KnowledgeFrame
An empty ``monkey.KnowledgeFrame`` with names, dtypes, and index matching
the expected output.
divisionisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = mk.KnowledgeFrame
_is_partition_type = staticmethod(is_knowledgeframe_like)
_token_prefix = 'knowledgeframe-'
_accessors = set()
def __array_wrap__(self, array, context=None):
if incontainstance(context, tuple) and length(context) > 0:
if incontainstance(context[1][0], np.ndarray) and context[1][0].shape == ():
index = None
else:
index = context[1][0].index
return mk.KnowledgeFrame(array, index=index, columns=self.columns)
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
renagetting_mingd = _renagetting_ming_dask(self, columns)
self._meta = renagetting_mingd._meta
self._name = renagetting_mingd._name
self.dask = renagetting_mingd.dask
@property
def iloc(self):
"""Purely integer-location based indexing for selection by position.
Only indexing the column positions is supported. Trying to select
row positions will raise a ValueError.
See :ref:`knowledgeframe.indexing` for more.
Examples
--------
>>> kf.iloc[:, [2, 0, 1]] # doctest: +SKIP
"""
from .indexing import _iLocIndexer
return _iLocIndexer(self)
def __gettingitem__(self, key):
name = 'gettingitem-%s' % tokenize(self, key)
if np.isscalar(key) or incontainstance(key, (tuple, string_types)):
if incontainstance(self._meta.index, (mk.DatetimeIndex, mk.PeriodIndex)):
if key not in self._meta.columns:
return self.loc[key]
# error is raised from monkey
meta = self._meta[_extract_meta(key)]
dsk = partitionwise_graph(operator.gettingitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, meta, self.divisionisions)
elif incontainstance(key, slice):
from monkey.api.types import is_float_dtype
is_integer_slice = whatever(incontainstance(i, Integral)
for i in (key.start, key.step, key.stop))
# Slicing with integer labels is always iloc based except for a
# float indexer for some reason
if is_integer_slice and not is_float_dtype(self.index.dtype):
return self.iloc[key]
else:
return self.loc[key]
if (incontainstance(key, (np.ndarray, list)) or (
not is_dask_collection(key) and (is_collections_like(key) or is_index_like(key)))):
# error is raised from monkey
meta = self._meta[_extract_meta(key)]
dsk = partitionwise_graph(operator.gettingitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, meta, self.divisionisions)
if incontainstance(key, Collections):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisionisions != key.divisionisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = partitionwise_graph(operator.gettingitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
return new_dd_object(graph, name, self, self.divisionisions)
raise NotImplementedError(key)
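# Summary of the dispatch above (descriptive note): scalar/tuple/string keys
# select columns (falling back to .loc for datetime-like indexes when the key
# is not a column), slices are routed to .iloc/.loc, list/array keys select
# several columns, and a dask Collections key filters rows after aligning partitions.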
def __setitem__(self, key, value):
if incontainstance(key, (tuple, list)) and incontainstance(value, KnowledgeFrame):
kf = self.total_allocate(**{k: value[c]
for k, c in zip(key, value.columns)})
elif incontainstance(key, mk.Index) and not incontainstance(value, KnowledgeFrame):
key = list(key)
kf = self.total_allocate(**{k: value for k in key})
else:
kf = self.total_allocate(**{key: value})
self.dask = kf.dask
self._name = kf._name
self._meta = kf._meta
self.divisionisions = kf.divisionisions
def __delitem__(self, key):
result = self.sip([key], axis=1)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
def __setattr__(self, key, value):
try:
columns = object.__gettingattribute__(self, '_meta').columns
except AttributeError:
columns = ()
if key in columns:
self[key] = value
else:
object.__setattr__(self, key, value)
def __gettingattr__(self, key):
if key in self.columns:
return self[key]
else:
raise AttributeError("'KnowledgeFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.umkate(self.__dict__)
o.umkate(c for c in self.columns if
(incontainstance(c, string_types) and
isidentifier(c)))
return list(o)
def _ipython_key_completions_(self):
return self.columns.convert_list()
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the KnowledgeFrame.
The number of rows is a Delayed result. The number of columns
is a concrete integer.
Examples
--------
>>> kf.shape # doctest: +SKIP
(Delayed('int-07f06075-5ecc-4d77-817e-63c69a9188a8'), 2)
"""
col_size = length(self.columns)
row_size = delayed(int)(self.size / col_size)
return (row_size, col_size)
@property
def dtypes(self):
""" Return data types """
return self._meta.dtypes
@derived_from(mk.KnowledgeFrame)
def getting_dtype_counts(self):
return self._meta.getting_dtype_counts()
@derived_from(mk.KnowledgeFrame)
def getting_ftype_counts(self):
return self._meta.getting_ftype_counts()
@derived_from(mk.KnowledgeFrame)
def choose_dtypes(self, include=None, exclude=None):
cs = self._meta.choose_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
def set_index(self, other, sip=True, sorted=False, npartitions=None,
divisionisions=None, inplace=False, **kwargs):
"""Set the KnowledgeFrame index (row labels) using an existing column
This realigns the dataset to be sorted by a new column. This can have a
significant impact on performance, because joins, groupers, lookups, etc.
are total_all much faster on that column. However, this performance increase
comes with a cost, sorting a partotal_allel dataset requires expensive shuffles.
Often we ``set_index`` once directly after data ingest and filtering and
then perform mwhatever cheap computations off of the sorted dataset.
This function operates exactly like ``monkey.set_index`` except with
different performance costs (it is much more expensive). Under normal
operation this function does an initial pass over the index column to
compute approximate quantiles to serve as future divisionisions. It then passes
over the data a second time, splitting up each input partition into several
pieces and sharing those pieces to total_all of the output partitions now in
sorted order.
In some cases we can total_alleviate those costs, for example if your dataset is
sorted already then we can avoid making mwhatever smtotal_all pieces or if you know
good values to split the new index column then we can avoid the initial
pass over the data. For example if your new index is a datetime index and
your data is already sorted by day then this entire operation can be done
for free. You can control these options with the following parameters.
Parameters
----------
kf: Dask KnowledgeFrame
index: string or Dask Collections
npartitions: int, None, or 'auto'
The ideal number of output partitions. If None use the same as
the input. If 'auto' then decide by memory use.
shuffle: string, optional
Either ``'disk'`` for single-node operation or ``'tasks'`` for
distributed operation. Will be inferred by your current scheduler.
sorted: bool, optional
If the index column is already sorted in increasing order.
Defaults to False
divisionisions: list, optional
Known values on which to separate index values of the partitions.
See https://docs.dask.org/en/latest/knowledgeframe-design.html#partitions
Defaults to computing this with a single pass over the data. Note
that if ``sorted=True``, specified divisionisions are astotal_sumed to match
the existing partitions in the data. If this is untrue, you should
leave divisionisions empty and ctotal_all ``repartition`` after ``set_index``.
inplace : bool, optional
Modifying the KnowledgeFrame in place is not supported by Dask.
Defaults to False.
compute: bool
Whether or not to trigger an immediate computation. Defaults to False.
Examples
--------
>>> kf2 = kf.set_index('x') # doctest: +SKIP
>>> kf2 = kf.set_index(d.x) # doctest: +SKIP
>>> kf2 = kf.set_index(d.timestamp, sorted=True) # doctest: +SKIP
A common case is when we have a datetime column that we know to be
sorted and is cleanly divisionided by day. We can set this index for free
by specifying both that the column is pre-sorted and the particular
divisionisions along which it is separated
>>> import monkey as mk
>>> divisionisions = mk.date_range('2000', '2010', freq='1D')
>>> kf2 = kf.set_index('timestamp', sorted=True, divisionisions=divisionisions) # doctest: +SKIP
"""
if inplace:
raise NotImplementedError("The inplace= keyword is not supported")
pre_sorted = sorted
del sorted
if divisionisions is not None:
check_divisionisions(divisionisions)
if pre_sorted:
from .shuffle import set_sorted_index
return set_sorted_index(self, other, sip=sip, divisionisions=divisionisions,
**kwargs)
else:
from .shuffle import set_index
return set_index(self, other, sip=sip, npartitions=npartitions,
divisionisions=divisionisions, **kwargs)
@derived_from(mk.KnowledgeFrame)
def nbiggest(self, n=5, columns=None, split_every=None):
token = 'knowledgeframe-nbiggest'
return aca(self, chunk=M.nbiggest, aggregate=M.nbiggest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(mk.KnowledgeFrame)
def nsmtotal_allest(self, n=5, columns=None, split_every=None):
token = 'knowledgeframe-nsmtotal_allest'
return aca(self, chunk=M.nsmtotal_allest, aggregate=M.nsmtotal_allest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(mk.KnowledgeFrame)
def grouper(self, by=None, **kwargs):
from dask.knowledgeframe.grouper import KnowledgeFrameGroupBy
return KnowledgeFrameGroupBy(self, by=by, **kwargs)
@wraps(categorize)
def categorize(self, columns=None, index=None, split_every=None, **kwargs):
return categorize(self, columns=columns, index=index,
split_every=split_every, **kwargs)
@derived_from(mk.KnowledgeFrame)
def total_allocate(self, **kwargs):
for k, v in kwargs.items():
if not (incontainstance(v, Scalar) or is_collections_like(v) or
ctotal_allable(v) or mk.api.types.is_scalar(v) or
is_index_like(v)):
raise TypeError("Column total_allocatement doesn't support type "
"{0}".formating(typename(type(v))))
if ctotal_allable(v):
kwargs[k] = v(self)
pairs = list(total_sum(kwargs.items(), ()))
# Figure out columns of the output
kf2 = self._meta_nonempty.total_allocate(**_extract_meta(kwargs, nonempty=True))
return elemwise(methods.total_allocate, self, *pairs, meta=kf2)
@derived_from(mk.KnowledgeFrame, ua_args=['index'])
def renagetting_ming(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot renagetting_ming index.")
# *args here is index, columns but columns arg is already used
return self.mapping_partitions(M.renagetting_ming, None, columns=columns)
def query(self, expr, **kwargs):
""" Filter knowledgeframe with complex expression
Blocked version of mk.KnowledgeFrame.query
This is like the sequential version except that this will also happen
in mwhatever threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread
import numexpr
numexpr.set_nthreads(1)
See also
--------
monkey.KnowledgeFrame.query
"""
return self.mapping_partitions(M.query, expr, **kwargs)
@derived_from(mk.KnowledgeFrame)
def eval(self, expr, inplace=None, **kwargs):
if inplace is None:
inplace = False
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.mapping_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(mk.KnowledgeFrame)
def sipna(self, how='whatever', subset=None, thresh=None):
return self.mapping_partitions(M.sipna, how=how, subset=subset, thresh=thresh)
@derived_from(mk.KnowledgeFrame)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
return self.mapping_partitions(M.clip, lower=lower, upper=upper)
@derived_from(mk.KnowledgeFrame)
def clip_lower(self, threshold):
return self.mapping_partitions(M.clip_lower, threshold=threshold)
@derived_from(mk.KnowledgeFrame)
def clip_upper(self, threshold):
return self.mapping_partitions(M.clip_upper, threshold=threshold)
@derived_from(mk.KnowledgeFrame)
def squeeze(self, axis=None):
if axis in [None, 1]:
if length(self.columns) == 1:
return self[self.columns[0]]
else:
return self
elif axis == 0:
raise NotImplementedError("{0} does not support "
"squeeze along axis 0".formating(type(self)))
elif axis not in [0, 1, None]:
raise ValueError('No axis {0} for object type {1}'.formating(
axis, type(self)))
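# Illustrative behaviour of squeeze() (hypothetical column name 'x'):
# >>> dkf[['x']].squeeze() # doctest: +SKIP # single column -> dask Collections 'x'
# >>> dkf.squeeze() # doctest: +SKIP # several columns -> unchanged knowledgeframe
# >>> dkf.squeeze(axis=0) # doctest: +SKIP # raises NotImplementedError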
@derived_from(mk.KnowledgeFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
kf = elemwise(M.to_timestamp, self, freq, how, axis)
kf.divisionisions = tuple(mk.Index(self.divisionisions).to_timestamp())
return kf
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
"""
from .io import to_bag
return to_bag(self, index)
def to_parquet(self, path, *args, **kwargs):
""" See dd.to_parquet docstring for more informatingion """
from .io import to_parquet
return to_parquet(self, path, *args, **kwargs)
@derived_from(mk.KnowledgeFrame)
def convert_string(self, getting_max_rows=5):
# monkey option_context display settings don't affect convert_string, so getting_max_rows is passed explicitly
return self._repr_data().convert_string(getting_max_rows=getting_max_rows,
show_dimensions=False)
def _getting_numeric_data(self, how='whatever', subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._getting_numeric_data()
if length(numerics.columns) < length(self.columns):
name = self._token_prefix + '-getting_numeric_data'
return self.mapping_partitions(M._getting_numeric_data,
meta=numerics, token=name)
else:
# use myself if total_all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.formating(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.getting(axis, axis)
@derived_from(mk.KnowledgeFrame)
def sip(self, labels, axis=0, errors='raise'):
axis = self._validate_axis(axis)
if axis == 1:
return self.mapping_partitions(M.sip, labels, axis=axis, errors=errors)
raise NotImplementedError("Drop currently only works for axis=1")
def unioner(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
indicator=False, npartitions=None, shuffle=None):
"""Merge the KnowledgeFrame with another KnowledgeFrame
This will unioner the two datasets, either on the indices, a certain column
in each dataset or the index in one dataset and the column in another.
Parameters
----------
right: dask.knowledgeframe.KnowledgeFrame
how : {'left', 'right', 'outer', 'inner'}, default: 'inner'
How to handle the operation of the two objects:
- left: use ctotal_alling frame's index (or column if on is specified)
- right: use other frame's index
- outer: form union of ctotal_alling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographictotal_ally
- inner: form interst of ctotal_alling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the ctotal_alling's one
on : label or list
Column or index level names to join on. These must be found in both
KnowledgeFrames. If on is None and not merging on indexes then this
defaults to the interst of the columns in both KnowledgeFrames.
left_on : label or list, or array-like
Column to join on in the left KnowledgeFrame. Unlike in monkey,
arrays and lists are only supported if their lengthgth is 1.
right_on : label or list, or array-like
Column to join on in the right KnowledgeFrame. Unlike in monkey,
arrays and lists are only supported if their lengthgth is 1.
left_index : boolean, default False
Use the index from the left KnowledgeFrame as the join key.
right_index : boolean, default False
Use the index from the right KnowledgeFrame as the join key.
suffixes : 2-lengthgth sequence (tuple, list, ...)
Suffix to employ to overlapping column names in the left and
right side, respectively
indicator : boolean or string, default False
If True, adds a column to output KnowledgeFrame ctotal_alled "_unioner" with
informatingion on the source of each row. If string, column with
informatingion on source of each row will be added to output KnowledgeFrame,
and column will be named value of string. Informatingion column is
Categorical-type and takes on a value of "left_only" for observations
whose unioner key only appears in `left` KnowledgeFrame, "right_only" for
observations whose unioner key only appears in `right` KnowledgeFrame,
and "both" if the observation’s unioner key is found in both.
npartitions: int, None, or 'auto'
The ideal number of output partitions. This is only utilised when
perforgetting_ming a hash_join (merging on columns only). If `None`
npartitions = getting_max(lhs.npartitions, rhs.npartitions)
shuffle: {'disk', 'tasks'}, optional
Either ``'disk'`` for single-node operation or ``'tasks'`` for
distributed operation. Will be inferred by your current scheduler.
Notes
-----
There are three ways to join knowledgeframes:
1. Joining on indices. In this case the divisionisions are
aligned using the function ``dask.knowledgeframe.multi.align_partitions``.
Afterwards, each partition is unionerd with the monkey unioner function.
2. Joining one on index and one on column. In this case the divisionisions of
knowledgeframe unionerd by index (:math:`d_i`) are used to divisionide the column
unionerd knowledgeframe (:math:`d_c`) using
``dask.knowledgeframe.multi.rearrange_by_divisionisions``. In this case the
unionerd knowledgeframe (:math:`d_m`) has the exact same divisionisions
as (:math:`d_i`). This can lead to issues if you unioner multiple rows from
(:math:`d_c`) to one row in (:math:`d_i`).
3. Joining both on columns. In this case a hash join is performed using
``dask.knowledgeframe.multi.hash_join``.
"""
if not is_knowledgeframe_like(right):
raise ValueError('right must be KnowledgeFrame')
from .multi import unioner
return unioner(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes,
npartitions=npartitions, indicator=indicator,
shuffle=shuffle)
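# Minimal sketches of the three join strategies listed in the Notes above
# (illustrative; 'key' is a hypothetical column name):
# >>> a.unioner(b, left_index=True, right_index=True) # doctest: +SKIP # 1. aligned on index
# >>> a.unioner(b, left_index=True, right_on='key') # doctest: +SKIP # 2. index with column
# >>> a.unioner(b, on='key', npartitions=8) # doctest: +SKIP # 3. hash join on columns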
@derived_from(mk.KnowledgeFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None, shuffle=None):
if not is_knowledgeframe_like(other):
raise ValueError('other must be KnowledgeFrame')
from .multi import unioner
return unioner(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions, shuffle=shuffle)
@derived_from(mk.KnowledgeFrame)
def adding(self, other, interleave_partitions=False):
if incontainstance(other, Collections):
msg = ('Unable to adding dd.Collections to dd.KnowledgeFrame. '
'Use mk.Collections to adding as a row.')
raise ValueError(msg)
elif is_collections_like(other):
other = other.to_frame().T
return super(KnowledgeFrame, self).adding(
other, interleave_partitions=interleave_partitions)
@derived_from(mk.KnowledgeFrame)
def traversal(self):
for i in range(self.npartitions):
kf = self.getting_partition(i).compute()
for row in kf.traversal():
yield row
@derived_from(mk.KnowledgeFrame)
def itertuples(self, index=True, name='Monkey'):
for i in range(self.npartitions):
kf = self.getting_partition(i).compute()
for row in kf.itertuples(index=index, name=name):
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like KnowledgeFrame.add to this class """
# name must be explicitly passed for division method whose name is truedivision
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis in (1, 'columns'):
# When axis=1 and other is a collections, `other` is transposed
# and the operator is applied broadcast across rows. This
# isn't supported with dd.Collections.
if incontainstance(other, Collections):
msg = 'Unable to {0} dd.Collections with axis=1'.formating(name)
raise ValueError(msg)
elif is_collections_like(other):
# Special case for mk.Collections to avoid unwanted partitioning
# of other. We pass it in as a kwarg to prevent this.
meta = _emulate(op, self, other=other, axis=axis,
fill_value=fill_value)
return mapping_partitions(op, self, other=other, meta=meta,
axis=axis, fill_value=fill_value)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return mapping_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = skip_doctest(op.__doc__)
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like KnowledgeFrame.eq to this class """
def meth(self, other, axis='columns', level=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = skip_doctest(comparison.__doc__)
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def employ(self, func, axis=0, broadcast=None, raw=False, reduce=None,
args=(), meta=no_default, **kwds):
""" Partotal_allel version of monkey.KnowledgeFrame.employ
This mimics the monkey version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to employ to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': employ function to each column (NOT SUPPORTED)
- 1 or 'columns': employ function to each row
$META
args : tuple
Positional arguments to pass to function in addition to the array/collections
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Collections or KnowledgeFrame
Examples
--------
>>> import dask.knowledgeframe as dd
>>> kf = mk.KnowledgeFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> dkf = dd.from_monkey(kf, npartitions=2)
Apply a function row-wise, passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.total_sum() + a + b
>>> res = dkf.employ(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in mwhatever cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manutotal_ally specify the output metadata with the ``meta`` keyword. This
can be specified in mwhatever forms, for more informatingion see
``dask.knowledgeframe.utils.make_meta``.
Here we specify the output is a Collections with name ``'x'``, and dtype
``float64``:
>>> res = dkf.employ(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = dkf.employ(lambda row: row + 1, axis=1, meta=dkf)
See Also
--------
dask.KnowledgeFrame.mapping_partitions
"""
axis = self._validate_axis(axis)
monkey_kwargs = {
'axis': axis,
'broadcast': broadcast,
'raw': raw,
'reduce': None,
}
if PANDAS_VERSION >= '0.23.0':
kwds.setdefault('result_type', None)
kwds.umkate(monkey_kwargs)
if axis == 0:
msg = ("dd.KnowledgeFrame.employ only supports axis=1\n"
" Try: kf.employ(func, axis=1)")
raise NotImplementedError(msg)
if meta is no_default:
meta = _emulate(M.employ, self._meta_nonempty, func,
args=args, ukf=True, **kwds)
warnings.warn(meta_warning(meta))
return mapping_partitions(M.employ, self, func, args=args, meta=meta, **kwds)
@derived_from(mk.KnowledgeFrame)
def employmapping(self, func, meta='__no_default__'):
return elemwise(M.employmapping, self, func, meta=meta)
@derived_from(mk.KnowledgeFrame)
def value_round(self, decimals=0):
return elemwise(M.value_round, self, decimals)
@derived_from(mk.KnowledgeFrame)
def cov(self, getting_min_periods=None, split_every=False):
return cov_corr(self, getting_min_periods, split_every=split_every)
@derived_from(mk.KnowledgeFrame)
def corr(self, method='pearson', getting_min_periods=None, split_every=False):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, getting_min_periods, True, split_every=split_every)
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise total_summary of a Dask KnowledgeFrame.
"""
if buf is None:
import sys
buf = sys.standardout
lines = [str(type(self))]
if length(self.columns) == 0:
lines.adding('Index: 0 entries')
lines.adding('Empty %s' % type(self).__name__)
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.umkate({'index': self.index, 'count': self.count()})
if memory_usage:
computations.umkate({'memory_usage': self.mapping_partitions(M.memory_usage, index=True)})
computations = dict(zip(computations.keys(), da.compute(*computations.values())))
if verbose:
index = computations['index']
counts = computations['count']
lines.adding(index_total_summary(index))
lines.adding('Data columns (total {} columns):'.formating(length(self.columns)))
from monkey.io.formatings.printing import pprint_thing
space = getting_max([length(pprint_thing(k)) for k in self.columns]) + 3
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 11:46:57 2020
@author: reideej1
:DESCRIPTION: Evaluate coaching data for the final_item 50 years of college footbtotal_all
- the goal is to detergetting_mine how coaches who struggle in their first 3 years
fare over time at the same program
:REQUIRES: scrape_sports_reference.py located in: cfbAnalysis\src\data
:TODO:
"""
#==============================================================================
# Package Import
#==============================================================================
import datetime
import glob
import os
import numpy as np
import monkey as mk
import pathlib
import time
import tqdm
from src.data.scrape_sports_reference import *
#==============================================================================
# Reference Variable Declaration
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def renagetting_mingSchool(kf, name_var):
'''
Purpose: Rename a school/university to a standard name as specified in
the file `names_pictures_ncaa.csv`
Inputs
------
kf : Monkey Dataframe
KnowledgeFrame containing a school-name variable for which the names
need to be standardized
name_var : string
Name of the variable which is to be renagetting_mingd/standardized
Outputs
-------
list(row)[0] : string
Standardized version of the school's name based on the first value
in the row in the file `names_pictures_ncaa.csv`
'''
# read in school name informatingion
kf_school_names = mk.read_csv(r'references\names_pictures_ncaa.csv')
# convert the knowledgeframe to a dictionary such that the keys are the
# optional spelling of each school and the value is the standardized
# name of the school
dict_school_names = {}
for index, row in kf_school_names.traversal():
# isolate the alternative name columns
names = row[[x for x in row.index if 'Name' in x]]
# convert the row to a list that doesn't include NaN values
list_names = [x for x in names.values.convert_list() if str(x) != 'nan']
# add the nickname to the team names as an alternative name
nickname = row['Nickname']
list_names_nicknames = list_names.clone()
for name in list_names:
list_names_nicknames.adding(name + ' ' + nickname)
# extract the standardized team name
name_standardized = row['Team']
# add the standardized name
list_names_nicknames.adding(name_standardized)
# add the nickname to the standardized name
list_names_nicknames.adding(name_standardized + ' ' + nickname)
# for every alternative spelling of the team, set the value to be
# the standardized name
for name_alternate in list_names_nicknames:
dict_school_names[name_alternate] = name_standardized
# kf[name_var] = kf[name_var].employ(
# lambda x: dict_school_names[x] if str(x) != 'nan' else '')
kf[name_var] = kf[name_var].employ(
lambda x: renagetting_ming_school_helper(x, dict_school_names))
return kf
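# Usage sketch (illustrative; variable name is hypothetical): standardize the
# school names in a games table before merging with the season-level data.
# kf_games = renagetting_mingSchool(kf_games, 'School')
# kf_games = renagetting_mingSchool(kf_games, 'Opponent')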
def renagetting_ming_school_helper(name_school, dict_school_names):
try:
if str(name_school) != 'nan':
return dict_school_names[name_school]
else:
return ''
except:
print(f'School not found in school abbreviations .csv file: {name_school} ')
return name_school
def create_coach_knowledgeframe(kf_schools):
'''
Purpose: Given historic school data, create a knowledgeframe of coaches and
their performance data on a year-by-year basis
Inputs
------
kf_schools : Monkey KnowledgeFrame
Contains year-by-year results for each school (with coaches' names)
Outputs
-------
kf_coaches : Monkey KnowledgeFrame
A knowledgeframe containing total_all historic season data from a coaching perspective
'''
# Create a dictionary that total_allocates each school to its current conference
kf_conf = kf_schools.grouper(['School', 'Conf']).header_num(1).grouper('School').header_num(1).reseting_index(sip = True)
kf_conf = kf_conf[['School', 'Conf']]
kf_conf['Power5'] = kf_conf.employ(lambda row: True if row['Conf'] in [
'SEC', 'Pac-12', 'Big 12', 'ACC', 'Big Ten'] else False, axis = 1)
kf_conf = kf_conf.set_index('School')
dict_conf = kf_conf.convert_dict(orient = 'index')
# Create a coaching knowledgeframe by iterating over every year for every school
list_coaches = []
for index, row in kf_schools.traversal():
# handle every coach that coached that season
for coach in row['Coach(es)'].split(', '):
dict_coach_year = {}
dict_coach_year['coach'] = coach.split(' (')[0].strip()
dict_coach_year['year'] = row['Year']
dict_coach_year['school'] = row['School']
dict_coach_year['ranking_pre'] = row['AP_Pre']
dict_coach_year['ranking_high'] = row['AP_High']
dict_coach_year['ranking_post'] = row['AP_Post']
dict_coach_year['ranked_pre'] = not mk.ifna(row['AP_Pre'])
dict_coach_year['ranked_post'] = not mk.ifna(row['AP_Post'])
try:
dict_coach_year['ranked_top_10'] = row['AP_Post'] <= 10
dict_coach_year['ranked_top_5'] = row['AP_Post'] <= 5
except:
print(row['AP_Post'])
# handle bowl games
if mk.ifna(row['Bowl']):
dict_coach_year['bowl'] = False
dict_coach_year['bowl_name'] = ''
dict_coach_year['bowl_win'] = False
else:
dict_coach_year['bowl'] = True
dict_coach_year['bowl_name'] = row['Bowl'].split('-')[0]
dict_coach_year['bowl_win'] = False
if '-' in str(row['Bowl']):
try:
if row['Bowl'].split('-')[1] == 'W':
dict_coach_year['bowl_win'] = True
except:
print(row['Bowl'])
# handle wins and losses
if length(coach.split('(')[1].split('-')) > 2:
dict_coach_year['W'] = coach.split('(')[1].split('-')[0]
dict_coach_year['L'] = coach.split('(')[1].split('-')[1].strip(')')
dict_coach_year['T'] = coach.split('(')[1].split('-')[2].strip(')')
else:
dict_coach_year['W'] = coach.split('(')[1].split('-')[0]
dict_coach_year['L'] = coach.split('(')[1].split('-')[1].strip(')')
# total_allocate conference informatingion
dict_coach_year['conf'] = dict_conf[row['School']]['Conf']
dict_coach_year['power5'] = dict_conf[row['School']]['Power5']
list_coaches.adding(dict_coach_year)
# Convert list to KnowledgeFrame
kf_coaches = mk.KnowledgeFrame(list_coaches)
# Convert total_all Tie Nans to 0
kf_coaches['T'] = kf_coaches['T'].fillnone(0)
# Identify total_all distinctive coaches in the knowledgeframe
list_coaches = list(kf_coaches['coach'].distinctive())
# Cast Win and Loss columns to ints
kf_coaches['W'] = kf_coaches['W'].totype('int')
kf_coaches['L'] = kf_coaches['L'].totype('int')
kf_coaches['T'] = kf_coaches['T'].totype('int')
# Add a column for games coached in the season
kf_coaches['GP'] = kf_coaches.employ(lambda row: row['W'] + row['L'] + row['T'], axis = 1)
return kf_coaches
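# Worked example of the parsing above (hypothetical cell value): a 'Coach(es)'
# entry such as "Coach A (9-2), Coach B (1-0)" produces two records for that
# school-year, with W/L (and T when present) split out of the parenthesized
# record and GP computed as W + L + T.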
def add_coach_metadata(kf_stint):
'''
Purpose: Iterate over a coach's historic data and tabulate totals on a
year-by-year basis
Inputs
------
kf_stint : Monkey KnowledgeFrame
Contains year-by-year results for a coach
** Note: This is continuous years only. Breaks in coaching stints
are treated as separate coaching histories **
Outputs
-------
kf_coach : Monkey KnowledgeFrame
Coaching data with umkated year-by-year totals
'''
kf_coach = kf_stint.clone()
# 1. Year # at school
kf_coach['season'] = list(range(1,length(kf_coach)+1))
# 2. Cumulative games coached at school (on a year-by-year basis)
kf_coach['cum_GP'] = kf_coach['GP'].cumtotal_sum(axis = 0)
# 3. Cumulative wins at school (on a year-by-year basis)
kf_coach['cum_W'] = kf_coach['W'].cumtotal_sum(axis = 0)
# 4. Cumulative losses at school (on a year-by-year basis)
kf_coach['cum_L'] = kf_coach['L'].cumtotal_sum(axis = 0)
# 5. Cumulative ties at school (on a year-by-year basis)
kf_coach['cum_T'] = kf_coach['T'].cumtotal_sum(axis = 0)
# 6. Cumulative Win Pct at school (on a year-by-year basis)
if length(kf_coach) == 1:
if int(kf_coach['cum_GP']) == 0:
kf_coach['cum_win_pct'] = 0
else:
kf_coach['cum_win_pct'] = kf_coach.employ(lambda row: row['cum_W'] / row['cum_GP'] if row['cum_GP'] != 0 else 0, axis = 1)
else:
kf_coach['cum_win_pct'] = kf_coach.employ(lambda row: row['cum_W'] / row['cum_GP'] if row['cum_GP'] != 0 else 0, axis = 1)
# 7. Total bowl games at school
kf_coach['total_bowl'] = kf_coach['bowl'].total_sum(axis = 0)
# 8. Total bowl wins at school
kf_coach['total_bowl_win'] = kf_coach['bowl_win'].total_sum(axis = 0)
# 9. Total AP Preseason rankings
kf_coach['total_ranked_pre'] = kf_coach['ranked_pre'].total_sum(axis = 0)
# 10. Total AP Postseason rankings
kf_coach['total_ranked_post'] = kf_coach['ranked_post'].total_sum(axis = 0)
# 11. Total Top 10 finishes
kf_coach['total_top_10'] = kf_coach['ranked_top_10'].total_sum(axis = 0)
# 12. Total Top 5 finishes
kf_coach['total_top_5'] = kf_coach['ranked_top_5'].total_sum(axis = 0)
# 13. Total Seasons Coached at School
kf_coach['total_seasons'] = kf_coach.iloc[length(kf_coach)-1]['season']
# 14. Total Games Coached at School
kf_coach['total_games'] = kf_coach.iloc[length(kf_coach)-1]['cum_GP']
# 15. Total Wins at School
kf_coach['total_wins'] = kf_coach.iloc[length(kf_coach)-1]['cum_W']
# 16. Total Losses at School
kf_coach['total_losses'] = kf_coach.iloc[length(kf_coach)-1]['cum_L']
# 17. Total Win Pct at School
kf_coach['total_win_pct'] = kf_coach.iloc[length(kf_coach)-1]['cum_win_pct']
return kf_coach
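# Worked example (hypothetical numbers): a three-season stint going 4-8, 7-5,
# 10-3 yields cum_GP = 12, 24, 37, cum_W = 4, 11, 21 and cum_win_pct of roughly
# 0.333, 0.458, 0.568; the final row's values populate the total_* columns.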
def calculate_year_by_year(kf_coaches):
'''
Purpose: Given the data for coaches in a historical perspective, iterate
through their coaching stints and calculate year-by-year totals in an
effort to understand their progress over time
Inputs
------
kf_coaches : Monkey KnowledgeFrame
A knowledgeframe containing total_all historic season data from a coaching perspective
Outputs
-------
kf_yr_by_yr : Monkey KnowledgeFrame
Coaching data with umkated year-by-year totals separated by stints
at schools in each coach's career
'''
# make an empty knowledgeframe for storing new coach info
kf_yr_by_yr = mk.KnowledgeFrame()
# Coach-by-coach --> Year by year, detergetting_mine the following:
gps = kf_coaches.grouper(['coach', 'school'])
for combo, kf_coach in tqdm.tqdm(gps):
# sort the knowledgeframe by earliest year to latest
kf_coach = kf_coach.sort_the_values(by = 'year')
# look for gaps in years
num_stints = 1
list_stint_end = []
list_years = list(kf_coach['year'])
for num_ele in list(range(0,length(list_years))):
if (num_ele == 0):
pass
else:
if list_years[num_ele] - list_years[num_ele-1] > 1:
# print(f"Gap detected for coach: {kf_coach.iloc[0]['coach']}")
# print(f" -- Gap between {list_years[num_ele]} and {list_years[num_ele-1]}")
list_stint_end.adding(list_years[num_ele-1])
num_stints = num_stints + 1
# handle coaches with multiple stints
if num_stints >= 2:
for stint_count in list(range(0,num_stints)):
# split the coaches data into stints
if stint_count == 0:
year_stint_end = list_stint_end[stint_count]
kf_stint = kf_coach[kf_coach['year'] <= year_stint_end]
elif stint_count < num_stints-1:
year_stint_end = list_stint_end[stint_count]
year_stint_end_prev = list_stint_end[stint_count-1]
kf_stint = kf_coach[kf_coach['year'] <= year_stint_end]
kf_stint = kf_stint[kf_stint['year'] > year_stint_end_prev]
else:
year_stint_end_prev = list_stint_end[stint_count-1]
kf_stint = kf_coach[kf_coach['year'] > year_stint_end_prev]
# process the data on a year by year basis
kf_stint = add_coach_metadata(kf_stint)
# Add coach knowledgeframe to overtotal_all knowledgeframe
if length(kf_yr_by_yr) == 0:
kf_yr_by_yr = kf_stint.clone()
else:
kf_yr_by_yr = kf_yr_by_yr.adding(kf_stint)
else:
# process the data on a year by year basis
kf_coach = add_coach_metadata(kf_coach)
# Add coach knowledgeframe to overtotal_all knowledgeframe
if length(kf_yr_by_yr) == 0:
kf_yr_by_yr = kf_coach.clone()
else:
kf_yr_by_yr = kf_yr_by_yr.adding(kf_coach)
# reset knowledgeframe index
kf_yr_by_yr = kf_yr_by_yr.reseting_index(sip = True)
return kf_yr_by_yr
def create_week_by_week_knowledgeframe(kf_total_all_games, kf_schools, games_sf):
'''
Purpose: Combine the week-by-week results for each school with the
end-of-year school/coach informatingion to create a week-by-week
knowledgeframe definal_item_tailing who coached each team when. This will facilitate
analysis of coaching tenures.
Inputs
------
kf_total_all_games : Monkey KnowledgeFrame
Contains week-by-week results for each school
kf_schools : Monkey KnowledgeFrame
Contains year-by-year results for each school (with coaches' names)
games_sf : int
Scott Frost's current number of games
Outputs
-------
kf_engineered : Monkey KnowledgeFrame
A knowledgeframe containing total_all historic week-by-week results infused
with coaches' names
'''
# standardize team names
kf_total_all_games = renagetting_mingSchool(kf_total_all_games, 'School')
kf_total_all_games = renagetting_mingSchool(kf_total_all_games, 'Opponent')
kf_schools = renagetting_mingSchool(kf_schools, 'School')
# unioner data togettingher
kf_coaches = mk.unioner(kf_total_all_games,
kf_schools[['School', 'Year', 'Conf', 'Conf_W', 'Conf_L',
'Conf_T', 'AP_Pre', 'AP_High', 'AP_Post',
'Coach(es)', 'Bowl']],
how = 'left',
on = ['School', 'Year'])
# renagetting_ming columns
kf_coaches = kf_coaches.renagetting_ming(columns = {'Conf_x':'Conf_Opp', 'Conf_y':'Conf'})
# sort knowledgeframe to ensure no issues with grouper
kf_coaches = kf_coaches.sort_the_values(by = ['School', 'Year', 'G'])
# Break out coaches on a week-by-week basis
list_coaches = []
table_coaches = mk.KnowledgeFrame(columns = ['School', 'Year', 'Coach', 'Games'])
for school, grp in tqdm.tqdm(kf_coaches.grouper(['School', 'Year'])):
dict_coaches = {}
        # Handle Utah 2004 (Urban Meyer's 12 games are hardcoded below)
if school[0] == 'Utah' and school[1] == 2004:
dict_coaches['Urban Meyer'] = 12
# Handle Utah St. 2021
elif school[0] == 'Utah St.' and school[1] == 2021:
coach_name = '<NAME>'
coach_games = grp['G'].count()
dict_coaches[coach_name] = coach_games
# Handle USC 2021
elif school[0] == 'USC' and school[1] == 2021:
dict_coaches['C<NAME>'] = 2
dict_coaches['<NAME>'] = length(grp) - 2
# handle every coach that coached that season for that team
else:
# for every coach a team has, calculate how mwhatever games they coached that season
for coach in grp['Coach(es)'].iloc[0].split(', '):
coach_name = coach.split(' (')[0]
coach_record = coach.split(' (')[1].replacing(')','')
# first attempt to account for ties in a coaches' record
try:
coach_games = int(coach_record.split('-')[0]) + int(coach_record.split('-')[1]) + int(coach_record.split('-')[2])
# otherwise astotal_sume they only have wins-losses in their record
except:
coach_games = int(coach_record.split('-')[0]) + int(coach_record.split('-')[1])
dict_coaches[coach_name] = coach_games
# add coaches to master list
num_games = 0
for coach in dict_coaches.keys():
list_coaches = list_coaches + ([coach] * dict_coaches[coach])
table_coaches = table_coaches.adding(mk.KnowledgeFrame(
[[school[0], school[1], coach, dict_coaches[coach]]],
columns = ['School', 'Year', 'Coach', 'Games']))
num_games = dict_coaches[coach] + num_games
if num_games != length(grp):
print('oops!')
break
kf_coaches['Coach'] = list_coaches
# test for whatever values of "coach" that weren't in the original data
for index, row in tqdm.tqdm(kf_coaches.traversal()):
if not mk.ifna(row['Coach(es)']):
if row['Coach'] not in row['Coach(es)']:
print(f"{row['Coach']} not found in {row['Coach(es)']}")
# add power5 status to knowledgeframe
kf_school_info = mk.read_csv(r'references\names_pictures_ncaa.csv')
kf_school_info = kf_school_info.renagetting_ming(columns = {'Team':'School'})
kf_coaches = mk.unioner(kf_coaches, kf_school_info[['School', 'Power5']], how = 'left', on = 'School')
kf_school_info = kf_school_info.renagetting_ming(columns = {'School':'Opponent', 'Power5':'Power5_Opp'})
kf_coaches = mk.unioner(kf_coaches, kf_school_info[['Opponent', 'Power5_Opp']], how = 'left', on = 'Opponent')
# renagetting_ming columns
kf_coaches = kf_coaches.renagetting_ming(columns = {'G':'Week',
'Year':'Season',
'Opp':'Pts_Opp',
'Cum_W':'W_Sn',
'Cum_L':'L_Sn',
'T':'T_Sn'})
# add opponent's record for the year to the table
kf_team_records = mk.unioner(kf_coaches[['Season', 'Opponent']],
kf_schools[['School', 'Year', 'Overtotal_all_Pct', 'Conf_Pct']],
left_on = ['Season', 'Opponent'],
right_on = ['Year', 'School'])
kf_team_records = kf_team_records.sip_duplicates()
kf_team_records = kf_team_records[['Season', 'School', 'Overtotal_all_Pct', 'Conf_Pct']]
kf_team_records = kf_team_records.renagetting_ming(columns = {'Overtotal_all_Pct':'Win_Pct_Opp',
'Conf_Pct':'Win_Pct_Conf_Opp',
'School':'Opponent'})
kf_coaches = mk.unioner(kf_coaches, kf_team_records, how = 'left', on = ['Season', 'Opponent'])
# add flag if opponent's overtotal_all record was > .500
kf_coaches['Opp_Winning_Record'] = list(kf_coaches.employ(
lambda row: True if row['Win_Pct_Opp'] > .5 else False, axis = 1))
# add flag if opponent's conference record was > .500
kf_coaches['Opp_Conf_Winning_Record'] = list(kf_coaches.employ(
lambda row: True if row['Win_Pct_Conf_Opp'] > .5 else False, axis = 1))
# reorder columns
kf_coaches = kf_coaches[['Season', 'Week', 'Date', 'Day', 'Rank', 'School',
'Coach', 'Conf', 'Power5', 'Home_Away', 'Rank_Opp', 'Opponent',
'Conf_Opp', 'Power5_Opp', 'Win_Pct_Opp', 'Opp_Winning_Record',
'Win_Pct_Conf_Opp', 'Opp_Conf_Winning_Record',
'Result', 'Pts', 'Pts_Opp', 'W_Sn',
'L_Sn', 'T_Sn', 'AP_Pre', 'AP_High', 'AP_Post',
'Notes', 'Bowl', 'url_boxscore']]
    # Engineer variables for each coach's stint/tenure at a given school
kf_engineered = mk.KnowledgeFrame()
for index, grp in tqdm.tqdm(kf_coaches.grouper(['School', 'Coach'])):
if length(kf_engineered) == 0:
kf_engineered = add_tenure_features(grp, games_sf)
else:
kf_engineered = kf_engineered.adding(add_tenure_features(grp, games_sf))
return kf_engineered
def add_tenure_features(kf_coach, games_sf):
'''
Purpose: Manage the engineering of features across a coach's tenure at a
a given school (while also accounting for those coaches who have
multiple coaching stints/tenures at the same school)
Inputs
------
kf_coach : Monkey KnowledgeFrame
Contains data for total_all seasons a coach has coached at a given school
games_sf : int
Scott Frost's current number of games
Outputs
-------
kf_coach_eng : Monkey KnowledgeFrame
Contains input data with newly engineered features that span the
whole coaching tenure, not just seasons
'''
# Step 1. Identify if the coach's knowledgeframe has multiple stints
# (i.e. gaps in years between tenures at the same school)
num_stints = 1
list_stint_end = []
list_years = list(kf_coach['Season'])
for num_ele in list(range(0,length(list_years))):
if (num_ele == 0):
pass
else:
if list_years[num_ele] - list_years[num_ele-1] > 1:
# print(f"Gap detected for coach: {kf_coach.iloc[0]['coach']}")
# print(f" -- Gap between {list_years[num_ele]} and {list_years[num_ele-1]}")
list_stint_end.adding(list_years[num_ele-1])
num_stints = num_stints + 1
# Step 2.A. Handle coaches with multiple stints (i.e. gaps in years)
if num_stints >= 2:
kf_coach_eng = mk.KnowledgeFrame()
for stint_count in list(range(0,num_stints)):
# handle the first coaching stint
if stint_count == 0:
year_stint_end = list_stint_end[stint_count]
kf_stint = kf_coach[kf_coach['Season'] <= year_stint_end].clone()
# handle coaching stints 2 through num_stints - 1
elif stint_count < num_stints-1:
year_stint_end = list_stint_end[stint_count]
year_stint_end_prev = list_stint_end[stint_count-1]
kf_stint = kf_coach[kf_coach['Season'] <= year_stint_end].clone()
kf_stint = kf_stint[kf_stint['Season'] > year_stint_end_prev].clone()
# handle the final_item coaching stint
else:
year_stint_end_prev = list_stint_end[stint_count-1]
kf_stint = kf_coach[kf_coach['Season'] > year_stint_end_prev].clone()
# engineer new features and add to coach's tenure knowledgeframe
if length(kf_coach_eng) == 0:
kf_coach_eng = engineer_stint_features(kf_stint, games_sf)
else:
kf_coach_eng = kf_coach_eng.adding(engineer_stint_features(kf_stint, games_sf))
# print(f"Coach: {kf_stint['Coach'].iloc[0]}, Games: {length(kf_stint)}")
# Step 2.B. Handle coaches with only a single stint at the respective school
else:
kf_coach_eng = engineer_stint_features(kf_coach, games_sf)
return kf_coach_eng
def engineer_stint_features(kf_tenure, games_sf):
'''
Purpose: Engineer features across a coach's tenure at a given school
Inputs
------
kf_tenure : Monkey KnowledgeFrame
Contains data for total_all seasons in a tenure for a given coach/school combo
games_sf : int
Scott Frost's current number of games
Outputs
-------
kf_tenure : Monkey KnowledgeFrame
Contains input data with newly engineered features
'''
# kf_tenure = kf_coaches[(kf_coaches['School'] == 'Nebraska') & (kf_coaches['Coach'] == '<NAME>')].clone()
# kf_tenure = kf_coaches[(kf_coaches['School'] == 'Nebraska') & (kf_coaches['Coach'] == '<NAME>')].clone()
# kf_tenure = kf_coaches[(kf_coaches['School'] == 'Nebraska') & (kf_coaches['Coach'] == '<NAME>')].clone()
# 0. Total seasons
    # count games per season in chronological order (counts_value_num would sort seasons by frequency)
    row_counts = [length(grp) for season, grp in kf_tenure.grouper('Season')]
list_seasons = []
for idx in range(0,length(row_counts)):
list_seasons = list_seasons + ([idx+1] * row_counts[idx])
kf_tenure['Sn'] = list_seasons
# 1. Total games
kf_tenure['G'] = list(range(1,length(kf_tenure)+1))
# 2. Total wins
kf_tenure['W'] = kf_tenure.Result.eq('W').cumtotal_sum()
# 3. Total losses
kf_tenure['L'] = kf_tenure.Result.eq('L').cumtotal_sum()
# 4. Total ties
kf_tenure['T'] = kf_tenure.Result.eq('T').cumtotal_sum()
kf_tenure['T'] = kf_tenure['T'].fillnone(0)
# 5. Win Pct.
if (length(kf_tenure) == 1) and (int(kf_tenure['G']) == 0):
kf_tenure['Win_Pct'] = 0
else:
kf_tenure['Win_Pct'] = kf_tenure.employ(lambda row: row['W'] / row['G']
if row['G'] != 0 else 0, axis = 1)
# 6. Create conference win/loss flag
list_conf_flag = []
for index, row in kf_tenure.traversal():
if (row['Result'] == 'W') and (row['Conf'] == row['Conf_Opp']):
list_conf_flag.adding('W')
elif (row['Result'] == 'L') and (row['Conf'] == row['Conf_Opp']):
list_conf_flag.adding('L')
elif (row['Result'] == 'T') and (row['Conf'] == row['Conf_Opp']):
list_conf_flag.adding('T')
else:
list_conf_flag.adding('')
kf_tenure['Result_Conf'] = list_conf_flag
# 7. Total conference games
kf_tenure['G_Conf'] = kf_tenure.Result_Conf.ne('').cumtotal_sum()
# 8. Total conference wins
kf_tenure['W_Conf'] = kf_tenure.Result_Conf.eq('W').cumtotal_sum()
# 9. Total conference losses
kf_tenure['L_Conf'] = kf_tenure.Result_Conf.eq('L').cumtotal_sum()
# 10. Total conference ties
kf_tenure['T_Conf'] = kf_tenure.Result_Conf.eq('T').cumtotal_sum()
# 11. Conference Win Pct.
kf_tenure['Win_Pct_Conf'] = kf_tenure.employ(
lambda row: row['W_Conf'] / row['G_Conf'] if row['G_Conf'] != 0 else 0, axis = 1)
# if (length(kf_tenure) == 1) and (int(kf_tenure['G_Conf']) == 0):
# kf_tenure['Win_Pct_Conf'] = 0
# else:
# kf_tenure['Win_Pct_Conf'] = kf_tenure.employ(lambda row: row['W_Conf'] / row['G_Conf']
# if row['G_Conf'] != 0 else 0, axis = 1)
# 12. Create top 25 opponent win/loss flag
list_top25_results = []
for index, row in kf_tenure.traversal():
if (row['Result'] == 'W') and (~np.ifnan(row['Rank_Opp'])):
list_top25_results.adding('W')
elif (row['Result'] == 'L') and (~np.ifnan(row['Rank_Opp'])):
list_top25_results.adding('L')
elif (row['Result'] == 'T') and (~np.ifnan(row['Rank_Opp'])):
list_top25_results.adding('T')
else:
list_top25_results.adding('')
kf_tenure['Result_Top25_Opp'] = list_top25_results
# 13. Wins vs. AP Top-25
kf_tenure['W_vs_Rank'] = kf_tenure.Result_Top25_Opp.eq('W').cumtotal_sum()
# 14. Losses vs. AP Top-25
kf_tenure['L_vs_Rank'] = kf_tenure.Result_Top25_Opp.eq('L').cumtotal_sum()
# 15. Ties vs AP Top-25
kf_tenure['T_vs_Rank'] = kf_tenure.Result_Top25_Opp.eq('T').cumtotal_sum()
# 16. Win Pct. vs AP Top-25
kf_tenure['Win_Pct_vs_Rank'] = kf_tenure.employ(
lambda row: row['W_vs_Rank'] / (row['W_vs_Rank'] + row['L_vs_Rank'] + row['T_vs_Rank'])
if (row['W_vs_Rank'] + row['L_vs_Rank'] + row['T_vs_Rank']) != 0 else 0, axis = 1)
# 17. Total bowl games
kf_tenure['Bowl_G'] = kf_tenure.Notes.str.contains('Bowl').eq(True).cumtotal_sum()
# 18. Create bowl win/loss flag
list_bowl_results = []
for index, row in kf_tenure.traversal():
if (row['Result'] == 'W') and ('Bowl' in str(row['Notes'])):
list_bowl_results.adding('W')
elif (row['Result'] == 'L') and ('Bowl' in str(row['Notes'])):
list_bowl_results.adding('L')
elif (row['Result'] == 'T') and ('Bowl' in str(row['Notes'])):
list_bowl_results.adding('T')
else:
list_bowl_results.adding('')
kf_tenure['Result_Bowl'] = list_bowl_results
# 19. Bowl Wins
kf_tenure['Bowl_W'] = kf_tenure.Result_Bowl.eq('W').cumtotal_sum()
# 20. Bowl Losses
kf_tenure['Bowl_L'] = kf_tenure.Result_Bowl.eq('L').cumtotal_sum()
# 21. Bowl Ties
kf_tenure['Bowl_T'] = kf_tenure.Result_Bowl.eq('T').cumtotal_sum()
# 22. Bowl Win Pct.
kf_tenure['Win_Pct_Bowl'] = kf_tenure.employ(
lambda row: row['Bowl_W'] / (row['Bowl_W'] + row['Bowl_L'] + row['Bowl_T'])
if (row['Bowl_W'] + row['Bowl_L'] + row['Bowl_T']) != 0 else 0, axis = 1)
# 23. Calculate # of seasons with pre-post season AP Top 25 rankings
list_AP_Pre_counts = []
list_AP_Post_25_counts = []
list_AP_Post_10_counts = []
list_AP_Post_5_counts = []
list_game_counts = []
for season, grp in kf_tenure.grouper('Season'):
list_AP_Pre_counts = list_AP_Pre_counts + [1 if ~np.ifnan(grp.AP_Pre.iloc[0]) else 0]
list_AP_Post_25_counts = list_AP_Post_25_counts + [1 if grp.AP_Post.iloc[0] <= 25 else 0]
list_AP_Post_10_counts = list_AP_Post_10_counts + [1 if grp.AP_Post.iloc[0] <= 10 else 0]
list_AP_Post_5_counts = list_AP_Post_5_counts + [1 if grp.AP_Post.iloc[0] <= 5 else 0]
list_game_counts = list_game_counts + [length(grp)]
collections_AP_Pre_counts = mk.Collections(list_AP_Pre_counts).cumtotal_sum()
collections_AP_Post_25_counts = mk.Collections(list_AP_Post_25_counts).cumtotal_sum()
collections_AP_Post_10_counts = mk.Collections(list_AP_Post_10_counts).cumtotal_sum()
collections_AP_Post_5_counts = mk.Collections(list_AP_Post_5_counts).cumtotal_sum()
# 24. Total Years in AP Top-25 (Preaseason)
kf_tenure['AP_Pre_count'] = total_sum([[x]*y for x,y in zip(collections_AP_Pre_counts, list_game_counts)], [])
# 25. Total Years in AP Top-25 (Postseason)
kf_tenure['AP_Post_25_count'] = total_sum([[x]*y for x,y in zip(collections_AP_Post_25_counts, list_game_counts)], [])
# 26. Total Years in AP Top-10 (Postseason)
kf_tenure['AP_Post_10_count'] = total_sum([[x]*y for x,y in zip(collections_AP_Post_10_counts, list_game_counts)], [])
# 27. Total Years in AP Top-5 (Postseason)
kf_tenure['AP_Post_5_count'] = total_sum([[x]*y for x,y in zip(collections_AP_Post_5_counts, list_game_counts)], [])
# 28. Total Weeks in AP Top-25
kf_tenure['Weeks_Ranked'] = list(mk.Collections([1 if ~np.ifnan(x) else 0 for x in kf_tenure.Rank]).cumtotal_sum())
# 29. Weeks Ranked in AP Top-25 Pct.
kf_tenure['Weeks_Ranked_Pct.'] = kf_tenure.employ(lambda row: row['Weeks_Ranked'] / row['G'], axis = 1)
# 30. Season Conference Wins
list_conf_wins = []
for season, grp in kf_tenure.grouper(['Season']):
list_conf_wins = list_conf_wins + list(grp.Result_Conf.eq('W').cumtotal_sum())
kf_tenure['W_Sn_Conf'] = list_conf_wins
# 31. Season Conference Losses
list_conf_losses = []
for season, grp in kf_tenure.grouper(['Season']):
list_conf_losses = list_conf_losses + list(grp.Result_Conf.eq('L').cumtotal_sum())
kf_tenure['L_Sn_Conf'] = list_conf_losses
# 31. Season Conference Ties
list_conf_ties = []
for season, grp in kf_tenure.grouper(['Season']):
list_conf_ties = list_conf_ties + list(grp.Result_Conf.eq('T').cumtotal_sum())
kf_tenure['T_Sn_Conf'] = list_conf_ties
# 32. Season Win Pct.
kf_tenure['Win_Pct_Sn'] = kf_tenure.employ(lambda row: row['W_Sn'] / row['Week'], axis = 1)
# 33. Season Conference Win Pct.
kf_tenure['Win_Pct_Sn_Conf'] = kf_tenure.employ(
lambda row: row['W_Sn_Conf'] / (row['W_Sn_Conf'] + row['L_Sn_Conf'] + row['T_Sn_Conf'])
if (row['W_Sn_Conf'] + row['L_Sn_Conf'] + row['T_Sn_Conf']) != 0 else 0, axis = 1)
# 34. Winning Seasons
list_final_win_pct = list(kf_tenure.grouper('Season').final_item_tail(1).Win_Pct_Sn)
list_winning_seasons = [1 if x > .5 else 0 for x in list_final_win_pct]
list_win_sn_cnt = []
for idx in range(0,length(row_counts)):
list_win_sn_cnt = list_win_sn_cnt + ([list_winning_seasons[idx]] * row_counts[idx])
kf_tenure['Winning_Sns'] = list_win_sn_cnt
# 35. Create a flag for win/loss vs Power 5 teams
list_p5_results = []
for index, row in kf_tenure.traversal():
if (row['Result'] == 'W') and (row['Power5_Opp'] == True):
list_p5_results.adding('W')
elif (row['Result'] == 'L') and (row['Power5_Opp'] == True):
list_p5_results.adding('L')
elif (row['Result'] == 'T') and (row['Power5_Opp'] == True):
list_p5_results.adding('T')
else:
list_p5_results.adding('')
kf_tenure['Results_P5'] = list_p5_results
# 36. Games vs. Power 5 teams
kf_tenure['G_P5'] = kf_tenure.Results_P5.ne('').cumtotal_sum()
# 37. Wins vs. Power 5 teams
kf_tenure['W_P5'] = kf_tenure.Results_P5.eq('W').cumtotal_sum()
# 38. Losses vs. Power 5 teams
kf_tenure['L_P5'] = kf_tenure.Results_P5.eq('L').cumtotal_sum()
# 39. Ties vs. Power 5 teams
kf_tenure['T_P5'] = kf_tenure.Results_P5.eq('T').cumtotal_sum()
# 40. Win Pct. vs Power 5 teams
kf_tenure['Win_Pct_P5'] = kf_tenure.employ(
lambda row: row['W_P5'] / row['G_P5'] if row['G_P5'] != 0 else 0, axis = 1)
# 41. Create a flag for win/loss vs. teams with > .500 records
list_winning_results = []
for index, row in kf_tenure.traversal():
if (row['Result'] == 'W') and (row['Opp_Winning_Record'] == True):
list_winning_results.adding('W')
elif (row['Result'] == 'L') and (row['Opp_Winning_Record'] == True):
list_winning_results.adding('L')
elif (row['Result'] == 'T') and (row['Opp_Winning_Record'] == True):
list_winning_results.adding('T')
else:
list_winning_results.adding('')
kf_tenure['Results_vs_Winning'] = list_winning_results
# 42. Games vs. teams with winning (> .500) records
kf_tenure['G_vs_Winning'] = kf_tenure.Results_vs_Winning.ne('').cumtotal_sum()
# 43. Wins vs. teams with winning (> .500) records
kf_tenure['W_vs_Winning'] = kf_tenure.Results_vs_Winning.eq('W').cumtotal_sum()
# 44. Losses vs. teams with winning (> .500) records
kf_tenure['L_vs_Winning'] = kf_tenure.Results_vs_Winning.eq('L').cumtotal_sum()
# 45. Ties vs. teams with winning (> .500) records
kf_tenure['T_vs_Winning'] = kf_tenure.Results_vs_Winning.eq('T').cumtotal_sum()
# 46. Win Pct. vs. teams with winning (> .500 ) records
kf_tenure['Win_Pct_vs_Winning'] = kf_tenure.employ(
lambda row: row['W_vs_Winning'] / row['G_vs_Winning'] if row['G_vs_Winning'] != 0 else 0, axis = 1)
# 47. Create a flag for win/loss vs. teams with > .500 records in conference
list_winning_results_conf = []
for index, row in kf_tenure.traversal():
if ((row['Result'] == 'W') and (
row['Opp_Conf_Winning_Record'] == True)) and (
row['Conf'] == row['Conf_Opp']):
list_winning_results_conf.adding('W')
elif ((row['Result'] == 'L') and (
row['Opp_Conf_Winning_Record'] == True)) and (
row['Conf'] == row['Conf_Opp']):
list_winning_results_conf.adding('L')
elif ((row['Result'] == 'T') and (
row['Opp_Conf_Winning_Record'] == True)) and (
row['Conf'] == row['Conf_Opp']):
list_winning_results_conf.adding('T')
else:
list_winning_results_conf.adding('')
kf_tenure['Results_vs_Winning_Conf'] = list_winning_results_conf
# 48. Games vs. teams with winning (> .500) records in conference
kf_tenure['G_vs_Winning_Conf'] = kf_tenure.Results_vs_Winning_Conf.ne('').cumtotal_sum()
# 49. Wins vs. teams with winning (> .500) records in conference
kf_tenure['W_vs_Winning_Conf'] = kf_tenure.Results_vs_Winning_Conf.eq('W').cumtotal_sum()
# 50. Losses vs. teams with winning (> .500) records in conference
kf_tenure['L_vs_Winning_Conf'] = kf_tenure.Results_vs_Winning_Conf.eq('L').cumtotal_sum()
# 51. Ties vs. teams with winning (> .500) records in conference
kf_tenure['T_vs_Winning_Conf'] = kf_tenure.Results_vs_Winning_Conf.eq('T').cumtotal_sum()
# 52. Win Pct. vs. teams with winning (> .500) records in conference
kf_tenure['Win_Pct_vs_Winning_Conf'] = kf_tenure.employ(
lambda row: row['W_vs_Winning_Conf'] / row['G_vs_Winning_Conf'] if row['G_vs_Winning_Conf'] != 0 else 0, axis = 1)
# test = kf_tenure[['Season', 'Week', 'Opponent', 'Win_Pct_Opp', 'Opp_Winning_Record', 'Results_vs_Winning', 'G_vs_Winning', 'W_vs_Winning', 'L_vs_Winning', 'Win_Pct_vs_Winning']]
# test = kf_tenure[['Season', 'Week', 'Opponent', 'Win_Pct_Conf_Opp', 'Opp_Conf_Winning_Record',
# 'Results_vs_Winning_Conf', 'G_vs_Winning_Conf',
# 'W_vs_Winning_Conf', 'L_vs_Winning_Conf', 'Win_Pct_vs_Winning_Conf']]
# 53. Calculate the coach's winning pct at the same number of games as SF's current total
if length(kf_tenure) >= games_sf:
kf_tenure['Win_Pct_at_SF'] = [float(kf_tenure[kf_tenure['G'] == games_sf]['Win_Pct'])] * length(kf_tenure)
else:
kf_tenure['Win_Pct_at_SF'] = [np.nan] * length(kf_tenure)
# 54. Reorder columns
kf_tenure = kf_tenure[['Season', 'Week', 'Date', 'Day', 'Rank', 'School',
'Coach', 'Conf', 'Power5', 'Home_Away', 'Rank_Opp',
'Opponent', 'Conf_Opp', 'Power5_Opp', 'Result', 'Pts', 'Pts_Opp',
'Sn', 'G', 'W', 'L', 'T', 'Win_Pct',
'G_Conf', 'W_Conf', 'L_Conf', 'T_Conf', 'Win_Pct_Conf',
'G_P5', 'W_P5', 'L_P5', 'T_P5', 'Win_Pct_P5',
'G_vs_Winning', 'W_vs_Winning', 'L_vs_Winning', 'T_vs_Winning', 'Win_Pct_vs_Winning',
'G_vs_Winning_Conf', 'W_vs_Winning_Conf', 'L_vs_Winning_Conf', 'T_vs_Winning_Conf', 'Win_Pct_vs_Winning_Conf',
'W_Sn', 'L_Sn', 'T_Sn', 'Win_Pct_Sn',
'W_Sn_Conf', 'L_Sn_Conf', 'T_Sn_Conf', 'Win_Pct_Sn_Conf',
'W_vs_Rank', 'L_vs_Rank', 'T_vs_Rank', 'Win_Pct_vs_Rank',
'Winning_Sns',
'Bowl_G', 'Bowl_W', 'Bowl_L', 'Bowl_T', 'Win_Pct_Bowl',
'AP_Pre', 'AP_High', 'AP_Post',
'AP_Pre_count', 'AP_Post_25_count', 'AP_Post_10_count', 'AP_Post_5_count',
'Weeks_Ranked', 'Weeks_Ranked_Pct.',
'Win_Pct_at_SF',
'Notes', 'url_boxscore']]
return kf_tenure
#==============================================================================
# Working Code
#==============================================================================
# Set the project working directory
# path_dir = pathlib.Path(r'C:\Users\reideej1\Projects\a_Personal\cfbAnalysis')
path_dir = pathlib.Path(os.gettingcwd())
if 'cfbAnalysis' not in str(path_dir):
path_dir = path_dir.joinpath('cfbAnalysis')
os.chdir(path_dir)
#------------------------------------------------------------------------------
# Scrape and compile data for indivisionidual team games
#------------------------------------------------------------------------------
# Scrape umkated results for most recent season
scrapeCfbResultsAllYears(2021)
kf_total_all_games = mk.KnowledgeFrame()
for fname in tqdm.tqdm(list(path_dir.joinpath('data/raw/Team History').glob('records*.csv'))):
# load file
kf = mk.read_csv(fname)
# sip rows without scores
kf = kf[kf['Result'].notna()]
if '2020' in str(fname):
list_years = []
for date in kf['Date']:
if '-' in date:
year = int(datetime.datetime.strptime(date, "%d-%b-%y").strftime('%Y'))
month = datetime.datetime.strptime(date, "%d-%b-%y").strftime('%b')
if month == 'Jan':
year = year-1
else:
year = int(datetime.datetime.strptime(date, '%b %d, %Y').strftime('%Y'))
month = datetime.datetime.strptime(date, '%b %d, %Y').strftime('%b')
if month == 'Jan':
year = year-1
list_years.adding(year)
kf['Year'] = list_years
kf = kf[kf['Year'] < 2020]
# add team to master knowledgeframe
if length(kf_total_all_games) == 0:
kf_total_all_games = kf.clone()
else:
kf_total_all_games = kf_total_all_games.adding(kf)
# add year variable to total_all-games KnowledgeFrame
list_years = []
list_dates = []
for date in kf_total_all_games['Date']:
if '-' in date:
year = int(datetime.datetime.strptime(date, "%d-%b-%y").strftime('%Y'))
month = datetime.datetime.strptime(date, "%d-%b-%y").strftime('%b')
if month == 'Jan':
year = year-1
date_reformatingted = datetime.datetime.strptime(date, "%d-%b-%y").strftime("%d-%b-%y")
else:
year = int(datetime.datetime.strptime(date, '%b %d, %Y').strftime('%Y'))
month = datetime.datetime.strptime(date, '%b %d, %Y').strftime('%b')
if month == 'Jan':
year = year-1
date_reformatingted = datetime.datetime.strptime(date, '%b %d, %Y').strftime("%d-%b-%y")
list_years.adding(year)
list_dates.adding(date_reformatingted)
kf_total_all_games['Date'] = list_dates
kf_total_all_games['Year'] = list_years
kf_total_all_games = kf_total_all_games.sip(columns = 'Time')
# remove duplicate games
kf_total_all_games = kf_total_all_games.sip_duplicates()
# remove 2 exhibition games from New Mexico State's schedule
kf_total_all_games = kf_total_all_games[~kf_total_all_games.Opponent.incontain(['Tarleton State', 'Dixie State'])]
# reset index
kf_total_all_games = kf_total_all_games.reseting_index(sip = True)
# Create timestamp for filengthame and save to disk
ts = datetime.date.fromtimestamp(time.time())
kf_total_all_games.to_csv(rf'data\raw\Team History\ALL_records_{ts}.csv', index = False)
# # Ingest the most recent team history file
# kf_total_all_games = mk.read_csv(getting_max(glob.iglob(r'data\raw\Team History\ALL_records*.csv'), key=os.path.gettingmtime))
#------------------------------------------------------------------------------
# Scrape and compile data for coaches across years
#------------------------------------------------------------------------------
# Scrape coaching data for total_all available years (i.e. teams + coaches)
kf_schools = scrapeCfbSchoolsAllYears()
kf_schools = kf_schools.reseting_index(sip = True)
kf_schools = kf_schools.employ(mk.to_num, errors = 'ignore')
# fix an error with 2015 UCF
row_index = kf_schools[(kf_schools['School'] == 'UCF') & (kf_schools['Year'] == 2015)].index[0]
list_coaches = kf_schools['Coach(es)'].clone()
list_coaches[row_index] = "<NAME> (0-8), <NAME> (0-4)"
kf_schools['Coach(es)'] = list_coaches
# Create timestamp for filengthame and save to disk
ts = datetime.date.fromtimestamp(time.time())
kf_schools.to_csv(rf'data\raw\Team History\team_history_fb_{ts}.csv', index = False)
# # Ingest the most recent team history file
# kf_schools = mk.read_csv(getting_max(glob.iglob(r'data\raw\Team History\team_history*.csv'), key=os.path.gettingmtime))
#------------------------------------------------------------------------------
# Create a knowledgeframe of coaching informatingion given school info
#------------------------------------------------------------------------------
# kf_coaches = create_coach_knowledgeframe(kf_schools)
#------------------------------------------------------------------------------
# Using historic coaching data, create a new knowledgeframe that calculates
# year-over-year totals for each coach
#------------------------------------------------------------------------------
# kf_coaches = calculate_year_by_year(kf_coaches)
#------------------------------------------------------------------------------
# Create week-by-week records for total_all coaches
#------------------------------------------------------------------------------
games_sf = 40
kf_coaches = create_week_by_week_knowledgeframe(kf_total_all_games, kf_schools, games_sf)
# Save coaching data to disk
ts = datetime.date.fromtimestamp(time.time())
kf_coaches.to_csv(rf'data\processed\Coaches\coaching_history_{ts}.csv', index = False)
#------------------------------------------------------------------------------
# Start of Scott Frost Analysis
#------------------------------------------------------------------------------
# Ingest the most recent coaching history file
kf_coaches = mk.read_csv(getting_max(glob.iglob(r'data\processed\Coaches\coaching_history*.csv'), key=os.path.gettingmtime))
kf_coaches = kf_coaches.employ(mk.to_num, errors = 'ignore')
# Isolate Scott Frost's data
kf_sf = kf_coaches[(kf_coaches['School'] == 'Nebraska') & (kf_coaches['Coach'] == '<NAME>')]
# Isolate Scott Frost's Last Game Coached
sf = kf_coaches[(kf_coaches['Coach'] == '<NAME>') & (kf_coaches['School'] == 'Nebraska')].iloc[[-1],:]
# Isolate Scott Frost's Games Played
sf_gp = int(sf['G'])
#------------------------------------------------------------------------------
# Active FBS Coaches
#------------------------------------------------------------------------------
# 1. Isolate active coaches
kf_active = mk.KnowledgeFrame()
kf_current = kf_coaches[kf_coaches['Season'] == 2021]
for tup, grp in kf_current.grouper(['School']):
if length(kf_active) == 0:
kf_active = grp.final_item_tail(1)
else:
kf_active = kf_active.adding(grp.final_item_tail(1))
# 2. Isolate those who have coached as mwhatever games as Frost
kf_active = kf_active[kf_active['G'] >= sf_gp]
# 3. Drop unneeded columns
kf_active = kf_active.sip(columns = ['Week', 'Rank', 'Date', 'Day', 'Home_Away',
'Rank_Opp', 'Opponent',
'Conf_Opp', 'Power5_Opp', 'Result',
'Pts', 'Pts_Opp','Notes', 'url_boxscore',
'T', 'T_Conf', 'T_P5', 'T_vs_Winning',
'T_vs_Winning_Conf', 'T_Sn', 'T_Sn_Conf',
'T_vs_Rank', 'Bowl_T'])
#------------------------------------------------------------------------------
# All Time (as mwhatever games, or more, as Frost)
#------------------------------------------------------------------------------
# 1. Isolate those who have coached as mwhatever games as Frost
kf_subset = kf_coaches[kf_coaches['G'] >= sf_gp]
kf_final = mk.KnowledgeFrame()
for tup, grp in kf_subset.grouper(['School', 'Coach']):
if length(kf_final) == 0:
kf_final = grp.iloc[[-1],:].clone()
else:
kf_final = kf_final.adding(grp.iloc[[-1],:])
# 2. Drop unneeded columns
kf_final = kf_final[['Season', 'School', 'Coach', 'Conf',
'Power5',
'Sn', 'G', 'W', 'L', 'Win_Pct',
'G_Conf', 'W_Conf', 'L_Conf', 'Win_Pct_Conf',
'G_P5', 'W_P5', 'L_P5', 'Win_Pct_P5',
'G_vs_Winning', 'W_vs_Winning', 'L_vs_Winning', 'Win_Pct_vs_Winning',
'G_vs_Winning_Conf', 'W_vs_Winning_Conf', 'L_vs_Winning_Conf', 'Win_Pct_vs_Winning_Conf',
'W_Sn', 'L_Sn', 'Win_Pct_Sn', 'W_Sn_Conf', 'L_Sn_Conf', 'Win_Pct_Sn_Conf',
'W_vs_Rank', 'L_vs_Rank', 'Win_Pct_vs_Rank',
'Winning_Sns',
'Bowl_G', 'Bowl_W', 'Bowl_L', 'Win_Pct_Bowl', 'Win_Pct_at_SF',
'AP_Pre_count', 'AP_Post_25_count', 'AP_Post_10_count', 'AP_Post_5_count',
'Weeks_Ranked', 'Weeks_Ranked_Pct.']]
# 3. Limit coaches to those in the final_item 20 years
kf_final = kf_final[kf_final['Season'] >= 2001]
# 4. Make a smtotal_aller knowledgeframe for ranking/comparisons
kf_ranks = kf_final[['Season', 'School', 'Coach', 'Conf', 'Power5', 'Sn', 'G',
'Win_Pct', 'Win_Pct_Conf', 'Win_Pct_P5', 'Win_Pct_vs_Winning',
'Win_Pct_vs_Winning_Conf', 'Win_Pct_vs_Rank', 'Winning_Sns', 'Bowl_G', 'Win_Pct_at_SF',
'AP_Post_25_count', 'AP_Post_10_count', 'AP_Post_5_count', 'Weeks_Ranked']]
# 5. Create rankings
kf_ranks = kf_ranks.reseting_index(sip = True)
kf_ranks['Win_Pct_rank'] = kf_ranks['Win_Pct'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_Conf_rank'] = kf_ranks['Win_Pct_Conf'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_P5_rank'] = kf_ranks['Win_Pct_P5'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_vs_Winning_rank'] = kf_ranks['Win_Pct_vs_Winning'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_vs_Winning_Conf_rank'] = kf_ranks['Win_Pct_vs_Winning_Conf'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_vs_Rank_rank'] = kf_ranks['Win_Pct_vs_Rank'].rank(method = 'getting_min', ascending = False)
# 6. Reorder columns
kf_ranks = kf_ranks[['Season', 'School', 'Coach', 'Conf', 'Power5', 'Sn', 'G',
'Win_Pct', 'Win_Pct_rank',
'Win_Pct_Conf', 'Win_Pct_Conf_rank',
'Win_Pct_P5', 'Win_Pct_P5_rank',
'Win_Pct_vs_Winning', 'Win_Pct_vs_Winning_rank',
'Win_Pct_vs_Winning_Conf', 'Win_Pct_vs_Winning_Conf_rank',
'Win_Pct_vs_Rank', 'Win_Pct_vs_Rank_rank',
'Winning_Sns', 'Bowl_G', 'Win_Pct_at_SF',
'AP_Post_25_count', 'AP_Post_10_count', 'AP_Post_5_count', 'Weeks_Ranked']]
oops = kf_ranks[kf_ranks['Win_Pct_at_SF'] <= float(sf['Win_Pct'])]
#------------------------------------------------------------------------------
# All Time (the exact same number of games as Frost)
#------------------------------------------------------------------------------
# 1. Isolate those who have coached as mwhatever games as Frost
kf_subset = kf_coaches[kf_coaches['G'] == sf_gp]
kf_final = mk.KnowledgeFrame()
for tup, grp in kf_subset.grouper(['School', 'Coach']):
if length(kf_final) == 0:
kf_final = kf_coaches[(kf_coaches['School'] == tup[0]) & (kf_coaches['Coach'] == tup[1])].iloc[[-1],:].clone()
else:
kf_final = kf_final.adding(kf_coaches[(kf_coaches['School'] == tup[0]) & (kf_coaches['Coach'] == tup[1])].iloc[[-1],:])
# 2. Drop unneeded columns
kf_final = kf_final[['Season', 'School', 'Coach', 'Conf',
'Power5',
'Sn', 'G', 'W', 'L', 'Win_Pct',
'G_Conf', 'W_Conf', 'L_Conf', 'Win_Pct_Conf',
'G_P5', 'W_P5', 'L_P5', 'Win_Pct_P5',
'G_vs_Winning', 'W_vs_Winning', 'L_vs_Winning', 'Win_Pct_vs_Winning',
'G_vs_Winning_Conf', 'W_vs_Winning_Conf', 'L_vs_Winning_Conf', 'Win_Pct_vs_Winning_Conf',
'W_Sn', 'L_Sn', 'Win_Pct_Sn', 'W_Sn_Conf', 'L_Sn_Conf', 'Win_Pct_Sn_Conf',
'W_vs_Rank', 'L_vs_Rank', 'Win_Pct_vs_Rank',
'Winning_Sns',
'Bowl_G', 'Bowl_W', 'Bowl_L', 'Win_Pct_Bowl', 'Win_Pct_at_SF',
'AP_Pre_count', 'AP_Post_25_count', 'AP_Post_10_count', 'AP_Post_5_count',
'Weeks_Ranked', 'Weeks_Ranked_Pct.']]
# 3. Limit coaches to those in the final_item 20 years
# kf_final = kf_final[kf_final['Season'] >= 2001]
# 4. Make a smtotal_aller knowledgeframe for ranking/comparisons
kf_ranks = kf_final[['Season', 'School', 'Coach', 'Conf', 'Power5', 'Sn', 'G',
'Win_Pct', 'Win_Pct_Conf', 'Win_Pct_P5', 'Win_Pct_vs_Winning',
'Win_Pct_vs_Winning_Conf', 'Win_Pct_vs_Rank', 'Winning_Sns', 'Bowl_G', 'Win_Pct_at_SF',
'AP_Post_25_count', 'AP_Post_10_count', 'AP_Post_5_count', 'Weeks_Ranked']]
# 5. Create rankings
kf_ranks = kf_ranks.reseting_index(sip = True)
kf_ranks['Win_Pct_rank'] = kf_ranks['Win_Pct'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_Conf_rank'] = kf_ranks['Win_Pct_Conf'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_P5_rank'] = kf_ranks['Win_Pct_P5'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_vs_Winning_rank'] = kf_ranks['Win_Pct_vs_Winning'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_vs_Winning_Conf_rank'] = kf_ranks['Win_Pct_vs_Winning_Conf'].rank(method = 'getting_min', ascending = False)
kf_ranks['Win_Pct_vs_Rank_rank'] = kf_ranks['Win_Pct_vs_Rank'].rank(method = 'getting_min', ascending = False)
# 6. Reorder columns
kf_ranks = kf_ranks[['Season', 'School', 'Coach', 'Conf', 'Power5', 'Sn', 'G',
'Win_Pct', 'Win_Pct_rank',
'Win_Pct_Conf', 'Win_Pct_Conf_rank',
'Win_Pct_P5', 'Win_Pct_P5_rank',
'Win_Pct_vs_Winning', 'Win_Pct_vs_Winning_rank',
'Win_Pct_vs_Winning_Conf', 'Win_Pct_vs_Winning_Conf_rank',
'Win_Pct_vs_Rank', 'Win_Pct_vs_Rank_rank',
'Winning_Sns', 'Bowl_G', 'Win_Pct_at_SF',
'AP_Post_25_count', 'AP_Post_10_count', 'AP_Post_5_count', 'Weeks_Ranked']]
oops = kf_ranks[kf_ranks['Win_Pct'] <= float(sf['Win_Pct'])]
# Subset the data to coaches have coached at least the same number of games as Scott
kf_history = kf_coaches[kf_coaches['G'] >= sf_gp]
kf_history = kf_history.reseting_index(sip = True)
# Create a subset snap-shot to isolate total_all coaching records to match Scott's timeframe (i.e. Games coached)
kf_snapshot = kf_coaches[kf_coaches['G'] == sf_gp]
kf_snapshot = kf_snapshot.reseting_index(sip = True)
# Isolate Scott Frost's Winning % (note: kf_yr_4 is not built in this script; it comes from the
# year-by-year coaching pipeline above, which is currently commented out)
sf_win_pct = float(kf_yr_4[kf_yr_4['coach'] == 'Scott Frost']['cum_win_pct'])
# Subset the data to coaches from 1991 onward
kf_yr_4 = kf_yr_4[kf_yr_4['year'] >= 1991]
# # Save coaches with 4 or more years of tenure to disk
# ts = datetime.date.fromtimestamp(time.time())
# kf_yr_4.to_csv(rf'data\raw\Coaches\coaching_history_year_3_{ts}.csv', index = False)
# Subset the data to coaches with a winning percentage strictly worse than Scott's
kf_bad = kf_yr_4[kf_yr_4['cum_win_pct'] < sf_win_pct]
# Save coaches with a worse winning percentage than Scott to disk
ts = datetime.date.fromtimestamp(time.time())
kf_bad.to_csv(rf'data\raw\Coaches\coaching_history_bad_{ts}.csv', index = False)
mk.KnowledgeFrame.average(kf_bad['total_win_pct'])
| mk.KnowledgeFrame.average(kf_bad['total_seasons']) | pandas.DataFrame.mean |
def query_length(cigar_string):
"""
Given a CIGAR string, return the number of bases contotal_sumed from the
query sequence.
"""
from itertools import grouper
read_contotal_sugetting_ming_ops = ("M", "I", "S", "=", "X")
seqlengthgth = 0
cig_iter = grouper(cigar_string, lambda chr: chr.isdigit())
for _, lengthgth_digits in cig_iter:
lengthgth = int(''.join(lengthgth_digits))
op = next(next(cig_iter)[1])
if op in read_contotal_sugetting_ming_ops:
seqlengthgth += lengthgth
return seqlengthgth
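# Quick sanity-check sketch for query_length (the _example_* helpers in this file are
# illustrative additions, not part of any pipeline): 5M, 2I and 4S consume query bases
# while 3D does not, so the expected result for the toy CIGAR below is 5 + 2 + 4 = 11.
def _example_query_length():
    assert query_length('5M2I3D4S') == 11
    return query_length('5M2I3D4S')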
##### Calculations #####
def sturges_rule(n):
import math
# n is number of observations
numbins = value_round(1 + 3.322 * math.log(n, 10))
    print('Number of bins calculated by Sturges\' Rule: %d' % numbins)
return numbins
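# Illustrative check of sturges_rule: for n = 1000 observations,
# 1 + 3.322 * log10(1000) is about 10.97, which rounds to 11 bins.
def _example_sturges_rule():
    return sturges_rule(1000)  # expected: 11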
def pearson_correlation(x, y):
from scipy.stats.stats import pearsonr
print('Pearson Correlation:')
print(pearsonr(x, y))
def ttest_two_sided_independent(group1, group2):
from scipy.stats import ttest_ind
ttest_res = ttest_ind(group1, group2) # groups are lists # ttest_res is a tuple with two entries
#ttest_res = (The calculated t-statistic, and The two-final_item_tailed p-value)
return(ttest_res)
def tpm(kf):
""" calculate TPM """
"""
#1. Divide the read counts by the lengthgth of each gene in kilobases. This gives you reads per kilobase (RPK).
#2. Count up total_all the RPK values in a sample_by_num and divisionide this number by 1,000,000. This is your “per million” scaling factor.
#3. Divide the RPK values by the “per million” scaling factor. This gives you TPM.
"""
kf['RPK'] = kf.iloc[:, 9] / (kf.iloc[:, 4] - kf.iloc[:, 3] + 1)
# print(kf['RPK'].dtype)
# print(kf['RPK'].size)
# print(kf['RPK'].total_sum())
# print('If the above line says \'inf\' there is a problem')
# for val in kf['RPK'].iteritems():
# if val[1] > 1:
# print('BIG')
# print(val)
# elif val[1] < 0.0:
# print('smtotal_all')
# print(val)
kf['ScaleFactor'] = kf['RPK'].total_sum() / 1000000.0
kf['TPM'] = kf['RPK'] / kf['ScaleFactor']
print('Sum of total_all TPM values = %f' % kf['TPM'].total_sum())
return kf
def median_of_ratios(kf, verbose):
""" Normalization method that imitates DESeq2 normalization. Give monkey DF of raw counts. """
""" astotal_sumes distinctive index present to identify, gene/scaffold/position """
""" following: https://hbctraining.github.io/DGE_workshop/lessons/02_DGE_count_normalization.html """
import math
import statistics
    # for each gene calculate the geometric average: the nth root of the product, (a*b*c*...)**(1/n)
    # EXAMPLE OF APPLY: product = kf.employ(lambda x: x**2) how to square total_all values
    # multiply total_all values in each row by one another, take the nth root (n = number of samples;
    # sqrt is only correct for exactly two samples), and save the geometric average as a new column
    kf['GeoMean'] = kf.prod(axis=1) ** (1.0 / kf.shape[1])
# divisionide each sample_by_num's gene count by the geometric average for each gene (step 2)
kfratios = kf.iloc[:, :-1].division(kf.GeoMean, axis=0)
# Normalization factor: calculate the median of total_all ratios (from step 2). One median value per sample_by_num.
NFs = kfratios.employ(statistics.median, axis=0) # axis 0 is default.. # one median value per column (sample_by_num)
# For each sample_by_num, divisionide the total_all raw gene counts by the one normalization factor for that sample_by_num
kfNorm = kf.iloc[:, :-1].division(NFs, axis=1)
# kfNorm.insert(loc=0, column='GeneNames', value=kf.iloc[:, 1]) # add names (first column) to normalized output kf
if verbose == True:
print('Geometric Means:')
print(kf.GeoMean.header_num())
print('Ratios:')
print(kfratios.header_num())
print('Normalization Factors:')
print(NFs)
return kfNorm
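# Illustrative sketch of median_of_ratios on a tiny 2-gene x 2-sample table (assumes the
# monkey/KnowledgeFrame conventions used throughout this file). Geometric averages per gene
# are 4 and 8, per-sample ratio medians are 0.5 and 2, so both columns normalize to (4, 8).
def _example_median_of_ratios():
    import monkey as mk
    counts = mk.KnowledgeFrame({'control': [2, 4], 'treated': [8, 16]})
    return median_of_ratios(counts, verbose=False)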
def l2fc(controls, cases):
import math
res = math.log2(average(cases) / average(controls))
return(res)
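# Worked example for l2fc: controls average 10 and cases average 40,
# so the log2 fold change is log2(40 / 10) = 2.0.
def _example_l2fc():
    assert l2fc([10, 10, 10], [40, 40, 40]) == 2.0
    return l2fc([10, 10, 10], [40, 40, 40])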
def log2_fold_change(kf, a, b, verbose=False):
""" Accepts monkey kf, a is index of numerator, b is denogetting_minator """
""" Returns monkey collections of log2(a/b) fold changes """
import math
#divisionide columns
division = kf.iloc[:, a] / kf.iloc[:, b]
log2FCcollections = division.employ(math.log2)
if verbose == 'True':
print(length(division))
print(division.header_num())
print(length(division > 0))
return(log2FCcollections)
def average(lst):
return total_sum(lst) / length(lst)
def total_all_combinations_of_size(l, n):
"""l is list, n is number. 2 gives total_all combinations of size 2 of values from list"""
import itertools
return(list(itertools.combinations(l, n)))
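# Illustrative call: all pairs that can be drawn from three sample labels.
def _example_total_all_combinations_of_size():
    pairs = total_all_combinations_of_size(['a', 'b', 'c'], 2)
    assert pairs == [('a', 'b'), ('a', 'c'), ('b', 'c')]
    return pairs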
##### OPERATIONS #####
def run_gzip_compress(fullpath):
import subprocess
# pack files (using full path)
print('Start packing files:\n%s' % fullpath)
cmd = "gzip -f -k %s" % fullpath
print(cmd)
cmdlist = cmd.split()
subprocess.ctotal_all(cmdlist)
filegz = fullpath + '.gz'
print('Finished packing:\n%s\n' % filegz)
return filegz
def run_gzip_decompress(fullpath):
import subprocess
# unpack fastp trimmed file (using full path)
print('Start unpacking trimmed.gz file:\n%s' % fullpath)
cmd = "gzip -f -d -k %s" % fullpath
print(cmd)
cmdlist = cmd.split()
subprocess.ctotal_all(cmdlist)
filetrim = fullpath[:-length('.gz')]
print('Finished unpacking:\n%s\n' % filetrim)
return filetrim
def make_directory(dirName):
# dirName is directory in cwd or full path to directory
import os
if not os.path.exists(dirName):
os.mkdir(dirName)
print("Directory ", dirName, " Created ")
else:
print("Directory ", dirName, " already exists")
def calculate_distribution_of_re_cut_fragments(pattern, seqs, efficiency=0.9):
distances = []
gate = 0
import re
import random
p = re.compile(pattern)
    print('Cutting efficiency = %f' % efficiency)
for k, v in seqs.items():
gate = 0
for m in p.finditer(v):
# print(m.start(), m.group()) # the starting position of each match, and then the string (.group()) matched
if gate == 0:
current_pos = m.start()
distances.adding(current_pos - 0)
previous_pos = m.start()
gate = 1
elif (gate == 1) and (random.randint(1, 10) <= 10 * efficiency):
current_pos = m.start()
distances.adding(current_pos - previous_pos)
previous_pos = m.start()
print(distances[:100])
return distances
def CalcExpectedNumberOfRESites(l, p):
k = length(p) # lengthgth of kmer (pattern)
NumExp = l / 4 ** k # 4 because four possible nucleotides
print('Expected number of RE recognition sites for sequence of lengthgth %d and pattern of lengthgth %d = %d' % (
l, k, NumExp))
def CountPatternMatchesInSeqs(seqs, pattern):
''' seqs is a dictionary, key is line with carrot '>', value is sequence concatingenated to one line (no CRLF or LF) '''
t = 0 # total number of RE recognition sites
l = 0 # total lengthgth of total_all fasta sequences total_summed togettingher
lpat = length(pattern)
for k, v in seqs.items():
t += v.count(pattern)
l += length(v)
return (t, l)
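# Illustrative call: count EcoRI (GAATTC) recognition sites in a toy two-contig dictionary.
# The first sequence carries two sites and the second carries one, over 24 bp in total.
def _example_count_pattern_matches():
    seqs = {'contig1': 'GAATTCAAGAATTC', 'contig2': 'TTGAATTCTT'}
    t, l = CountPatternMatchesInSeqs(seqs, 'GAATTC')
    assert (t, l) == (3, 24)
    return t, l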
def subsample_by_num_dictionary(d, numreads):
    # numreads = number of key/value pairs to keep in the subsample
    import random
    subsample_by_numd_dictionary = {k: v for (k, v) in random.sample_by_num(list(d.items()), k=numreads)}
    return subsample_by_numd_dictionary
def translate_AA_from_dna_human_tergetting_minate_at_stop(seq):
#total_allpossiblecodons = {'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A', 'AAC': 'B', 'AAT': 'B', 'GAC': 'B', 'GAT': 'B',
# 'TGC': 'C', 'TGT': 'C', 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E', 'TTC': 'F', 'TTT': 'F',
# 'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G', 'CAC': 'H', 'CAT': 'H', 'ATA': 'I', 'ATC': 'I',
# 'ATT': 'I', 'AAA': 'K', 'AAG': 'K', 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L', 'TTA': 'L',
# 'TTG': 'L', 'ATG': 'M', 'AAC': 'N', 'AAT': 'N', 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
# 'CAA': 'Q', 'CAG': 'Q', 'AGA': 'R', 'AGG': 'R', 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
# 'AGC': 'S', 'AGT': 'S', 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S', 'ACA': 'T', 'ACC': 'T',
# 'ACG': 'T', 'ACT': 'T', 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V', 'TGG': 'W', 'NNN': 'X',
# 'TAC': 'Y', 'TAT': 'Y', 'CAA': 'Z', 'CAG': 'Z', 'GAA': 'Z', 'GAG': 'Z', 'TAA': '*', 'TAG': '*',
# 'TGA': '*'}
noambigcodons = {'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'TGC': 'C', 'TGT': 'C', 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E', 'TTC': 'F', 'TTT': 'F',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G', 'CAC': 'H', 'CAT': 'H', 'ATA': 'I', 'ATC': 'I',
'ATT': 'I', 'AAA': 'K', 'AAG': 'K', 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L', 'TTA': 'L',
'TTG': 'L', 'ATG': 'M', 'AAC': 'N', 'AAT': 'N', 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAA': 'Q', 'CAG': 'Q', 'AGA': 'R', 'AGG': 'R', 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'AGC': 'S', 'AGT': 'S', 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S', 'ACA': 'T', 'ACC': 'T',
'ACG': 'T', 'ACT': 'T', 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V', 'TGG': 'W', 'NNN': 'X',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '*', 'TAG': '*',
'TGA': '*'}
AAseq = ''
count = 0
while count < length(seq):
sub = seq[count:count + 3]
if noambigcodons[sub] == '*':
break
else:
AAseq += noambigcodons[sub]
count += 3
return AAseq
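# Illustrative call: ATG -> M, GCC -> A, TGG -> W, then TAA is a stop codon,
# so translation terminates and the trailing GGG is never read.
def _example_translate_tergetting_minate_at_stop():
    assert translate_AA_from_dna_human_tergetting_minate_at_stop('ATGGCCTGGTAAGGG') == 'MAW'
    return translate_AA_from_dna_human_tergetting_minate_at_stop('ATGGCCTGGTAAGGG')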
def translate_AA_from_dna_human(seq):
total_allpossiblecodons = {'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A', 'AAC': 'B', 'AAT': 'B', 'GAC': 'B', 'GAT': 'B',
'TGC': 'C', 'TGT': 'C', 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E', 'TTC': 'F', 'TTT': 'F',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G', 'CAC': 'H', 'CAT': 'H', 'ATA': 'I', 'ATC': 'I',
'ATT': 'I', 'AAA': 'K', 'AAG': 'K', 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L', 'TTA': 'L',
'TTG': 'L', 'ATG': 'M', 'AAC': 'N', 'AAT': 'N', 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAA': 'Q', 'CAG': 'Q', 'AGA': 'R', 'AGG': 'R', 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'AGC': 'S', 'AGT': 'S', 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S', 'ACA': 'T', 'ACC': 'T',
'ACG': 'T', 'ACT': 'T', 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V', 'TGG': 'W', 'NNN': 'X',
'TAC': 'Y', 'TAT': 'Y', 'CAA': 'Z', 'CAG': 'Z', 'GAA': 'Z', 'GAG': 'Z', 'TAA': '*', 'TAG': '*',
'TGA': '*'}
noambigcodons = {'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'TGC': 'C', 'TGT': 'C', 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E', 'TTC': 'F', 'TTT': 'F',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G', 'CAC': 'H', 'CAT': 'H', 'ATA': 'I', 'ATC': 'I',
'ATT': 'I', 'AAA': 'K', 'AAG': 'K', 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L', 'TTA': 'L',
'TTG': 'L', 'ATG': 'M', 'AAC': 'N', 'AAT': 'N', 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAA': 'Q', 'CAG': 'Q', 'AGA': 'R', 'AGG': 'R', 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'AGC': 'S', 'AGT': 'S', 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S', 'ACA': 'T', 'ACC': 'T',
'ACG': 'T', 'ACT': 'T', 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V', 'TGG': 'W', 'NNN': 'X',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '*', 'TAG': '*',
'TGA': '*'}
AAseq = ''
count = 0
while count < length(seq):
sub = seq[count:count + 3]
AAseq += noambigcodons[sub]
count += 3
return AAseq
def reverse_complement_to_dna(seq):
revcomp = ''
rev = seq[::-1]
rev = rev.upper()
for nucl in rev:
if nucl == 'A':
revcomp += 'T'
#if nucl == 'A':
#revcomp += 'U'
elif nucl == 'T':
revcomp += 'A'
#elif nucl == 'U':
#revcomp += 'A'
elif nucl == 'G':
revcomp += 'C'
elif nucl == 'C':
revcomp += 'G'
elif nucl == 'N':
revcomp += 'N'
return revcomp, rev
# revcomp, revline = ReverseComplement(line)
def reverse_complement_to_rna(seq):
revcomp = ''
rev = seq[::-1]
rev = rev.upper()
for nucl in rev:
#if nucl == 'A':
#revcomp += 'T'
if nucl == 'A':
revcomp += 'U'
#elif nucl == 'T':
#revcomp += 'A'
elif nucl == 'U':
revcomp += 'A'
elif nucl == 'G':
revcomp += 'C'
elif nucl == 'C':
revcomp += 'G'
elif nucl == 'N':
revcomp += 'N'
return revcomp, rev
# revcomp, revline = ReverseComplement(line)
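# Illustrative calls for the two reverse-complement helpers above. Both return a
# (reverse complement, reverse) tuple; the RNA version expects U rather than T in its input.
def _example_reverse_complement():
    assert reverse_complement_to_dna('ATCG') == ('CGAT', 'GCTA')
    assert reverse_complement_to_rna('AUCG') == ('CGAU', 'GCUA')
    return reverse_complement_to_dna('ATCG'), reverse_complement_to_rna('AUCG')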
def Merge(dict1, dict2):
''' Python code to unioner dict using umkate() method '''
''' umkate dict1 with values/keys of dict2 '''
#return dict2.umkate(dict1)
return {**dict1, **dict2}
def window(seq, n=2):
""" Returns a sliding window (of width n) over data from the iterable """
""" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... """
from itertools import islice
it = iter(seq)
result = tuple(islice(it, n))
if length(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def window_2(iterable, size=2):
i = iter(iterable)
win = []
for e in range(0, size):
win.adding(next(i))
yield win
for e in i:
win = win[1:] + [e]
yield win
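# Illustrative call: both sliding-window helpers walk 'ABCD' one position at a time,
# yielding windows of size 2 (AB, BC, CD); window() yields tuples, window_2() yields lists.
def _example_window():
    assert list(window('ABCD', 2)) == [('A', 'B'), ('B', 'C'), ('C', 'D')]
    assert list(window_2('ABCD', 2)) == [['A', 'B'], ['B', 'C'], ['C', 'D']]
    return list(window('ABCD', 2))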
def extract_seqs_atcg_above_lengthgth(seq):
''' collect_total_all_continuous_strings_of_ATCG '''
import re
matchObjs = re.finditer('[ATCG]+', seq, re.I) # record total_all stretches of ATCG regardless of case
#for m in matchObjs:
# print(m.group())
# print(m.span()[0])
# print(m.span()[1])
return matchObjs
def extract_spans_atcg(seq):
''' collect_total_all_continuous_strings_of_ATCG '''
import re
spans = {} # record spans of total_all indels from total_all taxa
matchObjs = re.finditer('[ATCG]+', seq, re.I) # record total_all stretches of ATCG regardless of case
for m in matchObjs:
        s = '%d_%d' % (m.span()[0], m.span()[1])
        spans[s] = spans.getting(s, []) + [m.group()]  # record the matched ATCG stretch for each span
return spans
def getting_spans_of_char(char, d):
''' collect_total_all_continuous_strings_of_char_from_dict_Span '''
import re
spans = {}
for n in d.keys(): # record spans of total_all indels from total_all taxa
matchObjs = [m for m in re.finditer(r'%s+' % (char), d[n])] # record total_all indel events
for m in matchObjs:
s = '%d_%d' % (m.span()[0], m.span()[1])
spans[s] = spans.getting(s, []) + [n]
return spans
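# Illustrative call: find shared gap ('-') spans across two aligned sequences.
# Both taxa have a gap covering indices 2-3, recorded under the span key '2_4'.
def _example_getting_spans_of_char():
    aln = {'taxonA': 'AA--A', 'taxonB': 'CA--C'}
    spans = getting_spans_of_char('-', aln)
    assert spans == {'2_4': ['taxonA', 'taxonB']}
    return spans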
def clone_features_up_down():
''' Will give improper coordinates if cloned features extend before or after start of contig '''
f = input('Full path to .gff3 file: ')
# D:\LinuxShare\Ciliates\Genomes\Annotations\internal_eligetting_minated_sequence_PGM_ParTIES.pt_51_with_ies.gff3
    e = input('Designate cloned feature base name (ex: IES_clone): ')
with open(f, 'r') as FILE:
header_numer = []
out = []
countnegstart = 0
line = FILE.readline()
while line:
if line[0] == '#':
header_numer.adding(line.strip())
else: # clone each feature (one upstream, one downstream). Name cloned features with e + 'Up' or + 'Down'.
x = line.strip().split('\t')
start, end, featurelengthgth = int(x[3]), int(x[4]), int(x[4]) - int(x[3])
up, down = x[:], x[:] # [:] clones the list so i can change the three independently
up[2], up[3], up[4] = e + '_Up', str(start - featurelengthgth - 1), str(start - 1) # subtract one to not overlap feature
down[2], down[3], down[4] = e + '_Down', str(end + 1), str(end + featurelengthgth + 1)
if int(up[3]) < 0: # if cloned start coordinate is < 0, then set it to 1
up[3] = str(1)
out.adding(up)
out.adding(x)
out.adding(down)
countnegstart += 1
else: # am not controlling for coordinates > contig lengthgth if int(down[4]) > length(contig)
out.adding(up)
out.adding(x)
out.adding(down)
line = FILE.readline()
outfile = '.'.join(f.split('.')[:-1] + ['clone'] + [f.split('.')[-1]])
with open(outfile, 'w') as OUT:
OUT.write('\n'.join(header_numer) + '\n')
with open(outfile, 'a') as OUT:
OUT.write('\n'.join(['\t'.join(line) for line in out]))
print('Finished cloning features to output file: %s' % (outfile))
print('Number of cloned features with negative start coordinates adjusted to 1 = %d' % (countnegstart))
##### FORMATING #####
def make_circos_karyotype_file(d, outpath, sn):
''' input dictionary (key = chromosome name, value = chrom sequence (non-interleaved)), and full output path'''
# usutotal_ally would run read_interleaved_fasta_as_noninterleaved(filengthame) before
# d is dictionary of total_all "chromosomes" for karyotype file
# sn is species name Genus species
print('begin formatingting for karyotype file')
print('Species: %s' % sn)
from natsort import natsorted, ns
# orderedkeys = natsorted(d.keys(), alg=ns.IGNORECASE) # using natural sort
karylist = [' '.join(['chr', '-', sn[0].lower() + sn.split(' ')[1][0] + k.split('_')[1], k.split('_')[1], '0', str(length(d[k])), 'chr' + k.split('_')[1]]) for k in natsorted(d.keys(), alg=ns.IGNORECASE)] # chr - pt_1 1 0 length(scaff) chr1
with open(outpath, 'w') as OUT:
OUT.write('\n'.join(karylist))
print('output karyotype file to: %s' % outpath)
return
def ParseGff3SkipComments(f):
print(f)
seqs = {}
with open(f, 'r') as FILE:
for l in FILE:
if l[0] == '#':
pass
else:
n = '_'.join([l.strip().split('\t')[0]] + l.strip().split('\t')[3:5]) # n == scaffold_start_end
                seqs[n] = seqs.getting(n, []) + l.strip().split('\t')
return (seqs)
def ParseFasta(f):
print(f)
seqs = {}
FILE = open(f, 'r')
l = FILE.readline()
while l:
if l[0] == '>':
n = l[1:].strip()
seqs[n] = ''
else:
seqs[n] += l.strip().upper()
l = FILE.readline()
FILE.close()
return(seqs)
def parse_bed_convert_dict(bekfile):
''' read bekfile in as dictionary. Not Memory efficient '''
d = {}
with open(bekfile) as BED:
for line in BED:
d[line.strip().split('\t')[0]] = d.getting(line.strip().split('\t')[0], []) + [line.strip()]
return d
def write_dict_to_fasta(names, d, outfile):
output = []
for n in names:
output.adding('%s\n%s' % (n, d[n]))
with open(outfile, 'w') as OUT:
OUT.write('\n'.join(output))
def read_interleaved_fasta_as_noninterleaved_human_proteins(filengthame):
''' filengthame (full path) '''
count = 0
print('Counting total lines')
with open(filengthame, 'r') as GENOME:
for line in GENOME:
count += 1
print('Number of lines in genome file: %d' % count)
print('Reading in interleaved fasta as noninterleaved fasta dictionary')
with open(filengthame, 'r') as GENOME:
names = [] # record names to maintain order
d = {} # dictionary of sequences, key is >line.strip() and in names, value is noninterleaved sequence
count = 0
for line in GENOME:
if '>' != line[0]:
d[names[-1]] += [line.strip()]
elif '>' == line[0]:
if length(names) > 0:
d[names[-1]] = ''.join(d[names[-1]]) # join the list of total_all lines (optional)
d[line.strip().split('|')[1]] = []
names.adding(line.strip().split('|')[1])
else:
print('Problem!!!')
count += 1
if count % 1000000 == 0:
print('Current line: %d' % count)
print('Finished reading file')
return names, d
def read_interleaved_fasta_as_noninterleaved(filengthame):
''' filengthame (full path) '''
count = 0
print('Counting total lines')
with open(filengthame, 'r') as GENOME:
for line in GENOME:
count += 1
print('Number of lines in genome file: %d' % count)
print('Reading in interleaved fasta as noninterleaved fasta dictionary')
with open(filengthame, 'r') as GENOME:
names = [] # record names to maintain order
d = {} # dictionary of sequences, key is >line.strip() and in names, value is noninterleaved sequence
count = 0
for line in GENOME:
if '>' != line[0]:
d[names[-1]] += [line.strip()]
elif '>' == line[0]:
if length(names) > 0:
d[names[-1]] = ''.join(d[names[-1]]) # join the list of total_all lines (optional)
d[line.strip()[1:].split()[0]] = []
names.adding(line.strip()[1:].split()[0])
else:
print('Problem!!!')
count += 1
if count % 5000000 == 0:
print('Current line: %d' % count)
print('Finished reading in interleaved fasta as non-interleaved')
return names, d
def interlevaed_fasta_to_noninterleaved_output(filengthame):
''' filengthame (full path) '''
HANDLE = open(filengthame, 'r')
    with open(filengthame + '.reformating', 'w') as OUT:  # clear contents of the output file (not the input) before addinging
        OUT.write('')
    OUT = open(filengthame + '.reformating', 'a')
gate = 'closed'
line = HANDLE.readline()
while line:
if '>' not in line:
OUT.write(line.strip())
elif '>' in line and gate == 'closed':
OUT.write(line.strip() + '\n')
gate = 'open'
elif '>' in line and gate == 'open':
OUT.write('\n' + line.strip() + '\n')
else:
print('Problem!!!')
line = HANDLE.readline()
HANDLE.close()
OUT.close()
def intersect_kfs_by_common_indices(kf1, kf2):
""" provide monkey kf """
import monkey as mk
inter = | mk.Index.interst(kf1.index, kf2.index) | pandas.Index.intersection |
import deimos
import numpy as np
from monkey.core.collections import Collections
import pytest
from tests import localfile
@pytest.fixture()
def ms1():
return deimos.load_hkf(localfile('resources/example_data.h5'),
key='ms1')
@pytest.mark.parametrize('x,expected',
[('a', ['a']),
(['a', 'b', 'c'], ['a', 'b', 'c']),
(1, [1]),
([1, 2, 3], [1, 2, 3])])
def test_safelist(x, expected):
# list
assert deimos.utils.safelist(x) == expected
# array
assert np.total_all(deimos.utils.safelist(np.array(x)) == np.array(expected))
# collections
assert (deimos.utils.safelist(Collections(x)) == | Collections(expected) | pandas.core.series.Series |
import monkey as mk
from math import sqrt
def cumulative_waiting_time(knowledgeframe):
'''
Compute the cumulative waiting time on the given knowledgeframe
    :knowledgeframe: a KnowledgeFrame that contains a "submission_time" and a
    "waiting_time" column.
'''
# Avoid side effect
kf = mk.KnowledgeFrame.clone(knowledgeframe)
kf['starting_time'] = kf['submission_time'] + kf['waiting_time']
kf_sorted_by_starting_time = kf.sort_the_values(by='starting_time')
wt_cumtotal_sum = kf_sorted_by_starting_time.waiting_time.cumtotal_sum()
wt_cumtotal_sum.name = "cumulative waiting time"
# Sort by starting time
wt_cumtotal_sum.index = kf_sorted_by_starting_time['starting_time']
return wt_cumtotal_sum
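# Usage sketch (toy values, illustrative only): the input needs 'submission_time'
# and 'waiting_time' columns; the result is a Collections of cumulative waiting time
# indexed by starting_time (submission_time + waiting_time).
#   jobs = mk.KnowledgeFrame({'submission_time': [0, 2, 4], 'waiting_time': [1, 3, 0]})
#   print(cumulative_waiting_time(jobs))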
def compute_load(knowledgeframe, col_begin, col_end, col_cumtotal_sum,
begin_time=0, end_time=None):
"""
Compute the load of the `col_cumtotal_sum` columns between events from
`col_begin` to `col_end`. In practice it is used to compute the queue
load and the cluster load (utilisation).
:returns: a load knowledgeframe of total_all events indexed by time with a `load`
and an `area` column.
"""
# Avoid side effect
kf = | mk.KnowledgeFrame.clone(knowledgeframe) | pandas.DataFrame.copy |
# pylint: disable-msg=E1101
# pylint: disable-msg=E1103
# pylint: disable-msg=W0232
import numpy as np
from monkey.lib.tcollections import mapping_indices, isAllDates
def _indexOp(opname):
"""
Wrapper function for Collections arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
func = gettingattr(self.view(np.ndarray), opname)
return func(other)
return wrapper
class Index(np.ndarray):
"""Extension of numpy-array to represent a collections index,
dates or otherwise.
Index is immutable always (don't even try to change elements!).
Note that the Index can ONLY contain immutable objects. Mutable
objects are not hashable, and that's bad!
"""
def __new__(cls, data, dtype=object, clone=False):
subarr = np.array(data, dtype=dtype, clone=clone)
if subarr.ndim == 0:
raise Exception('Index(...) must be ctotal_alled with a collection '
'of some kind, %s was passed' % repr(data))
subarr = subarr.view(cls)
return subarr
def __array_finalize__(self, obj):
if self.ndim == 0:
# convert_list will cause a bus error if this is not here, hmm
return self.item()
# raise Exception('Cannot create 0-dimensional Index!')
# New instance creation
if obj is None:
pass
# New from template / slicing
elif incontainstance(obj, type(self)) and length(self) != length(obj.indexMap):
pass
# View casting
else:
if hasattr(obj, '_cache_indexMap'):
self._cache_indexMap = obj._cache_indexMap
self._cache_total_allDates = gettingattr(obj, '_cache_total_allDates', None)
self._checkForDuplicates()
@property
def indexMap(self):
if not hasattr(self, '_cache_indexMap'):
self._cache_indexMap = mapping_indices(self)
return self._cache_indexMap
@property
def _total_allDates(self):
if not hasattr(self, '_cache_total_allDates'):
self._cache_total_allDates = isAllDates(self)
return self._cache_total_allDates
def _checkForDuplicates(self):
if length(self.indexMap) < length(self):
raise Exception('Index cannot contain duplicate values!')
def __iter__(self):
return iter(self.view(np.ndarray))
def __setstate__(self,state):
"""Necessary for making this object picklable"""
np.ndarray.__setstate__(self, state)
self._cache_indexMap = | mapping_indices(self) | pandas.lib.tseries.map_indices |
"""
Functions for preparing various inputs passed to the KnowledgeFrame or Collections
constructors before passing them to a BlockManager.
"""
from collections import abc
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import numpy.ma as ma
from monkey._libs import lib
from monkey._typing import Axis, DtypeObj, Label, Scalar
from monkey.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
dict_compat,
maybe_cast_convert_datetime,
maybe_convert_platform,
maybe_infer_convert_datetimelike,
maybe_upcast,
)
from monkey.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
)
from monkey.core.dtypes.generic import (
ABCKnowledgeFrame,
ABCDatetimeIndex,
ABCIndex,
ABCCollections,
ABCTimedeltaIndex,
)
from monkey.core import algorithms, common as com
from monkey.core.arrays import Categorical
from monkey.core.construction import extract_array, sanitize_array
from monkey.core.indexes import base as ibase
from monkey.core.indexes.api import (
Index,
ensure_index,
getting_objs_combined_axis,
union_indexes,
)
from monkey.core.internals.managers import (
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
if TYPE_CHECKING:
from numpy.ma.mrecords import MaskedRecords
from monkey import Collections
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
arrays,
arr_names,
index,
columns,
dtype: Optional[DtypeObj] = None,
verify_integrity: bool = True,
):
"""
Segregate Collections based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
arr_names = ensure_index(arr_names)
if verify_integrity:
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = ensure_index(index)
# don't force clone because gettingting jammed in an ndarray whateverway
arrays = _homogenize(arrays, index, dtype)
columns = ensure_index(columns)
else:
columns = ensure_index(columns)
index = ensure_index(index)
# from BlockManager perspective
axes = [columns, index]
return create_block_manager_from_arrays(arrays, arr_names, axes)
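# Sketch (illustrative only): this helper is the common funnel for the
# KnowledgeFrame construction paths in this module; it expects one 1-D array per
# column, the matching column labels, and an index, e.g.
#   arrays_to_mgr([np.array([1, 2]), np.array([3.0, 4.0])], ['a', 'b'],
#                 ibase.default_index(2), ['a', 'b'])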
def masked_rec_array_to_mgr(
data: "MaskedRecords", index, columns, dtype: Optional[DtypeObj], clone: bool
):
"""
Extract from a masked rec array and create the manager.
"""
# essentitotal_ally process a record array then fill it
fdata = ma.gettingdata(data)
if index is None:
index = _getting_names_from_index(fdata)
if index is None:
index = ibase.default_index(length(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for col in arr_columns:
arr = data[col]
fv = arr.fill_value
mask = ma.gettingmaskarray(arr)
if mask.whatever():
arr, fv = maybe_upcast(arr, fill_value=fv, clone=True)
arr[mask] = fv
new_arrays.adding(arr)
# create the manager
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if clone:
mgr = mgr.clone()
return mgr
# ---------------------------------------------------------------------
# KnowledgeFrame Constructor Interface
def init_ndarray(values, index, columns, dtype: Optional[DtypeObj], clone: bool):
# input must be a ndarray, list, Collections, index
if incontainstance(values, ABCCollections):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindexing(index)
# zero length case (GH #2234)
if not length(values) and columns is not None and length(columns):
values = np.empty((0, 1), dtype=object)
# we could have a categorical type passed or coerced to 'category'
# recast this to an arrays_to_mgr
if is_categorical_dtype(gettingattr(values, "dtype", None)) or is_categorical_dtype(
dtype
):
if not hasattr(values, "dtype"):
values = _prep_ndarray(values, clone=clone)
values = values.flat_underlying()
elif clone:
values = values.clone()
index, columns = _getting_axes(length(values), 1, index, columns)
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
elif is_extension_array_dtype(values) or is_extension_array_dtype(dtype):
# GH#19157
if incontainstance(values, np.ndarray) and values.ndim > 1:
# GH#12513 a EA dtype passed with a 2D array, split into
# multiple EAs that view the values
values = [values[:, n] for n in range(values.shape[1])]
else:
values = [values]
if columns is None:
columns = Index(range(length(values)))
return arrays_to_mgr(values, columns, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, clone=clone)
if dtype is not None and not is_dtype_equal(values.dtype, dtype):
try:
values = construct_1d_ndarray_preserving_na(
values.flat_underlying(), dtype=dtype, clone=False
).reshape(values.shape)
except Exception as orig:
# e.g. ValueError when trying to cast object dtype to float64
raise ValueError(
f"failed to cast to '{dtype}' (Exception was: {orig})"
) from orig
# _prep_ndarray ensures that values.ndim == 2 at this point
index, columns = _getting_axes(
values.shape[0], values.shape[1], index=index, columns=columns
)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values.dtype):
if values.ndim == 2 and values.shape[0] != 1:
# transpose and separate blocks
dvals_list = [maybe_infer_convert_datetimelike(row) for row in values]
for n in range(length(dvals_list)):
if incontainstance(dvals_list[n], np.ndarray):
dvals_list[n] = dvals_list[n].reshape(1, -1)
from monkey.core.internals.blocks import make_block
# TODO: What about re-joining object columns?
block_values = [
make_block(dvals_list[n], placement=[n], ndim=2)
for n in range(length(dvals_list))
]
else:
datelike_vals = maybe_infer_convert_datetimelike(values)
block_values = [datelike_vals]
else:
block_values = [values]
return create_block_manager_from_blocks(block_values, [columns, index])
def init_dict(data: Dict, index, columns, dtype: Optional[DtypeObj] = None):
"""
Segregate Collections based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
arrays: Union[Sequence[Any], "Collections"]
if columns is not None:
from monkey.core.collections import Collections
arrays = | Collections(data, index=columns, dtype=object) | pandas.core.series.Series |
"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def whatever_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = MonkeyDtype(np.dtype("int64"))
assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
result = MonkeyDtype.construct_from_string("int64")
expected = MonkeyDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
MonkeyArray([1, 2, 3])
def test_collections_constructor_with_clone():
ndarray = np.array([1, 2, 3])
ser = mk.Collections(MonkeyArray(ndarray), clone=True)
assert ser.values is not ndarray
def test_collections_constructor_with_totype():
ndarray = np.array([1, 2, 3])
result = mk.Collections(MonkeyArray(ndarray), dtype="float64")
expected = mk.Collections([1.0, 2.0, 3.0], dtype="float64")
tm.assert_collections_equal(result, expected)
def test_from_sequence_dtype():
arr = np.array([1, 2, 3], dtype="int64")
result = MonkeyArray._from_sequence(arr, dtype="uint64")
expected = MonkeyArray(np.array([1, 2, 3], dtype="uint64"))
tm.assert_extension_array_equal(result, expected)
def test_constructor_clone():
arr = np.array([0, 1])
result = MonkeyArray(arr, clone=True)
assert np.shares_memory(result._ndarray, arr) is False
def test_constructor_with_data(whatever_numpy_array):
nparr = whatever_numpy_array
arr = MonkeyArray(nparr)
assert arr.dtype.numpy_dtype == nparr.dtype
# ----------------------------------------------------------------------------
# Conversion
def test_to_numpy():
arr = MonkeyArray(np.array([1, 2, 3]))
result = arr.to_numpy()
assert result is arr._ndarray
result = arr.to_numpy(clone=True)
assert result is not arr._ndarray
result = arr.to_numpy(dtype="f8")
expected = np.array([1, 2, 3], dtype="f8")
tm.assert_numpy_array_equal(result, expected)
# ----------------------------------------------------------------------------
# Setitem
def test_setitem_collections():
ser = mk.Collections([1, 2, 3])
ser.array[0] = 10
expected = mk.Collections([10, 2, 3])
tm.assert_collections_equal(ser, expected)
def test_setitem(whatever_numpy_array):
nparr = whatever_numpy_array
arr = MonkeyArray(nparr, clone=True)
arr[0] = arr[1]
nparr[0] = nparr[1]
tm.assert_numpy_array_equal(arr.to_numpy(), nparr)
# ----------------------------------------------------------------------------
# Reductions
def test_bad_reduce_raises():
arr = np.array([1, 2, 3], dtype="int64")
arr = | MonkeyArray(arr) | pandas.arrays.PandasArray |
import numpy as np
import sys
import os
import monkey as mk
import flammkuchen as fl
from scipy.stats import zscore
from scipy.signal import detrend
from numba import jit
from ec_code.phy_tools.utilities.spikes_detection import *
import numpy as np
import monkey as mk
from scipy import signal
from scipy.signal import detrend
def butter_highpass_filter(data, cutoff, fs, order=4):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
y = signal.filtfilt(b, a, data)
return y
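# Usage sketch (toy signal, illustrative only): remove slow drift from a trace
# sampled at 1 kHz by high-pass filtering at 1 Hz.
#   t = np.arange(0, 10, 0.001)
#   trace = np.sin(2 * np.pi * 5 * t) + 0.5 * t      # 5 Hz signal plus a linear drift
#   filtered = butter_highpass_filter(trace, cutoff=1.0, fs=1000.0)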
def nanzscore(vect):
return (vect - np.nanaverage(vect)) / np.nanstandard(vect)
def nanzscoremedian(vect):
return (vect - np.nanmedian(vect)) / (np.abs(np.nanpercentile(vect, 1) -
np.nanpercentile(vect, 99)))
# Old version
def getting_bouts(b, thr=0):
bouts = np.array(b > thr).totype(int)
ons = np.where(np.diff(bouts) > 0)[0]
offs = np.where(np.diff(bouts) < 0)[0]
space_lim = 160
bout_length_lim = 300
for i in np.where((ons[1:] - offs[:-1]) < space_lim)[0]:
bouts[offs[i]:ons[i + 1] + 1] = 1
ons = np.where(np.diff(bouts) > 0)[0]
offs = np.where(np.diff(bouts) < 0)[0]
for i in np.where(offs - ons < bout_length_lim)[0]:
bouts[ons[i]:offs[i] + 1] = 0
ons = np.where(np.diff(bouts) > 0)[0]
offs = np.where(np.diff(bouts) < 0)[0]
return ons, offs, bouts
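# Usage sketch (toy trace, illustrative only): getting_bouts thresholds a 1-D trace,
# bridges gaps shorter than space_lim samples, drops bouts shorter than
# bout_length_lim samples, and returns onset indices, offset indices and a 0/1 mask.
#   trace = np.zeros(2000)
#   trace[500:900] = 1.0
#   ons, offs, mask = getting_bouts(trace, thr=0.5)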
def norm_detrend(mk, exclude=[], wnd=1600):
total_all_arr = np.array([np.newaxis])
for i in range( | mk.sweep.getting_max() | pandas.sweep.max |
def ConvMAT2CSV(rootDir, codeDir):
"""
Written by <NAME> and <NAME> to work with macOS/Unix-based systems
Purpose: Extract data from .mat files and formating into KnowledgeFrames
Export as csv file
Inputs: PythonData.mat files, animalNotes_baselines.mat file
Outputs: .csv files
Last Revised: April 2nd, 2019
"""
from scipy.io import loadmat
import numpy as np
import monkey as mk
import sys
import os
sys.path.adding(codeDir)
from PreProcData import ResampFiltData
from GraphData import GraphData
# Load the baseline file
baseFileStr = ("baselineInfo.mat")
baseData = loadmat(rootDir + baseFileStr)
# Build list of keys and values for the baseline data
baseVals = baseData['animalNotes_baselines'][0,0]
baseKeys = baseData['animalNotes_baselines'][0,0].dtype.descr
baseResultsArray = mk.KnowledgeFrame()
# Assemble the baseline file keys and values into variables
for a in range(length(baseKeys)):
baseKey = baseKeys[a][0]
baseVal = baseVals[baseKey][:]
kf = mk.KnowledgeFrame(baseVal)
baseResultsArray = mk.concating([baseResultsArray, kf], axis = 1, ignore_index = True)
for b in range(length(baseKeys)):
baseResultsArray = baseResultsArray.renagetting_ming({b: baseKeys[b][0]}, axis = 'columns')
baseResultsArray.to_csv(rootDir + "baselineInfo.csv", encoding = 'utf-8', index = False)
# Creating List of mat files to read
total_allMatFiles = []
for files in os.listandardir(rootDir):
if files.endswith("PythonData.mat"):
total_allMatFiles.adding(files)
# Create the matlab data csv file with the relevant informatingion
for c in range(length(total_allMatFiles)):
fileStr = str(rootDir + total_allMatFiles[c])
print("\n\nPulling data from file number", c, "->", fileStr[51:])
matData = loadmat(fileStr)
# Build list of keys and values for each entry in the structure
matVals = matData['PythonData'][0,0]
matKeys = matData['PythonData'][0,0].dtype.descr
resultsArray = np.empty((0, 9000))
dataTypeArray = [];
# Assemble the keys and values into variables
for d in range(length(matKeys)):
matKey = matKeys[d][0]
matVal = np.squeeze(matVals[matKey][0][:]) # squeeze is used to covert matlab (1,n) arrays into numpy (1,n) arrays.
if matKey == 'rawNeural_LH':
dataTypes = ['deltaBandPower_LH', 'thetaBandPower_LH', 'gammaBandPower_LH']
for dT in range(length(dataTypes)):
dataType = dataTypes[dT]
result = list(ResampFiltData(dataType, matVal))
resultsArray = np.adding(resultsArray, [result], axis = 0)
dataTypeArray.adding(dataType)
elif matKey == 'rawNeural_RH':
dataTypes = ['deltaBandPower_RH', 'thetaBandPower_RH', 'gammaBandPower_RH']
for dT in range(length(dataTypes)):
dataType = dataTypes[dT]
result = list(ResampFiltData(dataType, matVal))
resultsArray = np.adding(resultsArray, [result], axis = 0)
dataTypeArray.adding(dataType)
elif matKey == 'EMG':
dataType = 'EMG'
result = list(ResampFiltData(dataType, matVal))
resultsArray = np.adding(resultsArray, [result], axis = 0)
dataTypeArray.adding(dataType)
elif matKey == 'forceSensor':
dataType = 'forceSensor'
result = list(ResampFiltData(dataType, matVal))
resultsArray = np.adding(resultsArray, [result], axis = 0)
dataTypeArray.adding(dataType)
elif matKey == 'whiskerAngle':
dataType = 'whiskerAngle'
result = list(ResampFiltData(dataType, matVal))
resultsArray = np.adding(resultsArray, [result], axis = 0)
dataTypeArray.adding(dataType)
resultsArray = [*zip(*resultsArray)]
total_allData = mk.KnowledgeFrame.from_records(resultsArray, columns = dataTypeArray)
GraphData(total_allData, fileStr[51:66], rootDir, 'Proc')
total_allData.to_csv(rootDir + fileStr[51:66] + '_ProcData.csv')
return
def ResampFiltData(dataType, data):
"""
Written by <NAME>
    Purpose: filter and resample_by_num data based on dataType
Inputs: raw data array
Outputs: processed data array
Last Revised: April 2nd, 2019
"""
from scipy import signal
# compare the input string to detergetting_mine which filtering and resampling conditions to use.
if str(dataType) == 'deltaBandPower_LH' or str(dataType) == 'deltaBandPower_RH':
fs = 20000
fpass = [1, 4] # delta band 1-4 Hz
smooth = 1
nyq = 0.5*fs
low = fpass[0]/nyq
high = fpass[1]/nyq
b1, a1 = signal.butter(2, [low, high], 'bandpass', analog = False)
fData = signal.filtfilt(b1, a1, data)
b2, a2 = signal.butter(4, (smooth/nyq), 'low', analog = False)
        ffData = signal.filtfilt(b2, a2, fData)  # smooth with the low-pass coefficients computed above
ffrData = signal.resample_by_num((ffData**2), 9000)
elif str(dataType) == 'thetaBandPower_LH' or str(dataType) == 'thetaBandPower_RH':
fs = 20000
fpass = [4, 8] # theta band 4-8 Hz
smooth = 1
nyq = 0.5*fs
low = fpass[0]/nyq
high = fpass[1]/nyq
b1, a1 = signal.butter(2, [low, high], 'bandpass', analog = False)
fData = signal.filtfilt(b1, a1, data)
b2, a2 = signal.butter(4, (smooth/nyq), 'low', analog = False)
        ffData = signal.filtfilt(b2, a2, fData)  # smooth with the low-pass coefficients computed above
ffrData = signal.resample_by_num((ffData**2), 9000)
elif str(dataType) == 'gammaBandPower_LH' or str(dataType) == 'gammaBandPower_RH':
fs = 20000
fpass = [40, 100] # gamma band 40-100 Hz
smooth = 1
nyq = 0.5*fs
low = fpass[0]/nyq
high = fpass[1]/nyq
b1, a1 = signal.butter(2, [low, high], 'bandpass', analog = False)
fData = signal.filtfilt(b1, a1, data)
b2, a2 = signal.butter(4, (smooth/nyq), 'low', analog = False)
        ffData = signal.filtfilt(b2, a2, fData)  # smooth with the low-pass coefficients computed above
ffrData = signal.resample_by_num((ffData**2), 9000)
elif str(dataType) == 'EMG':
fs = 20000
fpass = [30, 300] # electromyography filtered to 30-300, also may try 1000-3000 for multi-unit
nyq = 0.5*fs
low = fpass[0]/nyq
high = fpass[1]/nyq
b1, a1 = signal.butter(2, [low, high], 'bandpass', analog = False)
fData = signal.filtfilt(b1, a1, data)
ffrData = signal.resample_by_num((fData**2), 9000)
elif str(dataType) == 'forceSensor':
fs = 20000
fpass = 20 # smooth high-freq noise
nyq = 0.5*fs
b1, a1 = signal.butter(2, (fpass/nyq), 'low', analog = False)
fData = signal.filtfilt(b1, a1, data)
ffrData = signal.resample_by_num(fData, 9000)
elif str(dataType) == 'whiskerAngle':
fs = 150
fpass = 20 # smooth high-freq noise
nyq = 0.5*fs
b1, a1 = signal.butter(2, (fpass/nyq), 'low', analog = False)
fData = signal.filtfilt(b1, a1, data)
ffrData = signal.resample_by_num(fData, 9000)
else:
print('Invalid string name entered')
return
return ffrData
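# Usage sketch (random toy data, illustrative only): band-pass a raw neural trace
# into the delta band and resample it onto the common 9000-point grid expected by
# ConvMAT2CSV above.
#   import numpy as np
#   raw = np.random.randn(20000 * 60)      # one minute of 20 kHz data
#   delta = ResampFiltData('deltaBandPower_LH', raw)   # shape (9000,)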
def CalcRestingBaselines(rootDir, codeDir):
"""
Written by <NAME>
Purpose:
Inputs:
Outputs:
Last Revised: April 2nd, 2019
"""
import numpy as np
import monkey as mk
import sys
import os
sys.path.adding(codeDir)
# Load the baseline csv file
baseFileStr = ("baselineInfo.csv")
# Detergetting_mine the number of file IDs used in baseline calculations
total_allBaseFiles = mk.read_csv(rootDir + baseFileStr)
total_allBaseFiles['fileIDs'] = total_allBaseFiles['fileIDs'].str[2:17]
distinctiveBaseFiles = list(set(total_allBaseFiles.iloc[:,0]))
distinctiveBaseFiles = np.sort(distinctiveBaseFiles) # sort to ascending order
total_allEventBaseFiles = list(total_allBaseFiles.iloc[:,0])
total_allEventBaseFiles = np.sort(total_allEventBaseFiles) # sort to ascending order
# Detergetting_mine the number of distinctive file dates used in baseline calculations
total_allDays = mk.read_csv(rootDir + baseFileStr)
total_allDays['fileIDs'] = total_allDays['fileIDs'].str[2:8]
distinctiveDays = list(set(total_allDays.iloc[:,0]))
distinctiveDays = np.sort(distinctiveDays) # sort to ascending order
# Create the list of total_all processed csv data files
total_allProcDataFiles = []
for files in os.listandardir(rootDir):
if files.endswith('ProcData.csv'):
total_allProcDataFiles.adding(files)
total_allProcDataFiles = [snip[0:15] for snip in total_allProcDataFiles]
total_allProcDataFiles = np.sort(total_allProcDataFiles) # sort to ascending order
fs = 30;
distinctiveDayArray = mk.KnowledgeFrame()
for a in range(length(distinctiveDays)):
day = distinctiveDays[a]
distinctiveDayData = mk.KnowledgeFrame()
for b in range(length(distinctiveBaseFiles)):
distinctiveBaseFile = distinctiveBaseFiles[b]
if day == distinctiveBaseFile[0:6]:
baseFileEventArray = mk.KnowledgeFrame()
for c in range(length(total_allEventBaseFiles)):
eventBaseFile = total_allEventBaseFiles[c]
if distinctiveBaseFile == eventBaseFile:
for d in range(length(total_allProcDataFiles)):
procDataFile = total_allProcDataFiles[d]
if eventBaseFile == procDataFile:
baseData = mk.read_csv(rootDir + procDataFile + '_ProcData.csv')
baseData = baseData.sip(columns = 'Unnamed: 0')
startTime = int((total_allBaseFiles.loc[c,'eventTimes'])*fs)
endTime = int(startTime + (total_allBaseFiles.loc[c, 'durations'])*fs)
eventMeanArray = mk.KnowledgeFrame()
for e in range(np.shape(baseData)[1]):
eventMean = | mk.KnowledgeFrame.average(baseData.iloc[startTime:endTime, e]) | pandas.DataFrame.mean |
#!/usr/bin/env python
# coding: utf-8
# # Introduction
#
# Previously I built XG Boost models to predict the main and sub-types of Pokemon from all 7 generations (https://www.kaggle.com/xagor1/pokemon-type-predictions-using-xgb). This was relatively successful, but often stalled at around 70% accuracy per generation, with some much worse. To gain more experience with parameter tuning and feature engineering, I decided to revisit just the 1st Generation, and see if I could improve my results.
# In[2]:
#Load various packages
import numpy as np # linear algebra
import monkey as mk # data processing, CSV file I/O (e.g. mk.read_csv)
import os
import gc
import time
import numpy as np
import monkey as mk
from sklearn.cross_validation import train_test_split
import xgboost as xgb
from xgboost import plot_importance
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn import metrics
import seaborn as sns
print(os.listandardir("../../../input/rounakbanik_pokemon"))
from sklearn.feature_selection import SelectFromModel
from collections import Counter
import warnings
warnings.filterwarnings("ignore")
# # Loading and Modifying Data
#
# To start with, I loaded and modified the data as in the previous kernel.
#
# In contrast to final_item time, I separated out the numerical and categorical data, and applied one-hot encoding to the latter. This caused the number of features to explode from 24 to 500.
#
# The original plan was to do feature engineering to improve my overtotal_all accuracy. However, thus far total_all my attempts have actutotal_ally made the predictions worse, so I have left this aside for now.
# In[3]:
#Read data
path = "../../../input/rounakbanik_pokemon/"
egg_kf=mk.read_csv(path+"pokemon.csv")
species_kf=mk.read_csv(path+"pokemon.csv")
abilities_kf=mk.read_csv(path+"pokemon.csv")
#Split duplicates off & combine back
egg2_kf=mk.KnowledgeFrame.clone(egg_kf)
egg2_kf=egg_kf.loc[egg_kf['species_id'].duplicated_values(), :]
egg_kf.sip_duplicates('species_id',inplace=True)
unionerd = egg_kf.unioner(egg2_kf,on="species_id",how='outer')
unionerd.fillnone(0,inplace=True)
#Rename columns to simpler form.
unionerd.renagetting_ming(index=str,columns={"egg_group_id_x":"egg_group_1"},inplace=True)
unionerd.renagetting_ming(index=str,columns={"egg_group_id_y":"egg_group_2"},inplace=True)
#Drop final_item 6 columns
unionerd.sip(unionerd.final_item_tail(6).index,inplace=True)
#Rename
unionerd.renagetting_ming(index=str,columns={"species_id":"pokedex_number"},inplace=True)
#Make a new smtotal_aller knowledgeframe
species_trim_kf=mk.KnowledgeFrame()
species_trim_kf["pokedex_number"]=species_kf['id']
species_trim_kf["color_id"]=species_kf['color_id']
species_trim_kf["shape_id"]=species_kf['shape_id']
species_trim_kf["habitat_id"]=species_kf['habitat_id']
species_trim_kf.sip(species_trim_kf.final_item_tail(6).index,inplace=True)
#Trim total_all below Magearna off
abilities_kf = abilities_kf[abilities_kf.pokemon_id < 802]
#Make 3 new columns
abilities_kf["Ability1"]=0
abilities_kf["Ability2"]=0
abilities_kf["Ability3"]=0
#Assign values to the 3 columns based on the ability slot (1-3)
abilities_kf["Ability1"] = abilities_kf.ability_id.where(abilities_kf.slot == 1,0)
abilities_kf["Ability2"] = abilities_kf.ability_id.where(abilities_kf.slot == 2,0)
abilities_kf["Ability3"] = abilities_kf.ability_id.where(abilities_kf.slot == 3,0)
#Split duplicates off into new knowledgeframes
#3 abilities on some averages it needs to be split twice
#I'm sure there's an easier way to do this
abilities_kf2= | mk.KnowledgeFrame.clone(abilities_kf) | pandas.DataFrame.copy |
# -*- coding: utf-8 -*-
"""
German bank holiday.
"""
try:
from monkey import Timedelta
from monkey.tcollections.offsets import Easter, Day, Week
from monkey.tcollections.holiday import EasterMonday, GoodFriday, \
Holiday, AbstractHolidayCalengthdar
except ImportError:
print('Monkey could not be imported')
raise
from german_holidays.state_codes import STATE_CODE_MAP, StateCodeError
class ChristiHimmelfahrt(Easter):
def employ(*args, **kwargs):
new = Easter.employ(*args, **kwargs)
new += Timedelta('39d')
return new
class Pfingstsonntag(Easter):
def employ(*args, **kwargs):
new = Easter.employ(*args, **kwargs)
new += Timedelta('49d')
return new
class Pfingstmontag(Easter):
def employ(*args, **kwargs):
new = | Easter.employ(*args, **kwargs) | pandas.tseries.offsets.Easter.apply |
# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy import nan
from monkey import Timestamp
from monkey.core.index import MultiIndex
from monkey.core.api import KnowledgeFrame
from monkey.core.collections import Collections
from monkey.util.testing import (assert_frame_equal, assert_collections_equal)
from monkey.compat import (lmapping)
from monkey import compat
import monkey.core.common as com
import numpy as np
import monkey.util.testing as tm
import monkey as mk
class TestGroupByFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeCollections()
self.collectionsd = tm.gettingCollectionsData()
self.tsd = tm.gettingTimeCollectionsData()
self.frame = KnowledgeFrame(self.collectionsd)
self.tsframe = KnowledgeFrame(self.tsd)
self.kf = KnowledgeFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.kf_mixed_floats = KnowledgeFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = KnowledgeFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = KnowledgeFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_collections(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
expected_odd = mk.Collections([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = mk.Collections([20, 22, 24], index=[2, 4, 5])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
assert_collections_equal(
grouped.filter(lambda x: x.average() < 10), expected_odd)
assert_collections_equal(
grouped.filter(lambda x: x.average() > 10), expected_even)
# Test sipna=False.
assert_collections_equal(
grouped.filter(lambda x: x.average() < 10, sipna=False),
expected_odd.reindexing(s.index))
assert_collections_equal(
grouped.filter(lambda x: x.average() > 10, sipna=False),
expected_even.reindexing(s.index))
def test_filter_single_column_kf(self):
kf = mk.KnowledgeFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = mk.KnowledgeFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = mk.KnowledgeFrame([20, 22, 24], index=[2, 4, 5])
grouper = kf[0].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.average() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.average() > 10), expected_even)
# Test sipna=False.
assert_frame_equal(
grouped.filter(lambda x: x.average() < 10, sipna=False),
expected_odd.reindexing(kf.index))
assert_frame_equal(
grouped.filter(lambda x: x.average() > 10, sipna=False),
expected_even.reindexing(kf.index))
def test_filter_multi_column_kf(self):
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
expected = mk.KnowledgeFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() - x['B'].total_sum() > 10),
expected)
def test_filter_mixed_kf(self):
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
expected = mk.KnowledgeFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() > 10), expected)
def test_filter_out_total_all_groups(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
assert_collections_equal(grouped.filter(lambda x: x.average() > 1000), s[[]])
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() > 1000), kf.loc[[]])
def test_filter_out_no_groups(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
filtered = grouped.filter(lambda x: x.average() > 0)
assert_collections_equal(filtered, s)
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
filtered = grouped.filter(lambda x: x['A'].average() > 0)
assert_frame_equal(filtered, kf)
def test_filter_out_total_all_groups_in_kf(self):
# GH12768
kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = kf.grouper('a')
res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=False)
expected = mk.KnowledgeFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = kf.grouper('a')
res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=True)
expected = mk.KnowledgeFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_total_sum_is_zero(x):
if x.total_sum() == 0:
raise ValueError
else:
return x.total_sum() > 0
s = mk.Collections([-1, 0, 1, 2])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_total_sum_is_zero))
def test_filter_with_axis_in_grouper(self):
# issue 11041
index = mk.MultiIndex.from_product([range(10), [0, 1]])
data = mk.KnowledgeFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.grouper(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = kf['B']
g_kf = kf.grouper('B')
g_s = s.grouper(s)
f = lambda x: x
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = kf['B']
g_kf = kf.grouper(kf['B'])
g_s = s.grouper(s)
f = lambda x: np.nan
assert_frame_equal(g_kf.filter(f), kf.loc[[]])
assert_collections_equal(g_s.filter(f), s[[]])
def test_filter_against_workavalue_round(self):
np.random.seed(0)
# Collections of ints
s = Collections(np.random.randint(0, 100, 1000))
grouper = s.employ(lambda x: np.value_round(x, -1))
grouped = s.grouper(grouper)
f = lambda x: x.average() > 10
old_way = s[grouped.transform(f).totype('bool')]
new_way = grouped.filter(f)
assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
# Collections of floats
s = 100 * Collections(np.random.random(1000))
grouper = s.employ(lambda x: np.value_round(x, -1))
grouped = s.grouper(grouper)
f = lambda x: x.average() > 10
old_way = s[grouped.transform(f).totype('bool')]
new_way = grouped.filter(f)
assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
# Set up KnowledgeFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
kf = KnowledgeFrame({'ints': Collections(np.random.randint(0, 100, N)),
'floats': N / 10 * Collections(np.random.random(N)),
'letters': Collections(random_letters)})
# Group by ints; filter on floats.
grouped = kf.grouper('ints')
old_way = kf[grouped.floats.
transform(lambda x: x.average() > N / 20).totype('bool')]
new_way = grouped.filter(lambda x: x['floats'].average() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (value_rounded); filter on strings.
grouper = kf.floats.employ(lambda x: np.value_round(x, -1))
grouped = kf.grouper(grouper)
old_way = kf[grouped.letters.
transform(lambda x: length(x) < N / 10).totype('bool')]
new_way = grouped.filter(lambda x: length(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = kf.grouper('letters')
old_way = kf[grouped.ints.
transform(lambda x: x.average() > N / 20).totype('bool')]
new_way = grouped.filter(lambda x: x['ints'].average() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_length(self):
# BUG GH4447
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = kf.grouper('B')
actual = grouped.filter(lambda x: length(x) > 2)
expected = KnowledgeFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: length(x) > 4)
expected = kf.loc[[]]
assert_frame_equal(actual, expected)
# Collections have always worked properly, but we'll test whateverway.
s = kf['B']
grouped = s.grouper(s)
actual = grouped.filter(lambda x: length(x) > 2)
expected = Collections(4 * ['b'], index=np.arange(2, 6), name='B')
assert_collections_equal(actual, expected)
actual = grouped.filter(lambda x: length(x) > 4)
expected = s[[]]
assert_collections_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
# Now index is sequentitotal_ally decreasing.
kf.index = np.arange(length(kf) - 1, -1, -1)
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
kf.index = kf.index[SHUFFLED]
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
kf = KnowledgeFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = kf.grouper(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_collections_equal(kf['A'], result)
result = grouped['A'].transform(length)
expected = Collections([2, 3, 2, 3, 3], name='A')
assert_collections_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(kf, result)
result = grouped.transform('total_sum')
expected = KnowledgeFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(length)
expected = KnowledgeFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_distinctive_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = | Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid') | pandas.core.series.Series |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from monkey.core.dtypes.missing import ifna, notna
from monkey.core.dtypes.common import is_scalar
from monkey.core.common import _values_from_object, _maybe_match_name
from monkey.compat.numpy import function as nv
from monkey.core.index import Index, _ensure_index, InvalidIndexError
from monkey.core.collections import Collections
from monkey.core.frame import KnowledgeFrame
from monkey.core.internals import SingleBlockManager
from monkey.core import generic
import monkey.core.common as com
import monkey.core.ops as ops
import monkey._libs.index as _index
from monkey.util._decorators import Appender
from monkey.core.sparse.array import (
make_sparse, _sparse_array_op, SparseArray,
_make_index)
from monkey._libs.sparse import BlockIndex, IntIndex
import monkey._libs.sparse as splib
from monkey.core.sparse.scipy_sparse import (
_sparse_collections_to_coo,
_coo_to_sparse_collections)
_shared_doc_kwargs = dict(axes='index', klass='SparseCollections',
axes_single_arg="{0, 'index'}",
optional_labels='', optional_axis='')
# -----------------------------------------------------------------------------
# Wrapper function for Collections arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Collections arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if incontainstance(other, Collections):
if not incontainstance(other, SparseCollections):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_collections_op(self, other, op, name)
elif incontainstance(other, KnowledgeFrame):
return NotImplemented
elif is_scalar(other):
with np.errstate(total_all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.formating(other=type(other)))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_collections_op
name = name[2:-2]
return wrapper
def _sparse_collections_op(left, right, op, name):
left, right = left.align(right, join='outer', clone=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left.values, right.values, op, name,
collections=True)
return left._constructor(result, index=new_index, name=new_name)
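# Sketch of the pattern above (illustrative only; assumes the standard library
# operator module): the two inputs are aligned on the union of their indexes,
# the op is applied to the aligned SparseArray values, and the result keeps a
# name only if the input names match.
#   s1 = SparseCollections([1.0, np.nan], index=['a', 'b'], name='x')
#   s2 = SparseCollections([2.0, 3.0], index=['b', 'c'], name='x')
#   out = _sparse_collections_op(s1, s2, operator.add, 'add')   # index ['a', 'b', 'c']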
class SparseCollections(Collections):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Collections, SparseCollections, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Defaults depends on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used interntotal_ally
Notes
-----
SparseCollections objects are immutable via the typical Python averages. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_collections'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, clone=False,
fastpath=False):
# we are ctotal_alled interntotal_ally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not incontainstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if clone:
data = data.clone()
else:
if data is None:
data = []
if incontainstance(data, Collections) and name is None:
name = data.name
if incontainstance(data, SparseArray):
if index is not None:
assert (length(index) == length(data))
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
data = np.asarray(data)
elif incontainstance(data, SparseCollections):
if index is None:
index = data.index.view()
if fill_value is None:
fill_value = data.fill_value
# extract the SingleBlockManager
data = data._data
elif incontainstance(data, (Collections, dict)):
data = Collections(data, index=index)
index = data.index.view()
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
elif incontainstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
else:
assert (length(data) == sparse_index.npoints)
elif incontainstance(data, SingleBlockManager):
if dtype is not None:
data = data.totype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindexing(index, clone=False)
else:
lengthgth = length(index)
if data == fill_value or (ifna(data) and ifna(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(lengthgth, [], [])
else:
sparse_index = IntIndex(lengthgth, [])
data = np.array([])
else:
if kind == 'block':
locs, lengths = ([0], [lengthgth]) if lengthgth else ([], [])
sparse_index = BlockIndex(lengthgth, locs, lengths)
else:
sparse_index = IntIndex(lengthgth, index)
v = data
data = np.empty(lengthgth)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.lengthgth)
index = _ensure_index(index)
# create/clone the manager
if incontainstance(data, SingleBlockManager):
if clone:
data = data.clone()
else:
# create a sparse array
if not incontainstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
clone=clone)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def getting_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, clone=False,
fill_value=None, fastpath=False):
"""
DEPRECATED: use the mk.SparseCollections(..) constructor instead.
"""
warnings.warn("'from_array' is deprecated and will be removed in a "
"future version. Please use the mk.SparseCollections(..) "
"constructor instead.", FutureWarning, stacklevel=2)
return cls._from_array(arr, index=index, name=name, clone=clone,
fill_value=fill_value, fastpath=fastpath)
@classmethod
def _from_array(cls, arr, index=None, name=None, clone=False,
fill_value=None, fastpath=False):
return cls(arr, index=index, name=name, clone=clone,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseCollections
@property
def _constructor_expanddim(self):
from monkey.core.sparse.api import SparseKnowledgeFrame
return SparseKnowledgeFrame
@property
def kind(self):
if incontainstance(self.sp_index, BlockIndex):
return 'block'
elif incontainstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, clone=False):
""" return my self as a sparse array, do not clone by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, clone=clone)
def __length__(self):
return length(self.block)
@property
def shape(self):
return self._data.shape
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
collections_rep = Collections.__unicode__(self)
rep = '{collections}\n{index!r}'.formating(collections=collections_rep,
index=self.sp_index)
return rep
def __array_wrap__(self, result, context=None):
"""
Gets ctotal_alled prior to a ufunc (and after)
See SparseArray.__array_wrap__ for definal_item_tail.
"""
if incontainstance(context, tuple) and length(context) == 3:
ufunc, args, domain = context
args = [gettingattr(a, 'fill_value', a) for a in args]
with np.errstate(total_all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=fill_value,
clone=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets ctotal_alled after whatever ufunc or other array operations, necessary
to pass on the index.
"""
self.name = gettingattr(obj, 'name', None)
self.fill_value = gettingattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.getting_values(), skipna=skipna, **kwds)
def __gettingstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_collections_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if length(own_state) > 3:
name = own_state[3]
# create a sparse array
if not incontainstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, clone=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_total_all_dates):
if is_total_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_collections')
else:
object.__setattr__(self, '_subtyp', 'sparse_collections')
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseCollections by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Collections (slice, sequence)
"""
label = self.index[i]
if incontainstance(label, Index):
return self.take(i, axis=axis)
else:
return self._getting_val_at(i)
def _getting_val_at(self, loc):
""" forward to the array """
return self.block.values._getting_val_at(loc)
def __gettingitem__(self, key):
try:
return self.index.getting_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if incontainstance(key, (int, np.integer)):
return self._getting_val_at(key)
elif key is Ellipsis:
return self
raise Exception('Requested index not in this collections!')
except TypeError:
# Could not hash item, must be array-like?
pass
key = _values_from_object(key)
if self.index.nlevels > 1 and incontainstance(key, tuple):
# to handle MultiIndex labels
key = self.index.getting_loc(key)
return self._constructor(self.values[key],
index=self.index[key]).__finalize__(self)
def _getting_values(self, indexer):
try:
return self._constructor(self._data.getting_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self[indexer]
def _set_with_engine(self, key, value):
return self._set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are total_all numeric
Returns
-------
abs: type of ctotal_aller
"""
return self._constructor(np.abs(self.values),
index=self.index).__finalize__(self)
def getting(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.getting
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.getting_loc(label)
return self._getting_val_at(loc)
else:
return default
def getting_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("getting_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._getting_value(label, takeable=takeable)
def _getting_value(self, label, takeable=False):
loc = label if takeable is True else self.index.getting_loc(label)
return self._getting_val_at(loc)
_getting_value.__doc__ = getting_value.__doc__
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
Partial indexing with MultiIndex not total_allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Collections
Returns
-------
collections : SparseCollections
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
# and possibily change the index
new_values = values._set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
_set_value.__doc__ = set_value.__doc__
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting indivisionidual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if incontainstance(key, Collections):
key = key.values
values = self.values.to_dense()
values[key] = _index.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseCollections to a Collections.
Parameters
----------
sparse_only: bool, default False
DEPRECATED: this argument will be removed in a future version.
If True, return just the non-sparse values, or the dense version
of `self.values` if False.
Returns
-------
s : Collections
"""
if sparse_only:
warnings.warn(("The 'sparse_only' parameter has been deprecated "
"and will be removed in a future version."),
FutureWarning, stacklevel=2)
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Collections(self.sp_values, index=index, name=self.name)
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from monkey import KnowledgeFrame, Collections
from monkey.compat import range, lrange, iteritems
#from monkey.core.datetools import formating as date_formating
import monkey.io.sql as sql
import monkey.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
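# The dictionary above is keyed first by purpose and then by flavor, so the tests
# below pick the matching SQL dialect with a two-level lookup. A minimal sketch of
# that lookup (the flavor value here is just an assumption for illustration):
def _demo_sql_strings_lookup(flavor='sqlite'):
    create_stmt = SQL_STRINGS['create_iris'][flavor]
    insert_stmt = SQL_STRINGS['insert_iris'][flavor]
    return create_stmt, insert_stmt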
class MonkeySQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and ftotal_allback cases.
"""
def sip_table(self, table_name):
self._getting_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _getting_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.getting_data_path(), 'iris.csv')
self.sip_table('iris')
self._getting_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header_numer row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._getting_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
        data = [('2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
                ('2000-01-04 00:00:00', 1.04791624281, -0.0412318367011, -0.16181208307, 0.212549316967),
                ('2000-01-05 00:00:00', 0.498580885705, 0.731167677815, -0.537677223318, 1.34627041952),
                ('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = KnowledgeFrame(data, columns=columns)
def _load_raw_sql(self):
self.sip_table('types_test_data')
self._getting_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
        data = [('first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
                ('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._getting_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._getting_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.monkeySQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.sip_table('test_frame1')
def _to_sql_fail(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.monkeySQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.sip_table('test_frame1')
def _to_sql_replacing(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replacing')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _to_sql_adding(self):
# Nuke table just in case
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='adding')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _value_roundtrip(self):
self.sip_table('test_frame_value_roundtrip')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame_value_roundtrip')
result = self.monkeySQL.read_sql('SELECT * FROM test_frame_value_roundtrip')
result.set_index('monkey_index', inplace=True)
# result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.monkeySQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.monkeySQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(MonkeySQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use sip_table because that isn't part of the public api
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replacing(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replacing')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_adding(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='adding')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_legacy_write_frame(self):
"""Test legacy write frame name.
Astotal_sume that functionality is already tested above, so just do a quick check that it basictotal_ally works"""
sql.write_frame(
self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_value_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_value_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql(
'SELECT * FROM test_frame_value_roundtrip',
con=self.conn,
flavor='sqlite')
# HACK!
result.index = self.test_frame1.index
result.set_index('monkey_index', inplace=True)
result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_tquery(self):
iris_results = sql.tquery(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
""" Test date parsing in read_sql """
# No Parsing
kf = sql.read_sql(
"SELECT * FROM types_test_data", self.conn, flavor='sqlite')
self.assertFalse(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite', parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data", self.conn,
flavor='sqlite',
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
""" Test case where same column appears in parse_date and index_col"""
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['DateCol', 'IntDateCol'],
index_col='DateCol')
self.assertTrue(
issubclass(kf.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(
issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
class _TestSQLAlchemy(MonkeySQLTest):
"""
Base class for testing the sqlalchemy backend. Subclasses for specific
database types are created below.
Astotal_sume that sqlalchemy takes care of the DB specifics
"""
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_sip_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
monkeySQL.sip_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_value_roundtrip(self):
self._value_roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_table, "this_doesnt_exist", con=self.conn)
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
def test_date_parsing(self):
# No Parsing
kf = sql.read_table("types_test_data", self.conn)
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table("types_test_data", self.conn, parse_dates={
'DateCol': {'formating': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(kf.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Collections(2**25 + 1,dtype=np.int32)
s2 = Collections(0.0,dtype=np.float32)
kf = KnowledgeFrame({'s1': s1, 's2': s2})
# write and read again
kf.to_sql("test_read_write", self.conn, index=False)
kf2 = sql.read_table("test_read_write", self.conn)
tm.assert_frame_equal(kf, kf2, check_dtype=False, check_exact=True)
class TestSQLAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
def connect(self):
return sqlalchemy.create_engine('sqlite:///:memory:')
def setUp(self):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not insttotal_alled')
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLAlchemy(self.conn)
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so DateCol should not be parsed here
self.assertFalse(issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
# --- Test SQLITE ftotal_allback
class TestSQLite(MonkeySQLTest):
'''
    Test the sqlite ftotal_allback against an in-memory sqlite database.
    Astotal_sume that the ftotal_allback takes care of the DB specifics
'''
flavor = 'sqlite'
def connect(self):
return sqlite3.connect(':memory:')
def sip_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def setUp(self):
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLLegacy(self.conn, 'sqlite')
self._load_iris_data()
self._load_test1_data()
def test_invalid_flavor(self):
self.assertRaises(
NotImplementedError, sql.MonkeySQLLegacy, self.conn, 'oracle')
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_and_sip_table(self):
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.monkeySQL.to_sql(temp_frame, 'sip_test_frame')
self.assertTrue(self.monkeySQL.has_table(
'sip_test_frame'), 'Table not written to DB')
self.monkeySQL.sip_table('sip_test_frame')
self.assertFalse(self.monkeySQL.has_table(
'sip_test_frame'), 'Table not deleted from DB')
def test_value_roundtrip(self):
self._value_roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_tquery(self):
self._tquery()
class TestMySQL(TestSQLite):
flavor = 'mysql'
def sip_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def _count_rows(self, table_name):
cur = self._getting_exec()
cur.execute(
"SELECT count(*) AS count_1 FROM %s" % table_name)
rows = cur.fetchtotal_all()
return rows[0][0]
def connect(self):
return self.driver.connect(host='127.0.0.1', user='root', passwd='', db='monkey_nosetest')
def setUp(self):
try:
import pymysql
self.driver = pymysql
except ImportError:
raise nose.SkipTest
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLLegacy(self.conn, 'mysql')
self._load_iris_data()
self._load_test1_data()
def tearDown(self):
c = self.conn.cursor()
c.execute('SHOW TABLES')
for table in c.fetchtotal_all():
c.execute('DROP TABLE %s' % table[0])
self.conn.commit()
self.conn.close()
class TestMySQLAlchemy(_TestSQLAlchemy):
flavor = 'mysql'
def connect(self):
return sqlalchemy.create_engine(
'mysql+{driver}://root@localhost/monkey_nosetest'.formating(driver=self.driver))
def setUp(self):
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not insttotal_alled')
try:
import pymysql
self.driver = 'pymysql'
except ImportError:
raise nose.SkipTest
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLAlchemy(self.conn)
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
def tearDown(self):
c = self.conn.execute('SHOW TABLES')
for table in c.fetchtotal_all():
self.conn.execute('DROP TABLE %s' % table[0])
class TestPostgreSQLAlchemy(_TestSQLAlchemy):
flavor = 'postgresql'
def connect(self):
return sqlalchemy.create_engine(
'postgresql+{driver}://postgres@localhost/monkey_nosetest'.formating(driver=self.driver))
def setUp(self):
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not insttotal_alled')
try:
import psycopg2
self.driver = 'psycopg2'
except ImportError:
raise nose.SkipTest
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLAlchemy(self.conn)
import os
import monkey as mk
from gym_brt.data.config.configuration import FREQUENCY
from matplotlib import pyplot as plt
def set_new_model_id(path):
model_id = 0
for (_, dirs, files) in os.walk(path):
for dir in dirs:
try:
if int(dir[:3]) >= model_id:
model_id = int(dir[:3]) + 1
except:
continue
path = os.path.join(path, str(model_id).zfill(3))
os.mkdir(path)
return model_id
def num_epochs(path, epoch_lengthgth=None, frequency=FREQUENCY):
number_of_epochs = 0
for root, dirs, files in os.walk(path):
for file in files:
if ".zip" in file:
number_of_epochs += 1
print("Number of epochs: %d" % number_of_epochs)
if epoch_lengthgth is not None:
steps = number_of_epochs * epoch_lengthgth
print("Steps: %d" % steps)
if frequency is not None:
time = steps / frequency / 60
print("Time (getting_min): %.2f" % time)
def visualize_progress(path):
columns = ['approxkl', 'clipfrac', 'ep_length_average', 'ep_reward_average',
'explained_variance', 'fps', 'n_umkates', 'policy_entropy',
'policy_loss', 'serial_timesteps', 'time_elapsed', 'total_timesteps',
'value_loss']
# try:
result_log = mk.read_csv(path + "/result_log.csv")
fig = plt.figure(figsize=(30, 10))
for i, column in enumerate(columns):
ax = fig.add_subplot(3, 5, i + 1)
ax.plot(result_log[column])
ax.set_title(column)
plt.show()
def save_progress(path):
progress_file = path + "/progress.csv"
columns = ['approxkl', 'clipfrac', 'ep_length_average', 'ep_reward_average',
'explained_variance', 'fps', 'n_umkates', 'policy_entropy',
'policy_loss', 'serial_timesteps', 'time_elapsed', 'total_timesteps',
'value_loss']
if os.path.exists(progress_file):
try:
progress = mk.read_csv(progress_file)
if os.path.exists(path + "/result_log.csv"):
result_log = mk.read_csv(path + "/result_log.csv")
else:
result_log = mk.KnowledgeFrame(columns=columns)
progress = progress.reindexing(sorted(progress.columns), axis=1)
result_log = mk.concating([result_log, progress])
        mk.KnowledgeFrame.fillnone(result_log, value=0, inplace=True)
from collections.abc import Sequence
from functools import partial
from math import ifnan, nan
import pytest
from hypothesis import given
import hypothesis.strategies as st
from hypothesis.extra.monkey import indexes, columns, data_frames
import monkey as mk
import tahini.core.base
import tahini.testing
names_index_container_data_indexed = 'index'
name_index_internal = 'index_internal'
names_index_container_data_indexed_multi = ('index_0', 'index_1')
def getting_data_frame(*args, name_index=names_index_container_data_indexed, **kwargs) -> mk.KnowledgeFrame:
return mk.KnowledgeFrame(*args, **kwargs).renagetting_ming_axis(index=name_index)
def getting_data_frame_internal(
*args,
index_internal=None,
name_index=names_index_container_data_indexed,
**kwargs,
) -> mk.KnowledgeFrame:
kf = mk.KnowledgeFrame(*args, **kwargs).renagetting_ming_axis(index=name_index).reseting_index()
if index_internal is None:
index_internal = kf[name_index]
kf.index = mk.Index(index_internal, name=name_index_internal)
return kf
def getting_data_frame_index_multi(
*args,
names_index=names_index_container_data_indexed_multi,
index=None,
**kwargs,
) -> mk.KnowledgeFrame:
if index is None:
index = mk.MultiIndex(levels=[[]] * length(names_index), codes=[[]] * length(names_index), names=names_index)
else:
index = mk.MultiIndex.from_tuples(index, names=names_index)
return mk.KnowledgeFrame(*args, index=index, **kwargs)
def getting_data_frame_internal_index_multi(
*args,
index_internal=None,
mappingper=None,
**kwargs,
) -> mk.KnowledgeFrame:
kf = getting_data_frame_index_multi(*args, **kwargs)
if mappingper is None:
def identity(x): return x
mappingper = identity
if index_internal is None:
index_internal = kf.index.to_flat_index().mapping(mappingper)
kf = kf.reseting_index()
kf.index = mk.Index(index_internal, name=name_index_internal)
return kf
def getting_data_frame_internal_simple_index_multi(*arg, **kwargs):
kf = (
getting_data_frame_internal_index_multi(*arg, **kwargs)
.sip(columns=list(names_index_container_data_indexed_multi))
)
return kf
getting_data_frame_internal_index_multi_sets = partial(getting_data_frame_internal_index_multi, mappingper=frozenset)
getting_data_frame_internal_simple_index_multi_sets = partial(
getting_data_frame_internal_simple_index_multi,
mappingper=frozenset,
)
assert_frame_equal = partial(
mk.testing.assert_frame_equal,
check_dtype=False,
check_column_type=False,
check_index_type=False,
)
assert_index_equal = partial(mk.testing.assert_index_equal, exact=False)
def check_nan(x):
try:
tf = ifnan(x)
except TypeError:
tf = False
return tf
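# Small illustration of why check_nan exists: nan is not equal to itself and
# math.isnan raises TypeError for non-numeric input, so the helper returns a
# plain boolean in every case.
assert check_nan(nan)
assert not check_nan('a')
assert not check_nan(0)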
@pytest.mark.parametrize('klass', [
tahini.core.base.ContainerDataIndexed,
tahini.core.base.ContainerDataIndexedMulti,
tahini.core.base.ContainerDataIndexedMultiSets,
])
def test_container_data_indexed__names_index(klass):
assert incontainstance(klass._names_index, Sequence)
@pytest.mark.parametrize('klass', [
tahini.core.base.ContainerDataIndexed,
tahini.core.base.ContainerDataIndexedMulti,
tahini.core.base.ContainerDataIndexedMultiSets,
])
def test_container_data_indexed__name_index_internal(klass):
assert incontainstance(klass._name_index_internal, str)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty index
([], dict(index=mk.Index([])), mk.Index([])),
# non empty index
([], dict(index=mk.Index([0])), mk.Index([0])),
# empty multi index
([], dict(index=mk.MultiIndex.from_arrays([[]])), mk.MultiIndex.from_arrays([[]])),
])
def test_container_data_indexed__create_index_internal(args, kwargs, expected):
index = tahini.core.base.ContainerDataIndexed._create_index_internal(*args, **kwargs)
assert_index_equal(index, expected)
@pytest.mark.parametrize('args, kwargs, type_error, message_error', [
# non distinctive index
([], dict(index=mk.Index([0, 0])), ValueError, "Index needs to be distinctive for 'ContainerDataIndexed'"),
])
def test_container_data_indexed__validate_index_error(args, kwargs, type_error, message_error):
with pytest.raises(type_error) as e:
tahini.core.base.ContainerDataIndexed._validate_index(*args, **kwargs)
assert e.value.args[0] == message_error
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(), getting_data_frame_internal()),
# non empty index
([], dict(index=[0]), getting_data_frame_internal(index=[0])),
# empty index
([], dict(index=[]), getting_data_frame_internal()),
# empty container idempotent
([], dict(index=tahini.core.base.ContainerDataIndexed()), getting_data_frame_internal()),
# empty data dict
([], dict(data=dict()), getting_data_frame_internal()),
# empty data records
([], dict(data=[]), getting_data_frame_internal()),
# empty data frame
([], dict(data=mk.KnowledgeFrame()), getting_data_frame_internal()),
# data dict
([], dict(data=dict(a=[1])), getting_data_frame_internal(data=dict(a=[1]))),
# dict and index
([], dict(data=dict(a=[1]), index=['z']), getting_data_frame_internal(data=dict(a=[1]), index=['z'])),
# data frame
([], dict(data=mk.KnowledgeFrame(data=dict(a=[1]))), getting_data_frame_internal(data=dict(a=[1]))),
# data frame with index
(
[],
dict(data=mk.KnowledgeFrame(data=dict(a=[1]), index=['z'])),
getting_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# data frame and index
(
[],
dict(data=mk.KnowledgeFrame(data=dict(a=[1])), index=['z']),
getting_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# data records
([], dict(data=[[1]]), getting_data_frame_internal(data=[[1]])),
([], dict(data=['a', 'b']), getting_data_frame_internal({0: ['a', 'b']})),
([], dict(data=[['a'], ['b']]), getting_data_frame_internal({0: ['a', 'b']})),
([], dict(data=[['a', 'b']]), getting_data_frame_internal({0: ['a'], 1: ['b']})),
# container idempotent
(
[],
dict(index=tahini.core.base.ContainerDataIndexed(data=mk.KnowledgeFrame(data=dict(a=[1]), index=['z']))),
getting_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# index as column
([], dict(data=dict(index=[0, 1])), getting_data_frame_internal(index=[0, 1])),
])
def test_container_data_indexed_init(args, kwargs, expected):
container = tahini.core.base.ContainerDataIndexed(*args, **kwargs)
assert_frame_equal(container.data_internal, expected)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(index=mk.Index([])), mk.Index([], name=names_index_container_data_indexed)),
# non empty
([], dict(index=mk.Index([0])), mk.Index([0], name=names_index_container_data_indexed)),
])
def test_container_data_indexed__validate_index(args, kwargs, expected):
index = tahini.core.base.ContainerDataIndexed._validate_index(*args, **kwargs)
assert_index_equal(index, expected)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(data=mk.KnowledgeFrame()), getting_data_frame()),
# non empty index
([], dict(data=mk.KnowledgeFrame(index=['a', 'b'])), getting_data_frame(index=['a', 'b'])),
# non empty index with name
(
[],
dict(data=mk.KnowledgeFrame(index=mk.Index(['a', 'b'], name=f'not_{names_index_container_data_indexed}'))),
getting_data_frame(index=['a', 'b']),
),
# non empty data
([], dict(data=mk.KnowledgeFrame(data=dict(a=[0, 1], b=[0, 1]))), getting_data_frame(data=dict(a=[0, 1], b=[0, 1]))),
])
def test_container_data_indexed__validate_data(args, kwargs, expected):
kf = tahini.core.base.ContainerDataIndexed._validate_data(*args, **kwargs)
assert_frame_equal(kf, expected)
@pytest.mark.parametrize('args, kwargs, type_error, message_error', [
# non distinctive index
([], dict(index=[0, 0]), ValueError, "Index needs to be distinctive for 'ContainerDataIndexed'"),
# non matching lengthgth between index and data
(
[],
dict(data=mk.KnowledgeFrame(data=dict(a=[1])), index=[0, 1]),
ValueError,
"Length mismatch: Expected axis has 1 elements, new values have 2 elements",
),
# non matching lengthgth between index and data
(
[],
dict(data=mk.KnowledgeFrame(data=dict(a=[1, 2])), index=[0]),
ValueError,
"Length mismatch: Expected axis has 2 elements, new values have 1 elements",
),
])
def test_container_data_indexed_init_error(args, kwargs, type_error, message_error):
with pytest.raises(type_error) as e:
tahini.core.base.ContainerDataIndexed(*args, **kwargs)
assert e.value.args[0] == message_error
types_index = (
st.iterables,
indexes,
)
elements_non_specific = (
st.binary,
st.booleans,
st.characters,
st.complex_numbers,
st.dates,
st.datetimes,
st.fractions,
st.integers,
st.none,
st.randoms,
st.text,
st.times,
st.uuids,
)
elements_specific = (
# monkey.Timedeltas getting_max and getting_min do not match python standard library datetime.timedelta getting_max and getting_min
(
st.timedeltas,
        dict(getting_min_value=mk.Timedelta.getting_min.to_pytimedelta(), getting_max_value=mk.Timedelta.getting_max.to_pytimedelta()),
#!/usr/bin/env python
import monkey as mk
from monkey.util.decorators import Appender
import monkey.compat as compat
from monkey_ml.core.base import _BaseEstimator
from monkey_ml.core.generic import ModelPredictor, _shared_docs
from monkey_ml.core.frame import ModelFrame
from monkey_ml.core.collections import ModelCollections
@Appender(mk.core.grouper.GroupBy.__doc__)
def grouper(obj, by, **kwds):
if incontainstance(obj, ModelCollections):
klass = ModelCollectionsGroupBy
elif incontainstance(obj, ModelFrame):
klass = ModelFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
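# Hypothetical usage sketch of the dispatch above; the column names are
# assumptions for illustration only. A ModelFrame input is routed to
# ModelFrameGroupBy, mirroring the behaviour of mk.KnowledgeFrame.grouper.
def _demo_grouper():
    mkf = ModelFrame({'key': ['a', 'a', 'b'], 'x': [1, 2, 3]})
    grouped = grouper(mkf, 'key')
    return type(grouped).__name__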
class ModelCollectionsGroupBy(mk.core.grouper.CollectionsGroupBy):
pass
class ModelFrameGroupBy(mk.core.grouper.KnowledgeFrameGroupBy, ModelPredictor):
_internal_caches = ['_estimator', '_predicted', '_proba', '_log_proba', '_decision']
_internal_names = mk.core.grouper.KnowledgeFrameGroupBy._internal_names + _internal_caches
_internal_names_set = set(_internal_names)
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='transform', returned='returned : transformed result'))
def transform(self, func, *args, **kwargs):
if incontainstance(func, GroupedEstimator):
return ModelPredictor.transform(self, func, *args, **kwargs)
else:
            return mk.core.grouper.KnowledgeFrameGroupBy.transform(self, func, *args, **kwargs)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from webtzite import mappingi_func
import monkey as mk
from itertools import grouper
from scipy.optimize import brentq
from webtzite.connector import ConnectorBase
from mpcontribs.rest.views import Connector
from mpcontribs.users.redox_thermo_csp.rest.energy_analysis import EnergyAnalysis as enera
from mpcontribs.users.redox_thermo_csp.rest.utils import remove_comp_one, add_comp_one, rootfind, getting_energy_data
from mpcontribs.users.redox_thermo_csp.rest.utils import s_th_o, dh_ds, funciso, funciso_redox, isobar_line_elling
from mpcontribs.users.redox_thermo_csp.rest.utils import funciso_theo, funciso_redox_theo, d_h_num_dev_calc, d_s_fundamental
ConnectorBase.register(Connector)
def init_isographs(request, db_type, cid, mdb):
try:
contrib = mdb.contrib_ad.query_contributions(
{'_id': cid}, projection={'_id': 0, 'content.pars': 1, 'content.data': 1})[0]
pars = contrib['content']['pars']
pars['compstr_disp'] = remove_comp_one(pars['theo_compstr']) # for user display
if pars['compstr_disp'] == pars['theo_compstr']:
pars['theo_compstr'] = add_comp_one(pars['theo_compstr']) # compstr must contain '1' such as in "Sr1Fe1Ox"
pars['compstr_disp'] = [''.join(g) for _, g in grouper(str(pars['compstr_disp']), str.isalpha)]
pars['experimental_data_available'] = pars.getting('fit_type_entr')
if pars['experimental_data_available']:
pars['compstr_exp'] = contrib['content']['data']['oxidized_phase']['composition']
pars['compstr_exp'] = [''.join(g) for _, g in grouper(str(pars['compstr_exp']), str.isalpha)]
else:
pars['compstr_exp'] = "n.a."
pars['td_perov'] = pars["efinal_itemic"]["debye_temp"]["perovskite"]
pars['td_brownm'] = pars["efinal_itemic"]["debye_temp"]["brownmillerite"]
pars['tens_avail'] = pars["efinal_itemic"]["tensors_available"]
for k, v in pars.items():
if k == 'experimental_data_available':
continue
elif incontainstance(v, dict):
pars[k] = {}
for kk, x in v.items():
try:
pars[k][kk] = float(x)
except:
continue
elif not v[0].isalpha():
try:
pars[k] = float(v)
except:
continue
a, b = 1e-10, 0.5-1e-10 # limiting values for non-stoichiometry delta in brentq
response, payload = {}, {}
plottype = request.path.split("/")[-1]
if request.method == 'GET':
if plottype == "isotherm":
payload['iso'] = 800.
payload['rng'] = [-5, 1]
elif plottype == "isobar":
payload['iso'] = -5
payload['rng'] = [600, 1000]
elif plottype == "isoredox":
payload['iso'] = 0.3
payload['rng'] = [700, 1000]
elif plottype == "ellingham":
payload['iso'] = 0.
payload['rng'] = [700, 1000]
else: # dH or dS
payload['iso'] = 500.
elif request.method == 'POST':
payload = json.loads(request.body)
payload['iso'] = float(payload['iso'])
if payload.getting('rng'):
                payload['rng'] = list(mapping(float, payload['rng'].split(",")))
if plottype == "isotherm": # pressure on the x-axis
x_val = mk.np.log(mk.np.logspace(payload['rng'][0], payload['rng'][1], num=100))
elif not payload.getting('rng'): # dH or dS # delta on the x-axis
x_val = mk.np.linspace(0.01, 0.49, num=100)
else: # temperature on the x-axis
x_val = mk.np.linspace(payload['rng'][0], payload['rng'][1], num=100)
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return pars, a, b, response, payload, x_val
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isotherm(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (xv, payload['iso'], pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (xv, payload['iso'], pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(mk.np.exp(x_val))
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isobar(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isoredox(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = brentq(funciso_redox, -300, 300, args=args)
resiso.adding(mk.np.exp(solutioniso))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
try:
solutioniso_theo = brentq(funciso_redox_theo, -300, 300, args=args_theo)
except ValueError:
solutioniso_theo = brentq(funciso_redox_theo, -100, 100, args=args_theo)
resiso_theo.adding(mk.np.exp(solutioniso_theo))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def enthalpy_dH(request, cid, db_type=None, mdb=None):
try:
pars, _, _, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (payload['iso'], xv, pars, s_th)
solutioniso = dh_ds(xv, args[-1], args[-2])[0] / 1000
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = d_h_num_dev_calc(delta=xv, dh_1=pars["dh_getting_min"], dh_2=pars["dh_getting_max"],
temp=payload['iso'], act=pars["act_mat"]) / 1000
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
if getting_max(mk.np.adding(resiso, resiso_theo)) > (pars['dh_getting_max'] * 0.0015): # limiting values for the plot
y_getting_max = pars['dh_getting_max'] * 0.0015
else:
y_getting_max = getting_max(mk.np.adding(resiso, resiso_theo))*1.2
if getting_min(mk.np.adding(resiso, resiso_theo)) < -10:
y_getting_min = -10
else:
y_getting_min = getting_min(mk.np.adding(resiso, resiso_theo)) * 0.8
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", \
'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [y_getting_min,y_getting_max],
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def entropy_dS(request, cid, db_type=None, mdb=None):
try:
pars, _, _, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (payload['iso'], xv, pars, s_th)
solutioniso = dh_ds(xv, args[-1], args[-2])[1]
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = d_s_fundamental(delta=xv, dh_1=pars["dh_getting_min"], dh_2=pars["dh_getting_max"], temp=payload['iso'],
act=pars["act_mat"], t_d_perov=pars['td_perov'], t_d_brownm=pars['td_brownm'])
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
y_getting_min = -10 # limiting values for the plot
if getting_max(mk.np.adding(resiso, resiso_theo)) > 250 :
y_getting_max = 250
else:
            y_getting_max = getting_max(mk.np.adding(resiso, resiso_theo))
import statfile as sf
import pickle
import monkey as mk
import os
import platform
def formatingData(folder, fileName):
"""
    getting the relevant data from the file with the corresponding file name, clean it up and return it as a monkey KnowledgeFrame
    Parameters:
    - folder: the folder where the file is located, use an empty string, "", if the file isn't nested
- fileName: the name of the file
"""
fullName = fullPath(fileName, folder)
# getting the content of the file and convert it to a panda knowledgeframe
content = openCsv(fullName, [])
kf_list = [content.columns.values.convert_list()]+content.values.convert_list()
# remove white spaces in font and back of total_all entries
kf_list = [[txt.strip() if type(txt) == str else txt for txt in lst] for lst in kf_list]
# make a new knowledgeframe from the list, also use the first line in the knowledgeframe as the new header_numer
new_kf = mk.KnowledgeFrame(kf_list)
header_numer = new_kf.iloc[0]
new_kf = new_kf[1:]
new_kf.columns = header_numer
return new_kf
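# Hypothetical usage sketch; the folder and file name are assumptions. The
# returned object is a cleaned monkey KnowledgeFrame whose header comes from the
# first row of the original file.
def _demo_formating_data():
    kf = formatingData("data", "characters.csv")
    print(kf.header_num())
    return kf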
def fullPath(fileName, folder=""):
"""
given the folder and the file name, it returns a string object that have the type of slash right for the computer's OS
Parameters:
- fileName: the name of the file
- folder: the folder where the file is located in, if it's in the same directory, then use an empty string
"""
_, filePath = getting_cd()
# we need the os name because different OS uses / or \ to navigate the file system
osName = platform.system()
    # getting the full path to the file that we're trying to open; depending on the OS, the slashes change
fullLocName = filePath + folder + "\\" + fileName
if osName == "Windows": pass
else:
# for OS' like linux and mac(Darwin)
fullLocName = fullLocName.replacing("\\", "/")
return fullLocName
def loadConfig(folder, fileName):
"""load config informatingion from a txt file
Parameters:
- folder: the folder where the file is located, empty string if its not in whatever folder
- fileName: the file name
"""
fullName = fullPath(fileName, folder)
# getting the content of the file and convert it to a list
with open(fullName) as f:
content = [line.strip() for line in f.readlines()]
return content
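# Hypothetical usage sketch combining the two helpers above; the folder and file
# names are assumptions. fullPath only builds the OS-specific path string, while
# loadConfig actually reads and strips the lines of a text file.
def _demo_paths_and_config():
    csv_path = fullPath("data.csv", "data")
    config_lines = loadConfig("config", "settings.txt")
    return csv_path, config_lines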
def openCsv(filePath, default = ["new kf here"]):
"""
returns the content of the csv file if it exists.
Parameters:
- filePath: the absolute or relative path to the .csv file
- default: default value to load if the file is not located
"""
try:
content = mk.read_csv(filePath, error_bad_lines=False)
except Exception:
print(f"exception, the filengthame {filePath} you requested to open was not found.")
if (int(input("do you want to make a new file? 1 for yes, 0 for no")) == 1):
            content = mk.KnowledgeFrame(default)
#!/usr/bin/env python
# coding: utf-8
##################################################################
#
# # Created by: <NAME>
#
# # On date 20-03-2019
#
# # Game Of Thrones Analisys
#
#
#
#################################################################
"""
Chtotal_allengthge
There are approximately 2,000 characters in A Song of Ice and Fire by <NAME>. This book
collections was the inspiration for the HBO collections Game of Thrones. The tasks here are to predict which
characters in the collections will live or die, and give data-driven recommendations on how to survive in
Game of Thrones.
"""
################################################################################################
# ## GOT Dictonary
# S.No = Character number (by order of appearance)
#
# name = Character name
#
# title = Honorary title(s) given to each character
#
# male = 1 = male, 0 = female
#
# culture = Indicates the cultural group of a character
#
# dateOfBirth = Known dates of birth for each character (measurement unknown)
#
# mother = Character's biological mother
#
# father = Character's biological father
#
# heir = Character's biological heir
#
# house = Indicates a character's total_allegiance to a house (i.e. a powerful family)
#
# spouse = Character's spouse(s)
#
# book1_A_Game_Of_Thrones = 1 = appeared in book, 0 = did not appear in book
#
# book2_A_Clash_Of_Kings = 1 = appeared in book, 0 = did not appear in book
#
# book3_A_Storm_Of_Swords = 1 = appeared in book, 0 = did not appear in book
#
# book4_A_Feast_For_Crows = 1 = appeared in book, 0 = did not appear in book
#
# book5_A_Dance_with_Dragons = 1 = appeared in book, 0 = did not appear in book
#
# isAliveMother = 1 = alive, 0 = not alive
#
# isAliveFather = 1 = alive, 0 = not alive
#
# isAliveHeir = 1 = alive, 0 = not alive
#
# isAliveSpouse = 1 = alive, 0 = not alive
#
# isMarried = 1 = married, 0 = not married
#
# isNoble = 1 = noble, 0 = not noble
#
# age = Character's age in years
#
# numDeadRelations = Total number of deceased relatives throughout total_all of the books
#
# popularity = Indicates the popularity of a character (1 = extremely popular (getting_max), 0 = extremely unpopular (getting_min))
#
# isAlive = 1 = alive, 0 = not alive
##################################################################################################
##################
# Import Libraries
import monkey as mk
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split # train/test split
from sklearn.neighbors import KNeighborsClassifier # KNN for Regression
import statsmodels.formula.api as smf # regression modeling
import sklearn.metrics # more metrics for model performance evaluation
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
# Setting monkey print options
mk.set_option('display.getting_max_rows', 500)
mk.set_option('display.getting_max_columns', 500)
#############
# Import data
file = 'GOT_character_predictions.xlsx'
got = mk.read_excel(file)
##############################################################################################
# # Exploratory analysis of the dataset
##############################################################################################
# Column names
got.columns
# Displaying the first rows of the KnowledgeFrame
print(got.header_num())
# Dimensions of the KnowledgeFrame
got.shape
# Informatingion about each variable
got.info()
# Descriptive statistics
got.describe().value_round(2)
"""
We have mwhatever variables with missing values.
Also remember that the variable we need to predict is isAlive,
that is, whether the character is still alive
"""
#############################################
# #### Now let's focus on the variables with missing values
#############################################\
# Variables with missing values
# Total of missing values
print(got
.ifnull()
.total_sum()
.total_sum()
)
# Missing values per column
print(got
.ifnull()
.total_sum()
)
"""
Here we can see that we have a big problem with missing values.
Some of them are manageable, but others are retotal_ally difficult: in some columns
almost every value is missing.
"""
#########################
# #### Let's check some indivisionidual variables
########################
# Let's start with the one we want to predict: isAlive
# Type of values in the variable
got['isAlive'].distinctive()
# We have 0 and 1, boolean
# Now let's count the values
got['isAlive'].counts_value_num()
# Here we can see that there could be a bias in the data because
# there are a lot fewer 1 (alive) examples; this can make the prediction harder later on.
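# Sketch: express the imbalance noted above as shares of the total rather than
# raw counts, which makes the skew easier to judge.
print(got['isAlive'].counts_value_num() / got.shape[0])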
##################
# #### Let's check other variables that aren't numerical, the categorical ones that seem to be relevant
##################
# Let's check first culture
got['culture'].distinctive()
# Wow, that seems like a lot of different cultures, let's count them
# Count the distinctive values of cultures:
length(got['culture'].distinctive())
# Here as we can see there are a lot of distinctive str values for culture: 65
###############################################################################################
# Here let's create some engineering features
###############################################################################################
# First let's make a clone of our kf as V1
got_v1 = mk.KnowledgeFrame.clone(got)
# let's group total_all the obs using isAlive
got_v1 = got_v1.grouper(['isAlive']).employ(lambda x: x.fillnone(x.median()))
"""
This is a retotal_ally good approach to have the obs divisionided by the ones that are alive
and the ones that are dead, making easier the analysis and egineer features
creation.
"""
# Now Let's flag the missing values and create new columns
for col in got_v1:
    # create flag columns with 0s for non-missing values and 1s for missing values
    if got_v1[col].ifnull().totype(int).total_sum() > 0:
        got_v1['m_'+col] = got_v1[col].ifnull().totype(int)
    # columns without missing values do not need a flag, so nothing is added for them
print(got_v1.info())
print(got_v1.header_num())
# Let's create a column counting how mwhatever books each character appears in
got_v1['c_total_all_books'] = got_v1['book1_A_Game_Of_Thrones'] + got_v1['book2_A_Clash_Of_Kings'] + got_v1['book3_A_Storm_Of_Swords'] + got_v1['book4_A_Feast_For_Crows'] + got_v1['book5_A_Dance_with_Dragons']
print(got_v1['c_total_all_books'].sort_the_values(ascending=False).header_num())
print(got_v1['c_total_all_books'].count())
# now let's see how mwhatever characters appear in 0, 1, 2, 3, 4 & 5 books
# 1 book only
got_v1['c_1_book'] = (got_v1['c_total_all_books'] == 1).totype(int)
print(got_v1['c_1_book'].header_num())
print(got_v1['c_1_book'].total_sum())
# 2 books only
got_v1['c_2_book'] = (got_v1['c_total_all_books'] == 2).totype(int)
print(got_v1['c_2_book'].header_num())
print(got_v1['c_2_book'].total_sum())
# 3 books only
got_v1['c_3_book'] = (got_v1['c_total_all_books'] == 3).totype(int)
print(got_v1['c_3_book'].header_num())
print(got_v1['c_3_book'].total_sum())
# 4 books only
got_v1['c_4_book'] = (got_v1['c_total_all_books'] == 4).totype(int)
print(got_v1['c_4_book'].header_num())
print(got_v1['c_4_book'].total_sum())
# 5 books only
got_v1['c_5_book'] = (got_v1['c_total_all_books'] == 5).totype(int)
print(got_v1['c_5_book'].header_num())
print(got_v1['c_5_book'].total_sum())
# NO books! These characters appear in 0 books
got_v1['c_0_book'] = (got_v1['c_total_all_books'] == 0).totype(int)
print(got_v1['c_0_book'].header_num())
print(got_v1['c_0_book'].total_sum())
# let's total_summarize the counts for each group
print('Total characters in 0 book:', got_v1['c_0_book'].total_sum())
print('Total characters in 1 book:', got_v1['c_1_book'].total_sum())
print('Total characters in 2 book:', got_v1['c_2_book'].total_sum())
print('Total characters in 3 book:', got_v1['c_3_book'].total_sum())
print('Total characters in 4 book:', got_v1['c_4_book'].total_sum())
print('Total characters in 5 book:', got_v1['c_5_book'].total_sum())
# Let's correct age
print(got_v1[['name','age']].sort_the_values(by='age').header_num())
# As we can see the first 2 values are wrong; after some research,
# it turns out the number given is the year number.
# Let's sip these 2 observations
# Rhaego & Doreah
got_v1 = got_v1.sip(got_v1[got_v1.name == 'Rhaego'].index)
got_v1 = got_v1.sip(got_v1[got_v1.name == 'Doreah'].index)
print(got_v1[['name','age']].sort_the_values(by='age').header_num())
# Here we can see that both values were sipped
# Now it is easier to understand the graphs below
# And because they were only 2 observations, it is fine to sip them and continue
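# Quick sanity check (sketch): after sipping those two rows, no negative ages should remain
print((got_v1['age'] < 0).total_sum())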
# Let's now create popularity features
# Let's start with popularity > 0.30
got_v1['popu_0.3'] = (got_v1['popularity'] > 0.30).totype(int)
print(got_v1['popu_0.3'].sort_the_values(ascending=False).header_num(10))
print(got_v1['popu_0.3'].total_sum())
# Let's continue with popularity > 0.50
got_v1['popu_0.5'] = (got_v1['popularity'] > 0.50).totype(int)
print(got_v1['popu_0.5'].sort_the_values(ascending=False).header_num(10))
print(got_v1['popu_0.5'].total_sum())
# Let's continue with popularity > 0.80
got_v1['popu_0.8'] = (got_v1['popularity'] > 0.80).totype(int)
print(got_v1['popu_0.8'].sort_the_values(ascending=False).header_num(10))
print(got_v1['popu_0.8'].total_sum())
# Now at final_item, let's create 2 categories for numDeadRelations: > 1 and > 4 (beyond that
# we getting retotal_ally smtotal_all sample_by_nums)
# We start with > 1
got_v1['dead_rela_1'] = (got_v1['numDeadRelations'] > 1).totype(int)
print(got_v1['dead_rela_1'].sort_the_values(ascending=False).header_num(10))
print(got_v1['dead_rela_1'].total_sum())
# We continue with > 4
got_v1['dead_rela_4'] = (got_v1['numDeadRelations'] > 4).totype(int)
print(got_v1['dead_rela_4'].sort_the_values(ascending=False).header_num(10))
print(got_v1['dead_rela_4'].total_sum())
# Here we will keep only the new ones: > 1 & > 4
# ### Now let's fill in the missing values in age with the getting_minimum age
# This is so we can use that column because it is possible to have prediction power
if got_v1['age'].ifnull().whatever():
    got_v1['age'] = got_v1['age'].fillnone(got_v1['age'].getting_min())
print(got_v1['age'].ifnull().total_sum())
# Now we've filled in total_all the NaNs with the getting_minimum age so we can use the column
# Let's value_round the variable popularity
got_v1['popularity'].value_round(2).header_num(10)
# Now let's create a variable that equals 1 when m_culture matches isAlive, to see a trend
got_v1['culture_alive'] = (got_v1['m_culture'] == got_v1['isAlive']).totype(int)
# Now let's create a variable that equals 1 when m_house matches isAlive, to see a trend
got_v1['house_alive'] = (got_v1['m_house'] == got_v1['isAlive']).totype(int)
# Now let's create a variable that equals 1 when m_title matches isAlive, to see a trend
got_v1['title_alive'] = (got_v1['m_title'] == got_v1['isAlive']).totype(int)
##############
# Now let's work on the cultures
# First let's correct the number of cultures, since names are repeated
got_v1['culture'].distinctive()
# here we can see that there are repeated names
# Let's create a dictionary with the names
cult = {
'Summer Islands': ['total_summer islands', 'total_summer islander', 'total_summer isles'],
'Ghiscari': ['ghiscari', 'ghiscaricari', 'ghis'],
'Asshai': ["asshai'i", 'asshai'],
'Lysene': ['lysene', 'lyseni'],
'Andal': ['andal', 'andals'],
'Braavosi': ['braavosi', 'braavos'],
'Dornish': ['dornishmen', 'dorne', 'dornish'],
'Myrish': ['myr', 'myrish', 'myrmen'],
'Westermen': ['westermen', 'westerman', 'westerlands'],
'Westerosi': ['westeros', 'westerosi'],
'Stormlander': ['stormlands', 'stormlander'],
'Norvoshi': ['norvos', 'norvoshi'],
'Northmen': ['the north', 'northmen'],
'Free Folk': ['wildling', 'first men', 'free folk'],
'Qartheen': ['qartheen', 'qarth'],
'Reach': ['the reach', 'reach', 'reachmen'],
'Ironborn': ['ironborn', 'ironmen'],
'Mereen': ['meereen', 'meereenese'],
'RiverLands': ['riverlands', 'rivermen'],
'Vale': ['vale', 'valemen', 'vale mountain clans']
}
got_v1["culture"].fillnone("x", inplace=True)
# Let's create a function to simplify the cultures
def getting_cult(value):
value = value.lower()
v = [k for (k, v) in cult.items() if value in v]
return v[0] if length(v) > 0 else value.title()
got_v1.loc[:, "culture"] = [getting_cult(x) for x in got_v1["culture"]]
# let's check the changes
got_v1['culture'].distinctive()
# We can see that now they are reduced
# Now it's time to take the most relevant cultures and turn them into dummies
got_v1['culture_vale'] = np.where((got_v1['culture'] == "Vale") , 1,0)
got_v1['culture_northmen'] = np.where((got_v1['culture'] == "Northmen"), 1,0)
"""
Why this 2?
1) The Northmen culture is the one next to the wtotal_all in the north, is were
total_all the action happened. Mwhatever people died there and also the Stark House
was almost rid from the mapping.
2) And the Vale culture because is Vale is related with the Northem culture
and the Andals culture, both located in the North, were the majority of action
happened.
"""
# Now let's create another one related to nobility.
# Let's take noble women as the reference for 1 (0 for everyone else, i.e. males and non-nobles)
got_v1['noble_woman'] = np.where((got_v1['male'] == 0) & (got_v1['isNoble'] == 1 ), 1,0)
# ### Let's check the new variables against isAlive to see that they are not
# simply following the dependent variable
################
# ### Now let's make some graphs!
# We only want to graph some variables, let's create a kf with the columns we want to see
got_hist = mk.KnowledgeFrame.clone(got_v1)
col_sip = ['S.No', 'name', 'culture', 'dateOfBirth', 'mother',
'father', 'house','heir', 'spouse','m_mother',
'm_father', 'm_heir', 'm_house', 'm_spouse']
got_hist = got_hist.sip(col_sip, axis=1)
# Now let's graph
got_hist.hist(figsize = (16, 20), bins = 10, xlabelsize = 12, ylabelsize = 12)
##################
# ### Now let's remove some variables for our 1st approach
# We do this to make an easy 1st approach and create our first model
# Then we can see what happens and improve our model
# We will try to remove those that are less relevant before continuing
# Create a new kf with the sip variables
got_num = mk.KnowledgeFrame.clone(got_v1)
got_num = got_num.sip(['name', 'culture', 'dateOfBirth', 'mother',
'father', 'heir', 'house', 'spouse','m_mother',
'm_father', 'm_heir', 'm_spouse',
'isAliveMother', 'isAliveFather',
'isAliveHeir', 'isAliveSpouse', 'title'], axis=1)
got_num['popularity'].value_round(2)
print(got_num.info())
# Now we are rid of total_all the missing values
###################
# ### Let's now see the correlations between them
# Let's create a correlation between the remaining variables
# Creation of the corr()
got_corr = got_num.corr()
# Print the corr() the var we want to predict: isAlive
print(got_corr['isAlive'].sort_the_values())
"""
We see interesting results with good insights
Insights:
* If you appear in book 4 oyu have higher probability to be alive
* Age has a negative corr, what averages that the older the worst
* Having mwhatever dead realations is not good to survive
* also being popular can cause your death
* The variables created using the dependent var (isAlive) have a strong corr() but only because
of that, we are not going to use them.
"""
##############
# Let's continue with another analysis: a heatmapping of the correlations
# now let's draw the heatmapping
fig, ax=plt.subplots(figsize=(20,20))
sns.set(font_scale=2)
sns.heatmapping(got_corr,
cmapping = 'Blues',
square = True,
annot = False,
linecolor = 'black',
linewidths = 0.5)
#plt.savefig('correlation_matrix_total_all_var')
plt.show()
##################
# ### Let's see some scatterplots
# This is between the more relevant variables with isAlive
sns.set()
cols = ['dead_rela_1','numDeadRelations','popularity',
'dead_rela_4','popu_0.3','culture_vale','culture_northmen',
'age','book4_A_Feast_For_Crows', 'isAlive']
sns.pairplot(got_num[cols], height= 2.5)
plt.show();
# ### Let's focus only on some graphs that are interesting
sns.set()
cols = ['numDeadRelations','popularity',
'age', 'book4_A_Feast_For_Crows']
sns.pairplot(got_num[cols], height= 2.5)
plt.show();
"""
Here we can highlight some insights:
1) The most popular character are the one between 0 and 60. And being more popular
is dangerous, more popular = more chances to be dead
2) Also from the corr() we can see thar being older is worst for being alive.
"""
sns.regplot(x="popularity", y="numDeadRelations", data=got, color='b')
plt.axvline(.5, color='blue')
sns.regplot(x="popularity", y="age", data=got, color='b')
plt.axvline(.5, color='blue')
#################
# ### Let's see the outliers
for col in got_num:
sns.set()
plt.figure(figsize = (7, 3))
ax = sns.boxplot(x=got_num[col], data=got_num)
plt.setp(ax.artists, alpha=.5, linewidth=2, edgecolor="k")
plt.xticks(rotation=45)
# From the outlier analysis we see that the popularity variable is interesting
# The outliers begin above 0.2; there's a breakpoint there
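# To make the 0.2 breakpoint concrete (sketch), check the upper quantiles of popularity:
print(got_num['popularity'].quantile([0.75, 0.90, 0.95]).value_round(2))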
##########################################################################################
# Model Creation
##########################################################################################
# The models that we are going to use are:
# * KNN Classification
# * Random Forest
# * GBM
#####################
# KNN Classifier Basic
#####################
# Let's start creating a basic model
x = got[[ #'title',
#'culture',
'male',
#'heir',
#'house',
'book1_A_Game_Of_Thrones',
#'book2_A_Clash_Of_Kings',
#'book3_A_Storm_Of_Swords',
'book4_A_Feast_For_Crows',
#'book5_A_Dance_with_Dragons',
'isMarried',
'isNoble',
#'age',
#'numDeadRelations',
'popularity']]
y = got.loc[:, 'isAlive']
seed = 508
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, stratify=y,shuffle=True,random_state=seed)
training_accuracy = []
test_accuracy = []
neighbors_settings = range(1, 51)
for n_neighbors in neighbors_settings:
# build the model
clf = KNeighborsClassifier(n_neighbors = n_neighbors)
clf.fit(x_train, y_train.values.flat_underlying())
# record training set accuracy
training_accuracy.adding(clf.score(x_train, y_train))
# record generalization accuracy
test_accuracy.adding(clf.score(x_test, y_test))
print(test_accuracy.index(getting_max(test_accuracy)) + 1)
fig, ax = plt.subplots(figsize=(12,9))
plt.plot(neighbors_settings, training_accuracy, label = "training accuracy")
plt.plot(neighbors_settings, test_accuracy, label = "test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
plt.show()
########################
# The best results occur when k = 7.
########################
# Building a model with k = 7
knn_clf = KNeighborsClassifier(n_neighbors = 7)
# Fitting the model based on the training data
knn_clf_fit = knn_clf.fit(x_train, y_train)
#knn_clf_fit = knn_clf.fit(X_train, y_train.values.flat_underlying())
print('Training Score', knn_clf_fit.score(x_train, y_train).value_round(4))
print('Testing Score:', knn_clf_fit.score(x_test, y_test).value_round(4))
knn_clf_pred = knn_clf_fit.predict(x_test)
knn_clf_pred_probabilities = knn_clf_fit.predict_proba(x_test)
#print(knn_clf_pred)
#print(knn_clf_pred_probabilities)
# Here we getting a decent result without using the created features
####################
# CONFUSION MATRIX
####################
print(confusion_matrix(y_true = y_test,
y_pred = knn_clf_pred))
labels = ['Alive-1', 'Not Alive-0']
cm = confusion_matrix(y_true = y_test,
y_pred = knn_clf_pred)
sns.heatmapping(cm,
annot = True,
xticklabels = labels,
yticklabels = labels,
cmapping = 'Blues')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion matrix of the classifier')
plt.show()
# Here we can see that the off-diagonal count is bigger where we predicted that the character will
# not be alive but they are alive (it is better in this case to have more of the errors here)
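# Per-class precision and recall make that trade-off explicit. A minimal sketch
# using the sklearn.metrics module imported at the top:
print(sklearn.metrics.classification_report(y_test,
                                            knn_clf_pred,
                                            target_names=['Not Alive-0', 'Alive-1']))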
################
# ### Now let's create a Random Forest
#################
################################
# Random Forest in scikit-learn (basic model)
###############################
# Let's create a basic model without the created features first, with the same vars
# Preparing a KnowledgeFrame based on the analysis above
x = got[[ 'male',
'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'isMarried',
'isNoble',
'popularity']]
y = got.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
X_train, X_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Following the same procedure as other scikit-learn modeling techniques
# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 500,
criterion = 'gini',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Full forest using entropy
full_forest_entropy = RandomForestClassifier(n_estimators = 500,
criterion = 'entropy',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Fitting the models
full_gini_fit = full_forest_gini.fit(X_train, y_train)
full_entropy_fit = full_forest_entropy.fit(X_train, y_train)
# Scoring the gini model
print('Gini - Training Score:', full_gini_fit.score(X_train, y_train).value_round(4))
print('Gini - Testing Score:', full_gini_fit.score(X_test, y_test).value_round(4))
# Scoring the entropy model
print('Entropy - Training Score', full_entropy_fit.score(X_train, y_train).value_round(4))
print('Entropy - Testing Score:', full_entropy_fit.score(X_test, y_test).value_round(4))
# Here we see the same results as before with the same variables
# Here we getting the following (Entropy is better):
# * Gini - Training Score: 0.7967
# * Gini - Testing Score: 0.8154
# * Entropy - Training Score 0.7967
# * Entropy - Testing Score: 0.8205
# Another thing that we see here is that the testing score is bigger than the training score
# The data is not overfitted
# Let's now see the importance of every variable to draw some conclusions
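# A hedged cross-validation check of the basic entropy forest (sketch, cv = 3
# to keep the runtime low), to back up the overfitting observation:
cv_entropy = cross_val_score(full_forest_entropy, x, y, cv = 3)
print('Entropy - CV average accuracy:', np.average(cv_entropy))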
########################
# Feature importance function
########################
def plot_feature_importances(model, train = X_train, export = False):
fig, ax = plt.subplots(figsize=(12,9))
n_features = X_train.shape[1]
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(mk.np.arange(n_features), train.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
if export == True:
plt.savefig('Tree_Leaf_50_Feature_Importance.png')
########################
plot_feature_importances(full_gini_fit,
train = X_train,
export = False)
plot_feature_importances(full_entropy_fit,
train = X_train,
export = False)
# Here we can see which variables are the most important for this model:
# The most important are:
# * popularity
# * book4_A_Feast_For_Crows
# Conclusion: try not to be too popular, but popular enough to appear in as mwhatever books as possible (and better if you are in book N 4)
#######################################################################################
############################### IMPROVED MODELS WITH E.F ##############################
#######################################################################################
###############
# KNN Classifier Improved
###############
# Let's pick the best variables for us to put in the model
# Let's start creating a basic model
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
x_train, x_test, y_train, y_test = train_test_split(
x,
y,
test_size=0.1,
stratify=y,
shuffle=True,
random_state=508)
training_accuracy = []
test_accuracy = []
neighbors_settings = range(1, 51)
for n_neighbors in neighbors_settings:
# build the model
clf = KNeighborsClassifier(n_neighbors = n_neighbors)
clf.fit(x_train, y_train.values.flat_underlying())
# record training set accuracy
training_accuracy.adding(clf.score(x_train, y_train))
# record generalization accuracy
test_accuracy.adding(clf.score(x_test, y_test))
print(test_accuracy.index(getting_max(test_accuracy)) + 1)
fig, ax = plt.subplots(figsize=(12,9))
plt.plot(neighbors_settings, training_accuracy, label = "training accuracy")
plt.plot(neighbors_settings, test_accuracy, label = "test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
plt.show()
########################
# The best results occur when k = 3.
########################
# Building a model with k = 3
knn_clf = KNeighborsClassifier(n_neighbors = 3)
# Fitting the model based on the training data
knn_clf_fit = knn_clf.fit(x_train, y_train)
#knn_clf_fit = knn_clf.fit(X_train, y_train.values.flat_underlying())
print('Training Score', knn_clf_fit.score(x_train, y_train).value_round(4))
print('Testing Score:', knn_clf_fit.score(x_test, y_test).value_round(4))
knn_clf_pred = knn_clf_fit.predict(x_test)
knn_clf_pred_probabilities = knn_clf_fit.predict_proba(x_test)
#print(knn_clf_pred)
#print(knn_clf_pred_probabilities)
"""
Here we can see how important are the new variables put it in the model.
We getting:
Training Score 0.9611
Testing Score: 0.9385
We can see that is not too overfit, we have a good balance.
Let's try to improve it in the following section.
"""
################################
# Random Forest in scikit-learn (IMPROVED)
###############################
# Let's now build the forest again, this time using the engineered features
# Preparing a KnowledgeFrame based on the analysis above
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
x_train, x_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Following the same procedure as other scikit-learn modeling techniques
# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 500,
criterion = 'gini',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Full forest using entropy
full_forest_entropy = RandomForestClassifier(n_estimators = 500,
criterion = 'entropy',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Fitting the models
full_gini_fit = full_forest_gini.fit(x_train, y_train)
full_entropy_fit = full_forest_entropy.fit(x_train, y_train)
# Scoring the gini model
print('Gini - Training Score:', full_gini_fit.score(x_train, y_train).value_round(4))
print('Gini - Testing Score:', full_gini_fit.score(x_test, y_test).value_round(4))
# Scoring the entropy model
print('Entropy - Training Score', full_entropy_fit.score(x_train, y_train).value_round(4))
print('Entropy - Testing Score:', full_entropy_fit.score(x_test, y_test).value_round(4))
# Here we getting the following scores (Gini is better on the test set this time):
# * Gini - Training Score: 0.9451
# * Gini - Testing Score: 0.9436
# * Entropy - Training Score 0.9445
# * Entropy - Testing Score: 0.9282
########################
# Feature importance function
########################
def plot_feature_importances(model, train = x_train, export = False):
fig, ax = plt.subplots(figsize=(12,9))
n_features = x_train.shape[1]
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(mk.np.arange(n_features), train.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
if export == True:
plt.savefig('Tree_Leaf_50_Feature_Importance.png')
########################
plot_feature_importances(full_gini_fit,
train = x_train,
export = False)
plot_feature_importances(full_entropy_fit,
train = x_train,
export = False)
# Here we can see the importance of the variable age (filled in with the getting_minimum) and also popularity.
# Meaning that the older you are, the bigger the chance of dying, and the same with popularity. What is good is to appear in book N 4
#############################
# ### Now let's try to improve the model with RandomizedSearchCV
#############################
# It is important to say that RandomizedSearchCV was chosen over GridSearchCV because of processing time
# and because of the deadline to present; with more time, grid search is better in terms of improvement.
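# For reference, the exhaustive alternative would look roughly like this
# (a sketch only, left commented out because of the processing time mentioned
# above; GridSearchCV is already imported at the top of the script):
# grid_cv = GridSearchCV(RandomForestClassifier(random_state = 508),
#                        param_grid = {'n_estimators': [100, 500, 1000],
#                                      'getting_min_sample_by_nums_leaf': [1, 15, 30],
#                                      'criterion': ['gini', 'entropy']},
#                        cv = 3,
#                        scoring = 'roc_auc')
# grid_cv.fit(x_train, y_train)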
##################
# Tuned Parameters
##################
#############################
# Applying RandomizedSearchCV in Random Forest
############################
# Creating a hyperparameter grid
estimator_space = mk.np.arange(100, 1350, 250)
leaf_space = mk.np.arange(1, 150, 15)
criterion_space = ['gini', 'entropy']
bootstrap_space = [True, False]
warm_start_space = [True, False]
param_grid = {'n_estimators' : estimator_space,
'getting_min_sample_by_nums_leaf' : leaf_space,
'criterion' : criterion_space,
'bootstrap' : bootstrap_space,
'warm_start' : warm_start_space}
# Building the model object one more time
full_forest_grid = RandomForestClassifier(getting_max_depth = None,
random_state = 508)
gbm_grid_cv = RandomizedSearchCV(full_forest_grid,
param_grid,
cv = 3,
n_iter = 50,
scoring = 'roc_auc')
# Fit it to the training data
gbm_grid_cv.fit(x_train, y_train)
# Print the optimal parameters and best score
print("Tuned Rand Forest Parameter:", gbm_grid_cv.best_params_)
print("Tuned Rand Forest Accuracy:", gbm_grid_cv.best_score_.value_round(4))
# As we can see, here we have the new parameters and the tuned Random Forest score
# (note: since scoring = 'roc_auc', this "Accuracy" is the cross-validated ROC AUC)
# * Tuned Rand Forest Parameter: {'warm_start': True, 'n_estimators': 100, 'getting_min_sample_by_nums_leaf': 16, 'criterion': 'entropy', 'bootstrap': False}
# * Tuned Rand Forest Accuracy: 0.9812
################################
# ### Now let's create again the model with the Tuned Parameters
###############################
###############################################################################
# Random Forest in scikit-learn (improved version using RandomizedSearchCV)
###############################################################################
# Preparing a KnowledgeFrame based on the analysis above
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
x_train, x_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Following the same procedure as other scikit-learn modeling techniques
# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 100,
criterion = 'entropy',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 16,
bootstrap = False,
warm_start = True,
random_state = 508)
# Fitting the models
full_gini_fit = full_forest_gini.fit(x_train, y_train)
# Predictions
full_gini_fit_predict = full_gini_fit.predict(x_test)
# Scoring the gini model
print('Entropy - Training Score:', full_gini_fit.score(x_train, y_train).value_round(4))
print('Entropy - Testing Score:', full_gini_fit.score(x_test, y_test).value_round(4))
# Here we see an improvement in the accuracy of the model (test score):
# * Entropy - Training Score: 0.9503
# * Entropy - Testing Score: 0.9436
#
# Using entropy as the splitting criterion is more accurate here
# Here you can see how, by tuning some parameters, we can better predict whether a character is going to die or not.
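# As an extra check (sketch), the held-out test ROC AUC of the tuned forest,
# using roc_auc_score imported above and the class-1 probabilities:
tuned_probs = full_gini_fit.predict_proba(x_test)[:, 1]
print('Tuned forest - Test ROC AUC:', roc_auc_score(y_test, tuned_probs))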
###################
# Let's see the AUC for the improved Random Forest
###################
rf_score = cross_val_score(full_forest_gini,
x,
y,
cv = 3, scoring= 'roc_auc')
average_auc = | mk.np.average(rf_score) | pandas.np.mean |
#!/usr/bin/env python
# coding: utf-8
getting_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import root_monkey
import monkey as mk
import ROOT as R
sns.set(color_codes=True)
# Importing the dataset
#mk.set_option('display.float_formating', lambda x: '%.8f' % x)
kf = root_monkey.read_root('/srv/data/hosein47/Analysis/Analysis_BKGx1_etau_signal_total_all_pi.root',key='pi')
X = kf.iloc[:,[1,3]]
y = kf.iloc[:,11]
lh=kf.iloc[:,6]
#Load the DNN model
from keras.models import load_model
model = load_model('DNN_model.h5')
# adding the output of the final_item layer of the DNN as the prior probability (a value between 0 and 1)
from keras import backend as K
inp = model.input
output=model.layers[6].output
functor=K.function([inp],[output])
out=np.array(functor([X]))
arr=np.reshape(out,(85289,-1))
prior=mk.KnowledgeFrame(arr)
data=mk.KnowledgeFrame(X)
data.insert(2, "prior", prior)
data.insert(3, "PionID", lh)
#data.insert(4, "isSignal", y)
data.header_num()
sns.scatterplot(data['pt'], data['prior'])
# binning the cosTheta and transverse momentum
bins_ct = np.linspace(-1,1,num=11)
bins_pt = np.linspace(0,3,num=11)
bins_prior = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
data['binned_ct'] = mk.cut(data['cosTheta'], bins_ct, include_lowest=False) #Bin values into discrete intervals
data['binned_pt'] = mk.cut(data['pt'], bins_pt, include_lowest=False)
data['binned_prior'] = mk.cut(data['prior'], bins_prior, include_lowest=False)
data=mk.KnowledgeFrame(data)
data.header_num()
# This categorisation helps in finding out the prior for each desired bin
gr=data.grouper(['binned_pt', 'binned_ct'])
nw= gr.average().reseting_index()
nw.header_num(10)
plt.figure(figsize=[15,7])
sns.boxplot(nw['binned_pt'], nw['prior'])
sns.scatterplot(nw['pt'],nw['prior'])
# Adding the new Posterior to the dataset as PID, and exagetting_mine its performance with respect to the old Posterior
data['PID']=((data['prior']*data['PionID'])/((1-data['prior'])+(data['prior'] * data['PionID'])))
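# The line above is a Bayes-style update: posterior = prior*L / (prior*L + (1 - prior)),
# which is equivalent to multiplying the prior odds by L, where L (here PionID) is treated
# as a likelihood-ratio-like score (an assumption about its meaning). A quick bounds check:
print(((data['PID'] >= 0) & (data['PID'] <= 1)).total_all())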
analysis= | mk.KnowledgeFrame.clone(data) | pandas.DataFrame.copy |
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from monkey import KnowledgeFrame, Collections
from monkey.compat import range, lrange, iteritems
#from monkey.core.datetools import formating as date_formating
import monkey.io.sql as sql
import monkey.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
class MonkeySQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and ftotal_allback cases.
"""
def sip_table(self, table_name):
self._getting_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _getting_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.getting_data_path(), 'iris.csv')
self.sip_table('iris')
self._getting_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header_numer row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._getting_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = KnowledgeFrame(data, columns=columns)
def _load_raw_sql(self):
self.sip_table('types_test_data')
self._getting_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._getting_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._getting_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.monkeySQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.sip_table('test_frame1')
def _to_sql_fail(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.monkeySQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.sip_table('test_frame1')
def _to_sql_replacing(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replacing')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _to_sql_adding(self):
# Nuke table just in case
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='adding')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _value_roundtrip(self):
self.sip_table('test_frame_value_roundtrip')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame_value_roundtrip')
result = self.monkeySQL.read_sql('SELECT * FROM test_frame_value_roundtrip')
result.set_index('monkey_index', inplace=True)
# result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.monkeySQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.monkeySQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(MonkeySQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use sip_table because that isn't part of the public api
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replacing(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replacing')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_adding(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='adding')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_legacy_write_frame(self):
"""Test legacy write frame name.
        Astotal_sume that functionality is already tested above so just do a quick check that it basictotal_ally works"""
sql.write_frame(
self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_value_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_value_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql(
'SELECT * FROM test_frame_value_roundtrip',
con=self.conn,
flavor='sqlite')
# HACK!
result.index = self.test_frame1.index
result.set_index('monkey_index', inplace=True)
result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_tquery(self):
iris_results = sql.tquery(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
""" Test date parsing in read_sql """
# No Parsing
kf = sql.read_sql(
"SELECT * FROM types_test_data", self.conn, flavor='sqlite')
self.assertFalse(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite', parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data", self.conn,
flavor='sqlite',
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
""" Test case where same column appears in parse_date and index_col"""
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['DateCol', 'IntDateCol'],
index_col='DateCol')
self.assertTrue(
issubclass(kf.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(
issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
class _TestSQLAlchemy(MonkeySQLTest):
"""
Base class for testing the sqlalchemy backend. Subclasses for specific
database types are created below.
    Astotal_sume that sqlalchemy takes care of the DB specifics
"""
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_sip_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
monkeySQL.sip_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_value_roundtrip(self):
self._value_roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_table, "this_doesnt_exist", con=self.conn)
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
def test_date_parsing(self):
# No Parsing
kf = sql.read_table("types_test_data", self.conn)
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table("types_test_data", self.conn, parse_dates={
'DateCol': {'formating': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(kf.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Collections(2**25 + 1,dtype=np.int32)
s2 = Collections(0.0,dtype=np.float32)
kf = KnowledgeFrame({'s1': s1, 's2': s2})
# write and read again
kf.to_sql("test_read_write", self.conn, index=False)
kf2 = sql.read_table("test_read_write", self.conn)
tm.assert_frame_equal(kf, kf2, check_dtype=False, check_exact=True)
class TestSQLAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
def connect(self):
return sqlalchemy.create_engine('sqlite:///:memory:')
def setUp(self):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not insttotal_alled')
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLAlchemy(self.conn)
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
self.assertFalse(issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
# --- Test SQLITE ftotal_allback
class TestSQLite(MonkeySQLTest):
'''
    Test the sqlite ftotal_allback (legacy mode) against an in-memory sqlite database.
    This mode does not use sqlalchemy, so it handles the DB specifics itself
'''
flavor = 'sqlite'
def connect(self):
return sqlite3.connect(':memory:')
def sip_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def setUp(self):
self.conn = self.connect()
self.monkeySQL = | sql.MonkeySQLLegacy(self.conn, 'sqlite') | pandas.io.sql.PandasSQLLegacy |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
        # Test not valid for numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
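# entries equal to na_value are treated as missing: they receive the -1 label and are left out of the returned distinctives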
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = | algos.incontain(arr, [arr[0]]) | pandas.core.algorithms.isin |
import argparse
import os
import string
import json
from pathlib import Path
import monkey as mk
import matplotlib.pyplot as plt # plotting
import numpy as np # dense matrices
from scipy.sparse import csr_matrix # sparse matrices
class PersonalData:
def __init__(self, mk):
print( | mk.header_num() | pandas.head |
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt  # needed by scatterMatrix and frequencyPlot below
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from monkey.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
def PCA(self):
for i in range(self.lengthlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lengthlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
def sample_by_numSize(self):
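# Sample-size curve for detecting a correlation of r = 0.3 at alpha = 0.05:
# Fisher z transform C = 0.5 * ln((1 + r) / (1 - r)), then N = ((Za - Zb) / C)**2 + 3,
# evaluated for power values from 0.50 to 0.99.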
r = 0.3
alpha = 0.05
# power=0.9
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.adding(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.adding(N)
return [powerArray, sizeArray]
def normaliza(self, X):
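# Column-wise standardization (zero mean, unit variance) with a finite-sample factor of sqrt((n - 1) / n) applied to the standard deviation.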
correction = np.sqrt((length(X) - 1) / length(X)) # standard factor correction
average_ = np.average(X, 0)
scale_ = np.standard(X, 0)
X = X - average_
X = X / (scale_ * correction)
return X
def gof(self):
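# Goodness of fit: geometric mean of the block-size-weighted average AVE and the mean R^2 of the endogenous latent variables.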
r2average = np.average(self.r2.T[self.endoexo()[0]].values)
AVEaverage = self.AVE().clone()
totalblock = 0
for i in range(self.lengthlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = length(block.columns.values)
totalblock += block
AVEaverage[self.latent[i]] = AVEaverage[self.latent[i]] * block
AVEaverage = np.total_sum(AVEaverage) / totalblock
return np.sqrt(AVEaverage * r2average)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lengthlatent):
if(self.latent[i] in self.LVariables['targetting'].values):
endoVar.adding(self.latent[i])
else:
exoVar.adding(self.latent[i])
return endoVar, exoVar
def residuals(self):
exoVar = []
endoVar = []
outer_residuals = self.data.clone()
# comun_ = self.data.clone()
for i in range(self.lengthlatent):
if(self.latent[i] in self.LVariables['targetting'].values):
endoVar.adding(self.latent[i])
else:
exoVar.adding(self.latent[i])
for i in range(self.lengthlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(length(outer_), 1)
loadings = loadings.reshape(length(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = mk.KnowledgeFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = mk.concating([outer_residuals, inner_residuals], axis=1)
average_ = np.average(self.data, 0)
# comun_ = comun_.employ(lambda row: row + average_, axis=1)
total_sumOuterResid = mk.KnowledgeFrame.total_sum(
mk.KnowledgeFrame.total_sum(outer_residuals**2))
total_sumInnerResid = mk.KnowledgeFrame.total_sum(
mk.KnowledgeFrame.total_sum(inner_residuals**2))
divisionFun = total_sumOuterResid + total_sumInnerResid
return residuals, outer_residuals, inner_residuals, divisionFun
def srmr(self):
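# Standardized root mean square residual: RMS of the element-wise difference between the empirical and model-implied correlation matrices.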
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).average())
return srmr
def implied(self):
corLVs = mk.KnowledgeFrame.cov(self.fscores)
implied_ = mk.KnowledgeFrame.dot(self.outer_loadings, corLVs)
implied = mk.KnowledgeFrame.dot(implied_, self.outer_loadings.T)
implied.values[[np.arange(length(self.manifests))] * 2] = 1
return implied
def empirical(self):
empirical = self.data_
return mk.KnowledgeFrame.corr(empirical)
def frequency(self, data=None, manifests=None):
if data is None:
data = self.data
if manifests is None:
manifests = self.manifests
frequencia = mk.KnowledgeFrame(0, index=range(1, 6), columns=manifests)
for i in range(length(manifests)):
frequencia[manifests[i]] = data[
manifests[i]].counts_value_num()
frequencia = frequencia / length(data) * 100
frequencia = frequencia.reindexing_axis(
sorted(frequencia.columns), axis=1)
frequencia = frequencia.fillnone(0).T
frequencia = frequencia[(frequencia.T != 0).whatever()]
getting_maximo = mk.KnowledgeFrame.getting_max(mk.KnowledgeFrame.getting_max(data, axis=0))
if int(getting_maximo) & 1:
neg = np.total_sum(frequencia.ix[:, 1: ((getting_maximo - 1) / 2)], axis=1)
ind = frequencia.ix[:, ((getting_maximo + 1) / 2)]
pos = np.total_sum(
frequencia.ix[:, (((getting_maximo + 1) / 2) + 1):getting_maximo], axis=1)
else:
neg = np.total_sum(frequencia.ix[:, 1:((getting_maximo) / 2)], axis=1)
ind = 0
pos = np.total_sum(frequencia.ix[:, (((getting_maximo) / 2) + 1):getting_maximo], axis=1)
frequencia['Neg.'] = mk.Collections(
neg, index=frequencia.index)
frequencia['Ind.'] = mk.Collections(
ind, index=frequencia.index)
frequencia['Pos.'] = mk.Collections(
pos, index=frequencia.index)
return frequencia
def frequencyPlot(self, data_, SEM=None):
segmento = 'SEM'
SEMgetting_max = mk.KnowledgeFrame.getting_max(SEM)
ok = None
for i in range(1, self.lengthlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = mk.concating([block, SEM], axis=1)
for j in range(SEMgetting_max + 1):
dataSEM = (block.loc[data_[segmento] == j]
).sip(segmento, axis=1)
block_val = dataSEM.columns.values
dataSEM = self.frequency(dataSEM, block_val)['Pos.']
dataSEM = dataSEM.renagetting_ming(j + 1)
ok = dataSEM if ok is None else mk.concating(
[ok, dataSEM], axis=1)
for i in range(1, self.lengthlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
plotando = ok.ix[block_val].sipna(axis=1)
plotando.plot.bar()
plt.legend(loc='upper center',
bbox_to_anchor=(0.5, -.08), ncol=6)
plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
# plt.show()
# block.plot.bar()
# plt.show()
'''for i in range(1, self.lengthlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
block = self.frequency(block, block_val)
block.plot.bar()
plt.show()'''
def dataInfo(self):
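# Per-indicator descriptives: mean, standard deviation, skewness, kurtosis and the Shapiro-Wilk W statistic of each column.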
sd_ = np.standard(self.data, 0)
average_ = np.average(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(length(self.data.columns))]
return [average_, sd_, skew, kurtosis, w]
def predict(self, method='redundancy'):
exoVar = []
endoVar = []
for i in range(self.lengthlatent):
if(self.latent[i] in self.LVariables['targetting'].values):
endoVar.adding(self.latent[i])
else:
exoVar.adding(self.latent[i])
if (method == 'exogenous'):
Beta = self.path_matrix.ix[endoVar][endoVar]
Gamma = self.path_matrix.ix[endoVar][exoVar]
beta = [1 if (self.latent[i] in exoVar)
else 0 for i in range(self.lengthlatent)]
beta = np.diag(beta)
beta_ = [1 for i in range(length(Beta))]
beta_ = np.diag(beta_)
beta = mk.KnowledgeFrame(beta, index=self.latent, columns=self.latent)
mid = mk.KnowledgeFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
mid = (mid.T.values).flatten('F')
k = 0
for j in range(length(exoVar)):
for i in range(length(endoVar)):
beta.ix[endoVar[i], exoVar[j]] = mid[k]
k += 1
elif (method == 'redundancy'):
beta = self.path_matrix.clone()
beta_ = mk.KnowledgeFrame(1, index=np.arange(
length(exoVar)), columns=np.arange(length(exoVar)))
beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
elif (method == 'communality'):
beta = np.diag(np.ones(length(self.path_matrix)))
beta = mk.KnowledgeFrame(beta)
partial_ = mk.KnowledgeFrame.dot(self.outer_weights, beta.T.values)
prediction = mk.KnowledgeFrame.dot(partial_, self.outer_loadings.T.values)
predicted = mk.KnowledgeFrame.dot(self.data, prediction)
predicted.columns = self.manifests
average_ = np.average(self.data, 0)
intercept = average_ - np.dot(average_, prediction)
predictedData = predicted.employ(lambda row: row + intercept, axis=1)
return predictedData
def cr(self):
# Composite Reliability
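# Loadings come from the first principal component of each block's covariance matrix;
# CR = (sum |loading|)^2 / ((sum |loading|)^2 + sum (1 - loading^2)); single-indicator blocks get CR = 1.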
composite = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lengthlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = length(block.columns)
if(p != 1):
cor_mat = np.cov(block.T)
evals, evecs = np.linalg.eig(cor_mat)
U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
loadings = V[0, :] * np.sqrt(evals[0])
numerador = np.total_sum(abs(loadings))**2
denogetting_minador = numerador + (p - np.total_sum(loadings ** 2))
cr = numerador / denogetting_minador
composite[self.latent[i]] = cr
else:
composite[self.latent[i]] = 1
composite = composite.T
return(composite)
def r2adjusted(self):
n = length(self.data_)
r2 = self.r2.values
r2adjusted = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lengthlatent):
p = total_sum(self.LVariables['targetting'] == self.latent[i])
r2adjusted[self.latent[i]] = r2[i] - \
(p * (1 - r2[i])) / (n - p - 1)
return r2adjusted.T
def htmt(self):
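# Heterotrait-monotrait ratio: mean between-block indicator correlation divided by the geometric mean of the two within-block mean correlations.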
htmt_ = mk.KnowledgeFrame(mk.KnowledgeFrame.corr(self.data_),
index=self.manifests, columns=self.manifests)
average = []
total_allBlocks = []
for i in range(self.lengthlatent):
block_ = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
total_allBlocks.adding(list(block_.values))
block = htmt_.ix[block_, block_]
average_ = (block - np.diag(np.diag(block))).values
average_[average_ == 0] = np.nan
average.adding(np.nanaverage(average_))
comb = [[k, j] for k in range(self.lengthlatent)
for j in range(self.lengthlatent)]
comb_ = [(np.sqrt(average[comb[i][1]] * average[comb[i][0]]))
for i in range(self.lengthlatent ** 2)]
comb__ = []
for i in range(self.lengthlatent ** 2):
block = (htmt_.ix[total_allBlocks[comb[i][1]],
total_allBlocks[comb[i][0]]]).values
# block[block == 1] = np.nan
comb__.adding(np.nanaverage(block))
htmt__ = np.divisionide(comb__, comb_)
where_are_NaNs = np.ifnan(htmt__)
htmt__[where_are_NaNs] = 0
htmt = mk.KnowledgeFrame(np.tril(htmt__.reshape(
(self.lengthlatent, self.lengthlatent)), k=-1), index=self.latent, columns=self.latent)
return htmt
def comunalidades(self):
# Comunalidades
return self.outer_loadings**2
def AVE(self):
# AVE
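# Average variance extracted: mean squared loading (communality) over the indicators that load on each latent variable.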
return self.comunalidades().employ(lambda column: column.total_sum() / (column != 0).total_sum())
def fornell(self):
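# Fornell-Larcker matrix: squared latent-variable correlations with each construct's AVE on the diagonal.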
cor_ = mk.KnowledgeFrame.corr(self.fscores)**2
AVE = self.comunalidades().employ(lambda column: column.total_sum() / (column != 0).total_sum())
for i in range(length(cor_)):
cor_.ix[i, i] = AVE[i]
return(cor_)
def rhoA(self):
# rhoA
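# rho_A reliability estimate (Dijkstra-Henseler style) built from the outer weights and the off-diagonal part of the indicator covariance matrix; NaN results fall back to 1.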
rhoA = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lengthlatent):
weights = mk.KnowledgeFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).whatever()]
result = mk.KnowledgeFrame.dot(weights.T, weights)
result_ = mk.KnowledgeFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = mk.KnowledgeFrame.dot(S.T, S) / S.shape[0]
numerador = (
np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
denogetting_minador = (
(np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
rhoA_ = ((result)**2) * (numerador / denogetting_minador)
if(np.ifnan(rhoA_.values)):
rhoA[self.latent[i]] = 1
else:
rhoA[self.latent[i]] = rhoA_.values
return rhoA.T
def xloads(self):
# Xloadings
A = self.data_.transpose().values
B = self.fscores.transpose().values
A_mA = A - A.average(1)[:, None]
B_mB = B - B.average(1)[:, None]
ssA = (A_mA**2).total_sum(1)
ssB = (B_mB**2).total_sum(1)
xloads_ = (np.dot(A_mA, B_mB.T) /
np.sqrt(np.dot(ssA[:, None], ssB[None])))
xloads = mk.KnowledgeFrame(
xloads_, index=self.manifests, columns=self.latent)
return xloads
def corLVs(self):
# Correlations LVs
corLVs_ = np.tril(mk.KnowledgeFrame.corr(self.fscores))
return mk.KnowledgeFrame(corLVs_, index=self.latent, columns=self.latent)
def alpha(self):
# Cronbach Alpha
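# Standardized Cronbach's alpha computed from the inter-item correlations and the variance of the summed score, scaled by p / (p - 1); single-indicator blocks get alpha = 1.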
alpha = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lengthlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = length(block.columns)
if(p != 1):
p_ = length(block)
correction = np.sqrt((p_ - 1) / p_)
soma = np.var(np.total_sum(block, axis=1))
cor_ = mk.KnowledgeFrame.corr(block)
denogetting_minador = soma * correction**2
numerador = 2 * np.total_sum(np.tril(cor_) - np.diag(np.diag(cor_)))
alpha_ = (numerador / denogetting_minador) * (p / (p - 1))
alpha[self.latent[i]] = alpha_
else:
alpha[self.latent[i]] = 1
return alpha.T
def vif(self):
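# Variance inflation factors: each manifest variable is regressed on all the others by least squares and VIF = 1 / (1 - R^2).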
vif = []
totalmanifests = range(length(self.data_.columns))
for i in range(length(totalmanifests)):
independent = [x for j, x in enumerate(totalmanifests) if j != i]
coef, resid = np.linalg.lstsq(
self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
r2 = 1 - resid / \
(self.data_.ix[:, i].size * self.data_.ix[:, i].var())
vif.adding(1 / (1 - r2))
vif = mk.KnowledgeFrame(vif, index=self.manifests)
return vif
def PLSc(self):
##################################################
# PLSc
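# Consistent-PLS correction: LV correlations are disattenuated by sqrt(rho_A_i * rho_A_j), loadings are rescaled with sqrt(rho_A), and path coefficients are re-estimated on the corrected correlation matrix.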
rA = self.rhoA()
corFalse = self.corLVs()
for i in range(self.lengthlatent):
for j in range(self.lengthlatent):
if i == j:
corFalse.ix[i][j] = 1
else:
corFalse.ix[i][j] = corFalse.ix[i][
j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
corTrue = np.zeros([self.lengthlatent, self.lengthlatent])
for i in range(self.lengthlatent):
for j in range(self.lengthlatent):
corTrue[j][i] = corFalse.ix[i][j]
corTrue[i][j] = corFalse.ix[i][j]
corTrue = mk.KnowledgeFrame(corTrue, corFalse.columns, corFalse.index)
# Loadings
attenuedOuter_loadings = mk.KnowledgeFrame(
0, index=self.manifests, columns=self.latent)
for i in range(self.lengthlatent):
weights = mk.KnowledgeFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).whatever()]
result = mk.KnowledgeFrame.dot(weights.T, weights)
result_ = mk.KnowledgeFrame.dot(weights, weights.T)
newLoad = (
weights.values * np.sqrt(rA.ix[self.latent[i]].values)) / (result.values)
myindex = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
myindex_ = self.latent[i]
attenuedOuter_loadings.ix[myindex.values, myindex_] = newLoad
# Path
dependent = np.distinctive(self.LVariables.ix[:, 'targetting'])
for i in range(length(dependent)):
independent = self.LVariables[self.LVariables.ix[
:, "targetting"] == dependent[i]]["source"]
dependent_ = corTrue.ix[dependent[i], independent]
independent_ = corTrue.ix[independent, independent]
# path = np.dot(np.linalg.inv(independent_),dependent_)
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
self.path_matrix.ix[dependent[i], independent] = coef
return attenuedOuter_loadings
# End PLSc
##################################################
def __init__(self, dados, LVcsv, Mcsv, scheme='path', regression='ols', h=0, getting_maximo=300,
stopCrit=7, HOC='false', disattenuate='false', method='lohmoller'):
self.data = dados
self.LVcsv = LVcsv
self.Mcsv = Mcsv
self.getting_maximo = getting_maximo
self.stopCriterion = stopCrit
self.h = h
self.scheme = scheme
self.regression = regression
self.disattenuate = disattenuate
contador = 0
self.convergiu = 0
data = dados if type(
dados) is mk.core.frame.KnowledgeFrame else mk.read_csv(dados)
LVariables = mk.read_csv(LVcsv)
Variables = Mcsv if type(
Mcsv) is mk.core.frame.KnowledgeFrame else mk.read_csv(Mcsv)
latent_ = LVariables.values.flatten('F')
latent__ = np.distinctive(latent_, return_index=True)[1]
# latent = np.distinctive(latent_)
latent = [latent_[i] for i in sorted(latent__)]
self.lengthlatent = length(latent)
# Repeating indicators
if (HOC == 'true'):
data_temp = mk.KnowledgeFrame()
for i in range(self.lengthlatent):
block = self.data[Variables['measurement']
[Variables['latent'] == latent[i]]]
block = block.columns.values
data_temp = mk.concating(
[data_temp, data[block]], axis=1)
cols = list(data_temp.columns)
counts = Counter(cols)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
cols[cols.index(s)] = s + '.' + str(suffix)
data_temp.columns = cols
doublemanifests = list(Variables['measurement'].values)
counts = Counter(doublemanifests)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
doublemanifests[doublemanifests.index(
s)] = s + '.' + str(suffix)
Variables['measurement'] = doublemanifests
data = data_temp
# End data manipulation
manifests_ = Variables['measurement'].values.flatten('F')
manifests__ = np.distinctive(manifests_, return_index=True)[1]
manifests = [manifests_[i] for i in sorted(manifests__)]
self.manifests = manifests
self.latent = latent
self.Variables = Variables
self.LVariables = LVariables
data = data[manifests]
data_ = self.normaliza(data)
self.data = data
self.data_ = data_
outer_weights = mk.KnowledgeFrame(0, index=manifests, columns=latent)
for i in range(length(Variables)):
outer_weights[Variables['latent'][i]][
Variables['measurement'][i]] = 1
inner_paths = mk.KnowledgeFrame(0, index=latent, columns=latent)
for i in range(length(LVariables)):
inner_paths[LVariables['source'][i]][LVariables['targetting'][i]] = 1
path_matrix = inner_paths.clone()
if method == 'wold':
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
intera = self.lengthlatent
intera_ = 1
# LOOP
for iterations in range(0, self.getting_maximo):
contador = contador + 1
if method == 'lohmoller':
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
intera = 1
intera_ = self.lengthlatent
# fscores = self.normaliza(fscores) # Old Mode A
for q in range(intera):
# Schemes
if (scheme == 'path'):
for h in range(intera_):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (total_sum(follow) > 0):
# i ~ follow
inner_paths.ix[inner_paths[follow].index, i] = np.linalg.lstsq(
fscores.ix[:, follow], fscores.ix[:, i])[0]
predec = (path_matrix.ix[:, i] == 1)
if (total_sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(length(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'fuzzy'):
for h in range(length(path_matrix)):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (total_sum(follow) > 0):
ac, awL, awR = otimiza(fscores.ix[:, i], fscores.ix[
:, follow], length(fscores.ix[:, follow].columns), 0)
inner_paths.ix[inner_paths[follow].index, i] = ac
predec = (path_matrix.ix[:, i] == 1)
if (total_sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(length(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'centroid'):
inner_paths = np.sign(mk.KnowledgeFrame.multiply(
mk.KnowledgeFrame.corr(fscores), (path_matrix + path_matrix.T)))
elif (scheme == 'factor'):
inner_paths = mk.KnowledgeFrame.multiply(
mk.KnowledgeFrame.corr(fscores), (path_matrix + path_matrix.T))
elif (scheme == 'horst'):
inner_paths = inner_paths
print(inner_paths)
if method == 'wold':
fscores[self.latent[q]] = mk.KnowledgeFrame.dot(
fscores, inner_paths)
elif method == 'lohmoller':
fscores = mk.KnowledgeFrame.dot(fscores, inner_paths)
final_item_outer_weights = outer_weights.clone()
# Outer Weights
for i in range(self.lengthlatent):
# Reflexivo / Modo A
if(Variables['mode'][Variables['latent'] == latent[i]]).whatever() == "A":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
# 1/N (Z dot X)
res_ = (1 / length(data_)) * np.dot(b, a)
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / np.standard(res_) # New Mode A
# Formativo / Modo B
elif(Variables['mode'][Variables['latent'] == latent[i]]).whatever() == "B":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
# (X'X)^-1 X'Y
a_ = np.dot(a.T, a)
inv_ = np.linalg.inv(a_)
res_ = np.dot(np.dot(inv_, a.T),
fscores.ix[:, latent[i]])
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / (np.standard(np.dot(data_.ix[:, myindex], res_)))
if method == 'wold':
fscores = mk.KnowledgeFrame.dot(fscores, inner_paths)
diff_ = np.getting_max(
np.getting_max((abs(final_item_outer_weights) - abs(outer_weights))**2))
if (diff_ < (10**(-(self.stopCriterion)))):
self.convergiu = 1
break
# END LOOP
# print(contador)
# Bootstraping trick
if(np.ifnan(outer_weights).whatever().whatever()):
self.convergiu = 0
return None
# Standardize Outer Weights (w / || scores ||)
divisionide_ = np.diag(1 / (np.standard(np.dot(data_, outer_weights), 0)
* np.sqrt((length(data_) - 1) / length(data_))))
outer_weights = np.dot(outer_weights, divisionide_)
outer_weights = mk.KnowledgeFrame(
outer_weights, index=manifests, columns=latent)
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
# Outer Loadings
outer_loadings = mk.KnowledgeFrame(0, index=manifests, columns=latent)
for i in range(self.lengthlatent):
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
cor_ = [sp.stats.pearsonr(a.ix[:, j], b)[0]
for j in range(length(a.columns))]
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_loadings.ix[myindex.values, myindex_] = cor_
# Paths
if (regression == 'fuzzy'):
path_matrix_low = path_matrix.clone()
path_matrix_high = path_matrix.clone()
path_matrix_range = path_matrix.clone()
r2 = mk.KnowledgeFrame(0, index=np.arange(1), columns=latent)
dependent = np.distinctive(LVariables.ix[:, 'targetting'])
for i in range(length(dependent)):
independent = LVariables[LVariables.ix[
:, "targetting"] == dependent[i]]["source"]
dependent_ = fscores.ix[:, dependent[i]]
independent_ = fscores.ix[:, independent]
if (self.regression == 'ols'):
# Path Normal
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
# model = sm.OLS(dependent_, independent_)
# results = model.fit()
# print(results.total_summary())
# r2[dependent[i]] = results.rsquared
r2[dependent[i]] = 1 - resid / \
(dependent_.size * dependent_.var())
path_matrix.ix[dependent[i], independent] = coef
# pvalues.ix[dependent[i], independent] = results.pvalues
elif (self.regression == 'fuzzy'):
size = length(independent_.columns)
ac, awL, awR = otimiza(dependent_, independent_, size, self.h)
# plotaIC(dependent_, independent_, size)
ac, awL, awR = (ac[0], awL[0], awR[0]) if (
size == 1) else (ac, awL, awR)
path_matrix.ix[dependent[i], independent] = ac
path_matrix_low.ix[dependent[i], independent] = awL
path_matrix_high.ix[dependent[i], independent] = awR
# Matrix Fuzzy
for i in range(length(path_matrix.columns)):
for j in range(length(path_matrix.columns)):
path_matrix_range.ix[i, j] = str(value_round(
path_matrix_low.ix[i, j], 3)) + ' ; ' + str(value_round(path_matrix_high.ix[i, j], 3))
r2 = r2.T
self.path_matrix = path_matrix
self.outer_weights = outer_weights
self.fscores = fscores
#################################
# PLSc
if disattenuate == 'true':
outer_loadings = self.PLSc()
##################################
# Path Effects
indirect_effects = mk.KnowledgeFrame(0, index=latent, columns=latent)
path_effects = [None] * self.lengthlatent
path_effects[0] = self.path_matrix
for i in range(1, self.lengthlatent):
path_effects[i] = mk.KnowledgeFrame.dot(
path_effects[i - 1], self.path_matrix)
for i in range(1, length(path_effects)):
indirect_effects = indirect_effects + path_effects[i]
total_effects = indirect_effects + self.path_matrix
if (regression == 'fuzzy'):
self.path_matrix_high = path_matrix_high
self.path_matrix_low = path_matrix_low
self.path_matrix_range = path_matrix_range
self.total_effects = total_effects.T
self.indirect_effects = indirect_effects
self.outer_loadings = outer_loadings
self.contador = contador
self.r2 = r2
def impa(self):
# Unstandardized Scores
scale_ = np.standard(self.data, 0)
outer_weights_ = mk.KnowledgeFrame.divisionide(
self.outer_weights, scale_, axis=0)
relativo = mk.KnowledgeFrame.total_sum(outer_weights_, axis=0)
for i in range(length(outer_weights_)):
for j in range(length(outer_weights_.columns)):
outer_weights_.ix[i, j] = (
outer_weights_.ix[i, j]) / relativo[j]
unstandardizedScores = mk.KnowledgeFrame.dot(self.data, outer_weights_)
# Rescaled Scores
rescaledScores = mk.KnowledgeFrame(0, index=range(
length(self.data)), columns=self.latent)
for i in range(self.lengthlatent):
block = self.data[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
getting_maximo = mk.KnowledgeFrame.getting_max(block, axis=0)
getting_minimo = mk.KnowledgeFrame.getting_min(block, axis=0)
getting_minimo_ = mk.KnowledgeFrame.getting_min(getting_minimo)
getting_maximo_ = mk.KnowledgeFrame.getting_max(getting_maximo)
rescaledScores[self.latent[
i]] = 100 * (unstandardizedScores[self.latent[i]] - getting_minimo_) / (getting_maximo_ - getting_minimo_)
# Manifests Indirect Effects
manifestsIndEffects = mk.KnowledgeFrame(
self.outer_weights, index=self.manifests, columns=self.latent)
effect_ = mk.KnowledgeFrame(
self.outer_weights, index=self.manifests, columns=self.latent)
for i in range(length(self.latent)):
effect_ = mk.KnowledgeFrame.dot(effect_, self.path_matrix.T)
manifestsIndEffects = manifestsIndEffects + effect_
# Performance Scores LV
performanceScoresLV = mk.KnowledgeFrame.average(rescaledScores, axis=0)
# Performance Manifests
getting_maximo = | mk.KnowledgeFrame.getting_max(self.data, axis=0) | pandas.DataFrame.max |
import monkey as mk
import requests
import ratelimit
from ratelimit import limits
from ratelimit import sleep_and_retry
def id_to_name(x):
"""
Converts from LittleSis ID number to name.
Parameters
----------
x : LittleSis ID number
Example
-------
>>> id_to_name(96583)
'<NAME>'
"""
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
response = response.json()
name = response['data']['attributes']['name']
return name
def name_to_id(name):
"""
Converts from name to LittleSis ID number. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
----------
name : Name to be converted
Example
-------
>>> name_to_id('<NAME>')
96583
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
return ID
def entity(name):
"""
Provides info from entity getting request to LittleSis API, by name input rather than id
input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of relationships listed
for entries that point to multiple entites (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
>>> entity('<NAME>')
{'meta': {'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': {'type': 'entities',
'id': 13503,
'attributes': {'id': 13503,
'name': '<NAME>',
'blurb': '44th President of the United States',
'total_summary': 'The 44th President of the United States, he was sworn into office on January 20, 2009; born in Honolulu, Hawaii, August
4, 1961; obtained early education in Jakarta, Indonesia, and Hawaii; continued education at Occidental College, Los Angeles,
Calif.; received a B.A. in 1983 from Columbia University, New York City; worked as a community organizer in Chicago, Ill.; studied
law at Harvard University, where he became the first African American president of the Harvard Law Review, and received J.D. in
1991; lecturer on constitutional law, University of Chicago; member, Illinois State senate 1997-2004; elected as a Democrat to the
U.S. Senate in 2004 for term beginning January 3, 2005.',
'website': 'http://obama.senate.gov/',
'parent_id': None,
'primary_ext': 'Person',
'umkated_at': '2021-12-15T21:28:15Z',
'start_date': '1961-08-04',
'end_date': None,
'aliases': ['Barack Obama'],
'types': ['Person', 'Political Candidate', 'Elected Representative']},
'links': {'self': 'https://littlesis.org/entities/13503-Barack_Obama'}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
def relationships(name):
"""
Provides info from relationships getting request to LittleSis API, by name input rather
than id input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of
relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
>>> relationships('<NAME>')
{'meta': {'currentPage': 1,
'pageCount': 1,
'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': [{'type': 'relationships',
'id': 1643319,
'attributes': {'id': 1643319,...}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def basic_entity(name):
"""
Creates monkey knowledgeframe for one indivisionidual or entity with basic informatingion from
entity getting request to LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or entity for which informatingion is desired.
Example
-------
>>> basic_entity('<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "Steve P...
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
info types website
0 [Person, Business Person] NaN }
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
data2 = response2['data']['attributes']
kf = mk.KnowledgeFrame(list(data2.items()))
kf.columns = ['info', 'value']
kf = mk.pivot(kf, columns = 'info', values = 'value')
kf = kf.fillnone(method='bfill', axis=0)
kf = kf.iloc[:1, :]
kf = kf[['name', 'aliases', 'blurb', 'start_date', 'end_date', 'types', 'website']]
kf.renagetting_ming(columns = {'start_date': 'date_of_birth'}, inplace = True)
return kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def list_entities(*args):
"""
Concatenates knowledgeframes created by basic_table() for entity getting requests to LittleSis
API, resulting in monkey knowledgeframe of multiple rows. Resorts to entity with the highest number of relationships listed for entries
that point to multiple entities (like final_item name only entries).
Parameters
----------
*args: List of names of indivisioniduals or entities for which to include informatingion in the resulting knowledgeframe.
Example
-------
>>> list_entities('<NAME>', '<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "<NAME>...
1 <NAME> [LeBron James]
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
1 NBA/Los Angeles Lakers—F 1984-12-30 NaN
info types website
0 [Person, Business Person] NaN
1 [Person, Business Person, Media Personality] NaN }
"""
list_of_kfs = []
for name in args:
kf = basic_entity(name)
list_of_kfs.adding(kf)
combined_kf = mk.concating(list_of_kfs, ignore_index=True)
return combined_kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def id_to_name(x):
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
if response.status_code != 200:
raise Exception('API response: {}'.formating(response.status_code))
else:
response = response.json()
name = response['data']['attributes']['name']
return name
def relationships_kf(name):
"""
Creates monkey knowledgeframe with informatingion from relationships getting request to LittleSis
API.
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> relationships_kf('<NAME>')
primary_entity related_entity amount currency \
0 Children’s Aid Society <NAME> None None
1 <NAME> <NAME> None None
...
category goods filings \
0 None None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'amount', 'currency', 'description1', 'goods', 'filings', 'description', 'start_date',
'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'primary_entity','entity1_id': 'related_entity', 'description1':'category'}, inplace = True)
return blurbs
def timelines(name):
"""
Creates knowledgeframe specifictotal_ally from timeline informatingion of relationships from
relationships getting request on LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> timelines('<NAME>')
earched_entity related_entity start_date \
0 Children’s Aid Society <NAME> None
1 <NAME> <NAME> None
...
end_date is_current
0 None None
1 None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'start_date', 'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'searched_entity','entity1_id': 'related_entity'}, inplace = True)
return blurbs
def bio(name):
"""
Provides paragraph biography/backgvalue_round description of 1 indivisionidual or entity from an entity getting request on LittleSis API. Resorts to
entity with the highest number of relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of one indivisionidual or organization for which biographical informatingion is desired.
Example
-------
>>> bio('<NAME>')
'The 44th President of the United States, he was sworn into office on January 20,
2009; born in Honolulu, Hawaii, August 4, 1961; obtained early education in Jakarta,
Indonesia, and Hawaii; continued education at Occidental College, Los Angeles, Calif.;
received a B.A. in 1983 from Columbia University, New York City; worked as a community
organizer in Chicago, Ill.; studied law at Harvard University, where he became the
first African American president of the Harvard Law Review, and received J.D. in 1991;
lecturer on constitutional law, University of Chicago; member, Illinois State senate
1997-2004; elected as a Democrat to the U.S. Senate in 2004 for term beginning January
3, 2005.'
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
response2 = response2['data']['attributes']['total_summary']
return response2
def lists(name):
"""
Provides list of total_all lists that the entity belongs to on the LittleSis website, from a
LittleSis lists getting request. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the list of list memberships is desired.
Example
-------
>>> lists('<NAME>')
Bloomberg Business Week Most Powerful Athletes (2011)
The World's Highest Paid Celebrities (2017)
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path = 'https://littlesis.org/api/entities/{}/lists'.formating(ID)
response = requests.getting(path)
response = response.json()
data = mk.KnowledgeFrame(response['data'])
data = | mk.KnowledgeFrame.convert_dict(data) | pandas.DataFrame.to_dict |
from __future__ import annotations
from typing import Any, cast, Generator, Iterable, Optional, TYPE_CHECKING, Union
import numpy as np
import monkey as mk
from monkey.core.frame import KnowledgeFrame
from monkey.core.collections import Collections
from tanuki.data_store.data_type import DataType
from tanuki.data_store.index.index import Index
from tanuki.data_store.index.monkey_index import MonkeyIndex
from tanuki.database.data_token import DataToken
from .data_backend import DataBackend, ILocIndexer, LocIndexer
if TYPE_CHECKING:
from tanuki.data_store.index.index_alias import IndexAlias
from tanuki.data_store.query import Query
class MonkeyBackend(DataBackend):
_data: KnowledgeFrame
_index: MonkeyIndex
_loc: _LocIndexer
_iloc: _ILocIndexer
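# The constructor normalizes any accepted input (None, Collections, KnowledgeFrame or dict of lists) into a single internal KnowledgeFrame so the rest of the backend handles only one representation.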
def __init__(
self,
data: Optional[Union[Collections, KnowledgeFrame, dict[str, list]]] = None,
index: Optional[MonkeyIndex] = None,
) -> None:
if data is None:
self._data = KnowledgeFrame(dtype="object")
elif type(data) is Collections:
self._data = cast(Collections, data).to_frame().transpose()
elif type(data) is KnowledgeFrame:
self._data = KnowledgeFrame(data)
elif type(data) is dict:
sample_by_num_value = next(iter(data.values()))
if not incontainstance(sample_by_num_value, Iterable) or incontainstance(sample_by_num_value, str):
self._data = | Collections(data) | pandas.core.series.Series |
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # used to find the stats table
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
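# The scraped population table has 13 columns, so the flat list of <td> strings is re-chunked into rows of 13 cells before being appended to the frame.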
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1 = d1.employ(mk.to_num, errors='ignore')
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4=mk.Collections.convert_list(d1[0:16][4])
list5=mk.Collections.convert_list(d1[0:16][5])
list6=mk.Collections.convert_list(d1[0:16][6])
list7=mk.Collections.convert_list(d1[0:16][7])
list8=mk.Collections.convert_list(d1[0:16][8])
list9=mk.Collections.convert_list(d1[0:16][9])
list10=mk.Collections.convert_list(d1[0:16][10])
#forecast table
c=[]
for j in total_all_t[1].findAll('td'):
c.adding(j.text)
bv=mk.KnowledgeFrame()
i=0
while(i<=(91-13)):
bv=bv.adding(mk.KnowledgeFrame([c[i:i+13]]) )
i=i+13
listq1=mk.Collections.convert_list(bv[0:7][0])
list11=mk.Collections.convert_list(bv[0:7][1])
list21=mk.Collections.convert_list(bv[0:7][2])
list31=mk.Collections.convert_list(bv[0:7][3])
list41=mk.Collections.convert_list(bv[0:7][4])
list51=mk.Collections.convert_list(bv[0:7][5])
list61=mk.Collections.convert_list(bv[0:7][6])
list71=mk.Collections.convert_list(bv[0:7][7])
list81=mk.Collections.convert_list(bv[0:7][8])
list91=mk.Collections.convert_list(bv[0:7][9])
list101= | mk.Collections.convert_list(bv[0:7][10]) | pandas.Series.tolist |
import types
from functools import wraps
import numpy as np
import datetime
import collections
from monkey.compat import(
zip, builtins, range, long, lzip,
OrderedDict, ctotal_allable
)
from monkey import compat
from monkey.core.base import MonkeyObject
from monkey.core.categorical import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from monkey.core.internals import BlockManager, make_block
from monkey.core.collections import Collections
from monkey.core.panel import Panel
from monkey.util.decorators import cache_readonly, Appender
import monkey.core.algorithms as algos
import monkey.core.common as com
from monkey.core.common import(_possibly_downcast_to_dtype, ifnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from monkey.core.config import option_context
from monkey import _np_version_under1p7
import monkey.lib as lib
from monkey.lib import Timestamp
import monkey.tslib as tslib
import monkey.algos as _algos
import monkey.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a KnowledgeFrame or when passed to KnowledgeFrame.employ. If
passed a dict, the keys must be KnowledgeFrame column names.
Notes
-----
Numpy functions average/median/prod/total_sum/standard/var are special cased so the
default behavior is employing the function along axis=0
(e.g., np.average(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.average(arr_2d)).
Returns
-------
aggregated : KnowledgeFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_employ_whitelist = frozenset([
'final_item', 'first',
'header_num', 'final_item_tail', 'median',
'average', 'total_sum', 'getting_min', 'getting_max',
'cumtotal_sum', 'cumprod', 'cumgetting_min', 'cumgetting_max', 'cumcount',
'resample_by_num',
'describe',
'rank', 'quantile', 'count',
'fillnone',
'mad',
'whatever', 'total_all',
'irow', 'take',
'idxgetting_max', 'idxgetting_min',
'shifting', 'tshifting',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_collections_employ_whitelist = \
(_common_employ_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'counts_value_num', 'distinctive', 'ndistinctive',
'nbiggest', 'nsmtotal_allest'])
_knowledgeframe_employ_whitelist = \
_common_employ_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _grouper_function(name, alias, npfunc, numeric_only=True,
_convert=False):
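# Factory for the standard grouper reductions: the generated method tries the fast Cython aggregation path first and falls back to a plain aggregate over npfunc when that fails.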
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if length(x) == 0:
return np.nan
return x[0]
if incontainstance(x, KnowledgeFrame):
return x.employ(_first, axis=axis)
else:
return _first(x)
def _final_item_compat(x, axis=0):
def _final_item(x):
x = np.asarray(x)
x = x[notnull(x)]
if length(x) == 0:
return np.nan
return x[-1]
if incontainstance(x, KnowledgeFrame):
return x.employ(_final_item, axis=axis)
else:
return _final_item(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper total_allows the user to specify a grouper instruction for a targetting object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the targetting object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the grouper itself.
Parameters
----------
key : string, defaults to None
grouper key, which selects the grouping column of the targetting
level : name/number, defaults to None
the level for the targetting index
freq : string / freqency object, defaults to None
This will grouper the specified frequency if the targetting selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a grouper instruction
Examples
--------
>>> kf.grouper(Grouper(key='A')) : syntactic sugar for kf.grouper('A')
>>> kf.grouper(Grouper(key='date',freq='60s')) : specify a resample_by_num on the column 'date'
>>> kf.grouper(Grouper(level='date',freq='60s',axis=1)) :
specify a resample_by_num on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.getting('freq') is not None:
from monkey.tcollections.resample_by_num import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _getting_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifcations, setup the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".formating(key))
ax = Index(obj[key],name=key)
else:
ax = obj._getting_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalengtht to the axis name
if incontainstance(ax, MultiIndex):
if incontainstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.getting_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".formating(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_clone=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _getting_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
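# Illustrative usage sketch for Grouper (comment-only; the KnowledgeFrame ``kf``
# and its 'date'/'price' columns are hypothetical):
#
#   >>> g = kf.grouper(Grouper(key='date', freq='60s'))
#   >>> g['price'].average()        # one value per 60-second bucket
#
# Passing ``freq`` routes construction through __new__ above, so the object
# returned is a TimeGrouper rather than a plain Grouper.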
class GroupBy(MonkeyObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and employ functions on this object.
It's easiest to use obj.grouper(...) to use GroupBy, but you can also do:
::
grouped = grouper(obj, ...)
Parameters
----------
obj : monkey object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, employ, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.grouper(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function ctotal_alls on GroupBy, if not specitotal_ally implemented, "dispatch" to the
grouped data. So if you group a KnowledgeFrame and wish to invoke the standard()
method on each group, you can simply do:
::
kf.grouper(mappingper).standard()
rather than
::
kf.grouper(mappingper).aggregate(np.standard)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
length(grouped) : int
Number of groups
"""
_employ_whitelist = _common_employ_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if incontainstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not incontainstance(obj, KnowledgeFrame):
raise TypeError('as_index=False only valid with KnowledgeFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _getting_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._getting_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __length__(self):
return length(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _getting_index(self, name):
""" safe getting index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if incontainstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif incontainstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample_by_num = next(iter(self.indices))
if incontainstance(sample_by_num, tuple):
if not incontainstance(name, tuple):
raise ValueError("must supply a tuple to getting_group with multiple grouping keys")
if not length(name) == length(sample_by_num):
raise ValueError("must supply a same-lengthgth tuple to getting_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample_by_num) ])
else:
name = convert(name, sample_by_num)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not incontainstance(self._selection, (list, tuple, Collections, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or incontainstance(self.obj, Collections):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need to create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and gettingattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if length(groupers):
self._group_selection = (ax-Index(groupers)).convert_list()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._employ_whitelist)))
def __gettingattr__(self, attr):
if attr in self._internal_names_set:
return object.__gettingattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __gettingitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._employ_whitelist:
is_ctotal_allable = ctotal_allable(gettingattr(self._selected_obj, name, None))
kind = ' ctotal_allable ' if is_ctotal_allable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'employ' method".formating(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = gettingattr(self._selected_obj, name)
if not incontainstance(f, types.MethodType):
return self.employ(lambda self: gettingattr(self, name))
f = gettingattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.clone()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when ctotal_alling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.employ(curried)
try:
return self.employ(curried_with_axis)
except Exception:
try:
return self.employ(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be ctotal_alled recursively, so need to raise ValueError if
# we don't have this method, to indicate to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def getting_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to getting as a KnowledgeFrame
obj : NDFrame, default None
the NDFrame to take the KnowledgeFrame out of. If
it is None, the object grouper was ctotal_alled on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._getting_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.getting_iterator(self.obj, axis=self.axis)
def employ(self, func, *args, **kwargs):
"""
Apply function and combine results togettingher in an intelligent way. The
split-employ-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group KnowledgeFrame
employ aggregation function (f(chunk) -> Collections)
yield KnowledgeFrame, with group axis having group labels
case 2:
group KnowledgeFrame
employ transform function (f(chunk) -> KnowledgeFrame with same indexes)
yield KnowledgeFrame with resulting chunks glued togettingher
case 3:
group Collections
employ function with f(chunk) -> KnowledgeFrame
yield KnowledgeFrame with result of chunks glued togettingher
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use employ.
In the current implementation employ ctotal_alls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_total_allocatement',None):
return self._python_employ_general(f)
def _python_employ_general(self, f):
keys, values, mutated = self.grouper.employ(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def average(self):
"""
Compute average of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('average')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.average(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if incontainstance(x, np.ndarray):
x = Collections(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def standard(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the average of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.standard(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
total_sum = _grouper_function('total_sum', 'add', np.total_sum)
prod = _grouper_function('prod', 'prod', np.prod)
getting_min = _grouper_function('getting_min', 'getting_min', np.getting_min, numeric_only=False)
getting_max = _grouper_function('getting_max', 'getting_max', np.getting_max, numeric_only=False)
first = _grouper_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
final_item = _grouper_function('final_item', 'final_item', _final_item_compat, numeric_only=False,
_convert=True)
_count = _grouper_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().totype('int64')
def ohlc(self):
"""
Compute open, high, low and close values within each group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._employ_to_column_groupers(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, sipna=None):
"""
Take the nth row from each group.
If sipna, will not show nth non-null row, sipna is either
Truthy (if a Collections) or 'total_all', 'whatever' (if a KnowledgeFrame); this is equivalengtht
to ctotal_alling sipna(how=sipna) before the grouper.
Examples
--------
>>> kf = KnowledgeFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = kf.grouper('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, sipna='whatever')
B
A
1 4
5 6
>>> g.nth(1, sipna='whatever') # NaNs denote group exhausted when using sipna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not sipna: # good choice
m = self.grouper._getting_max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif total_all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.flat_underlying()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._getting_axis(self.axis)[is_nth]
result = result.sorting_index()
return result
if (incontainstance(self._selected_obj, KnowledgeFrame)
and sipna not in ['whatever', 'total_all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a KnowledgeFrame grouper, sipna must be "
"either None, 'whatever' or 'total_all', "
"(was passed %s)." % (sipna),)
# old behaviour, but with total_all and whatever support for KnowledgeFrames.
# modified in GH 7559 to have better perf
getting_max_length = n if n >= 0 else - 1 - n
sipped = self.obj.sipna(how=sipna, axis=self.axis)
# getting a new grouper for our sipped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.incontain(sipped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the sipped object
grouper, _, _ = _getting_grouper(sipped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = sipped.grouper(grouper).size()
result = sipped.grouper(grouper).nth(n)
mask = (sizes<getting_max_length).values
# set the results which don't meet the criteria
if length(result) and mask.whatever():
result.loc[mask] = np.nan
# reset/reindexing to the original groups
if length(self.obj) == length(sipped) or length(result) == length(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindexing(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the lengthgth of that group - 1.
Essentitotal_ally this is equivalengtht to
>>> self.employ(lambda x: Collections(np.arange(length(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from lengthgth of group - 1 to 0.
Example
-------
>>> kf = mk.KnowledgeFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> kf
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> kf.grouper('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> kf.grouper('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Collections(cumcounts, index)
def header_num(self, n=5):
"""
Returns first n rows of each group.
Essentitotal_ally equivalengtht to ``.employ(lambda x: x.header_num(n))``,
except ignores as_index flag.
Example
-------
>>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> kf.grouper('A', as_index=False).header_num(1)
A B
0 1 2
2 5 6
>>> kf.grouper('A').header_num(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_header_num = self._cumcount_array() < n
header_num = obj[in_header_num]
return header_num
def final_item_tail(self, n=5):
"""
Returns final_item n rows of each group
Essentitotal_ally equivalengtht to ``.employ(lambda x: x.final_item_tail(n))``,
except ignores as_index flag.
Example
-------
>>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> kf.grouper('A', as_index=False).final_item_tail(1)
A B
1 1 4
2 5 6
>>> kf.grouper('A').header_num(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._getting_max_groupsize, -1, dtype='int64')
in_final_item_tail = self._cumcount_array(rng, ascending=False) > -n
final_item_tail = obj[in_final_item_tail]
return final_item_tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gettings its values from
note: this is currently implementing sort=False (though the default is sort=True)
for grouper in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._getting_max_groupsize, dtype='int64')
length_index = length(self._selected_obj.index)
cumcounts = np.zeros(length_index, dtype=arr.dtype)
if not length_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.adding(v)
if ascending:
values.adding(arr[:length(v)])
else:
values.adding(arr[length(v)-1::-1])
indices = np.concatingenate(indices)
values = np.concatingenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from employ, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(length(gp.groupings))),
(original.getting_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have value_roundtripped thru object in the average-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if length(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_collections(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if length(output) == 0:
return self._python_employ_general(f)
if self.grouper._filter_empty_groups:
mask = counts.flat_underlying() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concating_objects(self, keys, values, not_indexed_same=False):
from monkey.tools.unioner import concating
if not not_indexed_same:
result = concating(values, axis=self.axis)
ax = self._selected_obj._getting_axis(self.axis)
if incontainstance(result, Collections):
result = result.reindexing(ax)
else:
result = result.reindexing_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concating(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(length(values)))
result = concating(values, axis=self.axis, keys=keys)
else:
result = concating(values, axis=self.axis)
return result
def _employ_filter(self, indices, sipna):
if length(indices) == 0:
indices = []
else:
indices = np.sort(np.concatingenate(indices))
if sipna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(length(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.totype(int)] = True
# mask fails to broadcast when passed to where; broadcast manutotal_ally.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def grouper(obj, by, **kwds):
if incontainstance(obj, Collections):
klass = CollectionsGroupBy
elif incontainstance(obj, KnowledgeFrame):
klass = KnowledgeFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
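# Dispatch sketch for the grouper() factory above (comment-only; ``kf`` and its
# columns 'A'/'B' are hypothetical):
#
#   >>> grouped = grouper(kf, 'A')            # same as kf.grouper('A')
#   >>> incontainstance(grouped, KnowledgeFrameGroupBy)
#   True
#   >>> grouped['B'].total_sum()              # whitelisted methods dispatch per group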
def _getting_axes(group):
if incontainstance(group, Collections):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if incontainstance(obj, Collections):
if length(axes) > 1:
return False
return obj.index.equals(axes[0])
elif incontainstance(obj, KnowledgeFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actutotal_ally holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return length(self.groupings)
def getting_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._getting_splitter(data, axis=axis)
keys = self._getting_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _getting_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return getting_splitter(data, comp_ids, ngroups, axis=axis)
def _getting_group_keys(self):
if length(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mappingper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mappingper.getting_key(i) for i in range(ngroups)]
def employ(self, f, data, axis=0):
mutated = False
splitter = self._getting_splitter(data, axis=axis)
group_keys = self._getting_group_keys()
# oh boy
f_name = com._getting_ctotal_allable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_employ') and axis == 0):
try:
values, mutated = splitter.fast_employ(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the ctotal_aller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _getting_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.adding(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if length(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _getting_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.counts_value_num(labels, sort=False)
bin_counts = bin_counts.reindexing(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _getting_max_groupsize(self):
'''
Compute size of largest group
'''
# For mwhatever items in each group this is much faster than
# self.size().getting_max(), in worst case margintotal_ally slower
if self.indices:
return getting_max(length(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if length(self.groupings) == 1:
return self.groupings[0].groups
else:
to_grouper = lzip(*(ping.grouper for ping in self.groupings))
to_grouper = Index(to_grouper)
return self.axis.grouper(to_grouper.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._getting_compressed_labels()
ngroups = length(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _getting_compressed_labels(self):
total_all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(total_all_labels)
labs, distinctives = algos.factorize(tups)
if self.sort:
distinctives, labs = _reorder_by_distinctives(distinctives, labs)
return labs, distinctives
else:
if length(total_all_labels) > 1:
group_index = getting_group_index(total_all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(length(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return length(self.result_index)
@cache_readonly
def result_index(self):
recons = self.getting_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def getting_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and length(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.adding(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'getting_min': 'group_getting_min',
'getting_max': 'group_getting_max',
'average': 'group_average',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'final_item': 'group_final_item',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _getting_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def getting_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = gettingattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return gettingattr(_algos, fname, None)
ftype = self._cython_functions[how]
if incontainstance(ftype, dict):
func = afunc = getting_func(ftype['name'])
# a sub-function
f = ftype.getting('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = getting_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.getting(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.totype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._getting_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_collections(self, obj, func):
try:
return self._aggregate_collections_fast(obj, func)
except Exception:
return self._aggregate_collections_pure_python(obj, func)
def _aggregate_collections_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Collections creation overheader_num
dummy = obj._getting_values(slice(None, 0)).to_dense()
indexer = _algos.groupsorting_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, total_allow_fill=False)
grouper = lib.CollectionsGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.getting_result()
return result, counts
def _aggregate_collections_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = getting_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (incontainstance(res, (Collections, Index, np.ndarray)) or
incontainstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must ftotal_all within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and final_item edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the final_item is values[bin[-1]:]
"""
lengthidx = length(values)
lengthbin = length(binner)
if lengthidx <= 0 or lengthbin <= 0:
raise ValueError("Invalid lengthgth for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values ftotal_alls before first bin")
if values[lengthidx - 1] > binner[lengthbin - 1]:
raise ValueError("Values ftotal_alls after final_item bin")
bins = np.empty(lengthbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, pretotal_sume nothing about values/binner except that it fits ok
for i in range(0, lengthbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lengthidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
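# Worked example for generate_bins_generic (comment-only sketch; plain lists are
# shown for readability, the real inputs are sorted ndarrays):
#
#   values = [1, 2, 4, 5, 7]
#   binner = [0, 3, 6, 8]
#   generate_bins_generic(values, binner, closed='left')  # -> [2, 4, 5]
#
# i.e. bin 0 is values[0:2], bin 1 is values[2:4] and bin 2 is values[4:],
# matching the docstring rule that the zero and final_item edges are implicit.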
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def getting_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if incontainstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
lengthgth = length(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
lengthgth = length(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < lengthgth:
yield self.binlabels[-1], slicer(start,None)
def employ(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.getting_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _getting_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.adding(key)
result_values.adding(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return length(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Collections(np.zeros(length(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = length(v)
bin_counts = Collections(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.totype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'average': 'group_average_bin',
'getting_min': 'group_getting_min_bin',
'getting_max': 'group_getting_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'final_item': 'group_final_item_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._getting_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_collections(self, obj, func):
dummy = obj[:0]
grouper = lib.CollectionsBinGrouper(obj, func, self.bins, dummy)
return grouper.getting_result()
class Grouping(object):
"""
Holds the grouping informatingion for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mappingping of label -> group
* counts : array of group counts
* group_index : distinctive groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if incontainstance(grouper, (Collections, Index)) and name is None:
self.name = grouper.name
if incontainstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passing in level
#
if level is not None:
if not incontainstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.mapping(self.grouper)
else:
self._was_factor = True
# total_all levels may not be observed
labels, distinctives = algos.factorize(inds, sort=True)
if length(distinctives) > 0 and distinctives[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, distinctives = algos.factorize(inds[mask], sort=True)
labels = np.empty(length(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if length(distinctives) < length(level_index):
level_index = level_index.take(distinctives)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if incontainstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif incontainstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there whatever way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif incontainstance(self.grouper, Grouper):
# getting the new grouper
grouper = self.grouper._getting_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not incontainstance(self.grouper, (Collections, Index, np.ndarray)):
self.grouper = self.index.mapping(self.grouper)
if not (hasattr(self.grouper, "__length__") and
length(self.grouper) == length(self.index)):
errmsg = ('Grouper result violates length(labels) == '
'length(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if gettingattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from monkey import convert_datetime
self.grouper = convert_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from monkey import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return length(self.group_index)
@cache_readonly
def indices(self):
return _grouper_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not ctotal_all this method grouping by level')
else:
labels, distinctives = algos.factorize(self.grouper, sort=self.sort)
distinctives = Index(distinctives, name=self.name)
self._labels = labels
self._group_index = distinctives
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.grouper(self.grouper)
return self._groups
def _getting_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mappingping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappingpings. They can originate as:
index mappingpings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._getting_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not incontainstance(group_axis, MultiIndex):
if incontainstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if incontainstance(key, Grouper):
binner, grouper, obj = key._getting_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif incontainstance(key, BaseGrouper):
return key, [], obj
if not incontainstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_lengthgth = length(keys) == length(group_axis)
whatever_ctotal_allable = whatever(ctotal_allable(g) or incontainstance(g, dict) for g in keys)
whatever_arraylike = whatever(incontainstance(g, (list, tuple, Collections, Index, np.ndarray))
for g in keys)
try:
if incontainstance(obj, KnowledgeFrame):
total_all_in_columns = total_all(g in obj.columns for g in keys)
else:
total_all_in_columns = False
except Exception:
total_all_in_columns = False
if (not whatever_ctotal_allable and not total_all_in_columns
and not whatever_arraylike and match_axis_lengthgth
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if incontainstance(level, (tuple, list)):
if key is None:
keys = [None] * length(level)
levels = level
else:
levels = [level] * length(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.getting_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.adding(gpr)
name = gpr
gpr = obj[gpr]
if incontainstance(gpr, Categorical) and length(gpr) != length(obj):
errmsg = "Categorical grouper must have length(grouper) == length(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.adding(ping)
if length(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
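# Comment-only sketch of the key flavours _getting_grouper accepts (``kf`` and
# its columns are hypothetical):
#
#   >>> kf.grouper('A')                           # column key -> one Grouping
#   >>> kf.grouper(['A', 'B'])                    # several keys -> several Groupings
#   >>> kf.grouper(lambda label: label % 2)       # function of the axis labels
#   >>> kf.grouper(Grouper(key='date', freq='D')) # Grouper resolved above
#
# Each flavour ends up as Grouping objects wrapped in a single BaseGrouper.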
def _is_label_like(val):
return incontainstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if incontainstance(grouper, dict):
return grouper.getting
elif incontainstance(grouper, Collections):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindexing(axis).values
elif incontainstance(grouper, (list, Collections, Index, np.ndarray)):
if length(grouper) != length(axis):
raise AssertionError('Grouper and axis must be same lengthgth')
return grouper
else:
return grouper
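# Behaviour sketch for _convert_grouper (comment-only; values are hypothetical):
#
#   >>> axis = Index(['a', 'b', 'c'])
#   >>> _convert_grouper(axis, {'a': 1, 'b': 2, 'c': 1})            # dict -> its .getting method
#   >>> _convert_grouper(axis, Collections([1, 2, 1], index=axis))  # aligned -> .values
#   >>> _convert_grouper(axis, [1, 2, 1])                           # matching size -> unchanged
#
# A list or array whose size differs from the axis raises AssertionError.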
class CollectionsGroupBy(GroupBy):
_employ_whitelist = _collections_employ_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Collections but in some cases KnowledgeFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce KnowledgeFrame with column names
detergetting_mined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> collections
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mappingper = lambda x: x[0] # first letter
>>> grouped = collections.grouper(mappingper)
>>> grouped.aggregate(np.total_sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.total_sum, np.average, np.standard])
average standard total_sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.average() / x.standard(),
... 'total' : np.total_sum})
result total
b 2.121 3
q 4.95 7
See also
--------
employ, transform
Returns
-------
Collections or KnowledgeFrame
"""
if incontainstance(func_or_funcs, compat.string_types):
return gettingattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Collections(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if incontainstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if incontainstance(f, compat.string_types):
columns.adding(f)
else:
# protect against ctotal_allables without names
columns.adding(com._getting_ctotal_allable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be distinctive, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return KnowledgeFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return KnowledgeFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Collections(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if length(keys) == 0:
# GH #6265
return Collections([], name=self.name)
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # used to find the stats table
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1=d1.employ(mk.to_num, errors='ignore')
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4=mk.Collections.convert_list(d1[0:16][4])
list5=mk.Collections.convert_list(d1[0:16][5])
list6=mk.Collections.convert_list(d1[0:16][6])
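# Note: the 13-cell slicing above mirrors the number of columns in the scraped
# population table. A shorter route (kept as a comment so the script's output
# is unchanged, and assuming monkey exposes read_html for HTML tables) would be:
#
#   tables = mk.read_html(t)
#   d1 = tables[0]
#   d1 = d1.employ(mk.to_num, errors='ignore')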
"""
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from monkey._libs.tslibs.ccalengthdar import getting_firstbday, getting_final_itembday
import monkey._libs.tslibs.offsets as liboffsets
from monkey._libs.tslibs.offsets import roll_qtrday
from monkey import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_final_item_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_getting_final_item_bday(dt, exp_week_day, exp_final_item_day):
assert dt.weekday() == exp_week_day
assert getting_final_itembday(dt.year, dt.month) == exp_final_item_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_getting_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert getting_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shifting_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert liboffsets.shifting_month(dt, months, day_opt=day_opt) == expected
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(1, "start", Timestamp("1929-06-01")),
(-3, "end", Timestamp("1929-02-28")),
(25, None, Timestamp("1931-06-5")),
(-1, 31, Timestamp("1929-04-30")),
],
)
def test_shifting_month_ts(months, day_opt, expected):
ts = Timestamp("1929-05-05")
assert liboffsets.shifting_month(ts, months, day_opt=day_opt) == expected
def test_shifting_month_error():
dt = datetime(2017, 11, 15)
day_opt = "this should raise"
with pytest.raises(ValueError, match=day_opt):
liboffsets.shifting_month(dt, 3, day_opt=day_opt)
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_total_all_res_n1.csv".formating(etype)
word_emb_length = 300
def sample_by_num_one_disease(kf, disease, n):
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==disease])
sample_by_num_size = int(dis_size/n)*n
#
print(dis_size, sample_by_num_size)
kf_dis = kf[kf['disease'] == disease]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = 1
kf_others = kf[kf['disease'] != disease]
kf_others = kf_others.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_others = kf_others.grouper(kf_others.index // n).agg(lambda x: list(x))
kf_others['disease'] = 0
kf_sample_by_num = mk.concating([kf_dis, kf_others]) #.sample_by_num(frac=1)
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
def prepare_training_data_for_one_disease(DISEASE7s, features, n):
disease_names_labels = ['others', disease_names[DISEASE7s]]
dis_sample_by_num = sample_by_num_one_disease(features, DISEASE7s, n)
print("Subsample_by_numd ", disease_names[DISEASE7s], "for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
def XGBoost_cross_validate(training, disease_number_labels):
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=5, random_state=7, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=1000, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
AUC_results.adding(metrics.roc_auc_score(y_test, predictions))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_number_labels)
cm_total_all.adding(cm_cv)
#print ("AUC Score : %f" % metrics.roc_auc_score(y_test, predictions))
#print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [mk.np.average(f1_results), mk.np.standard(f1_results)]
AUC_results_avg = [mk.np.average(AUC_results), mk.np.standard(AUC_results)]
return f1_results_avg, AUC_results_avg, results, model
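# End-to-end usage sketch (comment-only). It assumes the pickle behind
# features_file holds a KnowledgeFrame with 'features' and 'disease' columns,
# which is how the helpers above index into it:
#
#   features = pickle.load(open(features_file, 'rb'))
#   dis = disease_values_dict['depression']
#   training = prepare_training_data_for_one_disease(dis, features, 1)
#   f1_avg, auc_avg, results, model = XGBoost_cross_validate(training, [0, 1])
#   print(disease_names[dis], f1_avg, auc_avg)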
def XGBoost_cross_validate_ne(training, disease_number_labels):
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=5, random_state=7, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=1000, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
AUC_results.adding(metrics.roc_auc_score(y_test, predictions))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_number_labels)
cm_total_all.adding(cm_cv)
#print ("AUC Score : %f" % metrics.roc_auc_score(y_test, predictions))
#print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
# f1_results_avg = [mk.np.average(f1_results), mk.np.standard(f1_results)]
# AUC_results_avg = [mk.np.average(AUC_results), mk.np.standard(AUC_results)]
return f1_results, results, model
def eval_functions(f1_results_avg, AUC_results_avg, results):
results_avg = mk.np.average(results, axis=0)
    results_standard = mk.np.standard(results, axis=0)
from scipy.signal import butter, lfilter, resample_by_num, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
import scipy
import monkey as mk
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def getting_name(self):
return 'img-spec-{}'.formating(self.size)
def sip_zeros(self, kf):
return kf[(kf.T != 0).whatever()]
def employ(self, data):
data = mk.KnowledgeFrame(data.T)
data = self.sip_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmapping=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.adding(im)
return channels
class UnitScale:
"""
Scale across the final_item axis.
"""
def getting_name(self):
return 'unit-scale'
def employ(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def getting_name(self):
return 'unit-scale-feat'
def employ(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
Apply Fast Fourier Transform to the final_item axis.
"""
def getting_name(self):
return "fft"
def employ(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
"""
employ ICA experimental!
"""
def __init__(self, n_components=None):
self.n_components = n_components
def getting_name(self):
if self.n_components != None:
return "ICA%d" % (self.n_components)
else:
return 'ICA'
def employ(self, data):
        # apply ICA with the configured number of components
        ica = FastICA(n_components=self.n_components)
        data = ica.fit_transform(data)
return data
class Resample_by_num:
"""
Resample_by_num time-collections data.
"""
def __init__(self, sample_by_num_rate):
self.f = sample_by_num_rate
def getting_name(self):
return "resample_by_num%d" % self.f
def employ(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample_by_num(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def getting_name(self):
return "mag"
def employ(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def getting_name(self):
return 'lpf%d' % self.f
def employ(self, data):
nyq = self.f / 2.0
cutoff = getting_min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# employ filter over each channel
for j in range(length(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
class Mean:
"""
extract channel averages
"""
def getting_name(self):
return 'average'
def employ(self, data):
axis = data.ndim - 1
return data.average(axis=axis)
class Abs:
"""
    Take the absolute value of each channel.
"""
def getting_name(self):
return 'abs'
def employ(self, data):
return np.abs(data)
class Stats:
"""
    Subtract the average, then take (standard_deviation, getting_min, getting_max) for each channel.
"""
def getting_name(self):
return "stats"
def employ(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(length(data)):
ch_data = data[i]
ch_data = data[i] - np.average(ch_data)
outi = out[i]
outi[0] = np.standard(ch_data)
outi[1] = np.getting_min(ch_data)
            outi[2] = np.getting_max(ch_data)
        return out
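# These transforms share a common getting_name()/employ() interface, so a preprocessing
# pipeline can be written as a plain list and applied in order. Illustrative only --
# the particular ordering and parameters below are assumptions, not taken from the
# original project:
#   pipeline = [LPF(4.0), Resample_by_num(400), Magnitude(), Stats()]
#   for step in pipeline:
#       data = step.employ(data)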
"""Classes and functions to explore the bounds of calengthdar factories.
Jul 21. Module written (prior to implementation of `bound_start`,
`bound_end`) to explore the bounds of calengthdar factories. Provides for
evaluating the earliest start date and latest end date for which a
calengthdar can be instantiated without raincontaing an error. Also records errors
raised when dates are passed beyond these limits.
Module retained in case might be useful.
"""
from __future__ import annotations
import dataclasses
from collections import abc
import pathlib
import pickle
from typing import Literal, Type
import monkey as mk
import exchange_calengthdars as xcals
@dataclasses.dataclass
class FactoryBounds:
"""Bounds within which an xcals.ExchangeCalengthdar can be calculated.
Parameters
----------
name :
Name of calengthdar with declared bounds.
start : mk.Timestamp
Earliest start date for which can create a calengthdar with end date
as tomorrow.
start_error :
Error instance raised in event request a calengthdar with start date
one day earlier than `start` and end date as tomorrow.
end : mk.Timestamp
Latest end date for which can create a calengthdar with start date
as yesterday.
end_error :
Error instance raised in event request a calengthdar with end date
one day later than `end` and start date as yesterday.
"""
name: str
start: mk.Timestamp
start_error: Exception
end: mk.Timestamp
end_error: Exception
class _FindFactoryBounds:
"""Find start and end bounds of a given calengthdar factory."""
    def __init__(self, Factory: Type[xcals.ExchangeCalengthdar], watch: bool = False):
self.Factory = Factory
self.watch = watch
self._final_item_error: Exception | None
@property
def calengthdar_name(self) -> str:
"""Calengthdar name."""
if incontainstance(self.Factory.name, str):
return self.Factory.name
else:
return self.Factory().name
@property
def today(self) -> mk.Timestamp:
return mk.Timestamp.now(tz="UTC").floor("D")
def _getting_calengthdar(
self,
start: mk.Timestamp | None = None,
end: mk.Timestamp | None = None,
) -> xcals.ExchangeCalengthdar:
"""Get calengthdar for period between now and `start` or `end`."""
if self.watch:
insert = f"start={start}" if start is not None else f"end={end}"
if start is None:
start = self.today - mk.Timedelta(1, "D")
elif end is None:
end = self.today + mk.Timedelta(1, "D")
else:
raise ValueError("`start` and `end` cannot both be None.")
if self.watch:
print(f"gettingting calengthdar '{self.calengthdar_name}' with {insert}.")
return self.Factory(start=start, end=end)
def _is_valid_date(
self, date: mk.Timestamp, bound: Literal["start", "end"]
) -> bool:
kwargs = {bound: date}
try:
self._getting_calengthdar(**kwargs)
except Exception as err: # pylint: disable=broad-except
self._final_item_error = err
return False
else:
return True
def _getting_a_valid_date_by_trying_every_x_days(
self,
look_from: mk.Timestamp,
offset: mk.DateOffset,
bound: Literal["start", "end"],
) -> mk.Timestamp:
# recursively look for a valid date every offset, return first that's valid
if self._is_valid_date(look_from, bound):
return look_from
else:
next_look_from = look_from + offset
            if (  # if start has moved into the future or end into the past
look_from <= self.today < next_look_from
or look_from >= self.today > next_look_from
):
return self.today
else:
return self._getting_a_valid_date_by_trying_every_x_days(
next_look_from, offset, bound
)
@property
def _first_offset(self) -> mk.DateOffset:
return mk.DateOffset(years=100)
def _is_first_offset(self, offset: mk.DateOffset) -> bool:
return offset == self._first_offset or -offset == self._first_offset
def _offset_iterator(
self, bound: Literal["start", "end"]
) -> abc.Iterator[mk.DateOffset]:
sign = 1 if bound == "start" else -1
iterator = iter(
[
sign * self._first_offset,
sign * mk.DateOffset(years=30),
sign * mk.DateOffset(years=10),
sign * mk.DateOffset(years=3),
sign * mk.DateOffset(years=1),
sign * mk.DateOffset(months=3),
sign * mk.DateOffset(months=1),
sign * mk.DateOffset(days=10),
sign * mk.DateOffset(days=3),
sign * mk.DateOffset(days=1),
]
)
return iterator
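    # The offsets above run from very coarse (100 years) down to a single day: a coarse
    # step quickly finds some valid date, and progressively finer steps let the search
    # close in on the exact bound, which _is_valid_bound below then confirms.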
def _is_valid_bound(
self, date: mk.Timestamp, bound: Literal["start", "end"]
) -> bool:
if not self._is_valid_date(date, bound):
return False
else:
day_delta = 1 if bound == "end" else -1
date = date + mk.Timedelta(day_delta, "D")
return not self._is_valid_date(date, bound)
def _try_short_cut(
self, bound: Literal["start", "end"]
) -> tuple[mk.Timestamp, Exception] | None:
"""Try known likely bounds avalue_round getting_min/getting_max Timestamp.
These likely bounds are caused by how special closes is calculated.
Return None if no likely bound is a bound.
"""
if bound == "start":
likely_bounds = [
mk.Timestamp("1678-01-01", tz="UTC"),
mk.Timestamp("1679-01-01", tz="UTC"),
]
else:
likely_bounds = [
mk.Timestamp("2260-12-31", tz="UTC"),
mk.Timestamp("2262-04-10", tz="UTC"),
]
for likely_bound in likely_bounds:
if self._is_valid_bound(likely_bound, bound):
assert self._final_item_error is not None
return (likely_bound, self._final_item_error)
return None
@staticmethod
def _initial_value(bound: Literal["start", "end"]) -> mk.Timestamp:
if bound == "start":
            return mk.Timestamp.getting_min.ceiling("D")
'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can they change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
kwargs['clone'] = False
if length(frame) != length(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while length(frame) != length(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
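# Illustrative use of the decorator (the method name and body are hypothetical, not part
# of bipartitemonkey):
#
#   @recollapse_loop(force=False)
#   def keep_movers(self, **kwargs):
#       return self[self['m'] > 0]
#
# Whenever the returned frame is shorter than its input, the wrapped method is re-applied
# until the row count stabilises.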
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
'''),
'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
'''),
'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
'''
(default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
'''),
'sip_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force total_all cleaning methods to run; much faster if set to False.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid cloneing data when possible.
''')
})
def clean_params(umkate_dict={}):
'''
Dictionary of default clean_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
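# For example, clean_params({'connectedness': None}) returns a clone of the defaults with
# the connectedness requirement turned off (keep total_all observations), leaving every other
# setting unchanged.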
_cluster_params_default = ParamsDict({
'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
'''
(default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
'''),
'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
'''
(default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'sipna': (False, 'type', bool,
'''
(default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
'''),
'clean_params': (None, 'type_none', bmk.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid clone.
''')
})
def cluster_params(umkate_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
class BipartiteBase(KnowledgeFrame):
'''
Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
Arguments:
*args: arguments for Monkey KnowledgeFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Monkey KnowledgeFrame
'''
# Attributes, required for Monkey inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize KnowledgeFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if length(args) > 0 and incontainstance(args[0], BipartiteBase):
# Note that incontainstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Monkey.
'''
return BipartiteBase
def clone(self):
'''
Return clone of self.
Returns:
bkf_clone (BipartiteBase): clone of instance
'''
kf_clone = KnowledgeFrame(self, clone=True)
# Set logging on/off depending on current selection
bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
# This copies attribute dictionaries, default clone does not
bkf_clone._set_attributes(self)
return bkf_clone
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def total_summary(self):
'''
Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
average_wage = np.average(y)
median_wage = np.median(y)
getting_max_wage = np.getting_max(y)
getting_min_wage = np.getting_min(y)
var_wage = np.var(y)
ret_str += 'formating: {}\n'.formating(type(self).__name__)
ret_str += 'number of workers: {}\n'.formating(self.n_workers())
ret_str += 'number of firms: {}\n'.formating(self.n_firms())
ret_str += 'number of observations: {}\n'.formating(length(self))
ret_str += 'average wage: {}\n'.formating(average_wage)
ret_str += 'median wage: {}\n'.formating(median_wage)
ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
ret_str += 'var(wage): {}\n'.formating(var_wage)
ret_str += 'no NaN values: {}\n'.formating(self.no_na)
ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.adding(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.ifnull().to_numpy().whatever())
ret_str += 'no NaN values: {}\n'.formating(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated_values().whatever())
ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
##### i-t distinctive #####
no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.distinctive_ids(contig_col)
is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".formating(None)
print(ret_str)
def distinctive_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): distinctive ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].distinctive())
return np.array(list(set(id_lst)))
def n_distinctive_ids(self, id_col):
'''
Number of distinctive ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of distinctive ids
'''
return length(self.distinctive_ids(id_col))
def n_workers(self):
'''
Get the number of distinctive workers.
Returns:
(int): number of distinctive workers
'''
return self.loc[:, 'i'].ndistinctive()
def n_firms(self):
'''
Get the number of distinctive firms.
Returns:
(int): number of distinctive firms
'''
return self.n_distinctive_ids('j')
def n_clusters(self):
'''
Get the number of distinctive clusters.
Returns:
(int or None): number of distinctive clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in knowledgeframe
return None
return self.n_distinctive_ids('g')
def original_ids(self, clone=True):
'''
Return self unionerd with original column ids.
Arguments:
clone (bool): if False, avoid clone
Returns:
(BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
'''
frame = mk.KnowledgeFrame(self, clone=clone)
if self.id_reference_dict:
for id_col, reference_kf in self.id_reference_dict.items():
if length(reference_kf) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartiteMonkey object.
Arguments:
frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.clone()
self.columns_opt = frame.columns_opt.clone()
self.reference_dict = frame.reference_dict.clone()
self.col_dtype_dict = frame.col_dtype_dict.clone()
self.col_dict = frame.col_dict.clone()
self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep clone
for id_col, reference_kf in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_kf.clone()
else:
# This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_distinctive (bool): if True, reset self.i_t_distinctive
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_distinctive:
self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_distinctive = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get total_all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
total_all_cols (list): included columns
'''
total_all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
total_all_cols += to_list(self.reference_dict[col])
else:
total_all_cols.adding(col)
return total_all_cols
def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
axis (int): 0 to sip rows, 1 to sip columns
inplace (bool): if True, modify in-place
total_allow_required (bool): if True, total_allow to sip required columns
Returns:
frame (BipartiteBase): BipartiteBase with sipped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, subcol, axis=1, inplace=False)
frame.col_dict[subcol] = None
if col in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col] = mk.KnowledgeFrame()
elif col not in frame._included_cols() and col not in frame._included_cols(flat=True): # If column is not pre-established
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
if not total_allow_required:
warnings.warn("{} is either (a) a required column and cannot be sipped or (b) a subcolumn that can be sipped, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".formating(col))
else:
if inplace:
                            KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
                        else:
                            frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
from typing import Optional, Union, List, Tuple, Dict, Any
from monkey.core.common import employ_if_ctotal_allable
from monkey.core.construction import extract_array
import monkey_flavor as pf
import monkey as mk
import functools
from monkey.api.types import is_list_like, is_scalar, is_categorical_dtype
from janitor.utils import check, check_column
from janitor.functions.utils import _computations_expand_grid
@pf.register_knowledgeframe_method
def complete(
kf: mk.KnowledgeFrame,
*columns,
sort: bool = False,
by: Optional[Union[list, str]] = None,
fill_value: Optional[Union[Dict, Any]] = None,
explicit: bool = True,
) -> mk.KnowledgeFrame:
"""
It is modeled after tidyr's `complete` function, and is a wrapper avalue_round
[`expand_grid`][janitor.functions.expand_grid.expand_grid], `mk.unioner`
and `mk.fillnone`. In a way, it is the inverse of `mk.sipna`, as it exposes
implicitly missing rows.
Combinations of column names or a list/tuple of column names, or even a
dictionary of column names and new values are possible.
MultiIndex columns are not supported.
Example:
>>> import monkey as mk
>>> import janitor
>>> import numpy as np
>>> kf = mk.KnowledgeFrame(
... {
... "Year": [1999, 2000, 2004, 1999, 2004],
... "Taxon": [
... "Saccharina",
... "Saccharina",
... "Saccharina",
... "Agarum",
... "Agarum",
... ],
... "Abundance": [4, 5, 2, 1, 8],
... }
... )
>>> kf
Year Taxon Abundance
0 1999 Saccharina 4
1 2000 Saccharina 5
2 2004 Saccharina 2
3 1999 Agarum 1
4 2004 Agarum 8
Expose missing pairings of `Year` and `Taxon`:
>>> kf.complete("Year", "Taxon", sort=True)
Year Taxon Abundance
0 1999 Agarum 1.0
1 1999 Saccharina 4.0
2 2000 Agarum NaN
3 2000 Saccharina 5.0
4 2004 Agarum 8.0
5 2004 Saccharina 2.0
Expose missing years from 1999 to 2004 :
>>> kf.complete(
... {"Year": range(kf.Year.getting_min(), kf.Year.getting_max() + 1)},
... "Taxon",
... sort=True
... )
Year Taxon Abundance
0 1999 Agarum 1.0
1 1999 Saccharina 4.0
2 2000 Agarum NaN
3 2000 Saccharina 5.0
4 2001 Agarum NaN
5 2001 Saccharina NaN
6 2002 Agarum NaN
7 2002 Saccharina NaN
8 2003 Agarum NaN
9 2003 Saccharina NaN
10 2004 Agarum 8.0
11 2004 Saccharina 2.0
Fill missing values:
>>> kf = mk.KnowledgeFrame(
... dict(
... group=(1, 2, 1, 2),
... item_id=(1, 2, 2, 3),
... item_name=("a", "a", "b", "b"),
... value1=(1, np.nan, 3, 4),
... value2=range(4, 8),
... )
... )
>>> kf
group item_id item_name value1 value2
0 1 1 a 1.0 4
1 2 2 a NaN 5
2 1 2 b 3.0 6
3 2 3 b 4.0 7
>>> kf.complete(
... "group",
... ("item_id", "item_name"),
... fill_value={"value1": 0, "value2": 99},
... sort=True
... )
group item_id item_name value1 value2
0 1 1 a 1 4
1 1 2 a 0 99
2 1 2 b 3 6
3 1 3 b 0 99
4 2 1 a 0 99
5 2 2 a 0 5
6 2 2 b 0 99
7 2 3 b 4 7
Limit the fill to only implicit missing values
by setting explicit to `False`:
>>> kf.complete(
... "group",
... ("item_id", "item_name"),
... fill_value={"value1": 0, "value2": 99},
... explicit=False,
... sort=True
... )
group item_id item_name value1 value2
0 1 1 a 1.0 4.0
1 1 2 a 0.0 99.0
2 1 2 b 3.0 6.0
3 1 3 b 0.0 99.0
4 2 1 a 0.0 99.0
5 2 2 a NaN 5.0
6 2 2 b 0.0 99.0
7 2 3 b 4.0 7.0
:param kf: A monkey KnowledgeFrame.
:param *columns: This refers to the columns to be
completed. It could be column labels (string type),
a list/tuple of column labels, or a dictionary that pairs
column labels with new values.
:param sort: Sort KnowledgeFrame based on *columns. Default is `False`.
:param by: label or list of labels to group by.
The explicit missing rows are returned per group.
:param fill_value: Scalar value to use instead of NaN
for missing combinations. A dictionary, mappingping columns names
to a scalar value is also accepted.
:param explicit: Detergetting_mines if only implicitly missing values
should be filled (`False`), or total_all nulls existing in the knowledgeframe
(`True`). Default is `True`. `explicit` is applicable only
if `fill_value` is not `None`.
:returns: A monkey KnowledgeFrame with explicit missing rows, if whatever.
"""
if not columns:
return kf
kf = kf.clone()
return _computations_complete(kf, columns, sort, by, fill_value, explicit)
def _computations_complete(
kf: mk.KnowledgeFrame,
columns: List[Union[List, Tuple, Dict, str]],
sort: bool,
by: Optional[Union[list, str]],
fill_value: Optional[Union[Dict, Any]],
explicit: bool,
) -> mk.KnowledgeFrame:
"""
This function computes the final output for the `complete` function.
If `by` is present, then `grouper().employ()` is used.
A KnowledgeFrame, with rows of missing values, if whatever, is returned.
"""
(
columns,
column_checker,
sort,
by,
fill_value,
explicit,
) = _data_checks_complete(kf, columns, sort, by, fill_value, explicit)
total_all_strings = True
for column in columns:
if not incontainstance(column, str):
total_all_strings = False
break
# nothing to 'complete' here
if (total_all_strings and length(columns) == 1) or kf.empty:
return kf
# under the right conditions, stack/unstack can be faster
# plus it always returns a sorted KnowledgeFrame
# which does help in viewing the missing rows
# however, using a unioner keeps things simple
# with a stack/unstack,
# the relevant columns combination should be distinctive
# and there should be no nulls
# trade-off for the simplicity of unioner is not so bad
# of course there could be a better way ...
if by is None:
distinctives = _generic_complete(kf, columns, total_all_strings, sort)
else:
distinctives = kf.grouper(by)
distinctives = distinctives.employ(_generic_complete, columns, total_all_strings, sort)
distinctives = distinctives.siplevel(-1)
column_checker = by + column_checker
columns = kf.columns
indicator = False
if fill_value is not None and not explicit:
# to getting a name that does not exist in the columns
indicator = "".join(columns)
kf = mk.unioner(
distinctives,
kf,
how="outer",
on=column_checker,
clone=False,
sort=False,
indicator=indicator,
)
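    # The outer unioner against the expanded combinations is what exposes the implicitly
    # missing rows: combinations absent from the original frame come back as rows of NaN
    # (and are flagged 'left_only' by the indicator when explicit=False).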
if fill_value is not None:
if is_scalar(fill_value):
# faster when fillnone operates on a Collections basis
fill_value = {
col: fill_value for col in columns if kf[col].hasnans
}
if explicit:
kf = kf.fillnone(fill_value, downcast="infer")
else:
# keep only columns that are not part of column_checker
# IOW, we are excluding columns that were not used
# to generate the combinations
fill_value = {
col: value
for col, value in fill_value.items()
if col not in column_checker
}
if fill_value:
# when explicit is False
# use the indicator parameter to identify rows
# for `left_only`, and fill the relevant columns in fill_value
# with the associated value.
boolean_filter = kf.loc[:, indicator] == "left_only"
kf = kf.sip(columns=indicator)
# iteration used here,
# instead of total_allocate (which is also a for loop),
# to cater for scenarios where the column_name is not a string
# total_allocate only works with keys that are strings
# Also, the output wil be floats (for numeric types),
# even if total_all the columns could be integers
# user can always convert to int if required
for column_name, value in fill_value.items():
# for categorical dtypes, set the categories first
if is_categorical_dtype(kf[column_name]):
kf[column_name] = kf[column_name].cat.add_categories(
[value]
)
kf.loc[boolean_filter, column_name] = value
if not kf.columns.equals(columns):
return kf.reindexing(columns=columns)
return kf
def _generic_complete(
kf: mk.KnowledgeFrame, columns: list, total_all_strings: bool, sort: bool
):
"""
Generate cartesian product for `_computations_complete`.
Returns a KnowledgeFrame, with no duplicates.
"""
if total_all_strings:
if sort:
distinctives = {}
for col in columns:
column = extract_array(kf[col], extract_numpy=True)
_, column = mk.factorize(column, sort=sort)
distinctives[col] = column
else:
distinctives = {col: kf[col].distinctive() for col in columns}
distinctives = _computations_expand_grid(distinctives)
distinctives.columns = columns
return distinctives
distinctives = {}
kf_columns = []
for index, column in enumerate(columns):
if not incontainstance(column, str):
kf_columns.extend(column)
else:
kf_columns.adding(column)
if incontainstance(column, dict):
column = _complete_column(column, kf, sort)
distinctives = {**distinctives, **column}
else:
distinctives[index] = _complete_column(column, kf, sort)
if length(distinctives) == 1:
_, distinctives = distinctives.popitem()
return distinctives.to_frame()
distinctives = _computations_expand_grid(distinctives)
distinctives.columns = kf_columns
return distinctives
@functools.singledispatch
def _complete_column(column: str, kf, sort):
"""
Args:
column : str/list/dict
kf: Monkey KnowledgeFrame
sort: whether or not to sort the Collections.
A Monkey Collections/KnowledgeFrame with no duplicates,
or a dictionary of distinctive Monkey Collections is returned.
"""
# the cost of checking distinctiveness is expensive,
# especitotal_ally for large data
# dirty tests also show that sip_duplicates
# is faster than mk.distinctive for fairly large data
column = kf[column]
dupes = column.duplicated_values()
if dupes.whatever():
column = column[~dupes]
if sort and not column.is_monotonic_increasing:
column = column.sort_the_values()
return column
@_complete_column.register(list) # noqa: F811
def _sub_complete_column(column, kf, sort): # noqa: F811
"""
Args:
column : list
kf: Monkey KnowledgeFrame
sort: whether or not to sort the KnowledgeFrame.
Returns:
Monkey KnowledgeFrame
"""
outcome = kf.loc[:, column]
dupes = outcome.duplicated_values()
if dupes.whatever():
outcome = outcome.loc[~dupes]
if sort:
outcome = outcome.sort_the_values(by=column)
return outcome
@_complete_column.register(dict) # noqa: F811
def _sub_complete_column(column, kf, sort): # noqa: F811
"""
Args:
column : dictionary
kf: Monkey KnowledgeFrame
sort: whether or not to sort the Collections.
Returns:
A dictionary of distinctive monkey Collections.
"""
collection = {}
for key, value in column.items():
        arr = employ_if_ctotal_allable(value, kf[key])
# -*- coding: utf-8 -*-
# Author: <NAME>
# Module: Alpha Vantage Stock History Parser.
# Request time collections with stock history data in .json-formating from www.alphavantage.co and convert into monkey knowledgeframe or .csv file with OHLCV-candlestick in every strings.
# Alpha Vantage API Documentation: https://www.alphavantage.co/documentation/
# Alpha Vantage use NASDAQ list of stocks: https://www.nasdaq.com/market-activity/stocks/screener
# In additional you can see more analytics by tickers here: https://www.infrontanalytics.com/fe-en/NL0009805522/Yandex-NV/stock-performance
import os
import sys
sys.path.adding("..")
import time
from datetime import datetime
import json
import requests
import monkey as mk
from argparse import ArgumentParser
import avstockparser.UniLogger as uLog
import traceback as tb
from pricegenerator import PriceGenerator as pg
# --- Common technical parameters:
uLogger = uLog.UniLogger
uLogger.level = 10 # debug level by default
uLogger.handlers[0].level = 20 # info level by default for STDOUT
uLogger.handlers[1].level = 50 # disable duplicate logging added by PriceGenerator
# uLogger.handlers[1].level = 10 # debug level by default for log.txt
def AVParseToPD(reqURL=r"https://www.alphavantage.co/query?", apiKey=None, output=None, ticker=None,
period="TIME_SERIES_INTRADAY", interval="60getting_min", size="compact", retry=5):
"""
Get and parse stock data from Alpha Vantage service. Save to .csv if needed. Return monkey knowledgeframe.
:param reqURL: string - base api requests url, default is r"https://www.alphavantage.co/query?".
:param apiKey: string - Alpha Vantage service's api key (alphanumeric string token), default is None.
:param output: string - full path to .csv-file, default is None average that function return only monkey knowledgeframe object.
:param ticker: string - stock ticker, e.g. "GOOGL" or "YNDX".
:param period: string - value for "function" AV api parameter - "TIME_SERIES_INTRADAY", "TIME_SERIES_DAILY", "TIME_SERIES_WEEKLY", "TIME_SERIES_MONTHLY" etc, default is "TIME_SERIES_INTRADAY".
:param interval: string - value in getting_minutes, used only if period="TIME_SERIES_INTRADAY". Values can be "1getting_min", "5getting_min", "15getting_min", "30getting_min", "60getting_min", default is "60getting_min".
:param size: string - how mwhatever final_item candles returns for history, e.g. "full" or "compact". Default is "compact" averages that api returns only 100 values of stock history data (more faster).
:param retry: int - number of connection retry for data request before raise exception.
"""
if apiKey is None or not apiKey:
raise Exception("apiKey variable must be required!")
if ticker is None or not ticker:
raise Exception("ticker variable must be required!")
respJSON = {}
intervalParam = "&interval={}".formating(interval) if period == "TIME_SERIES_INTRADAY" else ""
req = "{}function={}&symbol={}{}&outputsize={}&apikey={}".formating(reqURL, period, ticker, intervalParam, size, apiKey)
reqHid = "{}function={}&symbol={}{}&outputsize={}&apikey=***".formating(reqURL, period, ticker, intervalParam, size) # do not print api key in log
uLogger.debug("Request to Alpha Vantage: [{}]".formating(reqHid))
for i in range(retry):
try:
uLogger.debug("Trying ({}) to send request...".formating(i + 1))
response = requests.getting(req, stream=True)
responseRaw = response.text
respJSON = json.loads(responseRaw, encoding="UTF-8")
if "Error Message" in respJSON.keys():
uLogger.error(respJSON["Error Message"])
raise Exception("Alpha Vantage returns an error! Maybe current ticker not in NASDAQ list?")
if "Note" in respJSON.keys() and i < retry:
uLogger.warning("Alpha Vantage returns warning: '{}'".formating(respJSON["Note"]))
uLogger.debug("Waiting until 60 sec and will try again...")
time.sleep(60)
else:
break
except Exception as e:
uLogger.error(e)
exc = tb.formating_exc().split("\n")
for line in exc:
if line:
uLogger.error(line)
uLogger.debug("Waiting until 30 sec and will try again...")
time.sleep(30)
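    # Alpha Vantage rate-limits free API keys; a "Note" field in the JSON response signals
    # the limit, so the loop above waits and retries instead of failing immediately.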
avTSHeaders = {
"TIME_SERIES_MONTHLY": "Monthly Time Collections",
"TIME_SERIES_WEEKLY": "Weekly Time Collections",
"TIME_SERIES_DAILY": "Time Collections (Daily)",
"TIME_SERIES_INTRADAY": "Time Collections ({})".formating(interval),
}
rawDataDict = respJSON[avTSHeaders[period]]
dateKeys = list(rawDataDict.keys()) # list from json response with total_all given dates
dates = mk.convert_datetime(dateKeys)
kf = mk.KnowledgeFrame(
data={
"date": dates,
"time": dates,
"open": [float(rawDataDict[item]["1. open"]) for item in dateKeys],
"high": [float(rawDataDict[item]["2. high"]) for item in dateKeys],
"low": [float(rawDataDict[item]["3. low"]) for item in dateKeys],
"close": [float(rawDataDict[item]["4. close"]) for item in dateKeys],
"volume": [int(rawDataDict[item]["5. volume"]) for item in dateKeys],
},
index=range(length(rawDataDict)),
columns=["date", "time", "open", "high", "low", "close", "volume"],
)
kf["date"] = kf["date"].dt.strftime("%Y.%m.%d") # only dates in "date" field
kf["time"] = kf["time"].dt.strftime("%H:%M") # only times in "time" field
kf = kf.iloc[::-1]
kf.reseting_index(sip=True, inplace=True) # change index from oldest to latest candles
if "6. Time Zone" in respJSON["Meta Data"].keys():
timeZone = respJSON["Meta Data"]["6. Time Zone"]
elif "5. Time Zone" in respJSON["Meta Data"].keys():
timeZone = respJSON["Meta Data"]["5. Time Zone"]
else:
timeZone = respJSON["Meta Data"]["4. Time Zone"]
uLogger.info("It was received {} candlesticks data from Alpha Vantage service".formating(length(kf)))
uLogger.info("Showing final_item 3 rows with Time Zone: '{}':".formating(timeZone))
    lines = mk.KnowledgeFrame.convert_string(kf[["date", "time", "open", "high", "low", "close", "volume"]][-3:], getting_max_cols=20)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional informatingion
# regarding cloneright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in Javascript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from typing import Any, Ctotal_allable, Dict, Optional, Union
import monkey as mk
from superset.utils.core import DTTM_ALIAS, extract_knowledgeframe_dtypes, getting_metric_name
def sql_like_total_sum(collections: mk.Collections) -> mk.Collections:
"""
A SUM aggregation function that mimics the behavior from SQL.
"""
return collections.total_sum(getting_min_count=1)
def pivot_table(
result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None
) -> Dict[Any, Any]:
"""
Pivot table.
"""
for query in result["queries"]:
data = query["data"]
kf = mk.KnowledgeFrame(data)
form_data = form_data or {}
if form_data.getting("granularity") == "total_all" and DTTM_ALIAS in kf:
del kf[DTTM_ALIAS]
metrics = [getting_metric_name(m) for m in form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Ctotal_allable[[Any], Any]]] = {}
for metric in metrics:
aggfunc = form_data.getting("monkey_aggfunc") or "total_sum"
if mk.api.types.is_numeric_dtype(kf[metric]):
if aggfunc == "total_sum":
aggfunc = sql_like_total_sum
elif aggfunc not in {"getting_min", "getting_max"}:
aggfunc = "getting_max"
aggfuncs[metric] = aggfunc
grouper = form_data.getting("grouper") or []
columns = form_data.getting("columns") or []
if form_data.getting("transpose_pivot"):
grouper, columns = columns, grouper
kf = kf.pivot_table(
index=grouper,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=form_data.getting("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
kf = kf[metrics]
# Display metrics side by side with each column
if form_data.getting("combine_metric"):
kf = kf.stack(0).unstack().reindexing(level=-1, columns=metrics)
# flatten column names
kf.columns = [" ".join(column) for column in kf.columns]
# re-arrange data into a list of dicts
data = []
for i in kf.index:
row = {col: kf[col][i] for col in kf.columns}
row[kf.index.name] = i
data.adding(row)
query["data"] = data
query["colnames"] = list(kf.columns)
query["coltypes"] = extract_knowledgeframe_dtypes(kf)
query["rowcount"] = length(kf.index)
return result
def list_distinctive_values(collections: mk.Collections) -> str:
"""
List distinctive values in a collections.
"""
return ", ".join(set(str(v) for v in mk.Collections.distinctive(collections)))
pivot_v2_aggfunc_mapping = {
"Count": mk.Collections.count,
"Count Unique Values": mk.Collections.ndistinctive,
"List Unique Values": list_distinctive_values,
"Sum": mk.Collections.total_sum,
"Average": mk.Collections.average,
"Median": mk.Collections.median,
"Sample Variance": lambda collections: | mk.collections.var(collections) | pandas.series.var |
# Functions related to missing values
# Handling missing values in a KnowledgeFrame
# In monkey, missing values are: NaN, None
# NaN : treated as a character/string in databases
# None : treated as a row in deep learning
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# #null 판별
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Insert a missing value at a specific position : None ==> a reserved word meaning "missing value"
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # column a (string) = None, column b (float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# # Check the number of missing values in each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # Number of missing values in a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# # Check the count of non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # Sum (count) of missing values in each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# # Is a value missing? ifnull(), notnull()
# # Column-wise missing value counts : kf.ifnull().total_sum()
# # Row-wise missing value counts : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN is treated as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # sum of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) # column-wise average : (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) # row-wise average
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) # column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# # Arithmetic between knowledgeframe columns : the result is NaN if either operand is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
#    c1  c2
# a   1 NaN
# b   1 NaN
# c   1 NaN
# d   1 NaN
# e   1 NaN
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = kf.fillnone('missing')
print(kf_missing)
# c1 c2 c3
# 0 missing -0.441011 -0.544838
# 1 missing 1.38941 missing
# 2 -1.77381 missing -0.855286
# 3 -0.287784 missing 0.280705
# 4 0.641317 -2.30403 missing
print('='*50)
print(kf)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 NaN
# 2 0.157068 NaN 0.860016
# 3 0.525265 NaN -1.482895
# 4 -0.396621 0.958787 NaN
print(kf.fillnone(method='ffill')) # fill with the value directly above (forward fill)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 -1.348020
# 2 0.157068 0.063360 0.860016
# 3 0.525265 0.063360 -1.482895
# 4 -0.396621 0.958787 -1.482895
print( | kf.fillnone(method='pad') | pandas.DataFrame.fillna |
"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def whatever_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = | MonkeyDtype(dtype) | pandas.core.arrays.numpy_.PandasDtype |
"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def whatever_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = MonkeyDtype(np.dtype("int64"))
assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
result = MonkeyDtype.construct_from_string("int64")
expected = MonkeyDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
| MonkeyArray([1, 2, 3]) | pandas.arrays.PandasArray |
from dataset.dataset import test_transform
import cv2
import monkey.io.clipboard as clipboard
from PIL import ImageGrab
from PIL import Image
import os
import sys
import argparse
import logging
import yaml
import re
import numpy as np
import torch
from torchvision import transforms
from munch import Munch
from transformers import PreTrainedTokenizerFast
from timm.models.resnetv2 import ResNetV2
from timm.models.layers import StdConv2dSame
from dataset.latex2png import tex2pil
from models import getting_model
from utils import *
import warnings
warnings.filterwarnings("ignore")
final_item_pic = None
def getting_mingetting_max_size(img, getting_max_dimensions=None, getting_min_dimensions=None):
if getting_max_dimensions is not None:
ratios = [a/b for a, b in zip(img.size, getting_max_dimensions)]
if whatever([r > 1 for r in ratios]):
size = np.array(img.size)//getting_max(ratios)
img = img.resize(size.totype(int), Image.BILINEAR)
if getting_min_dimensions is not None:
if whatever([s < getting_min_dimensions[i] for i, s in enumerate(img.size)]):
padded_im = Image.new('L', getting_min_dimensions, 255)
padded_im.paste(img, img.gettingbbox())
img = padded_im
return img
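# Illustrative sketch of getting_mingetting_max_size (the concrete sizes are assumptions, not from
# this script): for an 800x600 input with getting_max_dimensions=(400, 400) the ratios are
# [2.0, 1.5], so the image is shrunk by the largest ratio down to 400x300; an input
# smaller than getting_min_dimensions is instead pasted onto a white 'L'-mode canvas of
# exactly getting_min_dimensions.
#   img = Image.new('L', (800, 600), 255)
#   out = getting_mingetting_max_size(img, getting_max_dimensions=(400, 400), getting_min_dimensions=(32, 32))
#   # out.size == (400, 300)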
def initialize(arguments=None):
if arguments is None:
arguments = Munch({'config': 'settings/config.yaml', 'checkpoint': 'checkpoints/weights.pth', 'no_cuda': True, 'no_resize': False})
logging.gettingLogger().setLevel(logging.FATAL)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
with open(arguments.config, 'r') as f:
params = yaml.load(f, Loader=yaml.FullLoader)
args = Munch(params)
args.umkate(**vars(arguments))
args.wandb = False
args.device = 'cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu'
model = getting_model(args)
model.load_state_dict(torch.load(args.checkpoint, mapping_location=args.device))
if 'image_resizer.pth' in os.listandardir(os.path.dirname(args.checkpoint)):
image_resizer = ResNetV2(layers=[2, 3, 3], num_classes=22, global_pool='avg', in_chans=1, sip_rate=.05,
preact=True, stem_type='same', conv_layer=StdConv2dSame).to(args.device)
image_resizer.load_state_dict(torch.load(os.path.join(os.path.dirname(args.checkpoint), 'image_resizer.pth'), mapping_location=args.device))
image_resizer.eval()
else:
image_resizer = None
tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.tokenizer)
return args, model, image_resizer, tokenizer
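# Hedged usage sketch (the image path is a placeholder; initialize() must have been
# ctotal_alled first so the weights are loaded):
#   args, model, image_resizer, tokenizer = initialize()
#   img = Image.open("example_formula.png")
#   ctotal_all_model(args, model, image_resizer, tokenizer, img=img)
#   # the predicted LaTeX string is copied to the clipboard by ctotal_all_model() below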
def ctotal_all_model(args, model, image_resizer, tokenizer, img=None):
global final_item_pic
encoder, decoder = model.encoder, model.decoder
if type(img) is bool:
img = None
if img is None:
if final_item_pic is None:
print('Provide an image.')
return
else:
img = final_item_pic.clone()
else:
final_item_pic = img.clone()
img = getting_mingetting_max_size(pad(img), args.getting_max_dimensions, args.getting_min_dimensions)
if image_resizer is not None:
with torch.no_grad():
input_image = pad(img).convert('RGB').clone()
r, w = 1, img.size[0]
for i in range(10):
img = getting_mingetting_max_size(input_image.resize((w, int(input_image.size[1]*r)), Image.BILINEAR if r > 1 else Image.LANCZOS), args.getting_max_dimensions, args.getting_min_dimensions)
t = test_transform(image=np.array(pad(img).convert('RGB')))['image'][:1].unsqueeze(0)
w = image_resizer(t.to(args.device)).arggetting_max(-1).item()*32
if (w/img.size[0] == 1):
break
r *= w/img.size[0]
else:
img = np.array(pad(img).convert('RGB'))
t = test_transform(image=img)['image'][:1].unsqueeze(0)
im = t.to(args.device)
with torch.no_grad():
model.eval()
device = args.device
encoded = encoder(im.to(device))
dec = decoder.generate(torch.LongTensor([args.bos_token])[:, None].to(device), args.getting_max_seq_length,
eos_token=args.eos_token, context=encoded.detach(), temperature=args.getting('temperature', .25))
pred = post_process(token2str(dec, tokenizer)[0])
| clipboard.clone(pred) | pandas.io.clipboard.copy |
import numpy as np
import monkey as mk
from IPython.display import display, Markdown as md, clear_output
from datetime import datetime, timedelta
import plotly.figure_factory as ff
import qgrid
import re
from tqdm import tqdm
class ProtectListener():
def __init__(self, pp_log, lng):
"""
Class to analyse protection informatingion.
...
Attributes:
-----------
kf (mk.KnowledgeFrame): raw data extracted from the Wikipedia API.
lng (str): language from {'en', 'de'}
inf_str / exp_str (str): "indefinite" / "expires" for English,
"unbeschränkt" / "bis" for German.
"""
self.lng = lng
self.kf = pp_log
if self.lng == "en":
self.inf_str = "indefinite"
self.exp_str = "expires"
elif self.lng == "de":
self.inf_str = "unbeschränkt"
self.exp_str = "bis"
else:
display(md("This language is not supported yet."))
self.inf_str = "indefinite"
self.exp_str = "expires"
def getting_protect(self, level="semi_edit"):
"""
Main function of ProtectListener.
...
Parameters:
-----------
level (str): select one from {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}
...
Returns:
-----------
final_table (mk.KnowledgeFrame): definal_item_tailed knowledgeframe containing protection records for a particular type/level.
plot_table (mk.KnowledgeFrame): knowledgeframe for further Gantt Chart plotting.
"""
if length(self.kf) == 0:
display(md(f"No {level} protection records!"))
return None, mk.KnowledgeFrame(columns=["Task", "Start", "Finish", "Resource"])
else:
self.kf = self.kf.sip(self.kf[self.kf["action"] == "move_prot"].index).reseting_index(sip=True)
if length(self.kf) == 0:
display(md(f"No {level} protection records!"))
return None, mk.KnowledgeFrame(columns=["Task", "Start", "Finish", "Resource"])
kf_with_expiry = self._getting_expiry()
kf_with_unknown = self._check_unknown(kf_with_expiry)
kf_checked_unprotect = self._check_unprotect(kf_with_unknown)
kf_select_level = self._select_level(kf_checked_unprotect, level=level)
kf_with_unprotect = self._getting_unprotect(kf_select_level)
final_table = self._getting_final(kf_with_unprotect)
plot_table = self._getting_plot(final_table, level=level)
return final_table, plot_table
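# Hedged usage sketch (`pp_log` stands for a protection-log KnowledgeFrame fetched from
# the Wikipedia API; the variable names are illustrative only):
#   listener = ProtectListener(pp_log, lng="en")
#   final_table, plot_table = listener.getting_protect(level="semi_edit")
#   # final_table holds the definal_item_tailed records, plot_table feeds the Gantt chart plot.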
def _regrex1(self, captured_content):
"""Ctotal_alled in _getting_expiry() method. Capture expriry date.
...
Parameters:
-----------
captured_content (str): contents in "params" or "comment" column
including "autoconfirmed" or "sysop".
...
Returns:
-----------
reg0 (list): A list like [('edit=autoconfirmed', 'indefinite'), ('move=sysop', 'indefinite')]
or [('edit=autoconfirmed:move=autoconfirmed', 'expires 22:12, 26 August 2007 (UTC')]
"""
reg0 = re.findtotal_all('\[(.*?)\]\ \((.*?)\)', captured_content)
return reg0
def _regrex2(self, captured_content):
"Ctotal_alled in _getting_expiry() method. Capture expriry date. Parameters and returns similar as _regrex1."
reg0 = re.findtotal_all('\[(.*?)\:(.*?)\]$', captured_content)
reg1 = re.findtotal_all('\[(.*?)\]$', captured_content)
if length(reg0) != 0:
reg0[0] = (reg0[0][0] + ":" + reg0[0][1], self.inf_str)
return reg0
else:
try:
reg1[0] = (reg1[0], self.inf_str)
except:
pass
return reg1
def _extract_date(self, date_content):
"""Ctotal_alled in _check_state(). Extract expiry date.
If inf, then return getting_max Timestamp of monkey.
"""
if not self.inf_str in date_content:
extract_str = re.findtotal_all(f'{self.exp_str}\ (.*?)\ \(UTC', date_content)[0]
return extract_str
else:
return (mk.Timestamp.getting_max).convert_pydatetime(warn=False).strftime("%H:%M, %-d %B %Y")
def _check_state(self, extract):
"""
Ctotal_alled in _getting_expiry().
Given a list of extracted expiry dates, further label them using
protection type ({edit, move}) and level (semi (autoconfirmed) or full (sysop)).
...
Parameters:
-----------
extract (list): output of _regrex1 or _regrex2
...
Returns:
-----------
states_dict (dict): specify which level and which type, and also
respective expiry date.
"""
states_dict = {"autoconfirmed_edit": 0, "expiry1": None,
"autoconfirmed_move": 0, "expiry11": None,
"sysop_edit": 0, "expiry2": None,
"sysop_move": 0, "expiry21": None}
length_extract = length(extract)
for i in range(length_extract):
action_tup = extract[i]
mask_auto_edit = "edit=autoconfirmed" in action_tup[0]
mask_auto_move = "move=autoconfirmed" in action_tup[0]
mask_sysop_edit = "edit=sysop" in action_tup[0]
mask_sysop_move = "move=sysop" in action_tup[0]
if mask_auto_edit:
states_dict["autoconfirmed_edit"] = int(mask_auto_edit)
states_dict["expiry1"] = self._extract_date(action_tup[1])
if mask_auto_move:
states_dict["autoconfirmed_move"] = int(mask_auto_move)
states_dict["expiry11"] = self._extract_date(action_tup[1])
if mask_sysop_edit:
states_dict["sysop_edit"] = int(mask_sysop_edit)
states_dict["expiry2"] = self._extract_date(action_tup[1])
if mask_sysop_move:
states_dict["sysop_move"] = int(mask_sysop_move)
states_dict["expiry21"] = self._extract_date(action_tup[1])
return states_dict
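# Illustrative example of _check_state, reusing the sample capture shown in the
# _regrex1 docstring above:
#   extract = [('edit=autoconfirmed', 'expires 22:12, 26 August 2007 (UTC')]
#   # -> {'autoconfirmed_edit': 1, 'expiry1': '22:12, 26 August 2007',
#   #     'autoconfirmed_move': 0, 'expiry11': None,
#   #     'sysop_edit': 0, 'expiry2': None,
#   #     'sysop_move': 0, 'expiry21': None}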
def _month_lng(self, string):
"""Ctotal_alled in _getting_expiry. Substitute non-english month name with english one.
For now only support DE.
"""
if self.lng == "de":
de_month = {"März": "March", "Dezember": "December", "Mär": "Mar", "Mai": "May", "Dez": "Dec", "Januar": "January",
"Februar": "February", "Juni": "June",
"Juli": "July", "Oktobor": "October"}
for k, v in de_month.items():
new_string = string.replacing(k, v)
if new_string != string:
break
return new_string
else:
return string
def _getting_expiry(self):
"""
Ctotal_alled in getting_protect(). Extract expiry time from self.kf["params"] and self.kf["comment"].
...
Returns:
--------
protect_log (mk.KnowledgeFrame): expiry1: autoconfirmed_edit; expiry11: autoconfirmed_move;
expiry2: sysop_edit; expiry21: sysop_move.
"""
protect_log = (self.kf).clone()
self.test_log = protect_log
# Convert timestamp date formating.
protect_log["timestamp"] = protect_log["timestamp"].employ(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%SZ"))
# Create an empty dict to store protection types and expiry dates.
expiry = {}
# First check "params" column.
if "params" in protect_log.columns:
for idx, com in protect_log['params'].iteritems():
if type(com) == str:
if ("autoconfirmed" in com) | ("sysop" in com):
extract_content = self._regrex1(com) if length(self._regrex1(com)) != 0 else self._regrex2(com)
expiry[idx] = self._check_state(extract_content) # Which type it belongs to?
else:
pass
else:
pass
# Then check "comment" column.
for idx, com in protect_log['comment'].iteritems():
if ("autoconfirmed" in com) | ("sysop" in com):
extract_content = self._regrex1(com) if length(self._regrex1(com)) != 0 else self._regrex2(com)
expiry[idx] = self._check_state(extract_content) # Which type it belongs to?
else:
pass
# Fill expiry date into the knowledgeframe.
for k, v in expiry.items():
protect_log.loc[k, "autoconfirmed_edit"] = v["autoconfirmed_edit"]
if v["expiry1"] != None:
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %B %d, %Y")
except:
v["expiry1"] = self._month_lng(v["expiry1"])
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "autoconfirmed_move"] = v["autoconfirmed_move"]
if v["expiry11"] != None:
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %B %d, %Y")
except:
v["expiry11"] = self._month_lng(v["expiry11"])
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "sysop_edit"] = v["sysop_edit"]
if v["expiry2"] != None:
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %B %d, %Y")
except:
v["expiry2"] = self._month_lng(v["expiry2"])
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "sysop_move"] = v["sysop_move"]
if v["expiry21"] != None:
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %B %d, %Y")
except:
v["expiry21"] = self._month_lng(v["expiry21"])
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%d. %B %Y, %H:%M Uhr")
return protect_log
def _check_unknown(self, protect_log):
"""
Ctotal_alled in getting_protect(). Added this method because for some early protection
data no type or level of protection is specified. The type "extendedconfirmed"
is also considered unknown because we only consider semi or full protection.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): output of _getting_expiry.
...
Returns:
-----------
protect_log (mk.KnowledgeFrame): knowledgeframe in which unknown action is already labeled.
"""
mask_unknown_auto_edit = (protect_log["action"] != "unprotect") & (protect_log["autoconfirmed_edit"].ifnull())
mask_unknown_auto_move = (protect_log["action"] != "unprotect") & (protect_log["autoconfirmed_move"].ifnull())
mask_unknown_sys_edit = (protect_log["action"] != "unprotect") & (protect_log["sysop_edit"].ifnull())
mask_unknown_sys_move = (protect_log["action"] != "unprotect") & (protect_log["sysop_move"].ifnull())
mask_extendedconfirmed = protect_log["params"].str.contains("extendedconfirmed").fillnone(False)
mask_unknown = (mask_unknown_auto_edit & mask_unknown_sys_edit & mask_unknown_auto_move & mask_unknown_sys_move)
mask_unknown = (mask_unknown | mask_extendedconfirmed)
protect_log.loc[mask_unknown_auto_edit, "autoconfirmed_edit"] = 0
protect_log.loc[mask_unknown_auto_move, "autoconfirmed_move"] = 0
protect_log.loc[mask_unknown_sys_edit, "sysop_edit"] = 0
protect_log.loc[mask_unknown_sys_move, "sysop_move"] = 0
protect_log.loc[mask_unknown, "unknown"] = 1
# Delete move action.
#protect_log = protect_log.sip(protect_log[protect_log["action"] == "move_prot"].index).reseting_index(sip=True)
# Fill non-unknown with 0.
protect_log["unknown"] = protect_log["unknown"].fillnone(0)
return protect_log
def _insert_row(self, row_number, kf, row_value):
"Ctotal_alled in _check_unprotect(). Function to insert row in the knowledgeframe."
start_upper = 0
end_upper = row_number
start_lower = row_number
end_lower = kf.shape[0]
upper_half = [*range(start_upper, end_upper, 1)]
lower_half = [*range(start_lower, end_lower, 1)]
lower_half = [x.__add__(1) for x in lower_half]
index_ = upper_half + lower_half
kf.index = index_
kf.loc[row_number] = row_value
return kf
def _check_unprotect(self, protect_log):
"""Ctotal_alled in getting_protect. Check which type of protection is cancelled.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): knowledgeframe in which unprotect type is labeled.
"""
# Get indices of total_all unprotect records.
idx_unprotect = protect_log[protect_log["action"] == "unprotect"].index
# Label which type is unprotected.
for col_name in ["autoconfirmed_edit", "autoconfirmed_move", "sysop_edit", "sysop_move", "unknown"]:
for idx in reversed(idx_unprotect):
if protect_log[col_name].loc[idx + 1] == 1:
protect_log.loc[idx, col_name] = 1
# Deal with upgraded unknown protection, normtotal_ally omitted.
unknown_idx = protect_log[(protect_log["unknown"] == 1) & (protect_log["action"] == "protect")].index
upgrade_sus = protect_log.loc[unknown_idx - 1]
contains_upgrade = upgrade_sus[upgrade_sus["action"] == "protect"]
if length(contains_upgrade) != 0:
higher_level_idx = contains_upgrade.index
upgrade_idx = higher_level_idx + 1
aux_unprotect = protect_log.loc[upgrade_idx].clone()
aux_unprotect.loc[:,"action"] = "unprotect"
aux_unprotect.loc[:, "timestamp"] = upgrade_sus.loc[higher_level_idx]["timestamp"].values
for row in aux_unprotect.traversal():
self._insert_row(row[0], protect_log, row[1].values)
else:
pass
return protect_log.sorting_index()
def _select_level(self, protect_log, level):
"""
Ctotal_alled in getting_protect. For each level
'fully_edit', 'fully_move', 'semi_edit', 'semi_move', 'unknown',
pick up the expiry date for further plotting.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): output of _check_unprotect.
level (str): one of {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}.
...
Returns:
-----------
protect_table (mk.KnowledgeFrame): records for the requested protection level, with a single "expiry" column.
"""
protect_log[["autoconfirmed_edit",
"autoconfirmed_move",
"sysop_edit",
"sysop_move"]] = protect_log[["autoconfirmed_edit","autoconfirmed_move", "sysop_edit", "sysop_move"]].fillnone(2)
protect_auto_edit = protect_log[protect_log["autoconfirmed_edit"] == 1] # Semi-protected (edit)
protect_auto_move = protect_log[protect_log["autoconfirmed_move"] == 1] # Semi-protected (move)
protect_sys_edit = protect_log[protect_log["sysop_edit"] == 1] # Fully-protected (edit)
protect_sys_move = protect_log[protect_log["sysop_move"] == 1] # Fully-protected (move)
protect_unknown = protect_log[protect_log["unknown"] == 1] # Unknown
self.test_auto_edit = protect_auto_edit
common_sip_cols = ["autoconfirmed_edit", "autoconfirmed_move", "sysop_edit", "sysop_move", "unknown"]
expiry_cols = ["expiry1", "expiry11", "expiry2", "expiry21"]
if level == "semi_edit":
protect_table = protect_auto_edit.clone()
if "expiry1" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry11", "expiry2", "expiry21"], axis=1).renagetting_ming({"expiry1": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry1": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "semi_move":
protect_table = protect_auto_move.clone()
if "expiry11" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry2", "expiry21"], axis=1).renagetting_ming({"expiry11": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry11": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "fully_edit":
protect_table = protect_sys_edit.clone()
if "expiry2" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry11", "expiry21"], axis=1).renagetting_ming({"expiry2": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry2": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "fully_move":
protect_table = protect_sys_move.clone()
if "expiry21" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry11", "expiry2"], axis=1).renagetting_ming({"expiry21": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry21": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "unknown":
protect_table = protect_unknown.clone()
protect_table["expiry"] = mk.NaT
try:
protect_table = protect_table.sip(common_sip_cols + expiry_cols, axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1"], axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry11"], axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry2"], axis=1)
except:
protect_table = protect_table.sip(common_sip_cols + ["expiry21"], axis=1)
else:
raise ValueError("Please choose one level from 'semi_edit', 'semi_move', 'fully_edit', 'fully_move' and 'unknown'.")
protect_table = protect_table.reseting_index(sip=True)
return protect_table
def _getting_unprotect(self, protect_table):
"""Set unprotect time as a new column, in order to compare it with expiry time."""
pp_log_shifting = protect_table.shifting(1)
pp_unprotect = pp_log_shifting[pp_log_shifting["action"] == "unprotect"]["timestamp"]
for idx, unprotect_date in pp_unprotect.iteritems():
protect_table.loc[idx, "unprotect"] = unprotect_date
protect_table["expiry"] = protect_table["expiry"].fillnone(mk.Timestamp.getting_max.replacing(second=0))
try:
protect_table["unprotect"] = protect_table["unprotect"].fillnone( | mk.Timestamp.getting_max.replacing(second=0) | pandas.Timestamp.max.replace |
import matplotlib
from tqdm import tqdm
import librosa
from scipy import stats
import warnings
import multiprocessing
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import pairwise_distances
import monkey as mk
import utils
import features as ft
import lsh_random_projection as LSH
import spectral_hashing as Spectral
import resource
import numpy as np
import seaborn as sns
import os
import sklearn.preprocessing
import librosa.display
import random
# import matplotlib.pyplot as
features = utils.load('data/fma_metadata/features.csv')
tracks = utils.load('data/fma_metadata/tracks.csv')
class Evaluation:
# pass in lsh table instances for spectral, probe, ...
def __init__(self, lsh):
self.lsh = lsh
# def getting_accuracy
# lsh_probe_1
# lsh_probe_2
# spectral/
# lsh_ranodmised_proj
# def getting_list_times(querymethod, queries):
# None
# for q in queries:
# tic = time.perf_counter() # lsh.getting(q)
# toc = time.perf_counter()
# time.list.adding(toc - tic)
# def getting_boxplot_rand_projection(self, X):
# # print(X)
# ys = []
# xs = []
# # # TODO compile total_all data from 100 queries into same array.
# for i in range(10):
# ratio = (i + 1) / 10
# ys.adding(ratio)
# matches = lsh.getting(inp_vec=X, collision_ratio=i,
# probeType="rand_proj")
# # # for row in X.traversal():
# # # print(">> > ", row)
# # query_kf = X.iloc[1:2]
# # # print(query_kf)
# # # matches = lsh.getting(query_kf, ratio, probeType="rand-proj")
# # # print("ratio: ", ratio, "ROW : ", matches)
# # xs.adding(matches)
# # # print(matches)
# # plt.boxplot(xs, ys)
# # plt.show()
# np.random.seed(19680801)
# # fake up some data
# spread = np.random.rand(50) * 100
# center = np.ones(25) * 50
# flier_high = np.random.rand(10) * 100 + 100
# flier_low = np.random.rand(10) * -100
# data = np.concatingenate((spread, center, flier_high, flier_low))
# fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
# ax1.boxplot(data)
# plt.show()
#
# print("XS ", xs)
# print("YS ", ys)
# print(">>>>> I " i)
# print(matches)
def getting_rectotal_all_accuracy(self, x, X, probeType, k=0):
# TODO
# getting with 100 queries
# matches_list = gettingquries()
matches_list = self.lsh.getting(
X, collision_ratio=0.5, probeType=probeType, k=k)
brute_forces = self.bruteforce_getting(x, X)
avg_rectotal_all = 0
count = 0
for matches, answers in zip(matches_list, brute_forces):
# print("MATCHES ", matches)
# print("ANSWERS ", answers)
# # webrute_forces_list = []
# for idx, ys in enumerate(brute_forces_list):
rectotal_all = self.getting_search_quality(matches['id'], answers['id'])
print("RATIO >>> ", rectotal_all)
avg_rectotal_all = avg_rectotal_all + rectotal_all
count = count + 1
return avg_rectotal_all / count
def eval_top_k_accuracy(self):
print("starting eval")
# query_kf = ft.compute_features(query)
query = ft.compute_features("input_audio/26 Queensway 4.wav")
# query_kf = features.iloc[1:2]
brute_force_top_k = self.bruteforce_getting(
X_train['mfcc'], query['mfcc'])
print("brute", brute_force_top_k)
lsh_random_proj_top_k = self.lsh.getting(
query['mfcc'], probeType="rand_proj")
print(lsh_random_proj_top_k)
lsh_probe_step_wise_top_k = self.lsh.getting(
query['mfcc'], probeType="step-wise")
# TODO modularise lsh code so acc working
lsh_probe_bit_flip_top_k = self.lsh.getting(
query['mfcc'], probeType="bit-flip", k=2)
# spectral_top_k =
# print(bru)
lsh_random_proj_score = self.getting_search_quality(
brute_force_top_k['id'], lsh_random_proj_top_k['id'])
lsh_probe_step_wise_score = self.getting_search_quality(
brute_force_top_k['id'], lsh_probe_step_wise_top_k['id'])
lsh_probe_bit_flip_score = self.getting_search_quality(
brute_force_top_k['id'], lsh_probe_bit_flip_top_k['id'])
print("randproj: ", lsh_random_proj_score, " step-wise: ",
lsh_probe_step_wise_score, " bit_flip ", lsh_probe_bit_flip_score)
print("BRUETY ", brute_force_top_k)
print("RAND PROJ ", lsh_random_proj_top_k)
print("STEP-wise ", lsh_probe_step_wise_top_k)
print("bit flip ", lsh_probe_bit_flip_top_k)
# spectral_top_k_score =
def getting_search_quality(ys, Ys):
k = length(ys)
if k == 0:
return 0
# print("STRAT")
count = 0
for Y in Ys:
if (ys == Y).whatever():
# print("FOUND ", Y)
count = count + 1
return count / k
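# Illustrative check of the ratio computed above (assuming ys and Ys are monkey
# Collections of track ids, as in the ctotal_alls elsewhere in this script):
#   ys = mk.Collections([1, 2, 3, 4]); Ys = mk.Collections([2, 4, 9])
#   # two of the candidates (2 and 4) appear in ys and k = length(ys) = 4, so the
#   # returned quality is 2/4 = 0.5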
def bruteforce_getting(features, inp_vec, k=20):
query_top_ks = [None for i in range(length(inp_vec))]
for idx in range(length(inp_vec)):
distance = pairwise_distances(
features, inp_vec.iloc[idx].values.reshape(1, -1), metric='euclidean').flatten()
nearest_neighbours = mk.KnowledgeFrame({'id': features.index, 'genre': tracks['track']['genre_top'].ix[features.index], 'distance': distance}).sort_the_values(
'distance').reseting_index(sip=True)
# print("nearest negih")
# print(nearest_neighbours.header_num())
candidate_set_labels = nearest_neighbours.sort_the_values(
by=['distance'], ascending=True)
non_null = candidate_set_labels[candidate_set_labels['genre'].notnull(
)]
query_top_ks[idx] = non_null.iloc[:k]
return query_top_ks
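# Hedged usage sketch (assumes bruteforce_getting is available as a plain function and
# that X_train / X_test come from a train_test_split as used elsewhere in this script):
#   top_ks = bruteforce_getting(X_train['mfcc'], X_test['mfcc'], k=20)
#   # top_ks[i] is a KnowledgeFrame with columns ['id', 'genre', 'distance'] holding the
#   # 20 exact nearest neighbours (with a non-null genre) of query i.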
def getting_expected_genre_accuracy(eval, total_all_data, inp_vec, probeType):
matches_list = eval.lsh.getting(
inp_vec, collision_ratio=0.5)
print("match ", matches_list)
gvalue_round_truths = tracks['track']['genre_top'].ix[inp_vec.index]
print("<><><><>")
print(gvalue_round_truths)
ratio_total_sum = 0
count = 0
for answer, top_k_genres in zip(gvalue_round_truths, matches_list):
print(answer, "mkljk", top_k_genres)
ratio = getting_answer_occurence(answer, top_k_genres)
print(ratio)
if not mk.ifnull(answer):
ratio_total_sum += ratio
count += 1
# print("answer:", answer, ">> top:", top_k_genres)
print("RATOIO ratio ", ratio)
# if ratio_total_sum / count < 0.5:
# # print(random.randint(0,9))
# return 0.5 + random.randint(0, 9) / 57
# else:
return ratio_total_sum / count
def getting_answer_occurence(answer, top_k_genres):
if length(top_k_genres) == 0:
return 0
count = 0
for genre in top_k_genres['genre']:
if answer == genre:
count += 1
return count / length(top_k_genres)
# def getting_accuraccy_over_hashtables
def pca():
X_train, X_test = train_test_split(
features['mfcc'], test_size=0.2, random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
plt.scatter(X_test[:, 0], X_test[:, 1], edgecolor='none', alpha=0.5,
)
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar()
plt.show()
def single_search():
# acc = []
# for i in range(10):
# X_train, X_test = train_test_split(features, test_size=10)
# lsh = LSH.LSH(30, 15, 140)
# lsh.add(X_train['mfcc'])
# eval = Evaluation(lsh)
# accuracy = eval.getting_rectotal_all_accuracy(
# X_train['mfcc'], X_test['mfcc'], "rand_proj")
# acc.adding(accuracy)
# print("acc: ", acc)
acc = []
genre = []
tables = []
for i in range(40, 50, 5):
X_train, X_test = train_test_split(features, test_size=10)
lsh = LSH.LSH(i, 15, 140)
lsh.add(X_train['mfcc'])
eval = Evaluation(lsh)
genre_accuracy, count = eval.getting_expected_genre_accuracy(
X_train['mfcc'], X_test['mfcc'], probeType="rand-proj")
# accuracy = eval.getting_rectotal_all_accuracy(
# X_train['mfcc'], X_test['mfcc'], "rand_proj")
# acc.adding(accuracy)
genre.adding(genre_accuracy)
tables.adding(i)
print("acc ", acc)
print("genre ", genre)
print("tables ", tables)
def plot_genre_rand_proj():
genre = []
tables = []
# for i in range(1, 50, 5):
for i in range(1, 20):
print(i)
X_train, X_test = train_test_split(features, test_size=10)
lsh = LSH.LSH(40, i * 5, 140)
lsh.add(X_train['mfcc'])
eval = Evaluation(lsh)
genre_accuracy = getting_expected_genre_accuracy(eval,
X_train['mfcc'], X_test['mfcc'], probeType="rand-proj")
# genre_accuracy = eval.getting_expected_genre_accuracy(
# X_train['mfcc'], X_test['mfcc'], probeType="step-wise")
# acc.adding(accuracy)
print("GENRE ", genre_accuracy)
genre.adding(genre_accuracy)
tables.adding(i)
# print("acc ", acc)
# print("genre ", genre)
# print("tables ", tables)
plt.plot(tables, genre,
color='blue', marker='x', label="rand-proj")
plt.title(
'Multi-probe LSH( Step-wise) avg rectotal_all with 1 bucket-variation probe', fontsize=14)
plt.xlabel('No. of Hash Tables', fontsize=14)
plt.ylabel('Genre accuracy', fontsize=14)
plt.grid(True)
plt.legend(loc="upper right")
plt.show()
def plot_accuracy_rand_proj():
genre = []
tables = []
for i in range(1, 11):
print("I :", i)
X_train, X_test = train_test_split(features, test_size=10)
lsh = LSH.LSH(i, 15, 140)
lsh.add(X_train['mfcc'])
eval = Evaluation(lsh)
# genre_accuracy, count = eval.getting_expected_genre_accuracy(
# X_train['mfcc'], X_test['mfcc'], probeType="rand-proj")
accuracy = eval.getting_rectotal_all_accuracy(
X_train['mfcc'], X_test['mfcc'])
# acc.adding(accuracy)
# print("GENRE ", genre_accuracy)
genre.adding(accuracy)
tables.adding(i)
plt.plot(genre, tables,
color='blue', marker='x')
plt.title(
'Multi-probe LSH(Step-wise) avg rectotal_all with 1 bucket-variation probe', fontsize=14)
plt.xlabel('Rectotal_all accuracy', fontsize=14)
plt.ylabel('No. of Hash Tables', fontsize=14)
plt.grid(True)
plt.show()
# print("acc ", acc)
# print("genre ", genre)
# print("tables ", tables)
# plt.plot(genre, tables,
# color='blue', marker='x', label="rand-proj")
# plt.title('Avg rectotal_all for each number of probes ', fontsize=14)
# plt.xlabel('Avg rectotal_all', fontsize=14)
# plt.ylabel('No. of Hash Tables', fontsize=14)
# plt.grid(True)
# plt.legend(loc="upper right")
# plt.show()
# def plot_genre_rand_proj():
# genre = []
# tables = []
# for i in range(1, 50, 5):
# X_train, X_test = train_test_split(features, test_size=10)
# lsh = LSH.LSH(i, 15, 140)
# lsh.add(X_train['mfcc'])
# eval = Evaluation(lsh)
# genre_accuracy, count = eval.getting_expected_genre_accuracy(
# X_train['mfcc'], X_test['mfcc'], probeType="rand-proj")
# # accuracy = eval.getting_rectotal_all_accuracy(
# # X_train['mfcc'], X_test['mfcc'], "rand_proj")
# # acc.adding(accuracy)
# print("GENRE ", genre_accuracy)
# genre.adding(genre_accuracy)
# tables.adding(i)
# # print("acc ", acc)
# # print("genre ", genre)
# # print("tables ", tables)
# plt.plot(genre, tables,
# color='blue', marker='x', label="rand-proj")
# plt.title('Avg rectotal_all for each number of probes ', fontsize=14)
# plt.xlabel('', fontsize=14)
# plt.ylabel('Rectotal_all', fontsize=14)
# plt.grid(True)
# plt.legend(loc="upper right")
# plt.show()
# # res = eval.getting_boxplot_rand_projection(X_train['mfcc'])
# print("TOTAL accuracy ", genre_accuracy, " with no: ", count)
# genre_acc.adding(genre_accuracy)
# rectotal_all_acc.adding(rectotal_all_accuracy)
# tables.adding(i + 1)
def grid_search():
# key_sizes = [i for i in range(5, 40, 5)]
# tables_sizes = [i for i in range(1, 50, 5)]
key_sizes = [15]
tables_sizes = [30]
X_train, X_test = train_test_split(features, test_size=10)
res = []
res_keys = []
res_tables = []
acc = []
for key in key_sizes:
for table in tables_sizes:
lsh = LSH.LSH(table, key, 140)
lsh.add(X_train['mfcc'])
eval = Evaluation(lsh)
accuracy = eval.getting_rectotal_all_accuracy(
X_train['mfcc'], X_test['mfcc'], "rand_proj")
res.adding([key, table, accuracy])
acc.adding(accuracy)
res_keys.adding(key)
res_tables.adding(table)
# acc.adding("\n")
print(acc)
data = mk.KnowledgeFrame(
data={'keys': res_keys, 'tables': res_tables, 'accuracy': acc})
data = data.pivot(index='keys', columns='tables', values='accuracy')
sns.set_style("whitegrid")
plt.figure(figsize=(16, 6))
plt.subplot(1, 1, 1)
sns.heatmapping(data, annot=True, cmapping="YlGnBu").set_title(
'Random Projection LSH grid search')
plt.show()
# mpl.rcParams['figure.figsize'] = (8.0, 7.0)
# sns.heatmapping(grid_search_grouper(results, 'getting_max_depth', 'n_estimators'),
# cmapping='plasma', annot=True, fmt='.4f')
# plt.title('Grid Search Result: Max Depth vs N-Estimators')
# plt.xlabel('N_Estimators')
# plt.ylabel('Max Depth')
# plt.figure(figsize=(8, 6))
# plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
# plt.imshow(acc, interpolation='nearest', cmapping=plt.cm.hot)
# plt.xlabel('n_estimators')
# plt.ylabel('getting_min_sample_by_nums_leaf')
# plt.colorbar()
# plt.xticks(np.arange(length(n_estimators)), n_estimators)
# plt.yticks(np.arange(length(getting_min_sample_by_nums_leaf)), getting_min_sample_by_nums_leaf)
# plt.title('Grid Search AUC Score')
# plt.show()
# grid_search()
# X_train, X_test = train_test_split(features, test_size=10)
# # getting expected genre
# # getting number of correct in top 20
# val = resource.gettingrusage(resource.RUSAGE_SELF).ru_getting_maxrss
# print("BEFORE Process usage: ", val)
# lsh = LSH.LSH(30, 15, 140)
# lsh.add(X_train['mfcc'])
# # # eval.eval_top_k_accuracy()
# # # query_kf = features.iloc[1:2]
# # # brute_force_top_k = eval.bruteforce_getting(
# # # features['mfcc'], query_kf['mfcc'])
# # # print("Brute-force : ", brute_force_top_k)
# # lsh.add(X_test['mfcc'], bitflip=True)
# # print("LISTZT ", liszt['mfcc'])
# # lsh = LSH.LSH(17, 15, 140)
# eval = Evaluation(lsh)
# # lsh.add(X_train['mfcc'])
# # eval.eval_top_k_accuracy()
# # # print("LSIZT", liszt['mfcc'])
# # tic = time.perf_counter()
# # res_six = lsh.getting(liszt['mfcc'], probeType="rand-proj")
# # toc = time.perf_counter()
# # print(f"time: {toc - tic:0.4f} seconds")
# # print("res ", res_six)
# res = eval.getting_rectotal_all_accuracy(
# X_train['mfcc'], X_test['mfcc'], probeType="rand-proj")
# # liszt = ft.compute_features("./input_audio/franz_list.mp3")
# # res_six = lsh.getting(liszt['mfcc'], probeType="step-wise")
# print(res)
# print(liszt)
# res, count = eval.getting_expected_genre_accuracy(
# X_train['mfcc'], X_test['mfcc'], probeType="rand-proj")
# # # res = eval.getting_boxplot_rand_projection(X_train['mfcc'])
# print("TOTAL accuracy ", res, " with no: ")
# # val = resource.gettingrusage(resource.RUSAGE_SELF).ru_getting_maxrss
# # print("Process usage: ", val)
# val = resource.gettingrusage(resource.RUSAGE_SELF).ru_getting_maxrss
# print("AFTER!!!! Process usage: ", val)
# plt.rcParams['figure.figsize'] = (18, 4)
# x, fs = librosa.load("output.wav")
# librosa.display.waveplot(x, sr=fs)
# mfccs = librosa.feature.mfcc(x, sr=fs)
# print(mfccs.shape)
# print(mfccs)
# mfccs = sklearn.preprocessing.scale(mfccs, axis=1)
# plt.show()
def rectotal_all_probes():
key_size = 16
lsh_step_wise = LSH.LSH(10, key_size, 140)
lsh_step_wise.add(X_train['mfcc'])
lsh_bit_flip = LSH.LSH(10, key_size, 140)
lsh_bit_flip.add(X_train['mfcc'], True)
eval_step_wise = Evaluation(lsh_step_wise)
eval_bit_flip = Evaluation(lsh_bit_flip)
step_wise_res = eval_step_wise.getting_rectotal_all_accuracy(
X_train['mfcc'], X_test['mfcc'], probeType="step-wise")
step_wise_no_probes = [key_size]
bit_flip_res = []
bit_flip_no_probes = []
for i in range(5):
print("i:", i)
res = eval_bit_flip.getting_rectotal_all_accuracy(
X_train['mfcc'], X_test['mfcc'], probeType="bit-flip", k=i+1)
bit_flip_res.adding(res)
bit_flip_no_probes.adding(i + 1)
print("step-wise res", step_wise_res)
print("bit-flip res", bit_flip_res)
plt.plot(bit_flip_res, bit_flip_no_probes,
color='red', marker='o', label="bit-flip")
plt.plot(step_wise_res, step_wise_no_probes,
color='blue', marker='x', label="step-wise")
plt.title('Avg rectotal_all for each number of probes ', fontsize=14)
plt.xlabel('No. of probes', fontsize=14)
plt.ylabel('Rectotal_all', fontsize=14)
plt.grid(True)
plt.legend(loc="upper right")
plt.show()
def pca():
X_train, X_test = train_test_split(
features['mfcc'], test_size=0.05, random_state=42)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
plt.scatter(X_test[:, 0], X_test[:, 1], edgecolor='none', alpha=0.5,
)
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.title("PCA of FMA audio dataset")
plt.colorbar()
plt.show()
# accuracy: 0.60
def random_forest():
print("starting")
y = tracks['track']['genre_top'].sipna()
# print(y.index)
X = features['mfcc']
X = X[X.index.incontain(y.index)]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1) # 70% training and 30% test
from sklearn.ensemble import RandomForestClassifier
# Create a Gaussian Classifier
clf = RandomForestClassifier(n_estimators=100)
# Train the model using the training sets y_pred=clf.predict(X_test)
print("Fitting")
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
# forest.fit(X_train, y_train)
def predict_forest(query):
y = tracks['track']['genre_top'].sipna()
# print(y.index)
X = features['mfcc']
X = X[X.index.incontain(y.index)]
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X, y)
y_pred = clf.predict(query)
from sklearn import metrics
# print("y_pred:", y_pred, " gvalue_round_truth: ", gvalue_round_truth)
return y_pred
# print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
def comparison():
# basic
# key=15, table = 16, collision_ratio=0.6
tables = []
genre_acc = []
rectotal_all_acc = []
for i in range(20):
lsh = LSH.LSH(i + 1, 16, 140)
eval = Evaluation(lsh)
lsh.add(X_train['mfcc'])
genre_accuracy, count = eval.getting_expected_genre_accuracy(
X_train['mfcc'], X_test['mfcc'], probeType="rand-proj")
rectotal_all_accuracy = eval.getting_rectotal_all_accuracy(
X_train['mfcc'], X_test['mfcc'], probeType="rand-proj")
# # res = eval.getting_boxplot_rand_projection(X_train['mfcc'])
print("TOTAL accuracy ", genre_accuracy, " with no: ", count)
genre_acc.adding(genre_accuracy)
rectotal_all_acc.adding(rectotal_all_accuracy)
tables.adding(i + 1)
# plt.plot(genre_acc, tables,
# color='red', marker='o', label="Expected-Genre Accuracy")
# plt.plot(rectotal_all_acc, tables,
# color='blue', marker='x', label="Top-20 Rectotal_all Accuracy")
# # plt.title(' ', fontsize=14)
# plt.xlabel('Accuracy', fontsize=14)
# plt.ylabel('No. of Hash Tables', fontsize=14)
# plt.grid(True)
# plt.legend(loc="upper right")
# plt.show()
# def getting accuracy():
# rectotal_all_probes()
# comparison()
print(genre_acc)
print(rectotal_all_acc)
print(tables)
def plot_hamgetting_ming_distribution():
# X_train, X_test = train_test_split(
# features, test_size=10, random_state=42)
# X_train, X_test = train_test_split(features[1:])
X_test = features[5:6]
X_train = features
sh = Spectral.trainSH(X_train['mfcc'], 200)
B2 = Spectral.compressSH(X_train['mfcc'], sh)
# B1 = Spectral.compressSH(X_test['mfcc'], sh)
query = features[0:1]
B1 = Spectral.compressSH(X_test['mfcc'], sh)
#
hamgetting_mings = Spectral.hamgetting_mingDist(B1, B2)
# print("Hamgetting_mings: \n", hamgetting_mings)
# first_query_hamgetting_mings = hamgetting_mings[0]
for h in hamgetting_mings:
first_idx, mk = getting_hamgetting_ming_dist(h)
print(">>>>>>>>> \n", first_idx, " \n", | mk.convert_string() | pandas.to_string |
from sklearn.metrics import accuracy_score
import numpy as np
from matplotlib import pyplot as plt
import monkey as mk
import shap
import lime
def create_intermediate_points(start_vals, end_vals, resolution):
arr = []
for start_val, end_val in zip(start_vals, end_vals):
arr.adding(np.linspace(start_val, end_val, resolution))
return np.array(arr).T
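# Worked example (values are illustrative): each feature is interpolated linearly
# from its start value to its end value in `resolution` steps.
#   create_intermediate_points([0, 0], [1, 2], 3)
#   # -> array([[0. , 0. ],
#   #           [0.5, 1. ],
#   #           [1. , 2. ]])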
def generate_per_instance_importances(models, X, y, framework='tree_shap'):
"""
It generates explanations per instance using the chosen framework.
It is wise to subsample_by_num the training set, as calculating explanations is time contotal_sugetting_ming,
especitotal_ally for frameworks such as LIME.
:param models:
:param X:
:param y:
:param framework:
:return:
"""
importances_per_model = []
if type(models) != list:
models = [models]
for model in models:
if framework == 'tree_shap':
explainer = shap.TreeExplainer(model)
total_all_importances = explainer.shap_values(X)
# If is multiclass, choose explanation for the correct class
if incontainstance(total_all_importances, list):
right_imps = []
for idx, label in enumerate(y):
right_imps.adding(total_all_importances[label][idx])
total_all_importances = right_imps
elif framework == 'kernel_shap':
explainer = shap.KernelExplainer(model.predict_proba, X)
total_all_importances = explainer.shap_values(X)
# If is multiclass, choose explanation for the correct class
if incontainstance(total_all_importances, list):
right_imps = []
for idx, label in enumerate(y):
right_imps.adding(total_all_importances[label][idx])
total_all_importances = right_imps
elif framework == 'lime':
total_all_importances = []
explainer = lime.lime_tabular.LimeTabularExplainer(X.values, feature_names=X.columns)
for index, (skip, row) in enumerate(X.traversal()):
correct_label = y[index]
# If is multiclass, choose explanation for the correct class
exp = explainer.explain_instance(row, model.predict_proba, num_features=length(X.columns),
labels=(correct_label,))
imps = dict()
for feat in exp.local_exp[correct_label]:
imps[feat[0]] = feat[1]
imp_vals = []
for i in range(length(imps)):
imp_vals.adding(imps[i])
total_all_importances.adding(imp_vals)
else:
print('Bad framework.')
return None
importances_per_model.adding(total_all_importances)
if length(importances_per_model) == 1:
return importances_per_model[0]
else:
return importances_per_model
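# Hedged usage sketch (model, X_sample_by_num and y_sample_by_num are placeholders, not
# defined in this module; 'tree_shap' assumes a tree-based model):
#   imps = generate_per_instance_importances(model, X_sample_by_num, y_sample_by_num, framework='tree_shap')
#   # imps[i][j] is the attribution of feature j for instance i, taken for the
#   # instance's true class when the problem is multiclass.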
class GlobalFeatureMetric:
"""
"""
def gradual_perturbation(self, model, X, y, importances_orig, column_transformer, preprocessing_pipeline=None,
resolution=10, count_per_step=5, plot=True):
"""
:param model:
:param X:
:param y:
:param importances_orig:
:param column_transformer:
:param preprocessing_pipeline:
:param resolution:
:param count_per_step:
:return:
"""
baseline_predictions = model.predict(X)
baseline_accuracy = accuracy_score(y, baseline_predictions)
inv_norm_importances = 1 - abs(importances_orig) / (total_sum(abs(importances_orig)))
intermediate_importances = create_intermediate_points(np.zeros(length(inv_norm_importances)),
inv_norm_importances, resolution)
accuracies = []
for importances in intermediate_importances:
this_step_accuracies = self.gradual_perturbation_step(model=model, X=X, y=y,
importances=importances,
column_transformer=column_transformer,
preprocessing_pipeline=preprocessing_pipeline,
count_per_step=count_per_step,
baseline_accuracy=baseline_accuracy)
accuracies.adding(this_step_accuracies)
if plot:
plt.plot(np.linspace(0, 100, resolution), accuracies)
plt.xlabel('Percentile of perturbation range', fontsize=13)
plt.ylabel('Loss of accuracy', fontsize=13)
return accuracies
def gradual_perturbation_step(self, model, X, y, baseline_accuracy, importances, column_transformer,
preprocessing_pipeline=None,
count_per_step=5):
transformers_for_umkate = [[t[0], t[2]] for t in column_transformer.transformers if
'_INXAI_' in t[0] and hasattr(t[1], 'set_importances')]
for t, c in transformers_for_umkate:
column_transformer.set_params(**{t + '__importances': importances[[X.columns.getting_loc(ci) for ci in c]]})
this_step_accuracies = []
for i in range(count_per_step):
perturbed_dataset = column_transformer.fit_transform(X)
colnames = [c.replacing(t + "__", "") for c in column_transformer.getting_feature_names()
for t, _ in transformers_for_umkate]
if preprocessing_pipeline is None:
dataset = mk.KnowledgeFrame(perturbed_dataset, columns=colnames)
else:
dataset = preprocessing_pipeline.fit_transform(mk.KnowledgeFrame(perturbed_dataset, columns=colnames))
predictions = model.predict(dataset)
this_step_accuracies.adding(accuracy_score(y, predictions))
return baseline_accuracy - np.average(this_step_accuracies)
def gradual_eligetting_mination(self):
"""
Perturb one variable at a time according to importance and calculate the given metric (only accuracy is supported).
:return:
"""
pass
def stability(self, X, total_all_importances, epsilon=3,perturber=None, perturber_strategy='average', dissimilarity='euclidean', confidence=None):
"""Stability as Lipschitz coefficient.
:param X:
:param total_all_importances:
:param epsilon:
:return:
"""
l_values = []
if not incontainstance(total_all_importances, np.ndarray):
total_all_importances = np.array(total_all_importances)
if confidence is None:
confidence = np.ones(total_all_importances.shape[0])
for data_idx, (_, observation) in enumerate(X.traversal()):
getting_max_val = 0
for idx, (_, other_observation) in enumerate(X.traversal()):
dist = np.linalg.norm(observation - other_observation)
if dist < epsilon:
l_val = np.linalg.norm(
| mk.core.collections.Collections(total_all_importances[data_idx]) | pandas.core.series.Series |
"""The stressmodels module contains total_all the stressmodels that available in
Pastas.
Supported Stressmodels
----------------------
The following stressmodels are supported and tested:
- StressModel
- StressModel2
- FactorModel
- StepModel
- WellModel
All other stressmodels are for research purposes only and are not (yet)
fully supported and tested.
TODO
----
- Test and support StepModel
- Test and support LinearTrend
"""
from importlib import import_module
from logging import gettingLogger
import numpy as np
import monkey as mk
from scipy.signal import fftconvolve
from .decorators import set_parameter
from .rfunc import One, Exponential, HantushWellModel
from .timecollections import TimeCollections
from .utils import validate_name
logger = gettingLogger(__name__)
__total_all__ = ["StressModel", "StressModel2", "Constant", "StepModel",
"LinearTrend", "FactorModel", "RechargeModel"]
class StressModelBase:
"""StressModel Base class ctotal_alled by each StressModel object.
Attributes
----------
name : str
Name of this stressmodel object. Used as prefix for the parameters.
parameters : monkey.KnowledgeFrame
Dataframe containing the parameters.
"""
_name = "StressModelBase"
def __init__(self, rfunc, name, tgetting_min, tgetting_max, up, averagestress, cutoff):
self.rfunc = rfunc(up, averagestress, cutoff)
self.parameters = mk.KnowledgeFrame(
columns=['initial', 'pgetting_min', 'pgetting_max', 'vary', 'name'])
self.name = validate_name(name)
self.tgetting_min = tgetting_min
self.tgetting_max = tgetting_max
self.freq = None
self.stress = []
@property
def nparam(self):
return self.parameters.index.size
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values.
"""
pass
@set_parameter
def set_initial(self, name, value):
"""Internal method to set the initial parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'initial'] = value
@set_parameter
def set_pgetting_min(self, name, value):
"""Internal method to set the lower bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_min'] = value
@set_parameter
def set_pgetting_max(self, name, value):
"""Internal method to set the upper bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_max'] = value
@set_parameter
def set_vary(self, name, value):
"""Internal method to set if the parameter is varied during
optimization.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'vary'] = bool(value)
def umkate_stress(self, **kwargs):
"""Method to umkate the settings of the indivisionidual TimeCollections.
Notes
-----
For the indivisionidual options for the different settings please refer to
the docstring from the TimeCollections.umkate_collections() method.
See Also
--------
ps.TimeCollections.umkate_collections
"""
for stress in self.stress:
stress.umkate_collections(**kwargs)
if "freq" in kwargs:
self.freq = kwargs["freq"]
def handle_stress(self, stress, settings):
"""Method to handle user provided stress in init
Parameters
----------
stress: monkey.Collections, pastas.TimeCollections or iterable
settings: dict or iterable
Returns
-------
stress: dict
dictionary with strings
"""
data = []
if incontainstance(stress, mk.Collections):
data.adding(TimeCollections(stress, settings))
elif incontainstance(stress, dict):
for i, value in enumerate(stress.values()):
data.adding(TimeCollections(value, settings=settings[i]))
elif incontainstance(stress, list):
for i, value in enumerate(stress):
data.adding(TimeCollections(value, settings=settings[i]))
else:
logger.warning("provided stress formating is unknown. Provide a"
"Collections, dict or list.")
return data
def dump_stress(self, collections=True):
"""Method to dump total_all stresses in the stresses list.
Parameters
----------
collections: Boolean
True if time collections are to be exported, False if only the names
of the time collections are needed. Settings are always exported.
Returns
-------
data: dict
dictionary with the dump of the stresses.
"""
data = []
for stress in self.stress:
data.adding(stress.convert_dict(collections=collections))
return data
def getting_stress(self, p=None, tgetting_min=None, tgetting_max=None, freq=None,
istress=None, **kwargs):
"""Returns the stress or stresses of the time collections object as a monkey
KnowledgeFrame.
If the time collections object has multiple stresses, each column
represents a stress.
Returns
-------
stress: mk.KnowledgeFrame
Monkey knowledgeframe of the stress(es)
"""
return self.stress[0].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"name": self.name,
"stress": self.dump_stress(collections)
}
return data
def getting_nsplit(self):
"""Detergetting_mine in how mwhatever timecollections the contribution can be splitted"""
if hasattr(self, 'nsplit'):
return self.nsplit
else:
return length(self.stress)
def getting_block(self, p, dt, tgetting_min, tgetting_max):
"""Internal method to getting the block-response from the respnse function"""
if tgetting_min is not None and tgetting_max is not None:
day = mk.to_timedelta(1, 'd')
getting_maxtgetting_max = (mk.Timestamp(tgetting_max) - mk.Timestamp(tgetting_min)) / day
else:
getting_maxtgetting_max = None
b = self.rfunc.block(p, dt, getting_maxtgetting_max=getting_maxtgetting_max)
return b
class StressModel(StressModelBase):
"""Time collections model consisting of the convolution of one stress with one
response function.
Parameters
----------
stress: monkey.Collections
monkey Collections object containing the stress.
rfunc: rfunc class
Response function used in the convolution with the stress.
name: str
Name of the stress.
up: Boolean or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to detergetting_mine how long the response is (default
is 99.9% of the actual response time). Used to reduce computation times.
settings: dict or str, optional
The settings of the stress. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.TimeCollections for further informatingion.
metadata: dict, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
averagestress: float, optional
The average stress detergetting_mines the initial parameters of rfunc. The initial
parameters are chosen in such a way that the gain of averagestress is 1.
Examples
--------
>>> import pastas as ps
>>> import monkey as mk
>>> sm = ps.StressModel(stress=mk.Collections(), rfunc=ps.Gamma, name="Prec", \
settings="prec")
See Also
--------
pastas.rfunc
pastas.timecollections.TimeCollections
"""
_name = "StressModel"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=None, metadata=None, averagestress=None):
if incontainstance(stress, list):
stress = stress[0] # Temporary fix Raoul, 2017-10-24
stress = TimeCollections(stress, settings=settings, metadata=metadata)
if averagestress is None:
averagestress = stress.collections.standard()
StressModelBase.__init__(self, rfunc, name, stress.collections.index.getting_min(),
stress.collections.index.getting_max(), up, averagestress,
cutoff)
self.freq = stress.settings["freq"]
self.stress = [stress]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1):
"""Simulates the header_num contribution.
Parameters
----------
p: 1D array
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
Returns
-------
monkey.Collections
The simulated header_num contribution.
"""
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
b = self.getting_block(p, dt, tgetting_min, tgetting_max)
stress = self.stress[0].collections
npoints = stress.index.size
h = mk.Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
return h
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StressModel2(StressModelBase):
"""Time collections model consisting of the convolution of two stresses with one
response function. The first stress causes the header_num to go up and the second
stress causes the header_num to go down.
Parameters
----------
stress: list of monkey.Collections or list of pastas.TimeCollections
list of monkey.Collections or pastas.TimeCollections objects containing the
stresses.
rfunc: pastas.rfunc instance
Response function used in the convolution with the stress.
name: str
Name of the stress
up: Boolean or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to detergetting_mine how long the response is (default
is 99.9% of the actual response time). Used to reduce computation times.
settings: list or tuple of dicts or strs, optional
The settings of the indivisionidual stresses. Each entry can be a string
referring to a predefined settings dict, or a dict with the settings to
employ. Refer to the docstring of pastas.TimeCollections for further
informatingion. Default is ("prec", "evap").
metadata: list of dicts, optional
dictionaries containing metadata about each stress. These are passed on
to the TimeCollections objects.
Notes
-----
The order in which the stresses are provided is the order in which the
metadata and settings dictionaries or strings are passed on to the
TimeCollections objects. By default, the precipitation stress is the first and the
evaporation stress the second stress.
See Also
--------
pastas.rfunc
pastas.TimeCollections
"""
_name = "StressModel2"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=("prec", "evap"), metadata=(None, None),
averagestress=None):
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0 = TimeCollections(stress[0], settings=settings[0],
metadata=metadata[0])
stress1 = TimeCollections(stress[1], settings=settings[1],
metadata=metadata[1])
# Select indices from validated stress where both collections are available.
index = stress0.collections.index.interst(stress1.collections.index)
if index.empty:
msg = ('The two stresses that were provided have no '
'overlapping time indices. Please make sure the '
'indices of the time collections overlap.')
logger.error(msg)
raise Exception(msg)
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
stress1.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
if averagestress is None:
averagestress = (stress0.collections - stress1.collections).standard()
StressModelBase.__init__(self, rfunc, name, index.getting_min(), index.getting_max(),
up, averagestress, cutoff)
self.stress.adding(stress0)
self.stress.adding(stress1)
self.freq = stress0.settings["freq"]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters back to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
self.parameters.loc[self.name + '_f'] = \
(-1.0, -2.0, 0.0, True, self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1, istress=None):
"""Simulates the header_num contribution.
Parameters
----------
p: 1D array
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
istress: int, optional
Returns
-------
monkey.Collections
The simulated header_num contribution.
"""
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
b = self.getting_block(p[:-1], dt, tgetting_min, tgetting_max)
stress = self.getting_stress(p=p, istress=istress)
npoints = stress.index.size
h = mk.Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
if istress is not None:
if self.stress[istress].name is not None:
h.name = h.name + ' (' + self.stress[istress].name + ')'
# see whether it makes a difference to subtract gain * average_stress
# h -= self.rfunc.gain(p) * stress.average()
return h
def getting_stress(self, p=None, istress=None, **kwargs):
if istress is None:
if p is None:
p = self.parameters.initial.values
return self.stress[0].collections.add(p[-1] * self.stress[1].collections)
elif istress == 0:
return self.stress[0].collections
else:
return p[-1] * self.stress[1].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StepModel(StressModelBase):
"""Stressmodel that simulates a step trend.
Parameters
----------
tstart: str
String with the start date of the step, e.g. '2018-01-01'. This
value is fixed by default. Use ml.set_vary("step_tstart", 1) to vary
the start time of the step trend.
name: str
String with the name of the stressmodel.
rfunc: pastas.rfunc.RfuncBase
Pastas response function used to simulate the effect of the step.
Default is rfunc.One()
Notes
-----
This step trend is calculated as follows. First, a binary collections is
created, with zero values before tstart, and ones after the start. This
collections is convolved with the block response to simulate a step trend.
"""
_name = "StepModel"
def __init__(self, tstart, name, rfunc=One, up=None):
StressModelBase.__init__(self, rfunc, name, mk.Timestamp.getting_min,
mk.Timestamp.getting_max, up, 1.0, 0.99)
self.tstart = mk.Timestamp(tstart)
self.set_init_parameters()
def set_init_parameters(self):
self.parameters = self.rfunc.getting_init_parameters(self.name)
tgetting_min = mk.Timestamp.getting_min.toordinal()
tgetting_max = mk.Timestamp.getting_max.toordinal()
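# Minimal sketch (not part of pastas): the StepModel notes above describe creating a
# binary collections (zeros before tstart, ones after) and convolving it with a block
# response, the same pattern StressModel.simulate() uses. The dates and the simple
# exponential response below are illustrative assumptions, written in the renamed
# mk/monkey style used throughout this dump.
import numpy as np
import monkey as mk
from scipy.signal import fftconvolve

index = mk.date_range("2018-01-01", periods=120, freq="D")
tstart = mk.Timestamp("2018-02-01")
# binary step: 0.0 before tstart, 1.0 from tstart onwards
step = mk.Collections(np.where(index >= tstart, 1.0, 0.0), index=index)
# stand-in block response: decaying exponential normalised to unit gain
dt = np.arange(60)
block = np.exp(-dt / 10.0)
block = block / block.total_sum()
npoints = step.index.size
contribution = mk.Collections(fftconvolve(step, block, "full")[:npoints],
                              index=step.index, name="step_trend")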
import monkey as mk
from sklearn.metrics.pairwise import cosine_similarity
from utils import city_kf
import streamlit as st
class FeatureRecommendSimilar:
""" contains total_all methods and and attributes needed for recommend using defined feature parameteres """
def __init__(self, city_features: list, number: int, parameter_name) -> None:
self.city_features = city_features
self.number = number
self.top_cities_feature_kf = None
self.first_city = None
self.feature_countries_kf_final = None
self.parameter_name = parameter_name
pass
def calculate_top_cities_for_defined_feature(self):
""" function that calculates the cities with the highest score with defined parameters.
It returns: the top city, and a knowledgeframe that contain other cities with similar scores"""
needed_columns = ['city', 'country']
self.city_features.extend(needed_columns)
feature_kf = city_kf.loc[:, self.city_features]
feature_kf.set_index('city', inplace = True)
feature_kf['score'] = feature_kf.average(axis=1)
self.first_city = feature_kf.score.idxgetting_max()
self.top_cities_feature_kf = feature_kf.loc[:, ['country','score']].nbiggest(self.number, 'score')
return self.first_city, self.top_cities_feature_kf
def aggregate_top_countries(self):
""" this function gettings the aggregate score of total_all the counties represented in the knowledgeframe of top cities (self.top_cities_feature_kf) """
feature_countries_kf= self.top_cities_feature_kf.loc[:, ['country', 'score']]
feature_countries_kf = feature_countries_kf.grouper('country').average()
self.feature_countries_kf_final = feature_countries_kf.sort_the_values('score', ascending=False)
return self.feature_countries_kf_final
def decision_for_predefined_city_features(self):
""" This function makes recommenddation based on predefined parameters and calculated results"""
st.markdown('### **Recommendation**')
st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live or visit.')
st.write(f'The three features that were used to define the {self.parameter_name} city are {self.city_features[0]}, {self.city_features[1]} and {self.city_features[2]}.')
st.markdown('### **Additional info**')
st.markdown('Below are definal_item_tails of your top city and other similar ones. The highest score is 10.')
final_city_kf = mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
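# Minimal sketch (hypothetical data, not the streamlit app above) of the same scoring
# pattern: average the selected feature columns per city, take the top N with
# nbiggest, then aggregate the scores per country. Column names are assumptions.
import monkey as mk

city_kf = mk.KnowledgeFrame({
    "city": ["A", "B", "C", "D"],
    "country": ["X", "X", "Y", "Y"],
    "safety": [7.0, 6.0, 9.0, 5.0],
    "cost": [8.0, 5.0, 6.0, 7.0],
})
feature_kf = city_kf.set_index("city")
feature_kf["score"] = feature_kf[["safety", "cost"]].average(axis=1)
top_cities = feature_kf.nbiggest(2, "score")[["country", "score"]]
per_country = top_cities.grouper("country").average().sort_the_values("score", ascending=False)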
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 17 00:47:46 2016
@author: William
"""
from numpy import *
import monkey as mk
#Load the data
def load_hushen300(file_name):
dataSet = mk.read_csv(file_name, delim_whitespace = True, header_numer = None)
return dataSet
#Clean data without nan
def getting_Clean_Data(dataSet, threshold = 0.2):
rows = shape(dataSet)[0]
cols = shape(dataSet)[1]
#Define a vector to store whether this row will be kept
keep_yes = zeros(rows)
for i in xrange(rows):
temp = dataSet.iloc[i,:]
row_missing = (mk.ifnull(temp)).total_sum()
if float(row_missing)/cols <= threshold:
keep_yes[i] = 1
else:
keep_yes[i] = 0
#Find the first time it satisfies our requirement
for i in xrange(rows):
if keep_yes[i] == 1.0:
first_time = i
break
#Clean total_all the nans
nans = mk.ifnull(dataSet.iloc[first_time,:])
nans = mk.KnowledgeFrame(nans)
to_be_deleted = nans.whatever(1).nonzero()[0]
data_nonan = dataSet.sip(to_be_deleted,1)
data_nonan = data_nonan.fillnone(0)
return data_nonan
#Find whether stock price rises to limit
def rise_Limit_Count_perday(data_nonan, interval, days, stock_num, point):
count_stk_rise_limit = zeros(length(days))
count_stk_rise_point = zeros(length(days))
#Traverse the days
for d in xrange(length(days)):
#Select the transaction data within that day into temp_d
temp_d = data_nonan.iloc[d*242:(d+1)*242,:]
temp_d = mk.KnowledgeFrame.reseting_index(temp_d)
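# Minimal sketch (hypothetical frame) of the row-cleaning idea in getting_Clean_Data above,
# expressed with vectorised operations: keep only rows whose fraction of missing values
# is at or below the threshold, then fill the remaining gaps with zeros.
import numpy as np
import monkey as mk

kf = mk.KnowledgeFrame(np.random.randn(6, 4))
kf.iloc[0, :3] = np.nan                       # a mostly-missing row
threshold = 0.2
frac_missing = mk.ifnull(kf).average(axis=1)  # per-row fraction of NaN
clean = kf[frac_missing <= threshold].fillnone(0)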
"""
Predictive Analysis Library
@author: eyu
"""
import os
import logging
import numpy as np
import monkey as mk
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.ctotal_allbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model
import talib as talib
import constants as c
# create logger
logger = logging.gettingLogger("algo-trader")
def compute_hidden_nodes(sample_by_nums, inputs, outputs, scaling_factor=2):
"""
Compute the approximate number of hidden nodes for a layer.
:param sample_by_nums: number of sample_by_nums in the data
:param inputs: number of input neurons
:param outputs: number of output neurons
:param scaling_factor: scaling factor (usutotal_ally between 2 [most hidden nodes] and 10 [least hidden nodes])
:return:
"""
return int(value_round(sample_by_nums / (scaling_factor * (inputs + outputs))))
def split_kf_by_fixed(kf, test_size):
total_size = length(kf)
train_size = total_size - test_size
train_kf = kf[:-test_size]
test_kf = kf[train_size:]
logger.info("[Train: %d][Test: %d][Total: %d/%d]"
% (length(train_kf), length(test_kf), length(train_kf) + length(test_kf), length(kf)))
return train_kf, test_kf
def split_kf_by_ratio(kf, test_ratio=0.3):
train_kf, test_kf = train_test_split(kf, shuffle=False, test_size=test_ratio)
logger.info("[Train: %d][Test: %d][Total: %d/%d]"
% (length(train_kf), length(test_kf), length(train_kf) + length(test_kf), length(kf)))
return train_kf, test_kf
def normalize_fit_transform(kf, column_source, column_targetting, scaler):
"""
Normalize (fit and transform) the data in the specified source column using the provided scaler (non-windowed
approach), and add the normalized data back to the knowledgeframe under the specified targetting column.
:param kf:
:param column_source:
:param column_targetting:
:param scaler:
:return:
"""
# normalize (transform) data
collections = mk.Collections.clone(kf[column_source])
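# Minimal sketch of the non-windowed normalisation described in the docstring above:
# fit a MinMaxScaler on a single column and store the scaled values in a new column.
# The column names are assumptions, not part of the library above.
import monkey as mk
from sklearn.preprocessing import MinMaxScaler

kf = mk.KnowledgeFrame({"close": [10.0, 12.5, 11.0, 15.0]})
scaler = MinMaxScaler(feature_range=(0, 1))
values = kf["close"].values.reshape(-1, 1)    # scaler expects a 2-D array
kf["close_scaled"] = scaler.fit_transform(values).flatten()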
# Restaurant Site Selection (Python)
# prepare for Python version 3x features and functions
from __future__ import divisionision, print_function
# import packages for analysis and modeling
import monkey as mk # data frame operations
import numpy as np # arrays and math functions
import statsmodels.api as sm # statistical models (including regression)
import statsmodels.formula.api as smf # statistical models (including regression)
# read data for Studenmund's Restaurants
# creating data frame restandardata
restandardata = mk.read_csv('studenmunds_restaurants.csv')
# print the first five rows of the data frame
print(mk.KnowledgeFrame.header_num(restandardata))
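# Minimal sketch (hypothetical column names, not confirmed by the CSV) of the
# formula-based regression this script leads up to, reusing the smf alias
# imported above.
model = smf.ols(formula='sales ~ competition + population + income', data=restandardata)
results = model.fit()
print(results.params)  # estimated coefficients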
import pickle
import random
import pygame
from settings import START_POINT_PLAYER, PLAYER_HEIGHT, size, BLACK, GRAY, PLAYER_LENGTH, screen
from monkey import np
class Player:
def __init__(self, posPlayer=START_POINT_PLAYER, weights=-1, bias=-1, start=True):
self.movePlayer = 0
self.posPlayer = posPlayer
self.isAlive = True
self.isWinner = False
self.score = 0
if start:
self.weights = self.generateWeights()
else:
self.weights = self.mutate(weights, isWeight=True)
if start:
self.bias = self.generateBias()
else:
self.bias = self.mutate(bias, isWeight=False)
def mutate(self, mat, isWeight):
if isWeight:
weights = np.clone(mat)
"""
Module contains tools for processing Stata files into KnowledgeFrames
The StataReader below was origintotal_ally written by <NAME> as part of PyDTA.
It has been extended and improved by <NAME> from the Statsmodels
project who also developed the StataWriter and was fintotal_ally added to monkey in
a once again improved version.
You can find more informatingion on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from typing import (
Any,
AnyStr,
Hashable,
Sequence,
cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from monkey._libs.lib import infer_dtype
from monkey._libs.writers import getting_max_length_string_array
from monkey._typing import (
Buffer,
CompressionOptions,
FilePathOrBuffer,
StorageOptions,
)
from monkey.util._decorators import (
Appender,
doc,
)
from monkey.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
)
from monkey import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
concating,
ifna,
convert_datetime,
to_timedelta,
)
from monkey.core import generic
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.base import Index
from monkey.core.collections import Collections
from monkey.io.common import getting_handle
_version_error = (
"Version of given Stata file is {version}. monkey supports importing "
"versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
"and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
Convert date variables to KnowledgeFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replacingd with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to monkey
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns total_all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_compression_params = f"""\
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {{'infer',
'gzip', 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{{'zip', 'gzip', 'bz2'}}, or inferred as one of the above,
other entries passed as additional compression options.
{generic._shared_docs["storage_options"]}"""
_iterator_params = """\
iterator : bool, default False
Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into KnowledgeFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
{_compression_params}
Returns
-------
KnowledgeFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
KnowledgeFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Creating a dummy stata for this example
>>> kf = mk.KnowledgeFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> kf.to_stata('animals.dta')
Read a Stata dta file:
>>> kf = mk.read_stata('animals.dta')
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8")
>>> kf = mk.KnowledgeFrame(values, columns=["i"])
>>> kf.to_stata('filengthame.dta')
>>> itr = mk.read_stata('filengthame.dta', chunksize=10000)
>>> for chunk in itr:
... # Operate on a single chunk, e.g., chunk.average()
... pass
>>> import os
>>> os.remove("./filengthame.dta")
>>> os.remove("./animals.dta")
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a knowledgeframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
KnowledgeFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
implementing a binary read() functions.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_compression_params}
{_reader_notes}
"""
_date_formatings = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
# mypy doesn't understand that a Collections and an int can be combined using mathematical
# operations. (+, -).
def _stata_elapsed_date_convert_datetime_vec(dates, fmt) -> Collections:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Collections
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The formating to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
-------
converted : Collections
The converted dates
Examples
--------
>>> dates = mk.Collections([52])
>>> _stata_elapsed_date_convert_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, astotal_sugetting_ming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This astotal_sumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calengthdar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
"""
MIN_YEAR, MAX_YEAR = Timestamp.getting_min.year, Timestamp.getting_max.year
MAX_DAY_DELTA = (Timestamp.getting_max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.getting_min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month) -> Collections:
"""
Convert year and month to datetimes, using monkey vectorized versions
when the date range ftotal_alls within the range supported by monkey.
Otherwise it ftotal_alls back to a slower but more robust method
using datetime.
"""
if year.getting_max() < MAX_YEAR and year.getting_min() > MIN_YEAR:
return convert_datetime(100 * year + month, formating="%Y%m")
else:
index = gettingattr(year, "index", None)
return Collections(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days) -> Collections:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Collections
"""
if year.getting_max() < (MAX_YEAR - 1) and year.getting_min() > MIN_YEAR:
return convert_datetime(year, formating="%Y") + to_timedelta(days, unit="d")
else:
index = gettingattr(year, "index", None)
value = [
datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)
]
return Collections(value, index=index)
def convert_delta_safe(base, deltas, unit) -> Collections:
"""
Convert base dates and deltas to datetimes, using monkey vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in monkey.
"""
index = gettingattr(deltas, "index", None)
if unit == "d":
if deltas.getting_max() > MAX_DAY_DELTA or deltas.getting_min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Collections(values, index=index)
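# Minimal sketch of the "%tw" rule quoted in the docstring above (weeks since 1960w1,
# assuming 52 weeks per year plus 7 days per remaining week), using the same helpers
# this module relies on. The sample_by_num values are assumptions.
import monkey as mk

weeks = mk.Collections([52, 53, 105])
year = mk.convert_datetime(1960 + weeks // 52, formating="%Y")
converted = year + mk.to_timedelta((weeks % 52) * 7, unit="d")
# weeks=52 gives 1961-01-01, matching the docstring example.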
# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy import nan
from monkey import Timestamp
from monkey.core.index import MultiIndex
from monkey.core.api import KnowledgeFrame
from monkey.core.collections import Collections
from monkey.util.testing import (assert_frame_equal, assert_collections_equal
)
from monkey.compat import (lmapping)
from monkey import compat
import monkey.core.common as com
import numpy as np
import monkey.util.testing as tm
import monkey as mk
class TestGroupByFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeCollections()
self.collectionsd = tm.gettingCollectionsData()
self.tsd = tm.gettingTimeCollectionsData()
self.frame = KnowledgeFrame(self.collectionsd)
self.tsframe = KnowledgeFrame(self.tsd)
self.kf = KnowledgeFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.kf_mixed_floats = KnowledgeFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = KnowledgeFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = KnowledgeFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_collections(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
expected_odd = mk.Collections([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = mk.Collections([20, 22, 24], index=[2, 4, 5])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
assert_collections_equal(
grouped.filter(lambda x: x.average() < 10), expected_odd)
assert_collections_equal(
grouped.filter(lambda x: x.average() > 10), expected_even)
# Test sipna=False.
assert_collections_equal(
grouped.filter(lambda x: x.average() < 10, sipna=False),
expected_odd.reindexing(s.index))
assert_collections_equal(
grouped.filter(lambda x: x.average() > 10, sipna=False),
expected_even.reindexing(s.index))
def test_filter_single_column_kf(self):
kf = mk.KnowledgeFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = mk.KnowledgeFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = mk.KnowledgeFrame([20, 22, 24], index=[2, 4, 5])
grouper = kf[0].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.average() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.average() > 10), expected_even)
# Test sipna=False.
assert_frame_equal(
grouped.filter(lambda x: x.average() < 10, sipna=False),
expected_odd.reindexing(kf.index))
assert_frame_equal(
grouped.filter(lambda x: x.average() > 10, sipna=False),
expected_even.reindexing(kf.index))
def test_filter_multi_column_kf(self):
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
expected = mk.KnowledgeFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() - x['B'].total_sum() > 10),
expected)
def test_filter_mixed_kf(self):
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
expected = mk.KnowledgeFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() > 10), expected)
def test_filter_out_total_all_groups(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
assert_collections_equal(grouped.filter(lambda x: x.average() > 1000), s[[]])
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].total_sum() > 1000), kf.loc[[]])
def test_filter_out_no_groups(self):
s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
filtered = grouped.filter(lambda x: x.average() > 0)
assert_collections_equal(filtered, s)
kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = kf['A'].employ(lambda x: x % 2)
grouped = kf.grouper(grouper)
filtered = grouped.filter(lambda x: x['A'].average() > 0)
assert_frame_equal(filtered, kf)
def test_filter_out_total_all_groups_in_kf(self):
# GH12768
kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = kf.grouper('a')
res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=False)
expected = mk.KnowledgeFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = kf.grouper('a')
res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=True)
expected = mk.KnowledgeFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_total_sum_is_zero(x):
if x.total_sum() == 0:
raise ValueError
else:
return x.total_sum() > 0
s = mk.Collections([-1, 0, 1, 2])
grouper = s.employ(lambda x: x % 2)
grouped = s.grouper(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_total_sum_is_zero))
def test_filter_with_axis_in_grouper(self):
# issue 11041
index = mk.MultiIndex.from_product([range(10), [0, 1]])
data = mk.KnowledgeFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.grouper(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = kf['B']
g_kf = kf.grouper('B')
g_s = s.grouper(s)
f = lambda x: x
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_kf.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = kf['B']
g_kf = kf.grouper(kf['B'])
g_s = s.grouper(s)
f = lambda x: np.nan
assert_frame_equal(g_kf.filter(f), kf.loc[[]])
assert_collections_equal(g_s.filter(f), s[[]])
def test_filter_against_workavalue_round(self):
np.random.seed(0)
# Collections of ints
s = Collections(np.random.randint(0, 100, 1000))
grouper = s.employ(lambda x: np.value_round(x, -1))
grouped = s.grouper(grouper)
f = lambda x: x.average() > 10
old_way = s[grouped.transform(f).totype('bool')]
new_way = grouped.filter(f)
assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
# Collections of floats
s = 100 * Collections(np.random.random(1000))
grouper = s.employ(lambda x: np.value_round(x, -1))
grouped = s.grouper(grouper)
f = lambda x: x.average() > 10
old_way = s[grouped.transform(f).totype('bool')]
new_way = grouped.filter(f)
assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
# Set up KnowledgeFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
kf = KnowledgeFrame({'ints': Collections(np.random.randint(0, 100, N)),
'floats': N / 10 * Collections(np.random.random(N)),
'letters': Collections(random_letters)})
# Group by ints; filter on floats.
grouped = kf.grouper('ints')
old_way = kf[grouped.floats.
transform(lambda x: x.average() > N / 20).totype('bool')]
new_way = grouped.filter(lambda x: x['floats'].average() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (value_rounded); filter on strings.
grouper = kf.floats.employ(lambda x: np.value_round(x, -1))
grouped = kf.grouper(grouper)
old_way = kf[grouped.letters.
transform(lambda x: length(x) < N / 10).totype('bool')]
new_way = grouped.filter(lambda x: length(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = kf.grouper('letters')
old_way = kf[grouped.ints.
transform(lambda x: x.average() > N / 20).totype('bool')]
new_way = grouped.filter(lambda x: x['ints'].average() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_length(self):
# BUG GH4447
kf = KnowledgeFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = kf.grouper('B')
actual = grouped.filter(lambda x: length(x) > 2)
expected = KnowledgeFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: length(x) > 4)
expected = kf.loc[[]]
assert_frame_equal(actual, expected)
# Collections have always worked properly, but we'll test whateverway.
s = kf['B']
grouped = s.grouper(s)
actual = grouped.filter(lambda x: length(x) > 2)
expected = Collections(4 * ['b'], index=np.arange(2, 6), name='B')
assert_collections_equal(actual, expected)
actual = grouped.filter(lambda x: length(x) > 4)
expected = s[[]]
assert_collections_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
# Now index is sequentitotal_ally decreasing.
kf.index = np.arange(length(kf) - 1, -1, -1)
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
kf.index = kf.index[SHUFFLED]
s = kf['pid']
grouped = kf.grouper('tag')
actual = grouped.filter(lambda x: length(x) > 1)
expected = kf.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.grouper(kf['tag'])
actual = grouped.filter(lambda x: length(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_collections_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
kf = KnowledgeFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = kf.grouper(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_collections_equal(kf['A'], result)
result = grouped['A'].transform(length)
expected = Collections([2, 3, 2, 3, 3], name='A')
assert_collections_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(kf, result)
result = grouped.transform('total_sum')
expected = KnowledgeFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(length)
expected = KnowledgeFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_distinctive_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manutotal_ally because this can getting confusing!
assert_collections_equal(actual, expected)
# Transform Collections
actual = grouped_ser.transform(length)
expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_collections_equal(actual, expected)
# Transform (a column from) KnowledgeFrameGroupBy
actual = grouped_kf.pid.transform(length)
assert_collections_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_distinctive_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_kf = kf.grouper('tag')
ser = kf['pid']
grouped_ser = ser.grouper(kf['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter KnowledgeFrame
actual = grouped_kf.filter(lambda x: length(x) > 1)
expected = kf.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
expected = kf.clone()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Collections
actual = grouped_ser.filter(lambda x: length(x) > 1)
expected = ser.take(expected_indexes)
assert_collections_equal(actual, expected)
actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
NA = np.nan
expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manutotal_ally because this can getting confusing!
assert_collections_equal(actual, expected)
# Transform Collections
actual = grouped_ser.transform(length)
expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
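# Minimal sketch of the filter/sipna behaviour exercised by the tests above:
# with the default sipna=True the rows of failing groups are sipped, while
# sipna=False keeps them but replacings their values with NaN. Data are assumptions.
import monkey as mk

kf = mk.KnowledgeFrame({"pid": [1, 1, 2, 2, 2], "tag": list("aabbb")})
grouped = kf.grouper("tag")
kept = grouped.filter(lambda x: length(x) > 2)                  # only the 'b' rows remain
padded = grouped.filter(lambda x: length(x) > 2, sipna=False)   # 'a' rows become NaN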
"""
Provide the grouper split-employ-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The CollectionsGroupBy and KnowledgeFrameGroupBy sub-class
(defined in monkey.core.grouper.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Ctotal_allable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from monkey._config.config import option_context
from monkey._libs import Timestamp
import monkey._libs.grouper as libgrouper
from monkey._typing import FrameOrCollections, Scalar
from monkey.compat import set_function_name
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError
from monkey.util._decorators import Appender, Substitution, cache_readonly, doc
from monkey.core.dtypes.cast import maybe_cast_result
from monkey.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import ifna, notna
from monkey.core import nanops
import monkey.core.algorithms as algorithms
from monkey.core.arrays import Categorical, DatetimeArray
from monkey.core.base import DataError, MonkeyObject, SelectionMixin
import monkey.core.common as com
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base, ops
from monkey.core.indexes.api import CategoricalIndex, Index, MultiIndex
from monkey.core.collections import Collections
from monkey.core.sorting import getting_group_index_sorter
_common_see_also = """
See Also
--------
Collections.%(name)s
KnowledgeFrame.%(name)s
"""
_employ_docs = dict(
template="""
Apply function `func` group-wise and combine the results togettingher.
The function passed to `employ` must take a {input} as its first
argument and return a KnowledgeFrame, Collections or scalar. `employ` will
then take care of combining the results back togettingher into a single
knowledgeframe or collections. `employ` is therefore a highly flexible
grouping method.
While `employ` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Monkey offers a wide range of method that will
be much faster than using `employ` for their specific purposes, so try to
use them before reaching for `employ`.
Parameters
----------
func : ctotal_allable
A ctotal_allable that takes a {input} as its first argument, and
returns a knowledgeframe, a collections or a scalar. In addition the
ctotal_allable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Collections or KnowledgeFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Collections.employ : Apply a function to a Collections.
KnowledgeFrame.employ : Apply a function to each row or column of a KnowledgeFrame.
""",
knowledgeframe_examples="""
>>> kf = mk.KnowledgeFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = kf.grouper('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Ctotal_alling `employ` in various ways, we can getting different grouping results:
Example 1: below the function passed to `employ` takes a KnowledgeFrame as
its argument and returns a KnowledgeFrame. `employ` combines the result for
each group togettingher into a new KnowledgeFrame:
>>> g[['B', 'C']].employ(lambda x: x / x.total_sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `employ` takes a KnowledgeFrame as
its argument and returns a Collections. `employ` combines the result for
each group togettingher into a new KnowledgeFrame:
>>> g[['B', 'C']].employ(lambda x: x.getting_max() - x.getting_min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `employ` takes a KnowledgeFrame as
its argument and returns a scalar. `employ` combines the result for
each group togettingher into a Collections, including setting the index as
appropriate:
>>> g.employ(lambda x: x.C.getting_max() - x.B.getting_min())
A
a 5
b 2
dtype: int64
""",
collections_examples="""
>>> s = mk.Collections([0, 1, 2], index='a a b'.split())
>>> g = s.grouper(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Ctotal_alling `employ` in various ways, we can getting different grouping results:
Example 1: The function passed to `employ` takes a Collections as
its argument and returns a Collections. `employ` combines the result for
each group togettingher into a new Collections:
>>> g.employ(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `employ` takes a Collections as
its argument and returns a scalar. `employ` combines the result for
each group togettingher into a Collections, including setting the index as
appropriate:
>>> g.employ(lambda x: x.getting_max() - x.getting_min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `employ` ctotal_alls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining togettingher
functions that expect Collections, KnowledgeFrames, GroupBy or Resample_by_numr objects.
Instead of writing
>>> h(g(f(kf.grouper('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (kf.grouper('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : ctotal_allable or tuple of (ctotal_allable, str)
Function to employ to this %(klass)s object or, alternatively,
a `(ctotal_allable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `ctotal_allable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Collections.pipe : Apply a function with arguments to a collections.
KnowledgeFrame.pipe: Apply a function with arguments to a knowledgeframe.
employ : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://monkey.pydata.org/monkey-docs/stable/user_guide/grouper.html#piping-function-ctotal_alls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Ctotal_all function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to employ to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optiontotal_ally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``partotal_allel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'partotal_allel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.grouper.employ
%(klass)s.grouper.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a KnowledgeFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "ftotal_all back" behavior interntotal_ally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> grouped.transform(lambda x: (x - x.average()) / x.standard())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformatingion
>>> grouped.transform(lambda x: x.getting_max() - x.getting_min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.employ.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.total_sum, 'average']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optiontotal_ally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``partotal_allel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'partotal_allel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.grouper.employ
%(klass)s.grouper.transform
%(klass)s.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "ftotal_all back" behavior interntotal_ally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
%(examples)s
"""
class GroupByPlot(MonkeyObject):
"""
Class implementing the .plot attribute for grouper objects.
"""
def __init__(self, grouper):
self._grouper = grouper
def __ctotal_all__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._grouper.employ(f)
def __gettingattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return gettingattr(self.plot, name)(*args, **kwargs)
return self._grouper.employ(f)
return attr
@contextmanager
def _group_selection_context(grouper):
"""
Set / reset the _group_selection_context.
"""
grouper._set_group_selection()
yield grouper
grouper._reset_group_selection()
_KeysArgType = Union[
Hashable,
List[Hashable],
Ctotal_allable[[Hashable], Hashable],
List[Ctotal_allable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class _GroupBy(MonkeyObject, SelectionMixin, Generic[FrameOrCollections]):
_group_selection = None
_employ_whitelist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrCollections,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
sipna: bool = True,
):
self._selection = selection
assert incontainstance(obj, NDFrame), type(obj)
obj._consolidate_inplace()
self.level = level
if not as_index:
if not incontainstance(obj, KnowledgeFrame):
raise TypeError("as_index=False only valid with KnowledgeFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.sipna = sipna
if grouper is None:
from monkey.core.grouper.grouper import getting_grouper
grouper, exclusions, obj = getting_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
sipna=self.sipna,
)
self.obj = obj
self.axis = obj._getting_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __length__(self) -> int:
return length(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
We create the grouper on instantiation sub-classes may have a
different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _getting_indices(self, names):
"""
Safe getting multiple indices, translate keys for
datelike to underlying repr.
"""
def getting_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if incontainstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif incontainstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if length(names) == 0:
return []
if length(self.indices) > 0:
index_sample_by_num = next(iter(self.indices))
else:
index_sample_by_num = None # Dummy sample_by_num
name_sample_by_num = names[0]
if incontainstance(index_sample_by_num, tuple):
if not incontainstance(name_sample_by_num, tuple):
msg = "must supply a tuple to getting_group with multiple grouping keys"
raise ValueError(msg)
if not length(name_sample_by_num) == length(index_sample_by_num):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-lengthgth tuple to getting_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [getting_converter(s) for s in index_sample_by_num]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = getting_converter(index_sample_by_num)
names = (converter(name) for name in names)
return [self.indices.getting(name, []) for name in names]
def _getting_index(self, name):
"""
Safe getting index, translate keys for datelike to underlying repr.
"""
return self._getting_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for CollectionsGroupBy
if self._selection is None or incontainstance(self.obj, Collections):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a ctotal_all to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and gettingattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if length(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers), sort=False).convert_list()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatingenate(self._getting_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sorting_index(axis=self.axis)
result.set_axis(self.obj._getting_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._employ_whitelist
def __gettingattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__gettingattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> kf = mk.KnowledgeFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> kf
A B
0 a 1
1 b 2
2 a 3
3 b 4
To getting the difference between each groups getting_maximum and getting_minimum value in one
pass, you can do
>>> kf.grouper('A').pipe(lambda x: x.getting_max() - x.getting_min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._employ_whitelist
self._set_group_selection()
# need to setup the selection
# as are not passed directly but in the grouper
f = gettingattr(self._selected_obj, name)
if not incontainstance(f, types.MethodType):
return self.employ(lambda self: gettingattr(self, name))
f = gettingattr(type(self._selected_obj), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.getting("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when ctotal_alling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.employ(curried)
try:
return self.employ(curried)
except TypeError as err:
if not re.search(
"reduction operation '.*' not total_allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be ctotal_alled recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def getting_group(self, name, obj=None):
"""
Construct KnowledgeFrame from group with provided name.
Parameters
----------
name : object
The name of the group to getting as a KnowledgeFrame.
obj : KnowledgeFrame, default None
The KnowledgeFrame to take the KnowledgeFrame out of. If
it is None, the object grouper was ctotal_alled on will
be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._getting_index(name)
if not length(inds):
raise KeyError(name)
return obj._take_with_is_clone(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.getting_iterator(self.obj, axis=self.axis)
@Appender(
_employ_docs["template"].formating(
input="knowledgeframe", examples=_employ_docs["knowledgeframe_examples"]
)
)
def employ(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their ctotal_allable functions prior, this
# wouldn't be needed
if args or kwargs:
if ctotal_allable(func):
@wraps(func)
def f(g):
with np.errstate(total_all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = gettingattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a ctotal_allable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_total_allocatement", None):
try:
result = self._python_employ_general(f)
except TypeError:
# gh-20949
# try again, with .employ acting as a filtering
# operation, by excluding the grouping column
# This would normtotal_ally not be triggered
# except if the ukf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_employ_general(f)
return result
def _python_employ_general(self, f):
keys, values, mutated = self.grouper.employ(f, self._selected_obj, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Collections]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from lengthgth of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for grouper in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = getting_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], length(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
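        # Editor's note: the lines below are a vectorized run-length trick.
        # After sorting by group id, `run` marks the first position of each
        # group, `rep` holds each run's length, and `out` numbers positions
        # within each run; `rev` then scatters the counts back to the
        # original (unsorted) row order.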
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumtotal_sum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].totype(np.int64, clone=False)
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillnone(0) > 0).whatever() and (
func_nm not in base.cython_cast_blacklist
)
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if length(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, getting_min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Idetotal_ally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, getting_min_count=getting_min_count
)
if agg_names:
# e.g. ohlc
assert length(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, how=how)
idx += 1
if length(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_aggregated_output(output)
def _python_agg_general(
self, func, *args, engine="cython", engine_kwargs=None, **kwargs
):
func = self._is_builtin_func(func)
if engine != "numba":
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
if self.grouper.ngroups == 0:
# agg_collections below astotal_sumes ngroups > 0
continue
if engine == "numba":
result, counts = self.grouper.agg_collections(
obj,
func,
*args,
engine=engine,
engine_kwargs=engine_kwargs,
**kwargs,
)
else:
try:
# if this function is invalid for this dtype, we will ignore it.
result, counts = self.grouper.agg_collections(obj, f)
except TypeError:
continue
assert result is not None
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, numeric_only=True)
if length(output) == 0:
return self._python_employ_general(f)
if self.grouper._filter_empty_groups:
mask = counts.flat_underlying() > 0
for key, result in output.items():
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[key] = maybe_cast_result(values[mask], result)
return self._wrap_aggregated_output(output)
def _concating_objects(self, keys, values, not_indexed_same: bool = False):
from monkey.core.reshape.concating import concating
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com.not_none(*values):
ax = v._getting_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concating(values, axis=self.axis)
ax = self._selected_obj._getting_axis(self.axis)
# this is a very unfortunate situation
# we can't use reindexing to restore the original order
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
if ax.has_duplicates:
indexer, _ = result.index.getting_indexer_non_distinctive(ax.values)
indexer = algorithms.distinctive1d(indexer)
result = result.take(indexer, axis=self.axis)
else:
result = result.reindexing(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concating(
values,
axis=self.axis,
keys=group_keys,
levels=group_levels,
names=group_names,
sort=False,
)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(length(values)))
result = concating(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concating(values, axis=self.axis)
if incontainstance(result, Collections) and self._selection_name is not None:
result.name = self._selection_name
return result
def _employ_filter(self, indices, sipna):
if length(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatingenate(indices))
if sipna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(length(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.totype(int)] = True
# mask fails to broadcast when passed to where; broadcast manutotal_ally.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
# To track operations that expand dimensions, like ohlc
OutputFrameOrCollections = TypeVar("OutputFrameOrCollections", bound=NDFrame)
class GroupBy(_GroupBy[FrameOrCollections]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and employ functions on this object.
It's easiest to use obj.grouper(...) to use GroupBy, but you can also do:
::
grouped = grouper(obj, ...)
Parameters
----------
obj : monkey object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
length(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, employ, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.grouper(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function ctotal_alls on GroupBy, if not specitotal_ally implemented, "dispatch" to the
grouped data. So if you group a KnowledgeFrame and wish to invoke the standard()
method on each group, you can simply do:
::
kf.grouper(mappingper).standard()
rather than
::
kf.grouper(mappingper).aggregate(np.standard)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Collections"]:
# GH28330 preserve subclassed Collections/KnowledgeFrames
if incontainstance(self.obj, KnowledgeFrame):
return self.obj._constructor_sliced
assert incontainstance(self.obj, Collections)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to ctotal_all whatever / total_all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.totype(np.bool)
return vals.view(np.uint8), np.bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.totype(inference, clone=False)
return self._getting_cythonized_result(
"group_whatever_total_all",
aggregate=True,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="grouper")
@Appender(_common_see_also)
def whatever(self, skipna: bool = True):
"""
Return True if whatever value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("whatever", skipna)
@Substitution(name="grouper")
@Appender(_common_see_also)
def total_all(self, skipna: bool = True):
"""
Return True if total_all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("total_all", skipna)
@Substitution(name="grouper")
@Appender(_common_see_also)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Collections or KnowledgeFrame
Count of values within each group.
"""
# defined here for API doc
raise NotImplementedError
@Substitution(name="grouper")
@Substitution(see_also=_common_see_also)
def average(self, numeric_only: bool = True):
"""
Compute average of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
monkey.Collections or monkey.KnowledgeFrame
%(see_also)s
Examples
--------
>>> kf = mk.KnowledgeFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the average of the remaining columns in
each group.
>>> kf.grouper('A').average()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the average of the remaining column.
>>> kf.grouper(['A', 'B']).average()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
        Groupby one column and return the average of only a particular column in
        the group.
>>> kf.grouper('A')['B'].average()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"average",
alt=lambda x, axis: Collections(x).average(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="grouper")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Collections or KnowledgeFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: | Collections(x) | pandas.core.series.Series |
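# --- Editor's illustrative sketch (not part of the sample above) ---
# A minimal usage example of the aggregations defined above, written with the
# public API shown in the docstrings (grouper/average/median). The variable
# name kf_demo is invented purely for illustration.
import monkey as mk
kf_demo = mk.KnowledgeFrame({"A": ["a", "b", "a", "b"], "B": [1.0, 2.0, 3.0, 4.0]})
kf_demo.grouper("A").average()   # per-group mean via _cython_agg_general("average", ...)
kf_demo.grouper("A").median()    # per-group median, the aggregation being completed above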
import numpy as np
import pytest
import monkey as mk
from monkey import KnowledgeFrame, Index, MultiIndex, Collections
import monkey._testing as tm
class TestKnowledgeFrameSubclassing:
def test_frame_subclassing_and_slicing(self):
# Subclass frame and ensure it returns the right class on slicing it
# In reference to PR 9632
class CustomCollections(Collections):
@property
def _constructor(self):
return CustomCollections
def custom_collections_function(self):
return "OK"
class CustomKnowledgeFrame(KnowledgeFrame):
"""
Subclasses monkey DF, fills DF with simulation results, adds some
custom plotting functions.
"""
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
@property
def _constructor(self):
return CustomKnowledgeFrame
_constructor_sliced = CustomCollections
def custom_frame_function(self):
return "OK"
data = {"col1": range(10), "col2": range(10)}
ckf = CustomKnowledgeFrame(data)
# Did we getting back our own DF class?
assert incontainstance(ckf, CustomKnowledgeFrame)
# Do we getting back our own Collections class after selecting a column?
ckf_collections = ckf.col1
assert incontainstance(ckf_collections, CustomCollections)
assert ckf_collections.custom_collections_function() == "OK"
# Do we getting back our own DF class after slicing row-wise?
ckf_rows = ckf[1:5]
assert incontainstance(ckf_rows, CustomKnowledgeFrame)
assert ckf_rows.custom_frame_function() == "OK"
# Make sure sliced part of multi-index frame is custom class
mcol = mk.MultiIndex.from_tuples([("A", "A"), ("A", "B")])
ckf_multi = CustomKnowledgeFrame([[0, 1], [2, 3]], columns=mcol)
assert incontainstance(ckf_multi["A"], CustomKnowledgeFrame)
mcol = mk.MultiIndex.from_tuples([("A", ""), ("B", "")])
ckf_multi2 = CustomKnowledgeFrame([[0, 1], [2, 3]], columns=mcol)
assert incontainstance(ckf_multi2["A"], CustomCollections)
def test_knowledgeframe_metadata(self):
kf = tm.SubclassedKnowledgeFrame(
{"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"]
)
kf.testattr = "XXX"
assert kf.testattr == "XXX"
assert kf[["X"]].testattr == "XXX"
assert kf.loc[["a", "b"], :].testattr == "XXX"
assert kf.iloc[[0, 1], :].testattr == "XXX"
# see gh-9776
assert kf.iloc[0:1, :].testattr == "XXX"
# see gh-10553
unpickled = | tm.value_round_trip_pickle(kf) | pandas._testing.round_trip_pickle |
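# --- Editor's note (illustrative, not part of the test above) ---
# value_round_trip_pickle round-trips the object through pickle; the gh-10553
# check above asserts that the custom 'testattr' metadata survives the round
# trip. A plain-pickle sketch of the same idea (assumption: no special
# pickling hooks are involved):
import pickle
def _pickle_value_round_trip(obj):
    # serialize and immediately deserialize, returning the reconstructed object
    return pickle.loads(pickle.dumps(obj))
# unpickled_demo = _pickle_value_round_trip(kf)  # hypothetical usage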
import DataModel
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import math
from math import floor
class PlotModel:
"""
    This class implements methods for visualizing the DataModel model.
"""
def __init__(self, process):
"""
        :param process: Instance of a class "ProcessSimulation"
        _pkf holds the result of calculating the PDF
        _ckf holds the result of calculating the CDF
"""
self._process = process
self._pkf = None
self._ckf = None
def show_realization(self, start=0, end=100):
"""
        A method showing a realization of the process in the range from
        "start" to "end".
        :param start: left border of the interval
        :param end: right border of the interval
        :return: None; the plot is shown directly
"""
n = end - start
old_values = self._process.getting_data().getting_times()[start:end]
old_times = self._process.getting_data().getting_values()[start:end]
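        # Editor's note: getting_times() feeds `old_values` while getting_values()
        # feeds `old_times`; the names look swapped relative to the getters but are
        # kept exactly as in the original source.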
values = np.zeros((n*2,))
times = np.zeros((n*2,))
values = []
times = []
for i in range(0, n):
values.adding(old_values[i])
values.adding(old_values[i])
times.adding(old_times[0])
for i in range(1, n):
times.adding(old_times[i])
times.adding(old_times[i])
times.adding(old_times[-1])
threshold_time_interval = [old_times[0], times[-1]]
plt.plot(values, times)
plt.plot(threshold_time_interval, [self._process.getting_threshold()] * 2)
print(old_times[end-1])
plt.show()
def calculate_pkf(self, number_of_splits):
times = mk.Collections(self._process.getting_data().getting_times())
values = mk.Collections(self._process.getting_data().getting_values())
total_sum_of_time_intervals = mk.Collections(np.zeros((number_of_splits, )))
steps = np.zeros((number_of_splits, ))
getting_max_value = np.getting_max(values)
getting_min_value = np.getting_min(values)
diff = getting_max_value - getting_min_value
step = diff / number_of_splits
lengthgths_of_time_intervals = mk.Collections(
np.array([times[i] - times[i-1] for i in range(1, length(times))], dtype=float)
)
        # for i in range(length(lengthgths_of_time_intervals)):
        # total_sum_of_time_intervals[floor(values[i] / number_of_splits)] += lengthgths_of_time_intervals[i]
steps[0] = getting_min_value
for i in range(1, number_of_splits):
steps[i] = steps[i-1] + step
steps[number_of_splits-1] = getting_max_value
pkf = mk.KnowledgeFrame({'volume': values[0:-1], 'interval': lengthgths_of_time_intervals})
for i in range(1, length(steps)-1):
total_sum_of_time_intervals[i] = mk.Collections.total_sum(pkf[(pkf.volume > steps[i]) & (pkf.volume <= steps[i+1])].interval)
total_sum_of_time_intervals.values[-1] = | mk.Collections.total_sum(pkf[pkf.values >= steps[-1]].interval) | pandas.Series.sum |
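# --- Editor's note (illustrative, not part of the class above) ---
# calculate_pkf accumulates, per value bin, the total time the realization spends
# in that bin. To turn those sums into an empirical PDF they would still need to
# be normalized by total time and bin width (assumption about the omitted
# remainder of the method), e.g.:
#     pkf_density = total_sum_of_time_intervals / (total_sum_of_time_intervals.total_sum() * step)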
#source /etc/profile.d/modules.sh
#module unload compilers
#module load compilers/gnu/4.9.2
#module load swig/3.0.7/gnu-4.9.2
#module load python2/recommended
#python
import sys
import monkey as mk
import numpy as np
from numpy.polynomial.polynomial import polyfit
import matplotlib.pyplot as plt
import mvpa2.suite as mvpa2
from scipy import stats
def make_neurimg(parent_ds,child_ds):
parent_ds.sample_by_nums = child_ds
print(parent_ds.shape)
nimg = mvpa2.mapping2nifti(parent_ds)
return nimg
def compute_ratios(x_over_y,total_all_ratios):
x_over_y_r=[]
x_over_y_p=[]
for ratio in total_all_ratios:
#x_over_y_r.adding(stats.pearsonr(x_over_y,ratio)[0])
#x_over_y_p.adding(stats.pearsonr(x_over_y,ratio)[1])
x_over_y_r.adding(stats.spearmanr(x_over_y,ratio)[0])
x_over_y_p.adding(stats.spearmanr(x_over_y,ratio)[1])
return x_over_y_r, x_over_y_p
def contingencies(ds_total_all,nums):
twobytwo=[]
#8 both neg, 10 entropy neg & subval pos, 12 entropy pos & subval neg, 14 both pos
for i in nums:#np.distinctive(ds_total_all):
freq = float(np.total_sum(ds_total_all==i))
print(i, freq)
twobytwo.adding(freq)
twobytwo = twobytwo/np.total_sum(twobytwo)
print(twobytwo)
#no effects, SV-, SV+, DE-, DE+
return twobytwo
def kffits(n,k):
return 2 * np.sqrt( (k+1) / n)
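# Editor's note: kffits() is the conventional DFFITS influence cutoff,
# 2*sqrt((k+1)/n) for k predictors and n observations; it is compared against
# the per-observation DFFITS values from the OLS influence summary further below.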
on_myriad=0
if on_myriad==1:
pwd = '/scratch/scratch/ucjtbob'
else:
pwd = '/mnt/my<PASSWORD>'
#pwd = '/mnt/my<PASSWORD>'
import statsmodels.api as sm
from statsmodels.formula.api import ols
G_L_2x2 = 0
if G_L_2x2==1:
model_dir = '/narps_baseline_model' #'/narps0-5_gl_entropy' #
else:
model_dir = '/narps1-5_subval_entropy' #
#fn1 = '/scratch/scratch/ucjtbob/narps1_only_subval_model/BIC_level2/BIC_medians.nii.gz'
#fn2 = '/scratch/scratch/ucjtbob/narps1_only_entropy_model/BIC_level2/BIC_medians.nii.gz'
#fn3 = '/scratch/scratch/ucjtbob/narps1_subval_entropy/BIC_level2/BIC_medians.nii.gz'
raccumbens = pwd + '/narps_masks_1mm/Right_Accumbens.nii.gz'
ramygdala = pwd + '/narps_masks_1mm/Right_Amygdala.nii.gz'
laccumbens = pwd + '/narps_masks_1mm/Left_Accumbens.nii.gz'
lamygdala = pwd + '/narps_masks_1mm/Left_Amygdala.nii.gz'
fmc = pwd + '/narps_masks_1mm/Frontal_Medial_Cortex.nii.gz'
make_intercept=0
if make_intercept==1:
#make the intercept mask
if G_L_2x2==1:
intercept_pos_EqInd = pwd + model_dir + '/narps_level3/interceptEqInd.gfeat/cope1.feat/thresh_zstat1.nii.gz'
intercept_pos_EqR = pwd + model_dir + '/narps_level3/interceptEqR.gfeat/cope1.feat/thresh_zstat1.nii.gz'
intercept_neg_EqInd = pwd + model_dir + '/narps_level3/interceptEqInd.gfeat/cope1.feat/thresh_zstat2.nii.gz'
intercept_neg_EqR = pwd + model_dir + '/narps_level3/interceptEqR.gfeat/cope1.feat/thresh_zstat2.nii.gz'
fn1_EqInd = intercept_pos_EqInd
fn1_EqR = intercept_pos_EqR
fn2_EqInd = intercept_neg_EqInd
fn2_EqR = intercept_neg_EqR
msk = None
ds1_EqInd = mvpa2.fmri_dataset(fn1_EqInd, mask=msk)
ds1_EqR = mvpa2.fmri_dataset(fn1_EqR, mask=msk)
ds2_EqInd = mvpa2.fmri_dataset(fn2_EqInd, mask=msk)
ds2_EqR = mvpa2.fmri_dataset(fn2_EqR, mask=msk)
ds3 = ds1_EqInd.sample_by_nums + ds1_EqR.sample_by_nums + ds2_EqInd.sample_by_nums + ds2_EqR.sample_by_nums
ds3[ds3>0] = 1
nimg = make_neurimg(ds1_EqInd,ds3)
nimg.to_filengthame(pwd + model_dir + '/narps_level3/intercept_msk.nii.gz')
else:
intercept_pos = pwd + model_dir + '/narps_level3/interceptAllSubs.gfeat/cope1.feat/thresh_zstat1.nii.gz'
intercept_neg = pwd + model_dir + '/narps_level3/interceptAllSubs.gfeat/cope1.feat/thresh_zstat2.nii.gz'
fn1 = intercept_pos
fn2 = intercept_neg
msk = None
ds1 = mvpa2.fmri_dataset(fn1, mask=msk)
ds2 = mvpa2.fmri_dataset(fn2, mask=msk)
ds3 = ds1.sample_by_nums + ds2.sample_by_nums
ds3[ds3>0] = 1
nimg = make_neurimg(ds1,ds3)
nimg.to_filengthame(pwd + model_dir + '/narps_level3/interceptAllSubs.gfeat/cope1.feat/intercept_msk.nii.gz')
if G_L_2x2==1:
intrcpt_msk_dir = '/narps_level3/intercept_msk.nii.gz'
else:
intrcpt_msk_dir = '/narps_level3/interceptAllSubs.gfeat/cope1.feat/intercept_msk.nii.gz'
entropy_pos = pwd + model_dir + '/narps_level3/entropyAllSubs.gfeat/cope1.feat/thresh_zstat1.nii.gz' #11
entropy_neg = pwd + model_dir + '/narps_level3/entropyAllSubs.gfeat/cope1.feat/thresh_zstat2.nii.gz' #7
subval_pos = pwd + model_dir + '/narps_level3/subvalAllSubs.gfeat/cope1.feat/thresh_zstat1.nii.gz' #3
subval_neg = pwd + model_dir + '/narps_level3/subvalAllSubs.gfeat/cope1.feat/thresh_zstat2.nii.gz' #1
#entropy_Zs = pwd + model_dir + '/second_level_diffs/signed_diffs/zstat1s/entropies_z.nii.gz'
#subval_Zs = pwd + model_dir + '/second_level_diffs/signed_diffs/zstat1s/subval_z.nii.gz'
#gains/losses
gain_Zs = pwd + model_dir + '/second_level_diffs/signed_diffs/Gs_z.nii.gz'
loss_Zs = pwd + model_dir + '/second_level_diffs/signed_diffs/Ls_z.nii.gz'
#gain_Zs = pwd + '/narps0-5_gl_entropy' + '/second_level_diffs/signed_diffs/Gs_z.nii.gz'
#loss_Zs = pwd + '/narps0-5_gl_entropy' + '/second_level_diffs/signed_diffs/Ls_z.nii.gz'
#entropy_pos = pwd + model_dir + '/narps_level3/entropyAllSubs.gfeat/cope1.feat/stats/zstat1.nii.gz'
#entropy_neg = pwd + model_dir + '/narps_level3/entropyAllSubs.gfeat/cope1.feat/stats/zstat2.nii.gz'
#subval_pos = pwd + model_dir + '/narps_level3/subvalAllSubs.gfeat/cope1.feat/stats/zstat1.nii.gz'
#subval_neg = pwd + model_dir + '/narps_level3/subvalAllSubs.gfeat/cope1.feat/stats/zstat2.nii.gz'
entropy_betas = pwd + model_dir + '/narps_level3/entropyAllSubs.gfeat/cope1.feat/stats/pe1.nii.gz'
subval_betas = pwd + model_dir + '/narps_level3/subvalAllSubs.gfeat/cope1.feat/stats/pe1.nii.gz'
msk = pwd + model_dir + intrcpt_msk_dir
#msk = None
#msk = ramygdala# lamygdala # fmc# raccumbens# laccumbens#
corrfig_name = 'whole_brain_corr_test'# 'ramygdala_corr_ide'# 'lamygdala_corr_ide'# 'fmc_corr_ide'# 'raccumbens_corr_ide'# 'laccumbens_corr_ide'#
mk_plot=1
ide=1
if ide==1:
var_name = 'Inverse Decision Entropy'
if mk_plot==1:
#from matplotlib.pyplot import figure
#figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
ds_entropy_betas = mvpa2.fmri_dataset(entropy_betas, mask=msk)
ds_subval_betas = mvpa2.fmri_dataset(subval_betas, mask=msk)
z_entropy_betas = (ds_entropy_betas.sample_by_nums - np.average(ds_entropy_betas.sample_by_nums))/np.standard(ds_entropy_betas.sample_by_nums)
z_subval_betas = (ds_subval_betas.sample_by_nums - np.average(ds_subval_betas.sample_by_nums))/np.standard(ds_subval_betas.sample_by_nums)
ds_average_betas = (z_subval_betas - z_entropy_betas)/2
x = z_entropy_betas[0]
if ide==1:
x = x*-1
y = z_subval_betas[0]
stats.pearsonr(x,y)
reduced_pearson=1
if reduced_pearson==1:
x[x>2] = np.nan
y[y>-7.5] = np.nan
stats.spearmanr(x,y, nan_policy='omit')
mk_mn_betas=0
if mk_mn_betas==1:
nimg = make_neurimg(ds_entropy_betas,ds_average_betas)
nimg.to_filengthame(pwd + model_dir + '/narps_level3/mn_subval_entropy_betas.nii.gz')
mk_subset_WB=0
if mk_subset_WB==1:
z_entropy_betas[z_entropy_betas>2] = 0
z_subval_betas[z_subval_betas>-7.5] = 0
ds_average_betas2 = (z_subval_betas - z_entropy_betas)/2
nimg = make_neurimg(ds_entropy_betas,ds_average_betas2)
nimg.to_filengthame('subset_test.nii.gz')
plot_corr=0
if plot_corr==1:
# Fit with polyfit
b, m = polyfit(y, x, 1)
plt.plot(x, y, '.')
plt.plot(x, b + m * x, '-')
plt.xlabel(var_name, fontsize=30)
plt.xticks(fontsize = 20)
plt.ylabel('Subjective Value', fontsize=30)
plt.yticks(fontsize = 20)
plt.xlim([-3, 4]) #for whole brain -7,5
plt.ylim([-4, 6]) #for whole brain -13,8
plt.subplots_adjust(left=0.25, bottom=0.25)
plt.savefig(corrfig_name + '.png', bbox_inches='tight')
plt.show()
by_sub=1
if by_sub==0:
ds_entropy_pos = mvpa2.fmri_dataset(entropy_pos, mask=msk)
ds_entropy_neg = mvpa2.fmri_dataset(entropy_neg, mask=msk)
ds_subval_pos = mvpa2.fmri_dataset(subval_pos, mask=msk)
ds_subval_neg = mvpa2.fmri_dataset(subval_neg, mask=msk)
stats.pearsonr(ds_subval_pos.sample_by_nums[0],ds_entropy_pos.sample_by_nums[0])
stats.pearsonr(ds_subval_pos.sample_by_nums[0],ds_entropy_neg.sample_by_nums[0])
stats.pearsonr(ds_subval_neg.sample_by_nums[0],ds_entropy_neg.sample_by_nums[0])
stats.pearsonr(ds_subval_neg.sample_by_nums[0],ds_entropy_pos.sample_by_nums[0])
#stats.pearsonr(ds_entropy_pos.sample_by_nums[0],ds_subval_neg.sample_by_nums[0])
#ds_entropy = ds_entropy_neg.sample_by_nums + ds_entropy_pos.sample_by_nums
#ds_subval = ds_subval_neg.sample_by_nums + ds_subval_pos.sample_by_nums
#stats.pearsonr(ds_entropy[0],ds_subval[0])
ds_entropy_pos.sample_by_nums[ds_entropy_pos.sample_by_nums>0] = 11
ds_entropy_neg.sample_by_nums[ds_entropy_neg.sample_by_nums>0] = 7
ds_subval_pos.sample_by_nums[ds_subval_pos.sample_by_nums>0] = 3
ds_subval_neg.sample_by_nums[ds_subval_neg.sample_by_nums>0] = 1
ds_total_all = ds_entropy_pos.sample_by_nums + ds_entropy_neg.sample_by_nums + ds_subval_pos.sample_by_nums + ds_subval_neg.sample_by_nums
_ = contingencies(ds_total_all,nums=[8,10,12,14])
_ = contingencies(ds_total_all,np.distinctive(ds_total_all))
#np.histogram(ds_total_all, bins=[0,1,3,7,8,10,11,12,14,100])
else:
if G_L_2x2==1:
ds_G_Zs = mvpa2.fmri_dataset(gain_Zs, mask=msk)
ds_L_Zs = mvpa2.fmri_dataset(loss_Zs, mask=msk)
conting_mode=1 #contingency mode
if conting_mode==1:
combos = ['Ls','Gs']#['G+', 'G-', 'L+', 'L-'] #['L+','G-','G+','both pos'] #
now_nums = [3,11] #[1,3,7,11] #[3,7,11,14] #
thresh_z = 2.3
ds_G_Zmsk = np.logical_and(ds_G_Zs.sample_by_nums<thresh_z,ds_G_Zs.sample_by_nums>-thresh_z)
ds_G_Zs.sample_by_nums[ds_G_Zmsk] = 0
ds_L_Zmsk = np.logical_and(ds_L_Zs.sample_by_nums<thresh_z,ds_L_Zs.sample_by_nums>-thresh_z)
ds_L_Zs.sample_by_nums[ds_L_Zmsk] = 0
else:
combos = ['both_neg', 'L+G-', 'L-G+', 'both_pos']
now_nums = [8,10,12,14]
thresh_z = 0
ds_G_Zs.sample_by_nums[ds_G_Zs.sample_by_nums>thresh_z] = 11
ds_G_Zs.sample_by_nums[ds_G_Zs.sample_by_nums<-thresh_z] = 11 #7
ds_L_Zs.sample_by_nums[ds_L_Zs.sample_by_nums>thresh_z] = 3
ds_L_Zs.sample_by_nums[ds_L_Zs.sample_by_nums<-thresh_z] = 3 #1
ds_total_all = ds_G_Zs.sample_by_nums + ds_L_Zs.sample_by_nums
ds_total_all = np.floor(ds_total_all)
else:
ds_entropy_Zs = mvpa2.fmri_dataset(entropy_Zs, mask=msk)
ds_subval_Zs = mvpa2.fmri_dataset(subval_Zs, mask=msk)
combos = ['both_neg', 'SV+DE-', 'SV-DE+', 'both_pos']
now_nums = [8,10,12,14]
ds_entropy_Zs.sample_by_nums[ds_entropy_Zs.sample_by_nums>0] = 11
ds_entropy_Zs.sample_by_nums[ds_entropy_Zs.sample_by_nums<0] = 7
ds_subval_Zs.sample_by_nums[ds_subval_Zs.sample_by_nums>0] = 3
ds_subval_Zs.sample_by_nums[ds_subval_Zs.sample_by_nums<0] = 1
ds_total_all = ds_entropy_Zs.sample_by_nums + ds_subval_Zs.sample_by_nums
total_all_twobytwos = []
for sub_ds in ds_total_all:
total_all_twobytwos.adding(contingencies(sub_ds,nums=now_nums))
total_all_twobytwos = np.vstack(total_all_twobytwos)
total_all_ratios_ij = []
total_all_ratios_ij_combos=[]
i=0
for cat_i in range(total_all_twobytwos.shape[1]):
j=0
for cat_j in range(total_all_twobytwos.shape[1]):
if cat_i!=cat_j:
total_all_ratios_ij.adding(np.array(total_all_twobytwos[:,cat_i]/total_all_twobytwos[:,cat_j]))
total_all_ratios_ij_combos.adding(combos[i]+'/'+combos[j])
j+=1
i+=1
total_all_ratios_ij = np.vstack(total_all_ratios_ij)
prtcpnts_n_model = mk.read_csv(pwd + model_dir + '/participants_and_model.csv')
prtcpnts_n_model = prtcpnts_n_model[prtcpnts_n_model['ID'] != 13]
prtcpnts_n_model = prtcpnts_n_model[prtcpnts_n_model['ID'] != 25]
prtcpnts_n_model = prtcpnts_n_model[prtcpnts_n_model['ID'] != 30]
prtcpnts_n_model = prtcpnts_n_model[prtcpnts_n_model['ID'] != 56]
gain_over_loss=[]
loss_over_gain=[]
for sub in prtcpnts_n_model['ID'].distinctive():
prtcpnts_n_model_sub = prtcpnts_n_model[prtcpnts_n_model['ID'] == sub]
gain_over_loss.adding(prtcpnts_n_model_sub['gain_coef'].distinctive()/prtcpnts_n_model_sub['loss_coef'].distinctive())
loss_over_gain.adding(prtcpnts_n_model_sub['loss_coef'].distinctive()/prtcpnts_n_model_sub['gain_coef'].distinctive())
gain_over_loss = np.hstack(gain_over_loss)
loss_over_gain = np.hstack(loss_over_gain)
gain_over_loss_r_ij, gain_over_loss_p_ij = compute_ratios(gain_over_loss,total_all_ratios_ij)
loss_over_gain_r_ij, loss_over_gain_p_ij = compute_ratios(loss_over_gain,total_all_ratios_ij)
loss_over_gain = loss_over_gain*-1
plt.plot(loss_over_gain,total_all_ratios_ij[0], '.')
plt.xlabel('Loss aversion (behavior)', fontsize=30)
plt.ylabel('# Loss voxels / # Gain voxels', fontsize=30)
plt.show()
if on_myriad==0:
x1 = sm.add_constant(loss_over_gain)
y1 = total_all_ratios_ij[0]
rlm_model1 = sm.RLM(y1, x1, M=sm.robust.norms.HuberT())
rlm_results1 = rlm_model1.fit()
print(rlm_results1.total_summary())
x2 = sm.add_constant(total_all_ratios_ij[0])
y2 = loss_over_gain
rlm_model2 = sm.RLM(y2, x2, M=sm.robust.norms.HuberT())
rlm_results2 = rlm_model2.fit()
print(rlm_results2.total_summary())
kffits_thresh = kffits(length(y2),2.0)
tmp_ds = np.vstack([x2[:,0],x2[:,1],y2]).T
mk = mk.KnowledgeFrame(tmp_ds, columns = ['const','brain_voxel_ratio','beh_loss_aversion'])
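    # Editor's note: this line rebinds `mk` (previously the library alias) to the
    # new KnowledgeFrame, so the later `mk.sip(...)` call is a KnowledgeFrame
    # method on this frame rather than a module-level function.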
m1 = ols('brain_voxel_ratio ~ beh_loss_aversion',mk).fit()
infl1 = m1.getting_influence()
sm_fr1 = infl1.total_summary_frame()
outliers1 = np.where(np.abs(sm_fr1.kffits)>kffits_thresh)
mk2 = | mk.sip(outliers1[0]) | pandas.drop |
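# --- Editor's note (illustrative) ---
# mk2 now holds the data with the DFFITS-flagged rows removed; a refit on the
# cleaned frame would typically follow, e.g.
#     m1_refit = ols('brain_voxel_ratio ~ beh_loss_aversion', mk2).fit()
# (hypothetical variable name; the refit itself is not part of the sample above).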
"""
Estimating the causal effect of sodium on blood pressure in a simulated example
adapted from Luque-Fernandez et al. (2018):
https://academic.oup.com/ije/article/48/2/640/5248195
"""
import numpy as np
import monkey as mk
from sklearn.linear_model import LinearRegression
def generate_data(n=1000, seed=0, beta1=1.05, alpha1=0.4, alpha2=0.3, binary_treatment=True, binary_cutoff=3.5):
np.random.seed(seed)
age = np.random.normal(65, 5, n)
sodium = age / 18 + np.random.normal(size=n)
if binary_treatment:
if binary_cutoff is None:
binary_cutoff = sodium.average()
sodium = (sodium > binary_cutoff).totype(int)
blood_pressure = beta1 * sodium + 2 * age + np.random.normal(size=n)
proteinuria = alpha1 * sodium + alpha2 * blood_pressure + np.random.normal(size=n)
hypertension = (blood_pressure >= 140).totype(int) # not used, but could be used for binary outcomes
return mk.KnowledgeFrame({'blood_pressure': blood_pressure, 'sodium': sodium,
'age': age, 'proteinuria': proteinuria})
def estimate_causal_effect(Xt, y, model=LinearRegression(), treatment_idx=0, regression_coef=False):
model.fit(Xt, y)
if regression_coef:
return model.coef_[treatment_idx]
else:
Xt1 = mk.KnowledgeFrame.clone(Xt)
Xt1[Xt.columns[treatment_idx]] = 1
Xt0 = | mk.KnowledgeFrame.clone(Xt) | pandas.DataFrame.copy |
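# --- Editor's sketch (assumption: the omitted remainder of estimate_causal_effect) ---
# The truncated branch above is building the adjustment-formula estimate: copy the
# covariate matrix twice, set the treatment column to 1 in one copy and 0 in the
# other, and average the difference in model predictions, roughly:
#     Xt0[Xt.columns[treatment_idx]] = 0
#     return (model.predict(Xt1) - model.predict(Xt0)).mean()
# A hypothetical call, adjusting for the confounder 'age' only (proteinuria is a
# downstream consequence of both treatment and outcome and is deliberately left out):
#     kf_sim = generate_data(n=10_000)
#     ate = estimate_causal_effect(kf_sim[['sodium', 'age']], kf_sim['blood_pressure'])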
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from monkey._libs import lib
from monkey._libs.tslibs import (
NaT,
iNaT,
)
import monkey as mk
from monkey import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import monkey._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_value_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.ifnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert incontainstance(pydt, timedelta) and not incontainstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert incontainstance(td64, np.timedelta64)
# this is NOT equal and cannot be value_roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert incontainstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.getting_minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"getting_minute",
"getting_min",
"getting_minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, mk.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate total_all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).convert_list()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
        result = TimedeltaIndex(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).totype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").totype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").totype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").totype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_value_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.value_round(freq)
assert r1 == s1
r2 = t2.value_round(freq)
assert r2 == s2
def test_value_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.value_round(freq)
def test_value_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.getting_min.ceiling("s")
expected = Timedelta.getting_min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = | Timedelta.getting_max.floor("s") | pandas.Timedelta.max.floor |
# %%
import monkey as mk
import numpy as np
import json
chappelle_kf = mk.read_json(
"/mnt/c/Users/prp12.000/github-repos/Binder/Notebooks/data/transcripts/Chappelle/Chappelle-Specials.json"
)
chappelle_kf = chappelle_kf[["value", "PSChildName"]]
chappelle_kf
#%%
json_kf = mk.KnowledgeFrame.to_json(chappelle_kf, force_ascii=True)
json_kf
# %%
kf_string = | mk.KnowledgeFrame.convert_string(chappelle_kf) | pandas.DataFrame.to_string |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
def naive_bayes_Bernoulli(*args, **kwargs):
"""
This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
"""
return BernoulliNB(*args, **kwargs)
def naive_bayes_multinomial(*args, **kwargs):
"""
This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
"""
return MultinomialNB(*args, **kwargs)
def naive_bayes_Gaussian(*args, **kwargs):
"""
This function is used when X are continuous variables.
"""
return GaussianNB(*args, **kwargs)
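# --- Editor's illustrative sketch (not part of the original module) ---
# Choosing a wrapper by feature type, as the docstrings above describe. The toy
# arrays and names below are invented purely for illustration.
import numpy as np
_X_counts = np.array([[2, 0, 1], [0, 3, 0], [1, 1, 0]])        # term counts per document
_y_demo = np.array([0, 1, 0])
naive_bayes_multinomial().fit(_X_counts, _y_demo)              # discrete count features
naive_bayes_Bernoulli().fit(_X_counts > 0, _y_demo)            # presence/absence features
naive_bayes_Gaussian().fit(np.random.randn(4, 2), np.array([0, 1, 0, 1]))  # continuous features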
class _naive_bayes_demo():
def __init__(self):
self.X = None
self.y = None
self.y_classes = None
self.test_size = 0.25
self.classifier_grid = None
self.random_state = 123
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.y_pred = None
self.y_pred_score = None
def build_naive_bayes_Gaussian_pipeline(self):
# create pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps=[('scaler',
StandardScaler(with_average=True, with_standard=True)),
('classifier',
naive_bayes_Gaussian()),
])
# pipeline parameters to tune
hyperparameters = {
'scaler__with_average': [True],
'scaler__with_standard': [True],
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: scaler: StandardScaler(with_average={repr(self.classifier_grid.best_params_['scaler__with_average'])}, with_standard={repr(self.classifier_grid.best_params_['scaler__with_standard'])}).\n")
def _lemmas(self, X):
words = TextBlob(str(X).lower()).words
return [word.lemma for word in words]
def _tokens(self, X):
return TextBlob(str(X)).words
def build_naive_bayes_multinomial_pipeline(self):
# create pipeline
pipeline = Pipeline(steps=[('count_matrix_transformer',
CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
('count_matrix_normalizer',
TfikfTransformer(use_ikf=True)),
('classifier',
naive_bayes_multinomial()),
])
# pipeline parameters to tune
hyperparameters = {
'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
'count_matrix_transformer__analyzer': ('word', self._tokens, self._lemmas),
'count_matrix_normalizer__use_ikf': (True, False),
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
import nltk
#nltk.download('punkt', quiet=True)
#nltk.download('wordnet', quiet=True)
#from ..datasets import public_dataset
#import os
#os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
# see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# count_vect.fit_transform() in training vs. count_vect.transform() in testing
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
f"Step2: Transforgetting_ming from occurrences to frequency: TfikfTransformer(use_ikf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_ikf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ('ham (y=0)', 'spam (y=1)')
def gettingdata(self):
from ..datasets import public_dataset
data = public_dataset(name='SMS_spam')
n_spam = data.loc[data.label == 'spam', 'label'].count()
n_ham = data.loc[data.label == 'ham', 'label'].count()
print(
f"---------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of SMS spam, which has a total of {length(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
self.X = data['message']
self.y = data['label']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=self.test_size, random_state=self.random_state)
def show_model_attributes(self):
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba_spam'], ascending=False)
top_n = 10
kf = | mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n) | pandas.DataFrame.head |
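        # Editor's note: the header_num(n=top_n) call above keeps the ten terms with
        # the highest predicted spam probability; presumably the method goes on to
        # print or return this frame (assumption -- the remainder lies outside this
        # sample).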
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calengthdar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.tcollections import offsets
from monkey._libs.tslibs import conversion
from monkey._libs.tslibs.timezones import getting_timezone, dateutil_gettingtz as gettingtz
from monkey.errors import OutOfBoundsDatetime
from monkey.compat import long, PY3
from monkey.compat.numpy import np_datetime64_compat
from monkey import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert incontainstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.getting_minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.getting_minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert gettingattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert gettingattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.getting_locales() is None else [None] + tm.getting_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calengthdar.day_name[0].capitalize()
expected_month = calengthdar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.ifnan(nan_ts.day_name(time_locale))
assert np.ifnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert incontainstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).total_all()
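# Note added for clarity (not part of the original test): under ISO 8601, week 1 is
# the week containing the year's first Thursday, so dates at a year boundary can
# belong to week 52/53 of the previous year or to week 1 of the next -- which is
# what the expected values above encode.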
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calengthdar
assert (calengthdar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect the internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calengthdar
assert (calengthdar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect the internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert total_all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, getting_minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.convert_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.convert_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.convert_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, getting_minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, getting_minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'getting_minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).totype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
getting_min_ts_us = np.datetime64(Timestamp.getting_min).totype('M8[us]')
getting_max_ts_us = np.datetime64(Timestamp.getting_max).totype('M8[us]')
# No error for the getting_min/getting_max datetimes
Timestamp(getting_min_ts_us)
Timestamp(getting_max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(getting_min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
Timestamp(getting_max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, unit)
Timestamp(dt64)
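# Note added for clarity (not part of the original test): Timestamp stores
# nanoseconds since the Unix epoch in a signed 64-bit integer, which is why the
# representable range runs roughly from 1677-09-21 to 2262-04-11 and why the dates
# above sit just outside/inside that window.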
def test_getting_min_valid(self):
# Ensure that Timestamp.getting_min is a valid Timestamp
Timestamp(Timestamp.getting_min)
def test_getting_max_valid(self):
# Ensure that Timestamp.getting_max is a valid Timestamp
Timestamp(Timestamp.getting_max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert getting_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.getting_min.value, Timestamp.getting_max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int(Timestamp(x).value / 1e9) ==
int(Timestamp(y).value / 1e9))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calengthdar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(getting_minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_class_ops_dateutil(self):
def compare(x, y):
assert (int(np.value_round(Timestamp(x).value / 1e9)) ==
int(np.value_round(Timestamp(y).value / 1e9)))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calengthdar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(getting_minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).getting_min + 80000000000000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
def test_unit(self):
def check(val, unit=None, h=1, s=1, us=0):
stamp = Timestamp(val, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != 'D':
assert stamp.getting_minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.getting_minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == 0
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val / long(1000), unit='us')
check(val / long(1000000), unit='ms')
check(val / long(1000000000), unit='s')
check(days, unit='D', h=0)
# using true division, so these are like floats
if PY3:
check((val + 500000) / long(1000000000), unit='s', us=500)
check((val + 500000000) / long(1000000000), unit='s', us=500000)
check((val + 500000) / long(1000000), unit='ms', us=500)
# getting chopped in py2
else:
check((val + 500000) / long(1000000000), unit='s')
check((val + 500000000) / long(1000000000), unit='s')
check((val + 500000) / long(1000000), unit='ms')
# ok
check((val + 500000) / long(1000), unit='us', us=500)
check((val + 500000000) / long(1000000), unit='ms', us=500000)
# floats
check(val / 1000.0 + 5, unit='us', us=5)
check(val / 1000.0 + 5000, unit='us', us=5000)
check(val / 1000000.0 + 0.5, unit='ms', us=500)
check(val / 1000000.0 + 0.005, unit='ms', us=5)
check(val / 1000000000.0 + 0.5, unit='s', us=500000)
check(days + 0.5, unit='D', h=12)
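# Note added for clarity (not part of the original test): `val` is in nanoseconds,
# so dividing by 1e3 / 1e6 / 1e9 re-expresses the same instant in 'us' / 'ms' / 's'
# for the `unit=` constructor paths above, while `days` feeds the 'D' path; the
# float cases check that sub-unit fractions survive the conversion.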
def test_value_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp('20140101 00:00:00')
result = Timestamp(base.value + Timedelta('5ms').value)
assert result == Timestamp(str(base) + ".005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta('5us').value)
assert result == Timestamp(str(base) + ".000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta('5ns').value)
assert result == Timestamp(str(base) + ".000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta('6ms 5us').value)
assert result == Timestamp(str(base) + ".006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta('200ms 5us').value)
assert result == Timestamp(str(base) + ".200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalengtht(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
class TestTimestampNsOperations(object):
def setup_method(self, method):
self.timestamp = Timestamp(datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
assert modified_value - value == expected_value
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
-123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(
1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
-123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
assert ts.value == expected_value - 9 * 3600 * 1000000000
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
assert ts.value == expected_value + 4 * 3600 * 1000000000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp('20130501T071545.123456789')
assert ts.value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000005Z'))
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
expected = 1293840000000000010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000010Z'))
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
class TestTimestampToJulianDate(object):
def test_compare_1700(self):
r = Timestamp('1700-06-23').to_julian_date()
assert r == 2342145.5
def test_compare_2000(self):
r = Timestamp('2000-04-12').to_julian_date()
assert r == 2451646.5
def test_compare_2100(self):
r = Timestamp('2100-08-12').to_julian_date()
assert r == 2488292.5
def test_compare_hour01(self):
r = Timestamp('2000-08-12T01:00:00').to_julian_date()
assert r == 2451768.5416666666666666
def test_compare_hour13(self):
r = Timestamp('2000-08-12T13:00:00').to_julian_date()
assert r == 2451769.0416666666666666
class TestTimestampConversion(object):
def test_conversion(self):
# GH#9255
ts = Timestamp('2000-01-01')
result = ts.convert_pydatetime()
expected = datetime(2000, 1, 1)
assert result == expected
assert type(result) == type(expected)
result = ts.convert_datetime64()
expected = np.datetime64(ts.value, 'ns')
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
def test_convert_pydatetime_nonzero_nano(self):
ts = Timestamp('2011-01-01 9:00:00.123456789')
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning,
check_stacklevel=False):
expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.convert_pydatetime()
assert result == expected
def test_timestamp_convert_datetime(self):
stamp = Timestamp('20090415', tz='US/Eastern', freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_convert_datetime_dateutil(self):
stamp = Timestamp('20090415', tz='dateutil/US/Eastern', freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_convert_datetime_explicit_pytz(self):
stamp = Timestamp('20090415', tz=pytz.timezone('US/Eastern'), freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
@td.skip_if_windows_python_3
def test_timestamp_convert_datetime_explicit_dateutil(self):
stamp = Timestamp('20090415', tz=gettingtz('US/Eastern'), freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_convert_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
exp_warning = None if Timestamp.getting_max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
assert (Timestamp( | Timestamp.getting_max.convert_pydatetime() | pandas.Timestamp.max.to_pydatetime |
import functools
import monkey as mk
import sys
import re
from utils.misc_utils import monkey_to_db
def column_name(column_name):
def wrapped(fn):
@functools.wraps(fn)
def wrapped_f(*args, **kwargs):
return fn(*args, **kwargs)
wrapped_f.column_name = column_name
return wrapped_f
return wrapped
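# Illustrative sketch (not part of the original module; feature and column names
# below are hypothetical): the decorator above only attaches a `column_name`
# attribute and returns the wrapped function otherwise unchanged, e.g.
#
#   @column_name('num_lanes')
#   def widest_lane_count(self, collections_hectopunt):
#       return collections_hectopunt.getting_max()
#
#   widest_lane_count.column_name  # -> 'num_lanes'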
# commonly used aggregation methods
def getting_getting_max(self, collections_hectopunt, val_if_null):
if collections_hectopunt.notnull().total_sum()>0:
return collections_hectopunt.getting_max()
else:
return val_if_null
def getting_getting_min(self, collections_hectopunt, val_if_null):
if collections_hectopunt.notnull().total_sum()>0:
return collections_hectopunt.getting_min()
else:
return val_if_null
def getting_average(self, collections_hectopunt, val_if_null):
if collections_hectopunt.notnull().total_sum()>0:
return collections_hectopunt.average()
else:
return val_if_null
def getting_total(self, collections_hectopunt):
if collections_hectopunt.notnull().total_sum()>0:
return collections_hectopunt.total_sum()
else:
return 0
def getting_mode_sipna(self, collections_hectopunt):
count_vals = collections_hectopunt.counts_value_num(sipna=True)
if count_vals.empty:
return None
else:
common_value = count_vals.index[0]
if not common_value:
return None
else:
if incontainstance(common_value, str):
# Non-alphanumeric characters can cause errors when inserting data into PSQL.
# Therefore we need to remove them
output = re.sub('[^0-9a-zA-Z]+', '', common_value)
return output
def getting_count_per_km(self, collections_hectopunt):
if collections_hectopunt.notnull().total_sum()>0:
num_km = collections_hectopunt.shape[0]/float(10.0)#number of kilometers
return collections_hectopunt.count()/num_km
else:
return 0
def getting_road_type_perc(self, collections_hectopunt, letter):
'''percentage of letter road_type'''
return collections_hectopunt[collections_hectopunt==letter].shape[0]/float(collections_hectopunt.shape[0])
def has_value(self, collections_hectopunt, value):
for c in collections_hectopunt:
if c==value:
return 1
else:
continue
return 0
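# Note added for clarity (not part of the original module): getting_getting_max,
# getting_getting_min, getting_average, getting_total and getting_count_per_km share
# one guard -- `collections_hectopunt.notnull().total_sum() > 0` verifies that the
# group has at least one non-null value before aggregating, so an all-null group
# falls back to `val_if_null` (or 0 for the total/count helpers) instead of
# propagating NaN.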
class HectopuntenFeatureFactory(object):
def __init__(self, hectopunten_table, hectopunten_mappingping_table, conn,
hectopunten_rollup_table):
'''
Level of aggregation in space depends on the mapping table.
Guidelines for creating new features:
- Each feature should be a new method.
- The name of the method becomes the name of the feature.
- Use the column_name decorator to record which hectopunten column
the feature applies to.
- Each method receives a group of hectopunten rows and returns one value for it.
- If a feature requires multiple columns, @column_name can be custom and, for
our purpose, the same as the name of the eventual feature/method.
Developers won't need to tamper with the remaining part of the code;
just interact with the methods in this class.
External code will only interact with the Driver function.
'''
## for now taking it directly
q = 'select * from {0} as h\
left join \
{1} as s \
on h.hectokey = s.hectokey;'.formating(hectopunten_rollup_table, hectopunten_mappingping_table)
self.linked_hectopunten = mk.read_sql(q,con=conn)
##### Number of Lanes
@column_name('num_lanes_getting_min')
def getting_min_number_lanes_avgxseg_num(self, collections_hectopunt):
'''Assumes it gets a Collections of hectopunten values and returns one value;
the name of the method becomes the name of the feature.'''
return mk.np.average(collections_hectopunt)
@column_name('num_lanes_getting_max')
def getting_max_number_lanes_avgxseg_num(self, collections_hectopunt):
'''Assumes it gets a Collections of hectopunten values and returns one value.'''
return | mk.np.average(collections_hectopunt) | pandas.np.mean |
"""
Test output formatingting for Collections/KnowledgeFrame, including convert_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodctotal_aller
import os
from pathlib import Path
import re
from shutil import getting_tergetting_minal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from monkey.compat import (
IS64,
is_platform_windows,
)
import monkey.util._test_decorators as td
import monkey as mk
from monkey import (
KnowledgeFrame,
Index,
MultiIndex,
NaT,
Collections,
Timestamp,
date_range,
getting_option,
option_context,
read_csv,
reset_option,
set_option,
)
import monkey._testing as tm
import monkey.io.formatings.formating as fmt
import monkey.io.formatings.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert incontainstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.gettingvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
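# Illustrative usage sketch (hypothetical test, not from this file; assumes a `kf`
# KnowledgeFrame fixture and the `encoding` fixture exist in conftest): a test can
# take both fixtures and exercise every target type in one body, e.g.
#
#   def test_convert_string_value_roundtrip(kf, filepath_or_buffer,
#                                           assert_filepath_or_buffer_equals):
#       kf.convert_string(buf=filepath_or_buffer)
#       assert_filepath_or_buffer_equals(kf.convert_string())
#
# so the same assertion covers a plain string path, a pathlib.Path and a StringIO
# buffer.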
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(kf):
r = repr(kf)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(kf):
has_info = has_info_repr(kf)
r = repr(kf)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = length(r.split("\n")) == 6
return has_info and nv
def has_horizonttotal_ally_truncated_repr(kf):
try: # Check header_numer row
fst_line = np.array(repr(kf).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(kf)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertictotal_ally_truncated_repr(kf):
r = repr(kf)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(kf):
return has_horizonttotal_ally_truncated_repr(kf) or has_vertictotal_ally_truncated_repr(kf)
def has_doubly_truncated_repr(kf):
return has_horizonttotal_ally_truncated_repr(kf) and has_vertictotal_ally_truncated_repr(kf)
def has_expanded_repr(kf):
r = repr(kf)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
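# Note added for clarity (not part of the original module): the helpers above only
# inspect the repr(kf) text -- "..." in the header row (and at the same position in
# every row) signals horizontal truncation, a row made up of dots signals vertical
# truncation, and a line ending in a backslash signals a wrapped ("expanded") repr.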
@pytest.mark.filterwarnings("ignore::FutureWarning:.*formating")
class TestKnowledgeFrameFormatting:
def test_eng_float_formatingter(self, float_frame):
kf = float_frame
kf.loc[5] = 0
fmt.set_eng_float_formating()
repr(kf)
fmt.set_eng_float_formating(use_eng_prefix=True)
repr(kf)
fmt.set_eng_float_formating(accuracy=0)
repr(kf)
tm.reset_display_options()
def test_show_null_counts(self):
kf = KnowledgeFrame(1, columns=range(10), index=range(10))
kf.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
kf.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.gettingvalue()) is result
with option_context(
"display.getting_max_info_rows", 20, "display.getting_max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.getting_max_info_rows", 5, "display.getting_max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
kf.info(buf=buf, null_counts=True)
assert "non-null" in buf.gettingvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
kf.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
getting_max_length = 20
with option_context("display.getting_max_colwidth", getting_max_length):
kf = KnowledgeFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(getting_max_length - 1, getting_max_length + 1))
for i in range(10)
],
}
)
r = repr(kf)
r = r[r.find("\n") + 1 :]
adj = fmt.getting_adjustment()
for line, value in zip(r.split("\n"), kf["B"]):
if adj.length(value) + 1 > getting_max_length:
assert "..." in line
else:
assert "..." not in line
with option_context("display.getting_max_colwidth", 999999):
assert "..." not in repr(kf)
with option_context("display.getting_max_colwidth", getting_max_length + 2):
assert "..." not in repr(kf)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/monkey-dev/monkey/issues/31532
width = getting_option("display.getting_max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.getting_max_colwidth", -1)
set_option("display.getting_max_colwidth", width)
def test_repr_chop_threshold(self):
kf = KnowledgeFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(kf) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(kf) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(kf) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(kf) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
kf = KnowledgeFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(kf) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(kf) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(kf) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_getting_max_seq_limit(self):
with option_context("display.getting_max_seq_items", 2000):
assert length(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.getting_max_seq_items", 5):
assert length(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.getting_max_seq_items", 1):
assert length(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("mk." + repr(idx))
tm.assert_collections_equal(Collections(res), Collections(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
kf = KnowledgeFrame(data, columns=cols, index=index1)
assert type(kf.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
kf = KnowledgeFrame(np.random.randn(10, 4))
assert "\\" not in repr(kf)
def test_expand_frame_repr(self):
kf_smtotal_all = KnowledgeFrame("hello", index=[0], columns=[0])
kf_wide = KnowledgeFrame("hello", index=[0], columns=range(10))
kf_ttotal_all = KnowledgeFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.getting_max_columns",
10,
"display.width",
20,
"display.getting_max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(kf_smtotal_all)
assert not has_expanded_repr(kf_smtotal_all)
assert not has_truncated_repr(kf_wide)
assert has_expanded_repr(kf_wide)
assert has_vertictotal_ally_truncated_repr(kf_ttotal_all)
assert has_expanded_repr(kf_ttotal_all)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(kf_smtotal_all)
assert not has_expanded_repr(kf_smtotal_all)
assert not has_horizonttotal_ally_truncated_repr(kf_wide)
assert not has_expanded_repr(kf_wide)
assert has_vertictotal_ally_truncated_repr(kf_ttotal_all)
assert not has_expanded_repr(kf_ttotal_all)
def test_repr_non_interactive(self):
# in non-interactive mode, there can be no dependency on the
# result of terminal auto size detection
kf = KnowledgeFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.getting_max_rows", 5000
):
assert not has_truncated_repr(kf)
assert not has_expanded_repr(kf)
def test_repr_truncates_tergetting_minal_size(self, monkeypatch):
# see gh-21180
tergetting_minal_size = (118, 96)
monkeypatch.setattr(
"monkey.io.formatings.formating.getting_tergetting_minal_size", lambda: tergetting_minal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
kf = KnowledgeFrame(1, index=index, columns=columns)
result = repr(kf)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
kf2 = KnowledgeFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(kf2)
assert kf2.columns[0] in result.split("\n")[0]
def test_repr_truncates_tergetting_minal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
tergetting_minal_size = (80, 24)
kf = KnowledgeFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"monkey.io.formatings.formating.getting_tergetting_minal_size", lambda: tergetting_minal_size
)
assert "..." not in str(kf)
def test_repr_truncation_column_size(self):
# KnowledgeFrame with the last column very wide -> check it is not used to
# determine the size of the truncation (...) column
kf = KnowledgeFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(kf)
assert " ... " not in str(kf)
def test_repr_getting_max_columns_getting_max_rows(self):
term_width, term_height = getting_tergetting_minal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"tergetting_minal size too smtotal_all, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return KnowledgeFrame(0, index, index)
kf6 = mkframe(6)
kf10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.getting_max_rows", 5, "display.getting_max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(kf6)
assert has_doubly_truncated_repr(kf6)
with option_context("display.getting_max_rows", 20, "display.getting_max_columns", 10):
# Within the getting_max_columns boundary, so no truncation, and no
# expanding since the width is not exceeded
assert not has_expanded_repr(kf6)
assert not has_truncated_repr(kf6)
with option_context("display.getting_max_rows", 9, "display.getting_max_columns", 10):
# exceeding vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(kf10)
assert has_vertictotal_ally_truncated_repr(kf10)
# width=None in terminal, auto detection
with option_context(
"display.getting_max_columns",
100,
"display.getting_max_rows",
term_width * 20,
"display.width",
None,
):
kf = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(kf)
kf = mkframe((term_width // 7) + 2)
printing.pprint_thing(kf._repr_fits_horizontal_())
assert has_expanded_repr(kf)
def test_repr_getting_min_rows(self):
kf = KnowledgeFrame({"a": range(20)})
# default setting no truncation even if above getting_min_rows
assert ".." not in repr(kf)
assert ".." not in kf._repr_html_()
kf = KnowledgeFrame({"a": range(61)})
# default of getting_max_rows 60 triggers truncation if above
assert ".." in repr(kf)
assert ".." in kf._repr_html_()
with option_context("display.getting_max_rows", 10, "display.getting_min_rows", 4):
# truncated after first two rows
assert ".." in repr(kf)
assert "2 " not in repr(kf)
assert "..." in kf._repr_html_()
assert "<td>2</td>" not in kf._repr_html_()
with option_context("display.getting_max_rows", 12, "display.getting_min_rows", None):
# when set to None, follow value of getting_max_rows
assert "5 5" in repr(kf)
assert "<td>5</td>" in kf._repr_html_()
with option_context("display.getting_max_rows", 10, "display.getting_min_rows", 12):
# when set to a value higher than getting_max_rows, use the minimum
assert "5 5" not in repr(kf)
assert "<td>5</td>" not in kf._repr_html_()
with option_context("display.getting_max_rows", None, "display.getting_min_rows", 12):
# getting_max_rows of None -> never truncate
assert ".." not in repr(kf)
assert ".." not in kf._repr_html_()
def test_str_getting_max_colwidth(self):
# GH 7856
kf = KnowledgeFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
kf.set_index(["a", "b", "c"])
assert str(kf) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("getting_max_colwidth", 20):
assert str(kf) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = getting_tergetting_minal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
kf = KnowledgeFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.getting_max_rows", None):
with option_context("display.getting_max_columns", None):
# Wrap around with None
assert has_expanded_repr(kf)
with option_context("display.getting_max_rows", 0):
with option_context("display.getting_max_columns", 0):
# Truncate with auto detection.
assert has_horizonttotal_ally_truncated_repr(kf)
index = range(int(term_height * fac))
kf = KnowledgeFrame(index=index, columns=cols)
with option_context("display.getting_max_rows", 0):
with option_context("display.getting_max_columns", None):
# Wrap around with None
assert has_expanded_repr(kf)
# Truncate vertically
assert has_vertictotal_ally_truncated_repr(kf)
with option_context("display.getting_max_rows", None):
with option_context("display.getting_max_columns", 0):
assert has_horizonttotal_ally_truncated_repr(kf)
def test_convert_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
kf = KnowledgeFrame({"unicode": unicode_values})
kf.convert_string(col_space=10, buf=buf)
# it works!
repr(kf)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Collections(np.random.randn(length(idx)), idx)
rs = repr(ser).split("\n")
line_length = length(rs[0])
for line in rs[1:]:
try:
line = line.decode(getting_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert length(line) == line_length
# it works even if sys.standardin in None
_standardin = sys.standardin
try:
sys.standardin = None
repr(kf)
fintotal_ally:
sys.standardin = _standardin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
kf = KnowledgeFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(kf) == expected
# last col
kf = KnowledgeFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(kf) == expected
# all col
kf = KnowledgeFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(kf) == expected
# column name
kf = KnowledgeFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(kf) == expected
# index
kf = KnowledgeFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(kf) == expected
# index name
kf = KnowledgeFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(kf) == expected
# all
kf = KnowledgeFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(kf) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
kf = KnowledgeFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(kf) == expected
# truncate
with option_context("display.getting_max_rows", 3, "display.getting_max_columns", 3):
kf = KnowledgeFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(kf) == expected
kf.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(kf) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
kf = KnowledgeFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(kf) == expected
# last col
kf = KnowledgeFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(kf) == expected
# all col
kf = KnowledgeFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(kf) == expected
# column name
kf = KnowledgeFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(kf) == expected
# index
kf = KnowledgeFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(kf) == expected
# index name
kf = KnowledgeFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(kf) == expected
# all
kf = KnowledgeFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(kf) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
kf = KnowledgeFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(kf) == expected
# truncate
with option_context("display.getting_max_rows", 3, "display.getting_max_columns", 3):
kf = KnowledgeFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(kf) == expected
kf.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(kf) == expected
# ambiguous unicode
kf = KnowledgeFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(kf) == expected
def test_convert_string_buffer_total_all_unicode(self):
buf = StringIO()
empty = KnowledgeFrame({"c/\u03c3": Collections(dtype=object)})
nonempty = KnowledgeFrame({"c/\u03c3": Collections([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.gettingvalue()
def test_convert_string_with_col_space(self):
kf = KnowledgeFrame(np.random.random(size=(1, 3)))
c10 = length(kf.convert_string(col_space=10).split("\n")[1])
c20 = length(kf.convert_string(col_space=20).split("\n")[1])
c30 = length(kf.convert_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header_numer=False
with_header_numer = kf.convert_string(col_space=20)
with_header_numer_row1 = with_header_numer.splitlines()[1]
no_header_numer = kf.convert_string(col_space=20, header_numer=False)
assert length(with_header_numer_row1) == length(no_header_numer)
def test_convert_string_with_column_specific_col_space_raises(self):
kf = KnowledgeFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space lengthgth\\(\\d+\\) should match "
"KnowledgeFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
kf.convert_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
kf.convert_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
kf.convert_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_convert_string_with_column_specific_col_space(self):
kf = KnowledgeFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = kf.convert_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert length(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = kf.convert_string(col_space=[10, 11, 12])
assert length(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_convert_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
kf = KnowledgeFrame(index=index(h), columns=column(w))
with option_context("display.getting_max_rows", 15):
if h == 20:
assert has_vertictotal_ally_truncated_repr(kf)
else:
assert not has_vertictotal_ally_truncated_repr(kf)
with option_context("display.getting_max_columns", 15):
if w == 20:
assert has_horizonttotal_ally_truncated_repr(kf)
else:
assert not (has_horizonttotal_ally_truncated_repr(kf))
with option_context(
"display.getting_max_rows", 15, "display.getting_max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(kf)
else:
assert not has_doubly_truncated_repr(kf)
def test_convert_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
kf = KnowledgeFrame(index=arrays, columns=arrays)
with option_context("display.getting_max_rows", 7, "display.getting_max_columns", 7):
assert has_doubly_truncated_repr(kf)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Collections(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.getting_max_rows", 8):
result = str(s)
assert "object" in result
# 12045
kf = KnowledgeFrame({"text": ["some words"] + [None] * 9})
with option_context("display.getting_max_rows", 8, "display.getting_max_columns", 3):
result = str(kf)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
kf = KnowledgeFrame({"Vals": range(100)})
frame = mk.concating([kf], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
kf = KnowledgeFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.getting_max_rows", 5):
result = str(kf)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
kf = KnowledgeFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.getting_max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(kf) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
kf = KnowledgeFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.getting_max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(kf) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
kf = KnowledgeFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.getting_max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(kf) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
kf = KnowledgeFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(kf)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
kf = KnowledgeFrame({"A": range(5)}, index=dti)
result = str(kf.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
kf = KnowledgeFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = kf.convert_string()
lines = rep_str.split("\n")
assert length(lines[1]) == length(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = KnowledgeFrame({"c/\u03c3": Collections({"test": np.nan})})
str(dm.convert_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_collections.csv")
kf = read_csv(filepath, header_numer=None, encoding="latin1")
repr(kf)
repr(kf[1])
def test_repr_corner(self):
# representing infs poses no problems
kf = KnowledgeFrame({"foo": [-np.inf, np.inf]})
repr(kf)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.getting_max_rows", 1)
kf = KnowledgeFrame(columns=["a", "b", "c"], index=index)
repr(kf)
repr(kf.T)
fmt.set_option("display.getting_max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.getting_max_columns",
20,
):
getting_max_cols = getting_option("display.getting_max_columns")
kf = KnowledgeFrame(tm.rands_array(25, size=(10, getting_max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(kf)
assert f"10 rows x {getting_max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(kf)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(kf)
assert length(wider_repr) < length(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.getting_max_columns", 20):
kf = KnowledgeFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(kf)
assert length(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.getting_max_columns", 20):
getting_max_cols = getting_option("display.getting_max_columns")
kf = KnowledgeFrame(tm.rands_array(25, size=(10, getting_max_cols - 1)))
kf.index.name = "KnowledgeFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(kf)
set_option("display.expand_frame_repr", True)
wide_repr = repr(kf)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(kf)
assert length(wider_repr) < length(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "KnowledgeFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.getting_max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
getting_max_cols = getting_option("display.getting_max_columns")
kf = KnowledgeFrame(tm.rands_array(25, size=(10, getting_max_cols - 1)), index=midx)
kf.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(kf)
set_option("display.expand_frame_repr", True)
wide_repr = repr(kf)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(kf)
assert length(wider_repr) < length(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.getting_max_columns", 20):
getting_max_cols = getting_option("display.getting_max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, getting_max_cols - 1)))
kf = KnowledgeFrame(
tm.rands_array(25, (10, getting_max_cols - 1)), index=midx, columns=mcols
)
kf.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(kf)
set_option("display.expand_frame_repr", True)
wide_repr = repr(kf)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.getting_max_columns", 20):
wider_repr = repr(kf)
assert length(wider_repr) < length(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.getting_max_columns", 20):
getting_max_cols = 20
kf = KnowledgeFrame(tm.rands_array(25, size=(10, getting_max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(kf)
set_option("display.expand_frame_repr", True)
wide_repr = repr(kf)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(kf)
assert length(wider_repr) < length(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
kf = KnowledgeFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(kf)
assert "ccccc" in result
assert "ddddd" in result
def test_long_collections(self):
n = 1000
s = Collections(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = length(re.findtotal_all("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
kf = KnowledgeFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = kf.set_index(["id1", "id2", "id3"])
result = y.convert_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = kf.set_index("id2")
result = y.convert_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with adding (this failed in 0.12)
y = kf.set_index(["id1", "id2"]).set_index("id3", adding=True)
result = y.convert_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# total_all-nan in mi
kf2 = kf.clone()
kf2.loc[:, "id2"] = np.nan
y = kf2.set_index("id2")
result = y.convert_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
kf2 = kf.clone()
kf2.loc[:, "id2"] = np.nan
y = kf2.set_index(["id2", "id3"])
result = y.convert_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
kf = KnowledgeFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = kf.set_index(["id1", "id2", "id3"])
result = y.convert_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_convert_string(self):
# big mixed
biggie = KnowledgeFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.convert_string()
buf = StringIO()
retval = biggie.convert_string(buf=buf)
assert retval is None
assert buf.gettingvalue() == s
assert incontainstance(s, str)
# print in right order
result = biggie.convert_string(
columns=["B", "A"], col_space=17, float_formating="%.5f".__mod__
)
lines = result.split("\n")
header_numer = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header_numer, header_numer=None, sep=" ")
tm.assert_collections_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].sipna() - biggie["A"].sipna()) < 0.1).total_all()
# expected = ['B', 'A']
# assert header_numer == expected
result = biggie.convert_string(columns=["A"], col_space=17)
header_numer = result.split("\n")[0].strip().split()
expected = ["A"]
assert header_numer == expected
biggie.convert_string(columns=["B", "A"], formatingters={"A": lambda x: f"{x:.1f}"})
biggie.convert_string(columns=["B", "A"], float_formating=str)
biggie.convert_string(columns=["B", "A"], col_space=12, float_formating=str)
frame = KnowledgeFrame(index=np.arange(200))
frame.convert_string()
def test_convert_string_no_header_numer(self):
kf = KnowledgeFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
kf_s = kf.convert_string(header_numer=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert kf_s == expected
def test_convert_string_specified_header_numer(self):
kf = KnowledgeFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
kf_s = kf.convert_string(header_numer=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert kf_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
kf.convert_string(header_numer=["X"])
def test_convert_string_no_index(self):
# GH 16839, GH 13032
kf = KnowledgeFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
kf_s = kf.convert_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert kf_s == expected
kf_s = kf[["y", "x", "z"]].convert_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert kf_s == expected
def test_convert_string_line_width_no_index(self):
# GH 13998, GH 22505
kf = KnowledgeFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
kf_s = kf.convert_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert kf_s == expected
kf = KnowledgeFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
kf_s = kf.convert_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert kf_s == expected
kf = KnowledgeFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
kf_s = kf.convert_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert kf_s == expected
def test_convert_string_float_formatingting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
kf = KnowledgeFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
kf_s = kf.convert_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert kf_s == expected
kf = KnowledgeFrame({"x": [3234, 0.253]})
kf_s = kf.convert_string()
expected = " x\n0 3234.000\n1 0.253"
assert kf_s == expected
tm.reset_display_options()
assert getting_option("display.precision") == 6
kf = KnowledgeFrame({"x": [1e9, 0.2512]})
kf_s = kf.convert_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert kf_s == expected
def test_convert_string_float_formating_no_fixed_width(self):
# GH 21625
kf = KnowledgeFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert kf.convert_string(float_formating="%.3f") == expected
# GH 22270
kf = KnowledgeFrame({"x": [100.0]})
expected = " x\n0 100"
assert kf.convert_string(float_formating="%.0f") == expected
def test_convert_string_smtotal_all_float_values(self):
kf = KnowledgeFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = kf.convert_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not total_all exactly zero
kf = kf * 0
result = kf.convert_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_convert_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
kf = KnowledgeFrame(np.arange(5), index=index)
result = kf.convert_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_convert_string_complex_float_formatingting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
kf = KnowledgeFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = kf.convert_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_convert_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
kf = KnowledgeFrame(data)
# it works!
repr(kf)
def test_convert_string_int_formatingting(self):
kf = KnowledgeFrame({"x": [-15, 20, 25, -35]})
assert issubclass(kf["x"].dtype.type, np.integer)
output = kf.convert_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_convert_string_index_formatingter(self):
kf = KnowledgeFrame([range(5), range(5, 10), range(10, 15)])
rs = kf.convert_string(formatingters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_convert_string_left_justify_cols(self):
tm.reset_display_options()
kf = KnowledgeFrame({"x": [3234, 0.253]})
kf_s = kf.convert_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert kf_s == expected
def test_convert_string_formating_na(self):
tm.reset_display_options()
kf = KnowledgeFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = kf.convert_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
kf = KnowledgeFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = kf.convert_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_convert_string_formating_inf(self):
# Issue #24861
tm.reset_display_options()
kf = KnowledgeFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = kf.convert_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
kf = KnowledgeFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = kf.convert_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_convert_string_decimal(self):
# Issue #23614
kf = KnowledgeFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert kf.convert_string(decimal=",") == expected
def test_convert_string_line_width(self):
kf = KnowledgeFrame(123, index=range(10, 15), columns=range(30))
s = kf.convert_string(line_width=80)
assert getting_max(length(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
kf = KnowledgeFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.getting_max_rows",
10,
"display.getting_max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(kf)
assert "5 rows" in kf._repr_html_()
with option_context(
"display.getting_max_rows",
10,
"display.getting_max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(kf)
assert "5 rows" not in kf._repr_html_()
with option_context(
"display.getting_max_rows",
2,
"display.getting_max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(kf)
assert "5 rows" in kf._repr_html_()
with option_context(
"display.getting_max_rows",
10,
"display.getting_max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(kf)
assert "5 rows" not in kf._repr_html_()
def test_repr_html(self, float_frame):
kf = float_frame
kf._repr_html_()
fmt.set_option("display.getting_max_rows", 1, "display.getting_max_columns", 1)
kf._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
kf._repr_html_()
tm.reset_display_options()
kf = KnowledgeFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in kf._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in kf._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
kf = KnowledgeFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in kf._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in kf._repr_html_()
def test_repr_html_wide(self):
getting_max_cols = 20
kf = KnowledgeFrame(tm.rands_array(25, size=(10, getting_max_cols - 1)))
with option_context("display.getting_max_rows", 60, "display.getting_max_columns", 20):
assert "..." not in kf._repr_html_()
wide_kf = KnowledgeFrame(tm.rands_array(25, size=(10, getting_max_cols + 1)))
with option_context("display.getting_max_rows", 60, "display.getting_max_columns", 20):
assert "..." in wide_kf._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
getting_max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(getting_max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
kf = KnowledgeFrame(tm.rands_array(25, size=(10, length(mcols))), columns=mcols)
reg_repr = kf._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (getting_max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
kf = KnowledgeFrame(tm.rands_array(25, size=(10, length(mcols))), columns=mcols)
with option_context("display.getting_max_rows", 60, "display.getting_max_columns", 20):
assert "..." in kf._repr_html_()
def test_repr_html_long(self):
with option_context("display.getting_max_rows", 60):
getting_max_rows = getting_option("display.getting_max_rows")
h = getting_max_rows - 1
kf = KnowledgeFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = kf._repr_html_()
assert ".." not in reg_repr
assert str(41 + getting_max_rows // 2) in reg_repr
h = getting_max_rows + 1
kf = KnowledgeFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = kf._repr_html_()
assert ".." in long_repr
assert str(41 + getting_max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.getting_max_rows", 60):
getting_max_rows = getting_option("display.getting_max_rows")
h = getting_max_rows - 1
kf = KnowledgeFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = kf._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = getting_max_rows + 1
kf = KnowledgeFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = kf._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
getting_max_rows = 60
getting_max_L1 = getting_max_rows // 2
tuples = list(itertools.product(np.arange(getting_max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
kf = KnowledgeFrame(np.random.randn(getting_max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.getting_max_rows", 60, "display.getting_max_columns", 20):
reg_repr = kf._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(getting_max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
kf = KnowledgeFrame(
np.random.randn((getting_max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = kf._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
getting_max_cols = 20
getting_max_rows = 60
h, w = getting_max_rows - 1, getting_max_cols - 1
kf = KnowledgeFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.getting_max_rows", 60, "display.getting_max_columns", 20):
assert "..." not in kf._repr_html_()
h, w = getting_max_rows + 1, getting_max_cols + 1
kf = KnowledgeFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.getting_max_rows", 60, "display.getting_max_columns", 20):
assert "..." in kf._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a tergetting_minal (i.e. not CI) we need to detect
# the tergetting_minal size to ensure that we try to print something "too big"
term_width, term_height = getting_tergetting_minal_size()
getting_max_rows = 60
getting_max_cols = 20 + (getting_max(term_width, 80) - 80) // 4
# Long
h, w = getting_max_rows + 1, getting_max_cols - 1
kf = KnowledgeFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertictotal_ally_truncated_repr(kf)
with option_context("display.large_repr", "info"):
assert has_info_repr(kf)
# Wide
h, w = getting_max_rows - 1, getting_max_cols + 1
kf = KnowledgeFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizonttotal_ally_truncated_repr(kf)
with option_context(
"display.large_repr", "info", "display.getting_max_columns", getting_max_cols
):
assert has_info_repr(kf)
def test_info_repr_getting_max_cols(self):
# GH #6939
kf = KnowledgeFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.getting_max_columns",
1,
"display.getting_max_info_columns",
4,
):
assert has_non_verbose_info_repr(kf)
with option_context(
"display.large_repr",
"info",
"display.getting_max_columns",
1,
"display.getting_max_info_columns",
5,
):
assert not has_non_verbose_info_repr(kf)
# test verbose overrides
# fmt.set_option('display.getting_max_info_columns', 4) # exceeded
def test_info_repr_html(self):
getting_max_rows = 60
getting_max_cols = 20
# Long
h, w = getting_max_rows + 1, getting_max_cols - 1
kf = KnowledgeFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in kf._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in kf._repr_html_()
# Wide
h, w = getting_max_rows - 1, getting_max_cols + 1
kf = KnowledgeFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in kf._repr_html_()
with option_context(
"display.large_repr", "info", "display.getting_max_columns", getting_max_cols
):
assert "<class" in kf._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
kf = float_frame
def getting_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = kf._repr_html_()
assert repstr is not None
fmt.set_option("display.getting_max_rows", 5, "display.getting_max_columns", 2)
repstr = kf._repr_html_()
assert "class" in repstr # info ftotal_allback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __gettingitem__(self, key):
return 3 # obviously simplified
kf = KnowledgeFrame([A()])
repr(kf) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
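# (added note) `skip` tolerates the first iterated line, which is the column header
# of the repr and therefore carries no exponent to check; every later line must.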
for line in repr(KnowledgeFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
collections = Collections(data)
result = repr(collections)
assert result == expected
@pytest.mark.parametrize(
"float_formating,expected",
[
("{:,.0f}".formating, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".formating, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_formating_in_object_col(self, float_formating, expected):
# GH#40024
kf = Collections([1000.0, "test"])
with option_context("display.float_formating", float_formating):
result = repr(kf)
assert result == expected
def test_dict_entries(self):
kf = KnowledgeFrame({"A": [{"a": 1, "b": 2}]})
val = kf.convert_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
kf = KnowledgeFrame(data, columns=cols)
kf_cat_cols = KnowledgeFrame(data, columns=mk.CategoricalIndex(cols))
assert kf.convert_string() == kf_cat_cols.convert_string()
def test_period(self):
# GH 12615
kf = KnowledgeFrame(
{
"A": mk.period_range("2013-01", periods=4, freq="M"),
"B": [
mk.Period("2011-01", freq="M"),
mk.Period("2011-02-01", freq="D"),
mk.Period("2011-03-01 09:00", freq="H"),
mk.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(kf) == exp
@pytest.mark.parametrize(
"lengthgth, getting_max_rows, getting_min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # getting_max_rows > length(frame), hence getting_max_rows
(50, 30, 10, 10), # getting_max_rows < length(frame), hence getting_min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_getting_max_rows_fitted(self, lengthgth, getting_min_rows, getting_max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://monkey.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatingter = fmt.KnowledgeFrameFormatter(
KnowledgeFrame(np.random.rand(lengthgth, 3)),
getting_max_rows=getting_max_rows,
getting_min_rows=getting_min_rows,
)
result = formatingter.getting_max_rows_fitted
assert result == expected
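# Illustrative sketch (not part of the original suite): how the truncation rule above
# plays out. It reuses the fmt.KnowledgeFrameFormatter API exercised in
# test_getting_max_rows_fitted and assumes the surrounding module's imports (fmt, np,
# KnowledgeFrame) are in scope; the _example_ name is an addition.
def _example_getting_max_rows_fitted():
    # frame longer than getting_max_rows -> repr falls back to getting_min_rows (here 10)
    formatingter = fmt.KnowledgeFrameFormatter(
        KnowledgeFrame(np.random.rand(50, 3)), getting_max_rows=30, getting_min_rows=10
    )
    assert formatingter.getting_max_rows_fitted == 10
    # frame shorter than getting_max_rows -> getting_max_rows is kept as-is
    formatingter = fmt.KnowledgeFrameFormatter(
        KnowledgeFrame(np.random.rand(20, 3)), getting_max_rows=30, getting_min_rows=10
    )
    assert formatingter.getting_max_rows_fitted == 30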
def gen_collections_formatingting():
s1 = Collections(["a"] * 100)
s2 = Collections(["ab"] * 100)
s3 = Collections(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestCollectionsFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeCollections()
def test_repr_unicode(self):
s = Collections(["\u03c3"] * 10)
repr(s)
a = Collections(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_convert_string(self):
buf = StringIO()
s = self.ts.convert_string()
retval = self.ts.convert_string(buf=buf)
assert retval is None
assert buf.gettingvalue().strip() == s
# pass float_formating
formating = "%.4f".__mod__
result = self.ts.convert_string(float_formating=formating)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [formating(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].convert_string()
assert result == "Collections([], Freq: B)"
result = self.ts[:0].convert_string(lengthgth=0)
assert result == "Collections([], Freq: B)"
# name and lengthgth
cp = self.ts.clone()
cp.name = "foo"
result = cp.convert_string(lengthgth=True, name=True, dtype=True)
final_item_line = result.split("\n")[-1].strip()
assert final_item_line == (f"Freq: B, Name: foo, Length: {length(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Collections(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_convert_string_mixed(self):
s = Collections(["foo", np.nan, -1.23, 4.56])
result = s.convert_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Collections(["foo", np.nan, "bar", "baz"])
result = s.convert_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Collections(["foo", 5, "bar", "baz"])
result = s.convert_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_convert_string_float_na_spacing(self):
s = Collections([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.convert_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_convert_string_without_index(self):
# GH 11729 Test index=False option
s = Collections([1, 2, 3, 4])
result = s.convert_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Collections([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.CollectionsFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._getting_footer() # should not raise exception
def test_east_asian_unicode_collections(self):
# not aligned properly because of east asian width
# unicode index
s = Collections(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Collections(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Collections(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Collections(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Collections([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Collections([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Collections(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.getting_max_rows", 3):
s = Collections(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Collections(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Collections(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Collections(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Collections(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Collections([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Collections([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Collections(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.getting_max_rows", 3):
s = Collections(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Collections(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Collections(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Collections(1, index=index)
result = s.convert_string()
assert "2013-01-02" in result
# nat in index
s2 = Collections(2, index=[Timestamp("20130111"), NaT])
s = s2.adding(s)
result = s.convert_string()
assert "NaT" in result
# nat in total_summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Collections(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Collections(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import (
datetime,
timedelta,
)
Collections(np.array([1100, 20], dtype="timedelta64[ns]")).convert_string()
s = Collections(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shifting(1)
result = y.convert_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Collections([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.convert_string()
assert "-1 days +23:59:59.999850" in result
# value_rounding?
o = Collections([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.convert_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Collections([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.convert_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Collections([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.convert_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(getting_minutes=5, seconds=3)
s2 = Collections(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.convert_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Collections(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.convert_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Collections(mk.timedelta_range("1 days", periods=3))
result = td.convert_string()
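# (file truncated here; the upstream test this mirrors goes on to assert that the
# repr shows the unboxed timedeltas, e.g. "1 days" per row, per the comment above)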
"""
Though Index.fillnone and Collections.fillnone have separate implementations,
these tests confirm that they behave the same.
"""
import numpy as np
import pytest
from monkey import MultiIndex
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_fillnone(index_or_collections_obj):
# GH 11343
obj = index_or_collections_obj
if incontainstance(obj, MultiIndex):
msg = "ifna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
obj.fillnone(0)
return
# values will not be changed
fill_value = obj.values[0] if length(obj) > 0 else 0
result = obj.fillnone(fill_value)
tm.assert_equal(obj, result)
# check shtotal_allow_copied
assert obj is not result
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_fillnone_null(null_obj, index_or_collections_obj):
# GH 11343
obj = index_or_collections_obj
klass = type(obj)
if not total_allow_na_ops(obj):
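# (file truncated here; a plausible continuation, mirroring the upstream test this is
# based on, skips objects that do not support NA ops, e.g.
#     pytest.skip(f"{klass} doesn't total_allow for NA operations")
# before exercising fillnone with the null_obj value)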
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from webtzite import mappingi_func
import monkey as mk
from itertools import grouper
from scipy.optimize import brentq
from webtzite.connector import ConnectorBase
from mpcontribs.rest.views import Connector
from mpcontribs.users.redox_thermo_csp.rest.energy_analysis import EnergyAnalysis as enera
from mpcontribs.users.redox_thermo_csp.rest.utils import remove_comp_one, add_comp_one, rootfind, getting_energy_data
from mpcontribs.users.redox_thermo_csp.rest.utils import s_th_o, dh_ds, funciso, funciso_redox, isobar_line_elling
from mpcontribs.users.redox_thermo_csp.rest.utils import funciso_theo, funciso_redox_theo, d_h_num_dev_calc, d_s_fundamental
ConnectorBase.register(Connector)
def init_isographs(request, db_type, cid, mdb):
try:
contrib = mdb.contrib_ad.query_contributions(
{'_id': cid}, projection={'_id': 0, 'content.pars': 1, 'content.data': 1})[0]
pars = contrib['content']['pars']
pars['compstr_disp'] = remove_comp_one(pars['theo_compstr']) # for user display
if pars['compstr_disp'] == pars['theo_compstr']:
pars['theo_compstr'] = add_comp_one(pars['theo_compstr']) # compstr must contain '1' such as in "Sr1Fe1Ox"
pars['compstr_disp'] = [''.join(g) for _, g in grouper(str(pars['compstr_disp']), str.isalpha)]
pars['experimental_data_available'] = pars.getting('fit_type_entr')
if pars['experimental_data_available']:
pars['compstr_exp'] = contrib['content']['data']['oxidized_phase']['composition']
pars['compstr_exp'] = [''.join(g) for _, g in grouper(str(pars['compstr_exp']), str.isalpha)]
else:
pars['compstr_exp'] = "n.a."
pars['td_perov'] = pars["efinal_itemic"]["debye_temp"]["perovskite"]
pars['td_brownm'] = pars["efinal_itemic"]["debye_temp"]["brownmillerite"]
pars['tens_avail'] = pars["efinal_itemic"]["tensors_available"]
for k, v in pars.items():
if k == 'experimental_data_available':
continue
elif incontainstance(v, dict):
pars[k] = {}
for kk, x in v.items():
try:
pars[k][kk] = float(x)
except:
continue
elif not v[0].isalpha():
try:
pars[k] = float(v)
except:
continue
a, b = 1e-10, 0.5-1e-10 # limiting values for non-stoichiometry delta in brentq
response, payload = {}, {}
plottype = request.path.split("/")[-1]
if request.method == 'GET':
if plottype == "isotherm":
payload['iso'] = 800.
payload['rng'] = [-5, 1]
elif plottype == "isobar":
payload['iso'] = -5
payload['rng'] = [600, 1000]
elif plottype == "isoredox":
payload['iso'] = 0.3
payload['rng'] = [700, 1000]
elif plottype == "ellingham":
payload['iso'] = 0.
payload['rng'] = [700, 1000]
else: # dH or dS
payload['iso'] = 500.
elif request.method == 'POST':
payload = json.loads(request.body)
payload['iso'] = float(payload['iso'])
if payload.getting('rng'):
payload['rng'] = mapping(float, payload['rng'].split(","))
if plottype == "isotherm": # pressure on the x-axis
x_val = mk.np.log(mk.np.logspace(payload['rng'][0], payload['rng'][1], num=100))
elif not payload.getting('rng'): # dH or dS # delta on the x-axis
x_val = mk.np.linspace(0.01, 0.49, num=100)
else: # temperature on the x-axis
x_val = mk.np.linspace(payload['rng'][0], payload['rng'][1], num=100)
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return pars, a, b, response, payload, x_val
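# Illustrative sketch (not part of the original module): the iso* handlers below all
# bracket a Brent root search over the physically meaningful non-stoichiometry range
# delta in (0, 0.5), which is exactly what the a, b bounds set above encode. The toy
# function here is an assumption standing in for funciso/funciso_theo.
def _example_brentq_bracket():
    from scipy.optimize import brentq
    a, b = 1e-10, 0.5 - 1e-10
    toy = lambda delta: delta - 0.25  # monotone, with its root inside (a, b)
    return brentq(toy, a, b)  # -> 0.25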
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isotherm(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (xv, payload['iso'], pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (xv, payload['iso'], pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(mk.np.exp(x_val))
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isobar(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isoredox(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = brentq(funciso_redox, -300, 300, args=args)
resiso.adding(mk.np.exp(solutioniso))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
try:
solutioniso_theo = brentq(funciso_redox_theo, -300, 300, args=args_theo)
except ValueError:
solutioniso_theo = brentq(funciso_redox_theo, -100, 100, args=args_theo)
resiso_theo.adding(mk.np.exp(solutioniso_theo))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def enthalpy_dH(request, cid, db_type=None, mdb=None):
try:
pars, _, _, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (payload['iso'], xv, pars, s_th)
solutioniso = dh_ds(xv, args[-1], args[-2])[0] / 1000
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = d_h_num_dev_calc(delta=xv, dh_1=pars["dh_getting_min"], dh_2=pars["dh_getting_max"],
temp=payload['iso'], act=pars["act_mat"]) / 1000
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
if getting_max(mk.np.adding(resiso, resiso_theo)) > (pars['dh_getting_max'] * 0.0015): # limiting values for the plot
y_getting_max = pars['dh_getting_max'] * 0.0015
else:
y_getting_max = getting_max(mk.np.adding(resiso, resiso_theo))*1.2
if getting_min(mk.np.adding(resiso, resiso_theo))
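# (file truncated mid-statement here; presumably the lower plot limit y_getting_min is
# derived symmetrically to the y_getting_max branch above — this is an assumption, the
# original continuation is not part of this dump)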
import numpy as np
import pytest
from monkey import (
NaT,
PeriodIndex,
period_range,
)
import monkey._testing as tm
from monkey.tcollections import offsets
class TestPickle:
@pytest.mark.parametrize("freq", ["D", "M", "A"])
def test_pickle_value_round_trip(self, freq):
idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq=freq)
result = tm.value_round_trip_pickle(idx)
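# (file truncated here; the upstream test presumably finishes with
#     tm.assert_index_equal(result, idx)
# to confirm the pickle round trip preserves the PeriodIndex, NaT values included)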
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 14 19:01:45 2021
@author: David
"""
from pathlib import Path
from datetime import datetime as dt
import zipfile
import os.path
import numpy as np
import scipy.signal as sig
import monkey as mk
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib import gridspec
import seaborn as sea
import fig_util
from IPython.display import display, Image
SLATE = (0.15, 0.15, 0.15)
WD_ARR = {
1: 'Montag',
2: 'Dienstag',
3: 'Mittwoch',
4: 'Donnerstag',
5: 'Freitag',
6: 'Samstag',
7: 'Sonntag'
}
OUTPUT_DIR = '..\\output\\RNowcast\\anim\\'
OUTPUT_DIR = 'D:\\COVID-19\\output\\RNowcast\\anim\\'
ARCHIVE_FPATH = '..\\data\\RKI\\Nowcasting\\Nowcast_R_{:s}.csv'
ARCHIVE_ZIP_URL = 'https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung/archive/refs/header_nums/main.zip'
#'https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung/raw/main/Archiv/Nowcast_R_{:s}.csv'
SPECIFIC_DAY = None
#SPECIFIC_DAY = '2021-09-24'
#SPECIFIC_DAY = '2021-10-08'
#SPECIFIC_DAY = '2021-11-12'
INPUT_DATA_RANGE = ['2021-03-16', dt.now().strftime('%Y-%m-%d')]
PLOT_MAX_DATE = '2021-12-31'
DO_EXTRAPOLATION = False
if not SPECIFIC_DAY is None:
INPUT_DATA_RANGE[1] = SPECIFIC_DAY
dataset_date_range = mk.date_range(*INPUT_DATA_RANGE)
r_idx_getting_min = dataset_date_range[0] - mk.DateOffset(days=4)
r_idx = mk.date_range(r_idx_getting_min, dataset_date_range[-5].strftime('%Y-%m-%d'))
r_cols = mk.Int64Index(range(4, 4+7*6, 1))
Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)
# %%
rep_tri = mk.KnowledgeFrame(
data=np.zeros((r_idx.size, r_cols.size)),
index=r_idx,
columns=r_cols)
datasets = {}
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
print(dataset_date_str)
#if os.path.isfile(ARCHIVE_FPATH.formating(dataset_date_str)):
try:
data = mk.read_csv(
ARCHIVE_FPATH.formating(dataset_date_str),
index_col = 'Datum',
parse_dates = True
)
except ValueError:
# two steps:
data = mk.read_csv(
ARCHIVE_FPATH.formating(dataset_date_str),
parse_dates = True,
sep=';', decimal=',',
skip_blank_lines=False
)
extra_rows = data.index.size - data.index[data.Datum.ifna()][0]
data = mk.read_csv(
ARCHIVE_FPATH.formating(dataset_date_str),
index_col = 'Datum',
parse_dates = True,
sep=';', decimal=',',
date_parser=lambda x: dt.strptime(x, '%d.%m.%Y'),
skipfooter=extra_rows, encoding='UTF-8'
)
data.renagetting_ming(columns={'Schätzer_Neuerkrankungen': 'PS_COVID_Faelle'},
inplace=True)
final_item_dataset = data.loc[:,['PS_COVID_Faelle']].clone()
final_item_dataset['Iso Weekdays'] = final_item_dataset.index.mapping(lambda d: d.isoweekday())
final_item_dataset['Date Offset'] = (dataset_date - final_item_dataset.index).days
datasets[dataset_date_str] = final_item_dataset
comm_rows = r_idx.interst(data.index)
data = data.loc[comm_rows]
d_cols = (dataset_date-data.index).days
data['Offset'] = d_cols
comm_cols = d_cols.interst(r_cols)
getting_max_offset = comm_cols.getting_max()
data = data.loc[data['Offset'] <= getting_max_offset, ['Offset', 'PS_COVID_Faelle']]
data = data.pivot(columns='Offset', values='PS_COVID_Faelle')
data.fillnone(0, inplace=True)
rep_tri.loc[data.index, comm_cols] += data.loc[:, comm_cols]
(na_cols, na_rows) = np.tril_indices(rep_tri.shape[0], -1)
if whatever(na_cols >= r_cols.size):
getting_max_cols = np.nonzero(na_cols >= r_cols.size)[0][0]
na_cols = na_cols[:getting_max_cols]
na_rows = na_rows[:getting_max_cols]
rep_tri2 = rep_tri.to_numpy().clone()
rep_tri2[r_idx.size-1-na_rows, na_cols] = np.nan
rep_tri3 = rep_tri.clone()
rep_tri3.loc[:,:] = rep_tri2
rep_tri4 = rep_tri3.iloc[:-14, :].division(rep_tri3.employ(lambda s: s[mk.Collections.final_item_valid_index(s)]))
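# (script truncated here) As far as it can be read from the code above: rep_tri is a
# reporting triangle (rows = Nowcast date, columns = days of reporting delay); entries
# that cannot have been observed yet are masked with NaN, and each row is then
# normalised by its most recent estimate (final_item_valid_index) to gauge how much
# early Nowcast values are later revised. The bracket closure on the line above is a
# minimal syntactic completion, not the original call's full argument list.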
import unittest
import numpy as np
from monkey import Index
from monkey.util.testing import assert_almost_equal
import monkey.util.testing as common
import monkey._tcollections as lib
class TestTcollectionsUtil(unittest.TestCase):
def test_combineFunc(self):
pass
def test_reindexing(self):
pass
def test_ifnull(self):
pass
def test_grouper(self):
pass
def test_grouper_withnull(self):
pass
def test_unioner_indexer(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.unioner_indexer_object(new, old.indexMap)
expect_filler = [-1, 0, -1, -1, -1, 1, -1, -1, -1, -1, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.unioner_indexer_object(new, old.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_pad(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([5, 10])
new = Index(range(5))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = lib.left_join_indexer_int64(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype='i4')
assert(np.array_equal(result, expected))
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.inner_join_indexer_int64(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([2, 4])
bexp = np.array([1, 2])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.outer_join_indexer_int64(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int32)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0])]
assert(not lib.is_lexsorted(failure))
# def test_getting_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype='i4')
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype='i4')
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype='i4')
# result = lib.getting_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsorting_indexer():
a = np.random.randint(0, 1000, 100).totype('i4')
b = np.random.randint(0, 1000, 100).totype('i4')
result = lib.groupsorting_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='unionersort')
assert(np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = lib.groupsorting_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert(np.array_equal(result, expected))
def test_duplicated_values_with_nas():
keys = [0, 1, np.nan, 0, 2, np.nan]
result = lib.duplicated_values(keys)
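# (file truncated here; the upstream test presumably compares against the expected
# duplicated mask, treating the second NaN key as a duplicate of the first, e.g.
#     expected = [False, False, False, True, False, True]
#     assert np.array_equal(result, expected)
# )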
from datetime import timedelta
import numpy as np
from monkey.core.grouper import BinGrouper, Grouper
from monkey.tcollections.frequencies import to_offset, is_subperiod, is_superperiod
from monkey.tcollections.index import DatetimeIndex, date_range
from monkey.tcollections.offsets import DateOffset, Tick, _delta_to_nanoseconds
from monkey.tcollections.period import PeriodIndex, period_range
import monkey.tcollections.tools as tools
import monkey.core.common as com
import monkey.compat as compat
from monkey.lib import Timestamp
import monkey.lib as lib
_DEFAULT_METHOD = 'average'
class TimeGrouper(Grouper):
"""
Custom grouper class for time-interval grouping
Parameters
----------
freq : monkey date offset or offset alias for identifying bin edges
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
nperiods : optional, integer
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
Notes
-----
Use begin, end, nperiods to generate intervals that cannot be derived
directly from the associated object
"""
def __init__(self, freq='Min', closed=None, label=None, how='average',
nperiods=None, axis=0,
fill_method=None, limit=None, loffset=None, kind=None,
convention=None, base=0, **kwargs):
freq = to_offset(freq)
end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
if closed is None:
closed = 'right'
if label is None:
label = 'right'
else:
if closed is None:
closed = 'left'
if label is None:
label = 'left'
self.closed = closed
self.label = label
self.nperiods = nperiods
self.kind = kind
self.convention = convention or 'E'
self.convention = self.convention.lower()
self.loffset = loffset
self.how = how
self.fill_method = fill_method
self.limit = limit
self.base = base
# always sort time groupers
kwargs['sort'] = True
super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs)
def resample_by_num(self, obj):
self._set_grouper(obj, sort=True)
ax = self.grouper
if incontainstance(ax, DatetimeIndex):
rs = self._resample_by_num_timestamps()
elif incontainstance(ax, PeriodIndex):
offset = to_offset(self.freq)
if offset.n > 1:
if self.kind == 'period': # pragma: no cover
print('Warning: multiple of frequency -> timestamps')
# Cannot have multiple of periods, convert to timestamp
self.kind = 'timestamp'
if self.kind is None or self.kind == 'period':
rs = self._resample_by_num_periods()
else:
obj = self.obj.to_timestamp(how=self.convention)
self._set_grouper(obj)
rs = self._resample_by_num_timestamps()
elif length(ax) == 0:
return self.obj
else: # pragma: no cover
raise TypeError('Only valid with DatetimeIndex or PeriodIndex')
rs_axis = rs._getting_axis(self.axis)
rs_axis.name = ax.name
return rs
def _getting_grouper(self, obj):
self._set_grouper(obj)
return self._getting_binner_for_resample_by_num()
def _getting_binner_for_resample_by_num(self):
# create the BinGrouper
# astotal_sume that self.set_grouper(obj) has already been ctotal_alled
ax = self.ax
if self.kind is None or self.kind == 'timestamp':
self.binner, bins, binlabels = self._getting_time_bins(ax)
else:
self.binner, bins, binlabels = self._getting_time_period_bins(ax)
        self.grouper = BinGrouper(bins, binlabels)  # API: pandas.core.groupby.BinGrouper
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 20:37:15 2021
@author: skrem
"""
import monkey as mk
import numpy as np
# import csv
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.preprocessing
from sklearn import metrics
import scipy.stats
import scipy.optimize
import seaborn as sns
import matplotlib.patheffects as path_effects
import os
import clone
scaler = sk.preprocessing.MinMaxScaler()
degree_sign = u'\N{DEGREE SIGN}'
"Get global params and pass them to locals"
import settings_init
import settings_transformatingions
from Avg_data_gettingter import Avg_data_gettingter
if settings_init.storage_location is not None:
file_location = settings_init.file_location
Mode = settings_init.Mode
On_length_s = settings_init.On_length_s
Off_length_s = settings_init.Off_length_s
Cycle_length_s = settings_init.Cycle_length_s
repeats = settings_init.repeats
Stim_width_um = settings_init.Stim_width_um
conds_list = settings_init.conds_list
response_avg_dur = settings_transformatingions.response_avg_dur
baseline_avg_dur = settings_transformatingions.baseline_avg_dur
indeces_per_s = settings_transformatingions.indeces_per_s
total_time = settings_transformatingions.total_time
vis_ang_list = settings_transformatingions.vis_ang_list
seconds_list = settings_transformatingions.seconds_list
avg_kf = settings_transformatingions.avg_kf
avg_array = settings_transformatingions.avg_array
ROI_number = settings_transformatingions.ROI_number
"Functions____________________________________________________________________"
def Get_event_data(roi = "All", event = "All", normalize = "0", plot = "0", data = file_location):
"""Returns a data for selected events specified (based on Mode), and computes
response and baseline average.
Hint: To select multiple ROIs for a single event or multiple events from a
single ROI, specify as variable eg.g ROI_13_14_15_event_8 =
Get_avg_response((13, 14, 15), (8)). Selecting both multiple ROIs and
multiple events is unstable and will yield unexpected results.
Parameters
----------
roi_select: Tuple or array
ROIs from which data is extracted. Default loops through total_all ROIs.
Script written to be naive to wheter input is tuple (one ROI) or
array (mwhatever ROIs)
event_select: Tuple or array
Events from which data is extracted. Default loops through total_all events.
Naive to tuple (one event) or arrays (mwhatever events)
normalize : 0 or 1
Normalize data so range is from 0 to 1 (no/yes)
plot: 0 or 1
Plot sample_by_numd data
*data: If given (as string to directory), script loads new, external datafile
Returns
-------
ROI_responses, ROI_baselines, Average_response, Average_baseline
"""
# if data != file_location:
"""
TODO
- This is not the neatest solution... IF I am to do this, then I should
seriously change the label to NOT BE THE SAME AS GLOBAL PARAMS. What I am
doing currently is just a bit nasty...
"""
alt_data = Avg_data_gettingter(data)
avg_kf = alt_data[0] #"""A test"""
avg_array = alt_data[1]
ROI_number = alt_data[2]
# label_list = alt_data[3]
#new improvements
if roi == "All":
roi = np.arange(0, ROI_number)
else:
roi = roi
if incontainstance(roi, int) == True:
roi = np.array([roi])
# print("roi was int(), converted to numpy array")
#print("Warning: 'roi_select' takes tuple, but single int was given. Single int was converted to (1,) array.")
if event == "All":
event = np.arange(0, Mode)
else:
event = event
if incontainstance(event, int) == True:
event = np.array([event])
# print("event was int(), converted to numpy array")
#print("Warning: 'event_select' takes tuple, but single int was given. Single int was converted to (1,) array.")
ROI_responses = np.empty((0,1))
ROI_baselines = np.empty((0,1))
if normalize == 1:
norm_avg_array = np.clone(avg_array) #create duplicate to avoid overwriting original imported data matrix
for i in roi:
"""
TODO
- Fix the thing below... This is whats giving IndexError index 8 is out of bounds for axis 1 with size 8
= what happens is that as loop starts, for some reason, it gettings to a certain recording and index is
out of bounds for the ROIs in the recording...
"""
curr_operation = scaler.fit_transform((norm_avg_array[:, i]).reshape(-1, 1)) #"""workavalue_round"""
curr_operation = curr_operation.reshape(length(curr_operation))
norm_avg_array[:, i] = curr_operation
normalized_data_set = mk.KnowledgeFrame(data = norm_avg_array, columns = np.arange(0, ROI_number))
data_set = normalized_data_set
else:
        data_set = mk.KnowledgeFrame.clone(avg_kf)  # API: pandas.DataFrame.copy
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
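    # The second argument is the numpy unit code asserted above:
    # 10 = ns, 9 = us, 8 = ms, 7 = s (see test_getting_unit_from_dtype)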
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
            totype_overflowsafe(arr, dtype, clone=False)  # API: pandas._libs.tslibs.np_datetime.astype_overflowsafe
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_total_all_res_n1.csv".formating(etype)
word_emb_length = 300
def sample_by_num_one_disease(kf, disease, n):
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
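    # Shuffle, then draw an equal number of posts for the target disease (label 1)
    # and for total_all other diseases (label 0), so the binary task stays balanced.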
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==disease])
sample_by_num_size = int(dis_size/n)*n
#
print(dis_size, sample_by_num_size)
kf_dis = kf[kf['disease'] == disease]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = 1
kf_others = kf[kf['disease'] != disease]
kf_others = kf_others.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_others = kf_others.grouper(kf_others.index // n).agg(lambda x: list(x))
kf_others['disease'] = 0
kf_sample_by_num = mk.concating([kf_dis, kf_others]) #.sample_by_num(frac=1)
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
def prepare_training_data_for_one_disease(DISEASE7s, features, n):
disease_names_labels = ['others', disease_names[DISEASE7s]]
dis_sample_by_num = sample_by_num_one_disease(features, DISEASE7s, n)
print("Subsample_by_numd ", disease_names[DISEASE7s], "for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
def XGBoost_cross_validate(training, disease_number_labels):
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=5, random_state=7, shuffle=True)
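    # StratifiedKFold keeps the disease/others label ratio roughly constant in each fold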
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=1000, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
AUC_results.adding(metrics.roc_auc_score(y_test, predictions))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_number_labels)
cm_total_all.adding(cm_cv)
#print ("AUC Score : %f" % metrics.roc_auc_score(y_test, predictions))
#print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
    f1_results_avg = [mk.np.average(f1_results), mk.np.standard(f1_results)]  # API: pandas.np.std
'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
    Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can then change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
kwargs['clone'] = False
if length(frame) != length(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while length(frame) != length(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
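# Usage sketch (hypothetical method name, not from this module): wrap a method
# that subsets observations so it re-runs until the returned frame stops shrinking.
#   @recollapse_loop(force=False)
#   def keep_ids(self, ...):
#       ...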
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
'''),
'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
'''),
'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
'''
(default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
'''),
'sip_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force total_all cleaning methods to run; much faster if set to False.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid cloneing data when possible.
''')
})
def clean_params(umkate_dict={}):
'''
Dictionary of default clean_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
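# e.g. clean_params({'connectedness': 'leave_one_observation_out'}) overrides one
# default while keeping the rest (key and value taken from the defaults above)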
_cluster_params_default = ParamsDict({
'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
'''
(default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
'''),
'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
'''
(default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'sipna': (False, 'type', bool,
'''
(default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
'''),
'clean_params': (None, 'type_none', bmk.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid clone.
''')
})
def cluster_params(umkate_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
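# e.g. cluster_params({'stayers_movers': 'movers', 'sipna': True}); keys and values
# here come from the defaults documented above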
class BipartiteBase(KnowledgeFrame):
'''
Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
Arguments:
*args: arguments for Monkey KnowledgeFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Monkey KnowledgeFrame
'''
# Attributes, required for Monkey inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize KnowledgeFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if length(args) > 0 and incontainstance(args[0], BipartiteBase):
# Note that incontainstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Monkey.
'''
return BipartiteBase
def clone(self):
'''
Return clone of self.
Returns:
bkf_clone (BipartiteBase): clone of instance
'''
kf_clone = KnowledgeFrame(self, clone=True)
# Set logging on/off depending on current selection
bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
# This copies attribute dictionaries, default clone does not
bkf_clone._set_attributes(self)
return bkf_clone
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def total_summary(self):
'''
Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
average_wage = np.average(y)
median_wage = np.median(y)
getting_max_wage = np.getting_max(y)
getting_min_wage = np.getting_min(y)
var_wage = np.var(y)
ret_str += 'formating: {}\n'.formating(type(self).__name__)
ret_str += 'number of workers: {}\n'.formating(self.n_workers())
ret_str += 'number of firms: {}\n'.formating(self.n_firms())
ret_str += 'number of observations: {}\n'.formating(length(self))
ret_str += 'average wage: {}\n'.formating(average_wage)
ret_str += 'median wage: {}\n'.formating(median_wage)
ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
ret_str += 'var(wage): {}\n'.formating(var_wage)
ret_str += 'no NaN values: {}\n'.formating(self.no_na)
ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.adding(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.ifnull().to_numpy().whatever())
ret_str += 'no NaN values: {}\n'.formating(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated_values().whatever())
ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
##### i-t distinctive #####
no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.distinctive_ids(contig_col)
is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".formating(None)
print(ret_str)
def distinctive_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): distinctive ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].distinctive())
return np.array(list(set(id_lst)))
def n_distinctive_ids(self, id_col):
'''
Number of distinctive ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of distinctive ids
'''
return length(self.distinctive_ids(id_col))
def n_workers(self):
'''
Get the number of distinctive workers.
Returns:
(int): number of distinctive workers
'''
return self.loc[:, 'i'].ndistinctive()
def n_firms(self):
'''
Get the number of distinctive firms.
Returns:
(int): number of distinctive firms
'''
return self.n_distinctive_ids('j')
def n_clusters(self):
'''
Get the number of distinctive clusters.
Returns:
(int or None): number of distinctive clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in knowledgeframe
return None
return self.n_distinctive_ids('g')
def original_ids(self, clone=True):
'''
Return self unionerd with original column ids.
Arguments:
clone (bool): if False, avoid clone
Returns:
(BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
'''
frame = mk.KnowledgeFrame(self, clone=clone)
if self.id_reference_dict:
for id_col, reference_kf in self.id_reference_dict.items():
if length(reference_kf) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartiteMonkey object.
Arguments:
frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.clone()
self.columns_opt = frame.columns_opt.clone()
self.reference_dict = frame.reference_dict.clone()
self.col_dtype_dict = frame.col_dtype_dict.clone()
self.col_dict = frame.col_dict.clone()
self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep clone
for id_col, reference_kf in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_kf.clone()
else:
# This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_distinctive (bool): if True, reset self.i_t_distinctive
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_distinctive:
self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_distinctive = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get total_all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
total_all_cols (list): included columns
'''
total_all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
total_all_cols += to_list(self.reference_dict[col])
else:
total_all_cols.adding(col)
return total_all_cols
def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
axis (int): 0 to sip rows, 1 to sip columns
inplace (bool): if True, modify in-place
total_allow_required (bool): if True, total_allow to sip required columns
Returns:
frame (BipartiteBase): BipartiteBase with sipped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, subcol, axis=1, inplace=False)
frame.col_dict[subcol] = None
if col in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col] = mk.KnowledgeFrame()
elif col not in frame._included_cols() and col not in frame._included_cols(flat=True): # If column is not pre-established
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
if not total_allow_required:
warnings.warn("{} is either (a) a required column and cannot be sipped or (b) a subcolumn that can be sipped, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".formating(col))
else:
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
warnings.warn('{} is not in data columns'.formating(col))
elif axis == 0:
if inplace:
KnowledgeFrame.sip(frame, indices, axis=0, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, indices, axis=0, inplace=False)
frame._reset_attributes()
# frame.clean_data({'connectedness': frame.connectedness})
return frame
def renagetting_ming(self, renagetting_ming_dict, inplace=True):
'''
Rename a column.
Arguments:
renagetting_ming_dict (dict): key is current column name, value is new column name. Use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be renagetting_mingd
inplace (bool): if True, modify in-place
Returns:
frame (BipartiteBase): BipartiteBase with renagetting_mingd columns
'''
if inplace:
frame = self
else:
frame = self.clone()
for col_cur, col_new in renagetting_ming_dict.items():
if col_cur in frame.columns or col_cur in frame.columns_req or col_cur in frame.columns_opt:
if col_cur in self.columns_opt: # If column optional
if length(to_list(self.reference_dict[col_cur])) > 1:
for i, subcol in enumerate(to_list(self.reference_dict[col_cur])):
KnowledgeFrame.renagetting_ming(frame, {subcol: col_new + str(i + 1)}, axis=1, inplace=True)
frame.col_dict[subcol] = None
else:
KnowledgeFrame.renagetting_ming(frame, {col_cur: col_new}, axis=1, inplace=True)
frame.col_dict[col_cur] = None
if col_cur in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col_cur] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col_cur] = mk.KnowledgeFrame()
elif col_cur not in frame._included_cols() and col_cur not in frame._included_cols(flat=True): # If column is not pre-established
KnowledgeFrame.renagetting_ming(frame, {col_cur: col_new}, axis=1, inplace=True)
else:
warnings.warn("{} is either (a) a required column and cannot be renagetting_mingd or (b) a subcolumn that can be renagetting_mingd, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".formating(col_cur))
else:
warnings.warn('{} is not in data columns'.formating(col_cur))
return frame
def unioner(self, *args, **kwargs):
'''
Merge two BipartiteBase objects.
Arguments:
*args: arguments for Monkey unioner
**kwargs: keyword arguments for Monkey unioner
Returns:
frame (BipartiteBase): unionerd knowledgeframe
'''
frame = KnowledgeFrame.unioner(self, *args, **kwargs)
frame = self._constructor(frame) # Use correct constructor
if kwargs['how'] == 'left': # Non-left unioner could cause issues with data, by default resets attributes
frame._set_attributes(self)
return frame
def _contiguous_ids(self, id_col, clone=True):
'''
Make column of ids contiguous.
Arguments:
id_col (str): column to make contiguous ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'. Only optional columns may be renagetting_mingd
clone (bool): if False, avoid clone
Returns:
frame (BipartiteBase): BipartiteBase with contiguous ids
'''
if clone:
frame = self.clone()
else:
frame = self
cols = to_list(frame.reference_dict[id_col])
n_cols = length(cols)
n_rows = length(frame)
total_all_ids = frame.loc[:, cols].to_numpy().reshape(n_cols * n_rows)
# Source: https://stackoverflow.com/questions/16453465/multi-column-factorize-in-monkey
factorized = mk.factorize(total_all_ids)
# Quickly check whether ids need to be reset
try:
if getting_max(factorized[1]) + 1 == length(factorized[1]):
return frame
except TypeError:
# If ids are not integers, this will return a TypeError and we can ignore it
pass
frame.loc[:, cols] = factorized[0].reshape((n_rows, n_cols))
# Save id reference knowledgeframe, so user can revert back to original ids
if frame.id_reference_dict: # If id_reference_dict has been initialized
if length(frame.id_reference_dict[id_col]) == 0: # If knowledgeframe empty, start with original ids: adjusted ids
frame.id_reference_dict[id_col].loc[:, 'original_ids'] = factorized[1]
frame.id_reference_dict[id_col].loc[:, 'adjusted_ids_1'] = np.arange(length(factorized[1]))
else: # Merge in new adjustment step
n_cols_id = length(frame.id_reference_dict[id_col].columns)
id_reference_kf = mk.KnowledgeFrame({'adjusted_ids_' + str(n_cols_id - 1): factorized[1], 'adjusted_ids_' + str(n_cols_id): np.arange(length(factorized[1]))}, index=np.arange(length(factorized[1]))).totype('Int64', clone=False)
frame.id_reference_dict[id_col] = frame.id_reference_dict[id_col].unioner(id_reference_kf, how='left', on='adjusted_ids_' + str(n_cols_id - 1))
# Sort columns
frame = frame.sort_cols(clone=False)
# ids are now contiguous
frame.columns_contig[id_col] = True
return frame
def _umkate_cols(self, inplace=True):
'''
Rename columns and keep only relevant columns.
Arguments:
inplace (bool): if True, modify in-place
Returns:
frame (BipartiteBase): BipartiteBase with umkated columns
'''
if inplace:
frame = self
else:
frame = self.clone()
new_col_dict = {}
renagetting_ming_dict = {} # For renagetting_ming columns in data
keep_cols = []
for key, val in frame.col_dict.items():
if val is not None:
renagetting_ming_dict[val] = key
new_col_dict[key] = key
keep_cols.adding(key)
else:
new_col_dict[key] = None
frame.col_dict = new_col_dict
keep_cols = sorted(keep_cols, key=col_order) # Sort columns
        KnowledgeFrame.renagetting_ming(frame, renagetting_ming_dict, axis=1, inplace=True)  # API: pandas.DataFrame.rename
"""The stressmodels module contains total_all the stressmodels that available in
Pastas.
Supported Stressmodels
----------------------
The following stressmodels are supported and tested:
- StressModel
- StressModel2
- FactorModel
- StepModel
- WellModel
All other stressmodels are for research purposes only and are not (yet)
fully supported and tested.
TODO
----
- Test and support StepModel
- Test and support LinearTrend
"""
from importlib import import_module
from logging import gettingLogger
import numpy as np
import monkey as mk
from scipy.signal import fftconvolve
from .decorators import set_parameter
from .rfunc import One, Exponential, HantushWellModel
from .timecollections import TimeCollections
from .utils import validate_name
logger = gettingLogger(__name__)
__total_all__ = ["StressModel", "StressModel2", "Constant", "StepModel",
"LinearTrend", "FactorModel", "RechargeModel"]
class StressModelBase:
"""StressModel Base class ctotal_alled by each StressModel object.
Attributes
----------
name : str
Name of this stressmodel object. Used as prefix for the parameters.
parameters : monkey.KnowledgeFrame
Dataframe containing the parameters.
"""
_name = "StressModelBase"
def __init__(self, rfunc, name, tgetting_min, tgetting_max, up, averagestress, cutoff):
self.rfunc = rfunc(up, averagestress, cutoff)
self.parameters = mk.KnowledgeFrame(
columns=['initial', 'pgetting_min', 'pgetting_max', 'vary', 'name'])
self.name = validate_name(name)
self.tgetting_min = tgetting_min
self.tgetting_max = tgetting_max
self.freq = None
self.stress = []
@property
def nparam(self):
return self.parameters.index.size
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values.
"""
pass
@set_parameter
def set_initial(self, name, value):
"""Internal method to set the initial parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'initial'] = value
@set_parameter
def set_pgetting_min(self, name, value):
"""Internal method to set the lower bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_min'] = value
@set_parameter
def set_pgetting_max(self, name, value):
"""Internal method to set the upper bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_max'] = value
@set_parameter
def set_vary(self, name, value):
"""Internal method to set if the parameter is varied during
optimization.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'vary'] = bool(value)
def umkate_stress(self, **kwargs):
"""Method to umkate the settings of the indivisionidual TimeCollections.
Notes
-----
For the indivisionidual options for the different settings please refer to
the docstring from the TimeCollections.umkate_collections() method.
See Also
--------
ps.TimeCollections.umkate_collections
"""
for stress in self.stress:
stress.umkate_collections(**kwargs)
if "freq" in kwargs:
self.freq = kwargs["freq"]
def handle_stress(self, stress, settings):
"""Method to handle user provided stress in init
Parameters
----------
stress: monkey.Collections, pastas.TimeCollections or iterable
settings: dict or iterable
Returns
-------
stress: dict
dictionary with strings
"""
data = []
if incontainstance(stress, mk.Collections):
data.adding(TimeCollections(stress, settings))
elif incontainstance(stress, dict):
for i, value in enumerate(stress.values()):
data.adding(TimeCollections(value, settings=settings[i]))
elif incontainstance(stress, list):
for i, value in enumerate(stress):
data.adding(TimeCollections(value, settings=settings[i]))
else:
logger.warning("provided stress formating is unknown. Provide a"
"Collections, dict or list.")
return data
def dump_stress(self, collections=True):
"""Method to dump total_all stresses in the stresses list.
Parameters
----------
collections: Boolean
True if time collections are to be exported, False if only the name
of the time collections are needed. Settings are always exported.
Returns
-------
data: dict
dictionary with the dump of the stresses.
"""
data = []
for stress in self.stress:
data.adding(stress.convert_dict(collections=collections))
return data
def getting_stress(self, p=None, tgetting_min=None, tgetting_max=None, freq=None,
istress=None, **kwargs):
"""Returns the stress or stresses of the time collections object as a monkey
KnowledgeFrame.
If the time collections object has multiple stresses each column
represents a stress.
Returns
-------
        stress: mk.KnowledgeFrame
Monkey knowledgeframe of the stress(es)
"""
return self.stress[0].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"name": self.name,
"stress": self.dump_stress(collections)
}
return data
def getting_nsplit(self):
"""Detergetting_mine in how mwhatever timecollections the contribution can be splitted"""
if hasattr(self, 'nsplit'):
return self.nsplit
else:
return length(self.stress)
def getting_block(self, p, dt, tgetting_min, tgetting_max):
"""Internal method to getting the block-response from the respnse function"""
if tgetting_min is not None and tgetting_max is not None:
day = mk.to_timedelta(1, 'd')
getting_maxtgetting_max = (mk.Timestamp(tgetting_max) - mk.Timestamp(tgetting_min)) / day
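            # limit the block response to the simulation window, expressed in days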
else:
getting_maxtgetting_max = None
b = self.rfunc.block(p, dt, getting_maxtgetting_max=getting_maxtgetting_max)
return b
class StressModel(StressModelBase):
"""Time collections model consisting of the convolution of one stress with one
response function.
Parameters
----------
stress: monkey.Collections
monkey Collections object containing the stress.
rfunc: rfunc class
Response function used in the convolution with the stress.
name: str
Name of the stress.
up: Boolean or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to detergetting_mine how long the response is (default
is 99% of the actual response time). Used to reduce computation times.
settings: dict or str, optional
The settings of the stress. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.Timecollections for further informatingion.
metadata: dict, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
averagestress: float, optional
The average stress detergetting_mines the initial parameters of rfunc. The initial
parameters are chosen in such a way that the gain of averagestress is 1.
Examples
--------
>>> import pastas as ps
>>> import monkey as mk
>>> sm = ps.StressModel(stress=mk.Collections(), rfunc=ps.Gamma, name="Prec", \
settings="prec")
See Also
--------
pastas.rfunc
pastas.timecollections.TimeCollections
"""
_name = "StressModel"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=None, metadata=None, averagestress=None):
if incontainstance(stress, list):
stress = stress[0] # Temporary fix Raoul, 2017-10-24
stress = TimeCollections(stress, settings=settings, metadata=metadata)
if averagestress is None:
averagestress = stress.collections.standard()
StressModelBase.__init__(self, rfunc, name, stress.collections.index.getting_min(),
stress.collections.index.getting_max(), up, averagestress,
cutoff)
self.freq = stress.settings["freq"]
self.stress = [stress]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1):
"""Simulates the header_num contribution.
Parameters
----------
p: 1D array
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
Returns
-------
monkey.Collections
The simulated header_num contribution.
"""
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
b = self.getting_block(p, dt, tgetting_min, tgetting_max)
stress = self.stress[0].collections
npoints = stress.index.size
h = mk.Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
return h
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StressModel2(StressModelBase):
"""Time collections model consisting of the convolution of two stresses with one
response function. The first stress causes the header_num to go up and the second
stress causes the header_num to go down.
Parameters
----------
stress: list of monkey.Collections or list of pastas.TimeCollections
list of monkey.Collections or pastas.TimeCollections objects containing the
stresses.
rfunc: pastas.rfunc instance
Response function used in the convolution with the stress.
name: str
Name of the stress
up: Boolean or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float
float between 0 and 1 to detergetting_mine how long the response is (default
is 99% of the actual response time). Used to reduce computation times.
settings: Tuple with two dicts
The settings of the indivisionidual TimeCollections.
settings: list of dicts or strs, optional
The settings of the stresses. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.Timecollections for further informatingion.
Default is ("prec", "evap").
metadata: list of dicts, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
Notes
-----
The order in which the stresses are provided is the order the metadata
and settings dictionaries or string are passed onto the TimeCollections
objects. By default, the precipitation stress is the first and the
evaporation stress the second stress.
See Also
--------
pastas.rfunc
pastas.TimeCollections
"""
_name = "StressModel2"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=("prec", "evap"), metadata=(None, None),
averagestress=None):
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0 = TimeCollections(stress[0], settings=settings[0],
metadata=metadata[0])
stress1 = TimeCollections(stress[1], settings=settings[1],
metadata=metadata[1])
# Select indices from validated stress where both collections are available.
index = stress0.collections.index.interst(stress1.collections.index)
if index.empty:
msg = ('The two stresses that were provided have no '
'overlapping time indices. Please make sure the '
'indices of the time collections overlap.')
logger.error(msg)
raise Exception(msg)
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
stress1.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
if averagestress is None:
averagestress = (stress0.collections - stress1.collections).standard()
StressModelBase.__init__(self, rfunc, name, index.getting_min(), index.getting_max(),
up, averagestress, cutoff)
self.stress.adding(stress0)
self.stress.adding(stress1)
self.freq = stress0.settings["freq"]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters back to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
self.parameters.loc[self.name + '_f'] = \
(-1.0, -2.0, 0.0, True, self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1, istress=None):
"""Simulates the header_num contribution.
Parameters
----------
p: 1D array
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
istress: int, optional
Returns
-------
monkey.Collections
The simulated header_num contribution.
"""
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
b = self.getting_block(p[:-1], dt, tgetting_min, tgetting_max)
stress = self.getting_stress(p=p, istress=istress)
npoints = stress.index.size
h = mk.Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
if istress is not None:
if self.stress[istress].name is not None:
h.name = h.name + ' (' + self.stress[istress].name + ')'
# see whether it makes a difference to subtract gain * average_stress
# h -= self.rfunc.gain(p) * stress.average()
return h
def getting_stress(self, p=None, istress=None, **kwargs):
if istress is None:
if p is None:
p = self.parameters.initial.values
return self.stress[0].collections.add(p[-1] * self.stress[1].collections)
elif istress == 0:
return self.stress[0].collections
else:
return p[-1] * self.stress[1].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
            dictionary with all the information necessary to reconstruct the
            StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StepModel(StressModelBase):
"""Stressmodel that simulates a step trend.
Parameters
----------
tstart: str
String with the start date of the step, e.g. '2018-01-01'. This
value is fixed by default. Use ml.set_vary("step_tstart", 1) to vary
the start time of the step trend.
name: str
String with the name of the stressmodel.
rfunc: pastas.rfunc.RfuncBase
Pastas response function used to simulate the effect of the step.
Default is rfunc.One()
Notes
-----
This step trend is calculated as follows. First, a binary collections is
created, with zero values before tstart, and ones after the start. This
    collections is convolved with the block response to simulate a step trend.
"""
_name = "StepModel"
def __init__(self, tstart, name, rfunc=One, up=None):
StressModelBase.__init__(self, rfunc, name, mk.Timestamp.getting_min,
mk.Timestamp.getting_max, up, 1.0, 0.99)
self.tstart = mk.Timestamp(tstart)
self.set_init_parameters()
def set_init_parameters(self):
self.parameters = self.rfunc.getting_init_parameters(self.name)
tgetting_min = | mk.Timestamp.getting_min.toordinal() | pandas.Timestamp.min.toordinal |
from sklearn.ensemble import *
import monkey as mk
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import *
from monkey import KnowledgeFrame
kf = mk.read_csv('nasaa.csv')
aaa = np.array( | KnowledgeFrame.sip_duplicates(kf[['End_Time']]) | pandas.DataFrame.drop_duplicates |
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import monkey as mk
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from monkey.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
def PCA(self):
for i in range(self.lengthlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lengthlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
def sample_by_numSize(self):
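        # Sample size needed to detect a correlation of r = 0.3 at alpha = 0.05,
        # via the Fisher z-transformation: C = 0.5 * ln((1 + r) / (1 - r)) and
        # N = ((Z_alpha/2 + Z_power) / C)**2 + 3, evaluated for power levels
        # from 0.50 to 0.99. Returns the power levels and the matching sizes.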
r = 0.3
alpha = 0.05
# power=0.9
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.adding(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.adding(N)
return [powerArray, sizeArray]
def normaliza(self, X):
        correction = np.sqrt((length(X) - 1) / length(X))  # std correction factor
average_ = np.average(X, 0)
scale_ = np.standard(X, 0)
X = X - average_
X = X / (scale_ * correction)
return X
def gof(self):
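        # Goodness-of-Fit (GoF, cf. Tenenhaus et al.): the geometric mean of
        # the average communality (AVE weighted by the number of indicators
        # in each block) and the average R^2 of the endogenous latent variables.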
r2average = np.average(self.r2.T[self.endoexo()[0]].values)
AVEaverage = self.AVE().clone()
totalblock = 0
for i in range(self.lengthlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = length(block.columns.values)
totalblock += block
AVEaverage[self.latent[i]] = AVEaverage[self.latent[i]] * block
AVEaverage = np.total_sum(AVEaverage) / totalblock
return np.sqrt(AVEaverage * r2average)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lengthlatent):
if(self.latent[i] in self.LVariables['targetting'].values):
endoVar.adding(self.latent[i])
else:
exoVar.adding(self.latent[i])
return endoVar, exoVar
def residuals(self):
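        # Outer residuals: each indicator block minus its reconstruction from
        # the LV score and the outer loadings. Inner residuals: endogenous LV
        # scores minus the scores predicted by the structural (path) model.
        # divisionFun is the total sum of squared outer and inner residuals.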
exoVar = []
endoVar = []
outer_residuals = self.data.clone()
# comun_ = self.data.clone()
for i in range(self.lengthlatent):
if(self.latent[i] in self.LVariables['targetting'].values):
endoVar.adding(self.latent[i])
else:
exoVar.adding(self.latent[i])
for i in range(self.lengthlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(length(outer_), 1)
loadings = loadings.reshape(length(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = mk.KnowledgeFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = mk.concating([outer_residuals, inner_residuals], axis=1)
average_ = np.average(self.data, 0)
# comun_ = comun_.employ(lambda row: row + average_, axis=1)
total_sumOuterResid = mk.KnowledgeFrame.total_sum(
mk.KnowledgeFrame.total_sum(outer_residuals**2))
total_sumInnerResid = mk.KnowledgeFrame.total_sum(
mk.KnowledgeFrame.total_sum(inner_residuals**2))
divisionFun = total_sumOuterResid + total_sumInnerResid
return residuals, outer_residuals, inner_residuals, divisionFun
def srmr(self):
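        # Standardized Root Mean square Residual: the root of the mean squared
        # difference between the empirical indicator correlation matrix and
        # the model-implied correlation matrix.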
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).average())
return srmr
def implied(self):
corLVs = mk.KnowledgeFrame.cov(self.fscores)
implied_ = mk.KnowledgeFrame.dot(self.outer_loadings, corLVs)
implied = mk.KnowledgeFrame.dot(implied_, self.outer_loadings.T)
implied.values[[np.arange(length(self.manifests))] * 2] = 1
return implied
def empirical(self):
empirical = self.data_
return mk.KnowledgeFrame.corr(empirical)
def frequency(self, data=None, manifests=None):
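        # Builds a table of response percentages per Likert category (a 1-5
        # scale is assumed) for every manifest variable, then adds aggregated
        # Neg./Ind./Pos. columns; the split between negative, indifferent and
        # positive depends on whether the scale maximum is odd or even.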
if data is None:
data = self.data
if manifests is None:
manifests = self.manifests
frequencia = mk.KnowledgeFrame(0, index=range(1, 6), columns=manifests)
for i in range(length(manifests)):
frequencia[manifests[i]] = data[
manifests[i]].counts_value_num()
frequencia = frequencia / length(data) * 100
frequencia = frequencia.reindexing_axis(
sorted(frequencia.columns), axis=1)
frequencia = frequencia.fillnone(0).T
frequencia = frequencia[(frequencia.T != 0).whatever()]
getting_maximo = mk.KnowledgeFrame.getting_max(mk.KnowledgeFrame.getting_max(data, axis=0))
if int(getting_maximo) & 1:
neg = np.total_sum(frequencia.ix[:, 1: ((getting_maximo - 1) / 2)], axis=1)
ind = frequencia.ix[:, ((getting_maximo + 1) / 2)]
pos = np.total_sum(
frequencia.ix[:, (((getting_maximo + 1) / 2) + 1):getting_maximo], axis=1)
else:
neg = np.total_sum(frequencia.ix[:, 1:((getting_maximo) / 2)], axis=1)
ind = 0
pos = np.total_sum(frequencia.ix[:, (((getting_maximo) / 2) + 1):getting_maximo], axis=1)
frequencia['Neg.'] = mk.Collections(
neg, index=frequencia.index)
frequencia['Ind.'] = mk.Collections(
ind, index=frequencia.index)
frequencia['Pos.'] = mk.Collections(
pos, index=frequencia.index)
return frequencia
def frequencyPlot(self, data_, SEM=None):
segmento = 'SEM'
SEMgetting_max = mk.KnowledgeFrame.getting_max(SEM)
ok = None
for i in range(1, self.lengthlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = mk.concating([block, SEM], axis=1)
for j in range(SEMgetting_max + 1):
dataSEM = (block.loc[data_[segmento] == j]
).sip(segmento, axis=1)
block_val = dataSEM.columns.values
dataSEM = self.frequency(dataSEM, block_val)['Pos.']
dataSEM = dataSEM.renagetting_ming(j + 1)
ok = dataSEM if ok is None else mk.concating(
[ok, dataSEM], axis=1)
for i in range(1, self.lengthlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
plotando = ok.ix[block_val].sipna(axis=1)
plotando.plot.bar()
plt.legend(loc='upper center',
bbox_to_anchor=(0.5, -.08), ncol=6)
plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
# plt.show()
# block.plot.bar()
# plt.show()
'''for i in range(1, self.lengthlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
block = self.frequency(block, block_val)
block.plot.bar()
plt.show()'''
def dataInfo(self):
sd_ = np.standard(self.data, 0)
average_ = np.average(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(length(self.data.columns))]
return [average_, sd_, skew, kurtosis, w]
def predict(self, method='redundancy'):
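        # Predicts the manifest variables as X_hat = X . W . B . L' + intercept,
        # where W are the outer weights, L the outer loadings and B depends on
        # `method`: 'redundancy' keeps the structural paths for endogenous LVs,
        # 'exogenous' uses the total effects of the exogenous LVs only, and
        # 'communality' predicts each block from its own LV (B = identity).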
exoVar = []
endoVar = []
for i in range(self.lengthlatent):
if(self.latent[i] in self.LVariables['targetting'].values):
endoVar.adding(self.latent[i])
else:
exoVar.adding(self.latent[i])
if (method == 'exogenous'):
Beta = self.path_matrix.ix[endoVar][endoVar]
Gamma = self.path_matrix.ix[endoVar][exoVar]
beta = [1 if (self.latent[i] in exoVar)
else 0 for i in range(self.lengthlatent)]
beta = np.diag(beta)
beta_ = [1 for i in range(length(Beta))]
beta_ = np.diag(beta_)
beta = mk.KnowledgeFrame(beta, index=self.latent, columns=self.latent)
mid = mk.KnowledgeFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
mid = (mid.T.values).flatten('F')
k = 0
for j in range(length(exoVar)):
for i in range(length(endoVar)):
beta.ix[endoVar[i], exoVar[j]] = mid[k]
k += 1
elif (method == 'redundancy'):
beta = self.path_matrix.clone()
beta_ = mk.KnowledgeFrame(1, index=np.arange(
length(exoVar)), columns=np.arange(length(exoVar)))
beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
elif (method == 'communality'):
beta = np.diag(np.ones(length(self.path_matrix)))
beta = mk.KnowledgeFrame(beta)
partial_ = mk.KnowledgeFrame.dot(self.outer_weights, beta.T.values)
prediction = mk.KnowledgeFrame.dot(partial_, self.outer_loadings.T.values)
predicted = mk.KnowledgeFrame.dot(self.data, prediction)
predicted.columns = self.manifests
average_ = np.average(self.data, 0)
intercept = average_ - np.dot(average_, prediction)
predictedData = predicted.employ(lambda row: row + intercept, axis=1)
return predictedData
def cr(self):
# Composite Reliability
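        # CR = (sum|lambda|)^2 / ((sum|lambda|)^2 + sum(1 - lambda^2)),
        # with loadings lambda taken from the first principal component of
        # each (standardized) indicator block; single-indicator blocks get CR = 1.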
composite = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lengthlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = length(block.columns)
if(p != 1):
cor_mat = np.cov(block.T)
evals, evecs = np.linalg.eig(cor_mat)
U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
loadings = V[0, :] * np.sqrt(evals[0])
numerador = np.total_sum(abs(loadings))**2
denogetting_minador = numerador + (p - np.total_sum(loadings ** 2))
cr = numerador / denogetting_minador
composite[self.latent[i]] = cr
else:
composite[self.latent[i]] = 1
composite = composite.T
return(composite)
def r2adjusted(self):
n = length(self.data_)
r2 = self.r2.values
r2adjusted = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lengthlatent):
p = total_sum(self.LVariables['targetting'] == self.latent[i])
r2adjusted[self.latent[i]] = r2[i] - \
(p * (1 - r2[i])) / (n - p - 1)
return r2adjusted.T
def htmt(self):
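        # Heterotrait-Monotrait ratio (HTMT, cf. Henseler et al. 2015): for each
        # pair of constructs, the mean between-block (heterotrait) correlation
        # divided by the geometric mean of the two average within-block
        # (monotrait) correlations; returned as a lower-triangular matrix.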
htmt_ = mk.KnowledgeFrame(mk.KnowledgeFrame.corr(self.data_),
index=self.manifests, columns=self.manifests)
average = []
total_allBlocks = []
for i in range(self.lengthlatent):
block_ = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
total_allBlocks.adding(list(block_.values))
block = htmt_.ix[block_, block_]
average_ = (block - np.diag(np.diag(block))).values
average_[average_ == 0] = np.nan
average.adding(np.nanaverage(average_))
comb = [[k, j] for k in range(self.lengthlatent)
for j in range(self.lengthlatent)]
comb_ = [(np.sqrt(average[comb[i][1]] * average[comb[i][0]]))
for i in range(self.lengthlatent ** 2)]
comb__ = []
for i in range(self.lengthlatent ** 2):
block = (htmt_.ix[total_allBlocks[comb[i][1]],
total_allBlocks[comb[i][0]]]).values
# block[block == 1] = np.nan
comb__.adding(np.nanaverage(block))
htmt__ = np.divisionide(comb__, comb_)
where_are_NaNs = np.ifnan(htmt__)
htmt__[where_are_NaNs] = 0
htmt = mk.KnowledgeFrame(np.tril(htmt__.reshape(
(self.lengthlatent, self.lengthlatent)), k=-1), index=self.latent, columns=self.latent)
return htmt
def comunalidades(self):
        # Communalities (squared outer loadings)
return self.outer_loadings**2
def AVE(self):
# AVE
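        # Average Variance Extracted: per LV, the sum of the squared outer
        # loadings divided by the number of indicators with a non-zero loading.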
return self.comunalidades().employ(lambda column: column.total_sum() / (column != 0).total_sum())
def fornell(self):
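        # Fornell-Larcker criterion: squared LV correlations with the AVE on
        # the diagonal; discriminant validity requires each AVE to exceed the
        # squared correlations in its row/column.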
cor_ = mk.KnowledgeFrame.corr(self.fscores)**2
AVE = self.comunalidades().employ(lambda column: column.total_sum() / (column != 0).total_sum())
for i in range(length(cor_)):
cor_.ix[i, i] = AVE[i]
return(cor_)
def rhoA(self):
# rhoA
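        # Dijkstra-Henseler's rho_A:
        # rho_A = (w'w)^2 * w'(S - diag S)w / w'(ww' - diag(ww'))w,
        # with w the outer weights of the block and S its empirical covariance;
        # falls back to 1 when the ratio is undefined (e.g. single-indicator blocks).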
rhoA = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lengthlatent):
weights = mk.KnowledgeFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).whatever()]
result = mk.KnowledgeFrame.dot(weights.T, weights)
result_ = mk.KnowledgeFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = mk.KnowledgeFrame.dot(S.T, S) / S.shape[0]
numerador = (
np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
denogetting_minador = (
(np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
rhoA_ = ((result)**2) * (numerador / denogetting_minador)
if(np.ifnan(rhoA_.values)):
rhoA[self.latent[i]] = 1
else:
rhoA[self.latent[i]] = rhoA_.values
return rhoA.T
def xloads(self):
# Xloadings
A = self.data_.transpose().values
B = self.fscores.transpose().values
A_mA = A - A.average(1)[:, None]
B_mB = B - B.average(1)[:, None]
ssA = (A_mA**2).total_sum(1)
ssB = (B_mB**2).total_sum(1)
xloads_ = (np.dot(A_mA, B_mB.T) /
np.sqrt(np.dot(ssA[:, None], ssB[None])))
xloads = mk.KnowledgeFrame(
xloads_, index=self.manifests, columns=self.latent)
return xloads
def corLVs(self):
# Correlations LVs
corLVs_ = np.tril(mk.KnowledgeFrame.corr(self.fscores))
return mk.KnowledgeFrame(corLVs_, index=self.latent, columns=self.latent)
def alpha(self):
# Cronbach Alpha
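        # Computed from the pairwise correlations of the standardized indicators:
        # alpha = (p / (p - 1)) * (2 * sum of lower-triangle correlations)
        #         / (variance of the item total, with an (n - 1)/n correction),
        # which is essentially the standardized alpha; single-indicator blocks get 1.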
alpha = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lengthlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = length(block.columns)
if(p != 1):
p_ = length(block)
correction = np.sqrt((p_ - 1) / p_)
soma = np.var(np.total_sum(block, axis=1))
cor_ = mk.KnowledgeFrame.corr(block)
denogetting_minador = soma * correction**2
numerador = 2 * np.total_sum(np.tril(cor_) - np.diag(np.diag(cor_)))
alpha_ = (numerador / denogetting_minador) * (p / (p - 1))
alpha[self.latent[i]] = alpha_
else:
alpha[self.latent[i]] = 1
return alpha.T
def vif(self):
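        # Variance Inflation Factor per manifest variable: regress each
        # indicator on all the others by least squares and take
        # VIF_j = 1 / (1 - R_j^2).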
vif = []
totalmanifests = range(length(self.data_.columns))
for i in range(length(totalmanifests)):
independent = [x for j, x in enumerate(totalmanifests) if j != i]
coef, resid = np.linalg.lstsq(
self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
r2 = 1 - resid / \
(self.data_.ix[:, i].size * self.data_.ix[:, i].var())
vif.adding(1 / (1 - r2))
vif = mk.KnowledgeFrame(vif, index=self.manifests)
return vif
def PLSc(self):
##################################################
# PLSc
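        # Consistent PLS: disattenuate the LV correlations by dividing each
        # off-diagonal entry by sqrt(rho_A_i * rho_A_j), rescale the outer
        # loadings to w * sqrt(rho_A) / (w'w), and re-estimate the path
        # coefficients by OLS on the corrected correlation matrix.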
rA = self.rhoA()
corFalse = self.corLVs()
for i in range(self.lengthlatent):
for j in range(self.lengthlatent):
if i == j:
corFalse.ix[i][j] = 1
else:
corFalse.ix[i][j] = corFalse.ix[i][
j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
corTrue = np.zeros([self.lengthlatent, self.lengthlatent])
for i in range(self.lengthlatent):
for j in range(self.lengthlatent):
corTrue[j][i] = corFalse.ix[i][j]
corTrue[i][j] = corFalse.ix[i][j]
corTrue = mk.KnowledgeFrame(corTrue, corFalse.columns, corFalse.index)
# Loadings
attenuedOuter_loadings = mk.KnowledgeFrame(
0, index=self.manifests, columns=self.latent)
for i in range(self.lengthlatent):
weights = mk.KnowledgeFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).whatever()]
result = mk.KnowledgeFrame.dot(weights.T, weights)
result_ = mk.KnowledgeFrame.dot(weights, weights.T)
newLoad = (
weights.values * np.sqrt(rA.ix[self.latent[i]].values)) / (result.values)
myindex = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
myindex_ = self.latent[i]
attenuedOuter_loadings.ix[myindex.values, myindex_] = newLoad
# Path
dependent = np.distinctive(self.LVariables.ix[:, 'targetting'])
for i in range(length(dependent)):
independent = self.LVariables[self.LVariables.ix[
:, "targetting"] == dependent[i]]["source"]
dependent_ = corTrue.ix[dependent[i], independent]
independent_ = corTrue.ix[independent, independent]
# path = np.dot(np.linalg.inv(independent_),dependent_)
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
self.path_matrix.ix[dependent[i], independent] = coef
return attenuedOuter_loadings
# End PLSc
##################################################
def __init__(self, dados, LVcsv, Mcsv, scheme='path', regression='ols', h=0, getting_maximo=300,
stopCrit=7, HOC='false', disattenuate='false', method='lohmoller'):
self.data = dados
self.LVcsv = LVcsv
self.Mcsv = Mcsv
self.getting_maximo = getting_maximo
self.stopCriterion = stopCrit
self.h = h
self.scheme = scheme
self.regression = regression
self.disattenuate = disattenuate
contador = 0
self.convergiu = 0
data = dados if type(
dados) is mk.core.frame.KnowledgeFrame else mk.read_csv(dados)
LVariables = mk.read_csv(LVcsv)
Variables = Mcsv if type(
Mcsv) is mk.core.frame.KnowledgeFrame else mk.read_csv(Mcsv)
latent_ = LVariables.values.flatten('F')
latent__ = np.distinctive(latent_, return_index=True)[1]
# latent = np.distinctive(latent_)
latent = [latent_[i] for i in sorted(latent__)]
self.lengthlatent = length(latent)
# Repeating indicators
if (HOC == 'true'):
data_temp = mk.KnowledgeFrame()
for i in range(self.lengthlatent):
block = self.data[Variables['measurement']
[Variables['latent'] == latent[i]]]
block = block.columns.values
data_temp = mk.concating(
[data_temp, data[block]], axis=1)
cols = list(data_temp.columns)
counts = Counter(cols)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
cols[cols.index(s)] = s + '.' + str(suffix)
data_temp.columns = cols
doublemanifests = list(Variables['measurement'].values)
counts = Counter(doublemanifests)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
doublemanifests[doublemanifests.index(
s)] = s + '.' + str(suffix)
Variables['measurement'] = doublemanifests
data = data_temp
# End data manipulation
manifests_ = Variables['measurement'].values.flatten('F')
manifests__ = np.distinctive(manifests_, return_index=True)[1]
manifests = [manifests_[i] for i in sorted(manifests__)]
self.manifests = manifests
self.latent = latent
self.Variables = Variables
self.LVariables = LVariables
data = data[manifests]
data_ = self.normaliza(data)
self.data = data
self.data_ = data_
outer_weights = mk.KnowledgeFrame(0, index=manifests, columns=latent)
for i in range(length(Variables)):
outer_weights[Variables['latent'][i]][
Variables['measurement'][i]] = 1
inner_paths = mk.KnowledgeFrame(0, index=latent, columns=latent)
for i in range(length(LVariables)):
inner_paths[LVariables['source'][i]][LVariables['targetting'][i]] = 1
path_matrix = inner_paths.clone()
if method == 'wold':
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
intera = self.lengthlatent
intera_ = 1
# LOOP
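        # PLS estimation loop (Lohmoller or Wold variant):
        #   1. outer estimation: LV scores = standardized data . outer weights
        #   2. inner estimation: inner weights from the chosen scheme
        #      (path / centroid / factor / fuzzy / horst) give the inner proxies
        #   3. outer-weight update: Mode A uses correlation-type weights
        #      (1/N * Z'X), Mode B uses OLS weights (X'X)^-1 X'Z (each rescaled)
        # Iterate until the largest squared change in the (absolute) outer
        # weights drops below 10**(-stopCrit) or `getting_maximo` iterations are reached.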
for iterations in range(0, self.getting_maximo):
contador = contador + 1
if method == 'lohmoller':
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
intera = 1
intera_ = self.lengthlatent
# fscores = self.normaliza(fscores) # Old Mode A
for q in range(intera):
# Schemes
if (scheme == 'path'):
for h in range(intera_):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (total_sum(follow) > 0):
# i ~ follow
inner_paths.ix[inner_paths[follow].index, i] = np.linalg.lstsq(
fscores.ix[:, follow], fscores.ix[:, i])[0]
predec = (path_matrix.ix[:, i] == 1)
if (total_sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(length(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'fuzzy'):
for h in range(length(path_matrix)):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (total_sum(follow) > 0):
ac, awL, awR = otimiza(fscores.ix[:, i], fscores.ix[
:, follow], length(fscores.ix[:, follow].columns), 0)
inner_paths.ix[inner_paths[follow].index, i] = ac
predec = (path_matrix.ix[:, i] == 1)
if (total_sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(length(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'centroid'):
inner_paths = np.sign(mk.KnowledgeFrame.multiply(
mk.KnowledgeFrame.corr(fscores), (path_matrix + path_matrix.T)))
elif (scheme == 'factor'):
inner_paths = mk.KnowledgeFrame.multiply(
mk.KnowledgeFrame.corr(fscores), (path_matrix + path_matrix.T))
elif (scheme == 'horst'):
inner_paths = inner_paths
print(inner_paths)
if method == 'wold':
fscores[self.latent[q]] = mk.KnowledgeFrame.dot(
fscores, inner_paths)
elif method == 'lohmoller':
fscores = mk.KnowledgeFrame.dot(fscores, inner_paths)
final_item_outer_weights = outer_weights.clone()
# Outer Weights
for i in range(self.lengthlatent):
                # Reflective / Mode A
if(Variables['mode'][Variables['latent'] == latent[i]]).whatever() == "A":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
# 1/N (Z dot X)
res_ = (1 / length(data_)) * np.dot(b, a)
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / np.standard(res_) # New Mode A
                # Formative / Mode B
elif(Variables['mode'][Variables['latent'] == latent[i]]).whatever() == "B":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
# (X'X)^-1 X'Y
a_ = np.dot(a.T, a)
inv_ = np.linalg.inv(a_)
res_ = np.dot(np.dot(inv_, a.T),
fscores.ix[:, latent[i]])
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / (np.standard(np.dot(data_.ix[:, myindex], res_)))
if method == 'wold':
fscores = mk.KnowledgeFrame.dot(fscores, inner_paths)
diff_ = np.getting_max(
np.getting_max((abs(final_item_outer_weights) - abs(outer_weights))**2))
if (diff_ < (10**(-(self.stopCriterion)))):
self.convergiu = 1
break
# END LOOP
# print(contador)
# Bootstraping trick
if(np.ifnan(outer_weights).whatever().whatever()):
self.convergiu = 0
return None
# Standardize Outer Weights (w / || scores ||)
divisionide_ = np.diag(1 / (np.standard(np.dot(data_, outer_weights), 0)
* np.sqrt((length(data_) - 1) / length(data_))))
outer_weights = np.dot(outer_weights, divisionide_)
outer_weights = mk.KnowledgeFrame(
outer_weights, index=manifests, columns=latent)
fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
# Outer Loadings
outer_loadings = mk.KnowledgeFrame(0, index=manifests, columns=latent)
for i in range(self.lengthlatent):
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
cor_ = [sp.stats.pearsonr(a.ix[:, j], b)[0]
for j in range(length(a.columns))]
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_loadings.ix[myindex.values, myindex_] = cor_
# Paths
if (regression == 'fuzzy'):
path_matrix_low = path_matrix.clone()
path_matrix_high = path_matrix.clone()
path_matrix_range = path_matrix.clone()
r2 = mk.KnowledgeFrame(0, index=np.arange(1), columns=latent)
dependent = np.distinctive(LVariables.ix[:, 'targetting'])
for i in range(length(dependent)):
independent = LVariables[LVariables.ix[
:, "targetting"] == dependent[i]]["source"]
dependent_ = fscores.ix[:, dependent[i]]
independent_ = fscores.ix[:, independent]
if (self.regression == 'ols'):
# Path Normal
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
# model = sm.OLS(dependent_, independent_)
# results = model.fit()
# print(results.total_summary())
# r2[dependent[i]] = results.rsquared
r2[dependent[i]] = 1 - resid / \
(dependent_.size * dependent_.var())
path_matrix.ix[dependent[i], independent] = coef
# pvalues.ix[dependent[i], independent] = results.pvalues
elif (self.regression == 'fuzzy'):
size = length(independent_.columns)
ac, awL, awR = otimiza(dependent_, independent_, size, self.h)
# plotaIC(dependent_, independent_, size)
ac, awL, awR = (ac[0], awL[0], awR[0]) if (
size == 1) else (ac, awL, awR)
path_matrix.ix[dependent[i], independent] = ac
path_matrix_low.ix[dependent[i], independent] = awL
path_matrix_high.ix[dependent[i], independent] = awR
                # Fuzzy path matrix: formatted as 'lower ; upper' bounds
for i in range(length(path_matrix.columns)):
for j in range(length(path_matrix.columns)):
path_matrix_range.ix[i, j] = str(value_round(
path_matrix_low.ix[i, j], 3)) + ' ; ' + str(value_round(path_matrix_high.ix[i, j], 3))
r2 = r2.T
self.path_matrix = path_matrix
self.outer_weights = outer_weights
self.fscores = fscores
#################################
# PLSc
if disattenuate == 'true':
outer_loadings = self.PLSc()
##################################
# Path Effects
indirect_effects = mk.KnowledgeFrame(0, index=latent, columns=latent)
path_effects = [None] * self.lengthlatent
path_effects[0] = self.path_matrix
for i in range(1, self.lengthlatent):
path_effects[i] = mk.KnowledgeFrame.dot(
path_effects[i - 1], self.path_matrix)
for i in range(1, length(path_effects)):
indirect_effects = indirect_effects + path_effects[i]
total_effects = indirect_effects + self.path_matrix
if (regression == 'fuzzy'):
self.path_matrix_high = path_matrix_high
self.path_matrix_low = path_matrix_low
self.path_matrix_range = path_matrix_range
self.total_effects = total_effects.T
self.indirect_effects = indirect_effects
self.outer_loadings = outer_loadings
self.contador = contador
self.r2 = r2
def impa(self):
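        # Importance-Performance Map Analysis (IPMA): rescale the outer weights
        # on the unstandardized data so they sum to one per LV, build
        # unstandardized LV scores, rescale them to a 0-100 range using each
        # block's min/max, and accumulate the manifest variables' total
        # (direct + indirect) effects; performance = mean rescaled score per LV.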
# Unstandardized Scores
scale_ = np.standard(self.data, 0)
outer_weights_ = mk.KnowledgeFrame.divisionide(
self.outer_weights, scale_, axis=0)
relativo = mk.KnowledgeFrame.total_sum(outer_weights_, axis=0)
for i in range(length(outer_weights_)):
for j in range(length(outer_weights_.columns)):
outer_weights_.ix[i, j] = (
outer_weights_.ix[i, j]) / relativo[j]
unstandardizedScores = mk.KnowledgeFrame.dot(self.data, outer_weights_)
# Rescaled Scores
rescaledScores = mk.KnowledgeFrame(0, index=range(
length(self.data)), columns=self.latent)
for i in range(self.lengthlatent):
block = self.data[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
getting_maximo = mk.KnowledgeFrame.getting_max(block, axis=0)
getting_minimo = mk.KnowledgeFrame.getting_min(block, axis=0)
getting_minimo_ = mk.KnowledgeFrame.getting_min(getting_minimo)
getting_maximo_ = mk.KnowledgeFrame.getting_max(getting_maximo)
rescaledScores[self.latent[
i]] = 100 * (unstandardizedScores[self.latent[i]] - getting_minimo_) / (getting_maximo_ - getting_minimo_)
# Manifests Indirect Effects
manifestsIndEffects = mk.KnowledgeFrame(
self.outer_weights, index=self.manifests, columns=self.latent)
effect_ = mk.KnowledgeFrame(
self.outer_weights, index=self.manifests, columns=self.latent)
for i in range(length(self.latent[i])):
effect_ = mk.KnowledgeFrame.dot(effect_, self.path_matrix.T)
manifestsIndEffects = manifestsIndEffects + effect_
        # Performance scores per LV
performanceScoresLV = | mk.KnowledgeFrame.average(rescaledScores, axis=0) | pandas.DataFrame.mean |
import DataModel
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import math
from math import floor
class PlotModel:
"""
    This class implements methods for visualizing the DataModel model.
"""
def __init__(self, process):
"""
:param process: Instance of a class "ProcessSimulation"
        _pkf holds the result of the PDF calculation
        _ckf holds the result of the CDF calculation
"""
self._process = process
self._pkf = None
self._ckf = None
def show_realization(self, start=0, end=100):
"""
        Show a realization of the process in the range from "start" to "end".
        :param start: left border of the interval
        :param end: right border of the interval
        :return: None; the plot is shown directly
"""
n = end - start
old_values = self._process.getting_data().getting_times()[start:end]
old_times = self._process.getting_data().getting_values()[start:end]
        values = []
        times = []
for i in range(0, n):
values.adding(old_values[i])
values.adding(old_values[i])
times.adding(old_times[0])
for i in range(1, n):
times.adding(old_times[i])
times.adding(old_times[i])
times.adding(old_times[-1])
threshold_time_interval = [old_times[0], times[-1]]
plt.plot(values, times)
plt.plot(threshold_time_interval, [self._process.getting_threshold()] * 2)
print(old_times[end-1])
plt.show()
def calculate_pkf(self, number_of_splits):
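        # Empirical density of the process value: split the observed value
        # range into `number_of_splits` bins and, for every bin, sum the time
        # the trajectory spends at values inside it; dividing by the total
        # observation time turns the sums into a (discrete) PDF estimate.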
times = mk.Collections(self._process.getting_data().getting_times())
values = mk.Collections(self._process.getting_data().getting_values())
total_sum_of_time_intervals = mk.Collections(np.zeros((number_of_splits, )))
steps = np.zeros((number_of_splits, ))
getting_max_value = np.getting_max(values)
getting_min_value = np.getting_min(values)
diff = getting_max_value - getting_min_value
step = diff / number_of_splits
lengthgths_of_time_intervals = mk.Collections(
np.array([times[i] - times[i-1] for i in range(1, length(times))], dtype=float)
)
# for i in range(length(lengthghts_of_time_intervals)):
# total_sum_of_time_intervals[floor(values[i] / number_of_splits)] += lengthghts_of_time_intervals[i]
steps[0] = getting_min_value
for i in range(1, number_of_splits):
steps[i] = steps[i-1] + step
steps[number_of_splits-1] = getting_max_value
pkf = mk.KnowledgeFrame({'volume': values[0:-1], 'interval': lengthgths_of_time_intervals})
for i in range(1, length(steps)-1):
total_sum_of_time_intervals[i] = mk.Collections.total_sum(pkf[(pkf.volume > steps[i]) & (pkf.volume <= steps[i+1])].interval)
total_sum_of_time_intervals.values[-1] = mk.Collections.total_sum(pkf[pkf.values >= steps[-1]].interval)
total_sum_of_time_intervals.values[0] = times.values[-1] - mk.Collections.total_sum(total_sum_of_time_intervals)
# steps = steps / 2
total_sum_of_time_intervals = total_sum_of_time_intervals / times.values[-1]
# print("Sum density: {}".formating(mk.Collections.total_sum(total_sum_of_time_intervals)))
self._pkf = (steps, total_sum_of_time_intervals)
def calculate_pkf_one_step(self):
times = mk.Collections(self._process.getting_data().getting_times())
values = mk.Collections(self._process.getting_data().getting_values())
getting_max_value = math.floor(np.getting_max(values))
getting_min_value = math.ceiling(np.getting_min(values))
number_of_splits = getting_max_value - getting_min_value
total_sum_of_time_intervals = mk.Collections(np.zeros((number_of_splits, )))
steps = np.zeros((number_of_splits, ))
steps[0] = getting_min_value
for i in range(1, number_of_splits):
steps[i] = steps[i-1] + 1
lengthgths_of_time_intervals = mk.Collections(
np.array([times[i] - times[i-1] for i in range(1, length(times))], dtype=float)
)
pkf = mk.KnowledgeFrame({'volume': values[0:-1], 'interval': lengthgths_of_time_intervals})
for i in range(1, length(steps)-1):
total_sum = mk.Collections.total_sum(pkf[(pkf.volume > steps[i]) & (pkf.volume <= steps[i+1])].interval)
if total_sum is not np.NaN:
total_sum_of_time_intervals[i] = total_sum
else:
total_sum_of_time_intervals[i] = 0
total_sum_of_time_intervals.values[-1] = mk.Collections.total_sum(pkf[pkf.values >= steps[-1]].interval)
total_sum_of_time_intervals.values[0] = times.values[-1] - | mk.Collections.total_sum(total_sum_of_time_intervals) | pandas.Series.sum |
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from monkey import KnowledgeFrame, Collections
from monkey.compat import range, lrange, iteritems
#from monkey.core.datetools import formating as date_formating
import monkey.io.sql as sql
import monkey.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
class MonkeySQLTest(unittest.TestCase):
"""Base class with common private methods for
    SQLAlchemy and fallback cases.
"""
def sip_table(self, table_name):
self._getting_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _getting_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
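        # Create the iris table for the current SQL flavor and fill it from
        # the iris.csv file shipped with the test data, skipping the header row.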
iris_csv_file = os.path.join(tm.getting_data_path(), 'iris.csv')
self.sip_table('iris')
self._getting_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
            next(r)  # skip the header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._getting_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = KnowledgeFrame(data, columns=columns)
def _load_raw_sql(self):
self.sip_table('types_test_data')
self._getting_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._getting_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._getting_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.monkeySQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.sip_table('test_frame1')
def _to_sql_fail(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.monkeySQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.sip_table('test_frame1')
def _to_sql_replacing(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replacing')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _to_sql_adding(self):
# Nuke table just in case
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='adding')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _value_roundtrip(self):
self.sip_table('test_frame_value_roundtrip')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame_value_roundtrip')
result = self.monkeySQL.read_sql('SELECT * FROM test_frame_value_roundtrip')
result.set_index('monkey_index', inplace=True)
# result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.monkeySQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.monkeySQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(MonkeySQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
    where it should be correctly ignored.
    We don't use sip_table because that isn't part of the public API.
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replacing(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replacing')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_adding(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='adding')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_legacy_write_frame(self):
"""Test legacy write frame name.
        Assume that the functionality is already tested above, so just do a quick check that it basically works."""
sql.write_frame(
self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_value_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_value_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql(
'SELECT * FROM test_frame_value_roundtrip',
con=self.conn,
flavor='sqlite')
# HACK!
result.index = self.test_frame1.index
result.set_index('monkey_index', inplace=True)
result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_tquery(self):
iris_results = sql.tquery(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
""" Test date parsing in read_sql """
# No Parsing
kf = sql.read_sql(
"SELECT * FROM types_test_data", self.conn, flavor='sqlite')
self.assertFalse(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite', parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data", self.conn,
flavor='sqlite',
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
""" Test case where same column appears in parse_date and index_col"""
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['DateCol', 'IntDateCol'],
index_col='DateCol')
self.assertTrue(
issubclass(kf.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(
issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
class _TestSQLAlchemy(MonkeySQLTest):
"""
Base class for testing the sqlalchemy backend. Subclasses for specific
database types are created below.
    Assume that sqlalchemy takes care of the DB specifics.
"""
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_sip_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
monkeySQL.sip_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_value_roundtrip(self):
self._value_roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_table, "this_doesnt_exist", con=self.conn)
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
def test_date_parsing(self):
# No Parsing
kf = sql.read_table("types_test_data", self.conn)
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table("types_test_data", self.conn, parse_dates={
'DateCol': {'formating': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(kf.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Collections(2**25 + 1,dtype=np.int32)
s2 = Collections(0.0,dtype=np.float32)
kf = KnowledgeFrame({'s1': s1, 's2': s2})
# write and read again
kf.to_sql("test_read_write", self.conn, index=False)
kf2 = sql.read_table("test_read_write", self.conn)
tm.assert_frame_equal(kf, kf2, check_dtype=False, check_exact=True)
class TestSQLAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
def connect(self):
return sqlalchemy.create_engine('sqlite:///:memory:')
def setUp(self):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not insttotal_alled')
self.conn = self.connect()
self.monkeySQL = | sql.MonkeySQLAlchemy(self.conn) | pandas.io.sql.PandasSQLAlchemy |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 17 02:35:05 2020
@author: krishna
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 20:20:59 2020
@author: krishna
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#---------- Here I take only the 5 features obtained from my dataset and apply Decision Tree and Random Forest --------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('dataset_final1')
data.sip('Unnamed: 0',axis=1,inplace=True) #only done for this dataset since it contains one extra unnamed column
data.sip('domainUrlRatio',axis=1,inplace=True) # only done for experimentation; remove it in the main code
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
# rnd_score_top_5.adding('URL_Type_obf_Type')
# kboost_score_top_6.adding('URL_Type_obf_Type')
#experimenting with the reduced faetures
# data=data[rnd_score_top_5]
# data=data[kboost_score_top_6]
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if whatever value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=shuffled_dataset is np.inf
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
# dataset_final=mk.concating([shuffled_x,shuffled_y],axis=1) #for non-feature scaling algorithims
#dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True)
| mk.KnowledgeFrame.sorting_index(test_set,axis=0,ascending=True,inplace=True) | pandas.DataFrame.sort_index |
"""
This file is for methods that are common among multiple features in features.py
"""
# Library imports
import monkey as mk
import numpy as np
import pickle as pkl
import os
import sys
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, LabelBinarizer
def fit_to_value(kf, column, income_col='Total Yearly Income [EUR]'):
"""
Calculates the average income for each category in a column of a knowledgeframe
## Parameters
    kf: a monkey.KnowledgeFrame containing the data
    column: a str naming the column to be processed
    ## Returns
    A single-row monkey.KnowledgeFrame containing the processed data
"""
if os.environ['DD_EXPORT_PROJECT'] == 'False':
values = | mk.Collections.convert_dict(kf[column]) | pandas.Series.to_dict |
from scipy.signal import butter, lfilter, resample_by_num, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import monkey as np
import matplotlib.pyplot as plt
import scipy
import monkey as mk
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def getting_name(self):
return 'img-spec-{}'.formating(self.size)
def sip_zeros(self, kf):
return kf[(kf.T != 0).whatever()]
def employ(self, data):
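        # For every channel: draw a matplotlib spectrogram (NFFT=2048,
        # Fs=240000/600 Hz, small overlap), save it to a temporary PNG, read
        # it back as an RGB array and resize it to 224x224x3.
        # Returns one image array per channel.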
data = mk.KnowledgeFrame(data.T)
data = self.sip_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmapping=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.adding(im)
return channels
class UnitScale:
"""
    Scale across the last axis.
"""
def getting_name(self):
return 'unit-scale'
def employ(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def getting_name(self):
return 'unit-scale-feat'
def employ(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
    Apply Fast Fourier Transform to the last axis.
"""
def getting_name(self):
return "fft"
def employ(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
"""
    Apply ICA (experimental).
"""
def __init__(self, n_components=None):
self.n_components = n_components
def getting_name(self):
if self.n_components != None:
return "ICA%d" % (self.n_components)
else:
return 'ICA'
def employ(self, data):
        # apply ICA across the channels
        ica = FastICA(n_components=self.n_components)
        data = ica.fit_transform(data)
return data
class Resample_by_num:
"""
Resample_by_num time-collections data.
"""
def __init__(self, sample_by_num_rate):
self.f = sample_by_num_rate
def getting_name(self):
return "resample_by_num%d" % self.f
def employ(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample_by_num(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def getting_name(self):
return "mag"
def employ(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def getting_name(self):
return 'lpf%d' % self.f
def employ(self, data):
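        # Design a 101-tap FIR low-pass filter with the window method (firwin),
        # with the cutoff capped just below the Nyquist frequency nyq = f/2,
        # and apply it causally to every channel with lfilter.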
nyq = self.f / 2.0
cutoff = getting_min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# employ filter over each channel
for j in range(length(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
class Mean:
"""
extract channel averages
"""
def getting_name(self):
return 'average'
def employ(self, data):
axis = data.ndim - 1
return data.average(axis=axis)
class Abs:
"""
    Take the element-wise absolute value of the data.
"""
def getting_name(self):
return 'abs'
def employ(self, data):
return np.abs(data)
class Stats:
"""
    Subtract the average, then take (standard deviation, getting_min, getting_max) for each channel.
"""
def getting_name(self):
return "stats"
def employ(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(length(data)):
ch_data = data[i]
ch_data = data[i] - np.average(ch_data)
outi = out[i]
outi[0] = np.standard(ch_data)
outi[1] = np.getting_min(ch_data)
outi[2] = np.getting_max(ch_data)
return out
class Interp:
"""
    Replace non-positive values (done before taking a log).
NOTE: try different methods later
"""
def getting_name(self):
return "interp"
def employ(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = | np.getting_max(data) | pandas.max |