matlok - Python Src Code Datasets (base)

A collection of 2 items: Python code from leading AI research and tools repositories.
Each dataset row describes one Python function or method. Schema (string columns report min/max length, numeric columns report min/max value):

| column | dtype | min | max | nullable |
|---|---|---|---|---|
| `name` | string (length) | 1 | 94 | no |
| `class_name` | string (length) | 1 | 56 | no |
| `class_bases` | string (length) | 0 | 73 | yes |
| `is_member` | bool (2 classes) | | | no |
| `args` | string (length) | 0 | 1.04k | no |
| `class_docstr` | string (length) | 0 | 13k | yes |
| `class_docstr_tok` | string (length) | 2 | 18.2k | yes |
| `docstr` | string (length) | 0 | 6.72k | yes |
| `docstr_tok` | string (length) | 13 | 9.01k | yes |
| `returns` | string (length) | 1 | 279 | yes |
| `code` | string (length) | 20 | 27.8k | yes |
| `code_tok` | string (length) | 32 | 38.4k | yes |
| `lstart` | int64 | 1 | 1.03k | no |
| `lend` | int64 | 2 | 1.18k | no |
| `raises` | string (9 classes) | | | no |
| `filename` | string (length) | 5 | 81 | no |
| `file_path` | string (length) | 14 | 129 | no |
| `imports` | string (length) | 0 | 786 | no |
| `total_objects` | int64 | 7 | 10 | no |
| `num_classes` | float64 | 1 | 4 | yes |
| `num_imports` | int64 | 0 | 9 | no |
| `num_all_bases` | float64 | 0 | 4 | yes |
| `num_bases` | float64 | 1 | 4 | yes |
| `num_functions` | int64 | 0 | 10 | no |
| `num_methods` | float64 | 1 | 8 | yes |
| `label_desc` | string (length) | 70 | 1.25k | no |
| `label_desc_len` | int64 | 70 | 1.25k | no |
| `label_id` | string (10 classes) | | | no |
| `__index_level_0__` | int64 | 0 | 514k | no |
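
To make the schema concrete, here is a minimal sketch of loading a dataset from this collection with the `datasets` library and reading the columns above; the repository id is a placeholder, not a confirmed dataset name:

```python
from datasets import load_dataset

# Placeholder repo id -- substitute an actual dataset from this collection.
ds = load_dataset("matlok/python-src-code", split="train")

row = ds[0]
print(row["name"], row["class_name"], row["label_id"])
print(row["file_path"], row["lstart"], row["lend"])  # source location of the snippet
print(row["code"])  # raw source for the function or method
```

The sample rows below show what these fields look like in practice.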
**`test_da_tokenizer_handles_dates`** · global function · args: `da_tokenizer, text`

```python
def test_da_tokenizer_handles_dates(da_tokenizer, text):
    tokens = da_tokenizer(text)
    assert len(tokens) == 1
```

file: `spacy/spacy/tests/lang/da/test_exceptions.py`, lines 17–19 · imports: `import pytest` · total_objects 7, num_imports 1, num_functions 6
label `node_id 3` (label_desc_len 142, index 387,996): "Use image node_id 3 for calling a global function with example usage: test_da_tokenizer_handles_dates(da_tokenizer, text) without return types"
**`test_da_tokenizer_handles_exc_in_text`** · global function · args: `da_tokenizer`

```python
def test_da_tokenizer_handles_exc_in_text(da_tokenizer):
    text = "Det er bl.a. ikke meningen"
    tokens = da_tokenizer(text)
    assert len(tokens) == 5
    assert tokens[2].text == "bl.a."
```

file: `spacy/spacy/tests/lang/da/test_exceptions.py`, lines 22–26 · imports: `import pytest` · total_objects 7, num_imports 1, num_functions 6
label `node_id 4` (label_desc_len 142, index 387,997): "Use image node_id 4 for calling a global function with example usage: test_da_tokenizer_handles_exc_in_text(da_tokenizer) without return types"
**`test_da_tokenizer_handles_custom_base_exc`** · global function · args: `da_tokenizer`

```python
def test_da_tokenizer_handles_custom_base_exc(da_tokenizer):
    text = "Her er noget du kan kigge i."
    tokens = da_tokenizer(text)
    assert len(tokens) == 8
    assert tokens[6].text == "i"
    assert tokens[7].text == "."
```

file: `spacy/spacy/tests/lang/da/test_exceptions.py`, lines 29–34 · imports: `import pytest` · total_objects 7, num_imports 1, num_functions 6
label `node_id 5` (label_desc_len 146, index 387,998): "Use image node_id 5 for calling a global function with example usage: test_da_tokenizer_handles_custom_base_exc(da_tokenizer) without return types"
**`test_da_tokenizer_slash`** · global function · args: `da_tokenizer, text, n_tokens`

```python
def test_da_tokenizer_slash(da_tokenizer, text, n_tokens):
    tokens = da_tokenizer(text)
    assert len(tokens) == n_tokens
```

file: `spacy/spacy/tests/lang/da/test_exceptions.py`, lines 57–59 · imports: `import pytest` · total_objects 7, num_imports 1, num_functions 6
label `node_id 6` (label_desc_len 144, index 387,999): "Use image node_id 6 for calling a global function with example usage: test_da_tokenizer_slash(da_tokenizer, text, n_tokens) without return types"
**`debug`** · method of `ExecMerger(Executor)` · args: `self, docs_matrix` · returns: `result`

```python
def debug(self, docs_matrix: List[DocumentArray], **kwargs):
    self.logger.debug(
        f"received doc matrix in exec-merger with length {len(docs_matrix)}."
    )
    result = DocumentArray()
    for docs in zip(*docs_matrix):
        traversed_executors = [doc.tags["traversed-executors"] for doc in docs]
        shard_ids = [doc.tags["shard_id"] for doc in docs]
        shards = [doc.tags["shards"] for doc in docs]
        parallels = [doc.tags["parallel"] for doc in docs]
        traversed_executors = list(chain(*traversed_executors))
        doc = Document()
        doc.tags["traversed-executors"] = traversed_executors
        doc.tags["shard_id"] = shard_ids
        doc.tags["shards"] = shards
        doc.tags["parallel"] = parallels
        doc.tags["merged"] = True
        result.append(doc)
    return result
```

file: `jina/tests/docker_compose/executor-merger/exec_merger.py`, lines 15–36 · imports: `from itertools import chain; from typing import Dict, List; from jina import Document, DocumentArray, Executor, requests` · total_objects 7, num_classes 1, num_imports 3, num_all_bases 1, num_bases 1, num_functions 0, num_methods 2
label `node_id 2` (label_desc_len 145, index 156,984): "Use image node_id 2 for calling the ExecMerger obj's underlying member method code with example usage: obj.debug(docs_matrix) and returns: result"
**`__init__`** · method of `ExecMerger(Executor)` · args: `self` · returns: `ExecMerger`

```python
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    from jina.logging.logger import JinaLogger

    self.logger = JinaLogger(self.__class__.__name__)
```

file: `jina/tests/docker_compose/executor-merger/exec_merger.py`, lines 8–12 · imports: `from itertools import chain; from typing import Dict, List; from jina import Document, DocumentArray, Executor, requests` · total_objects 7, num_classes 1, num_imports 3, num_all_bases 1, num_bases 1, num_functions 0, num_methods 2
label `node_id 1` (label_desc_len 124, index 156,983): "Use image node_id 1 to create a new ExecMerger object from inherited base classes: Executor with example: obj = ExecMerger()"
**`_qencode`** · global function · args: `s` · returns: `enc`

```python
def _qencode(s):
    enc = _encodestring(s, quotetabs=True)
    # Must encode spaces, which quopri.encodestring() doesn't do
    return enc.replace(b" ", b"=20")
```

file: `cpython/Lib/email/encoders.py`, lines 19–22 · imports: `from base64 import encodebytes; from quopri import encodestring` · total_objects 7, num_imports 2, num_functions 5
label `node_id 1` (label_desc_len 98, index 52,444): "Use image node_id 1 for calling a global function with example usage: _qencode(s) and returns: enc"
**`run`** · global function · args: `cmd, stdout`

```python
def run(cmd, stdout=True):
    pipe = None if stdout else subprocess.DEVNULL
    try:
        subprocess.check_call(cmd, stdout=pipe, stderr=pipe)
    except subprocess.CalledProcessError:
        print("\n! Error executing: `%s;` aborting" % " ".join(cmd))
        sys.exit(1)
```

file: `numpy/tools/ci/push_docs_to_repo.py`, lines 42–48 · imports: `import argparse; import subprocess; import tempfile; import os; import sys; import shutil` · total_objects 7, num_imports 6, num_functions 1
label `node_id 1` (label_desc_len 107, index 214,130): "Use image node_id 1 for calling a global function with example usage: run(cmd, stdout) without return types"
**`intlist`** · global function · args: `obj` · returns: `unknown`

```python
def intlist(obj):
    return [int(v) for v in obj]
```

file: `redis-py/tests/test_asyncio/test_bloom.py`, lines 14–15 · imports: `from math import inf; import pytest; import redis.asyncio; from redis.exceptions import ModuleError, RedisError; from redis.utils import HIREDIS_AVAILABLE; from tests.conftest import assert_resp_response, is_resp2_connection, skip_ifmodversion_lt` · total_objects 7, num_imports 6, num_functions 1
label `node_id 1` (label_desc_len 103, index 339,428): "Use image node_id 1 for calling a global function with example usage: intlist(obj) and returns: unknown"
**`create_item`** · global function · args: `item` · returns: `item`

```python
def create_item(item: Item):
    return item
```

file: `fastapi/docs_src/separate_openapi_schemas/tutorial001.py`, lines 16–17 · imports: `from typing import List, Union; from fastapi import FastAPI; from pydantic import BaseModel` · total_objects 7, num_imports 3, num_functions 2
label `node_id 1` (label_desc_len 105, index 135,814): "Use image node_id 1 for calling a global function with example usage: create_item(item) and returns: item"
**`parse_args`** · global function · returns: `parser`

```python
def parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument(
        "--num_cores",
        type=int,
        default=1,
        help="Number of TPU cores to use (1 or 8).",
    )
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
```

file: `accelerate/tests/xla_spawn.py`, lines 36–67 · imports: `import importlib; import sys; from argparse import REMAINDER, ArgumentParser; from pathlib import Path; import torch_xla.distributed.xla_multiprocessing` · total_objects 7, num_imports 5, num_functions 2
label `node_id 1` (label_desc_len 102, index 1,215): "Use image node_id 1 for calling a global function with example usage: parse_args() and returns: parser"
**`test_suite`** · global function · returns: `unittest`

```python
def test_suite():
    return unittest.TestLoader().loadTestsFromName(__name__)
```

file: `psycopg2/tests/test_bugX000.py`, lines 43–44 · imports: `import psycopg2; import time; import unittest` · total_objects 7, num_imports 3, num_functions 1
label `node_id 1` (label_desc_len 104, index 256,395): "Use image node_id 1 for calling a global function with example usage: test_suite() and returns: unittest"
**`test_date_time_allocation_bug`** · method of `DateTimeAllocationBugTestCase(unittest)` · args: `self`

```python
def test_date_time_allocation_bug(self):
    d1 = psycopg2.Date(2002, 12, 25)
    d2 = psycopg2.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0)))
    t1 = psycopg2.Time(13, 45, 30)
    t2 = psycopg2.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0)))
    t1 = psycopg2.Timestamp(2002, 12, 25, 13, 45, 30)
    t2 = psycopg2.TimestampFromTicks(
        time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))
    )
    del d1, d2, t1, t2
```

file: `psycopg2/tests/test_bugX000.py`, lines 32–40 · imports: `import psycopg2; import time; import unittest` · total_objects 7, num_classes 1, num_imports 3, num_all_bases 1, num_bases 1, num_functions 1, num_methods 1
label `node_id 1` (label_desc_len 178, index 256,394): "Use image node_id 1 for calling the DateTimeAllocationBugTestCase obj's underlying member method code with example usage: obj.test_date_time_allocation_bug() without return types"
**`make_ax`** · global function · args: `grid` · returns: `ax`

```python
def make_ax(grid=False):
    ax = plt.figure().add_subplot(projection="3d")
    ax.grid(grid)
    ax.set_axis_off()
    return ax
```

file: `scikit-image/doc/examples/segmentation/plot_euler_number.py`, lines 75–79 · imports: `from skimage.measure import euler_number, label; import matplotlib.pyplot; import numpy` · total_objects 7, num_imports 3, num_functions 4
label `node_id 1` (label_desc_len 99, index 344,193): "Use image node_id 1 for calling a global function with example usage: make_ax(grid) and returns: ax"
**`explode`** · global function · args: `data` · returns: `data_e`

```python
def explode(data):
    """visualization to separate voxels

    Data voxels are separated by 0-valued ones so that they appear
    separated in the matplotlib figure.
    """
    size = np.array(data.shape) * 2
    data_e = np.zeros(size - 1, dtype=data.dtype)
    data_e[::2, ::2, ::2] = data
    return data_e
```

file: `scikit-image/doc/examples/segmentation/plot_euler_number.py`, lines 82–91 · imports: `from skimage.measure import euler_number, label; import matplotlib.pyplot; import numpy` · total_objects 7, num_imports 3, num_functions 4
label `node_id 2` (label_desc_len 103, index 344,194): "Use image node_id 2 for calling a global function with example usage: explode(data) and returns: data_e"
**`cli`** · global function

```python
def cli():
    """Commandline interface."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--text", type=str, help="text to be turned into audio")
    parser.add_argument(
        "--output_filename",
        type=str,
        default="bark_generation.wav",
        help="output audio file name",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="directory to save the outputs",
    )
    parser.add_argument(
        "--history_prompt",
        type=str,
        default=None,
        help="history choice for audio cloning, be path to the .npz file.",
    )
    parser.add_argument(
        "--text_temp",
        default=0.7,
        type=float,
        help="generation temperature (1.0 more diverse, 0.0 more conservative)",
    )
    parser.add_argument(
        "--waveform_temp",
        default=0.7,
        type=float,
        help="generation temperature (1.0 more diverse, 0.0 more conservative)",
    )
    parser.add_argument(
        "--silent",
        default=False,
        type=bool,
        help="disable progress bar",
    )
    parser.add_argument(
        "--output_full",
        default=False,
        type=bool,
        help="return full generation to be used as a history prompt",
    )
    args = vars(parser.parse_args())

    input_text: str = args.get("text")
    output_filename: str = args.get("output_filename")
    output_dir: str = args.get("output_dir")
    history_prompt: str = args.get("history_prompt")
    text_temp: float = args.get("text_temp")
    waveform_temp: float = args.get("waveform_temp")
    silent: bool = args.get("silent")
    output_full: bool = args.get("output_full")

    try:
        os.makedirs(output_dir, exist_ok=True)
        generated_audio = generate_audio(
            input_text,
            history_prompt=history_prompt,
            text_temp=text_temp,
            waveform_temp=waveform_temp,
            silent=silent,
            output_full=output_full,
        )
        output_file_path = os.path.join(output_dir, output_filename)
        write_wav(output_file_path, SAMPLE_RATE, generated_audio)
        print(f"Done! Output audio file is saved at: '{output_file_path}'")
    except Exception as e:
        print(f"Oops, an error occurred: {e}")
```

file: `ai-audio-speech/bark/cli.py`, lines 10–71 · imports: `import argparse; from typing import Dict, Optional, Union; import os; from scipy.io.wavfile import write; from .api import generate_audio; from .generation import SAMPLE_RATE` · total_objects 7, num_imports 6, num_functions 1
label `node_id 1` (label_desc_len 96, index 1,222): "Use image node_id 1 for calling a global function with example usage: cli() without return types"
**`fake_decode_token`** · global function · args: `token` · returns: `User`

```python
def fake_decode_token(token):
    return User(
        username=token + "fakedecoded",
        email="john@example.com",
        full_name="John Doe",
    )
```

file: `fastapi/docs_src/security/tutorial002_an_py310.py`, lines 19–22 · imports: `from typing import Annotated; from fastapi import Depends, FastAPI; from fastapi.security import OAuth2PasswordBearer; from pydantic import BaseModel` · total_objects 7, num_imports 4, num_functions 1
label `node_id 1` (label_desc_len 112, index 135,732): "Use image node_id 1 for calling a global function with example usage: fake_decode_token(token) and returns: User"
**`expand_coordinates`** · global function · args: `indices` · returns: `x, y, z`

```python
def expand_coordinates(indices):
    """
    This collapses together pairs of indices, so that
    the gaps in the volume array will have a zero width.
    """
    x, y, z = indices
    x[1::2, :, :] += 1
    y[:, 1::2, :] += 1
    z[:, :, 1::2] += 1
    return x, y, z
```

file: `scikit-image/doc/examples/segmentation/plot_euler_number.py`, lines 97–106 · imports: `from skimage.measure import euler_number, label; import matplotlib.pyplot; import numpy` · total_objects 7, num_imports 3, num_functions 4
label `node_id 3` (label_desc_len 120, index 344,195): "Use image node_id 3 for calling a global function with example usage: expand_coordinates(indices) and returns: x, y, z"
**`prepare_tp_fused_qkvw`** · global function · args: `module, src, mp_size, gpu_index` · returns: `_bloom_type_transpose, tp_fuseqkv_weight, split_fusedqkv, split_fusedqkv, _glm_type_transpose, torch, _bloom_type_transpose, _transpose_fused_qkvw, _codegen_type_transpose, _glm_type_transpose, _qwen_type_transpose, _bigcode_type_transpose`

```python
def prepare_tp_fused_qkvw(module, src, mp_size, gpu_index):
    module_str = str(module).strip()
    if src is None:
        return
    fused_type_dict = {
        "CodeGenBlock": "codegentype",
        "BloomBlock": "bloomtype",
        "GLMBlock": "glmtype",
        "MPTBlock": "glmtype",
        "MptBlock": "glmtype",
        "BaichuanLayer": "glmtype",
        "DecoderLayer": "glmtype",
        "QWenBlock": "qwentype",
        "FalconDecoderLayer": "bloomtype",
        "GPTBigCodeBlock": "bigcodetype",
    }

    def _codegen_type_transpose(input, mp_size, codegen_mp_num=4):
        # codegen_mp_num defined in https://github.com/huggingface/transformers/blob/main/src/transformers/models/codegen/modeling_codegen.py
        assert (
            get_num_kv_heads() % (mp_size * codegen_mp_num) == 0
        ), "codgen autoTP requires num_kv_heads % (mp_size*codegen_mp_num) == 0"
        # input : [3*hidden_dim, hidden_dim](weight) or [3*hidden_dim](bias)
        shape = input.shape
        dst_shape = get_shard_size(shape[0], mp_size)
        num_mp_blocks = input.reshape(
            codegen_mp_num, shape[0] // codegen_mp_num, shape[1]
        )
        # num_mp_blocks : [codegen_mp_num, 3*hidden_dim/codegen_mp_num, :]
        src_split = list(
            torch.split(num_mp_blocks, num_mp_blocks.shape[1] // 3, dim=1)
        )
        src_split = [
            x.reshape(codegen_mp_num * mp_size, -1, shape[1]) for x in src_split
        ]
        split_fusedqkv = split_by_qkvlist_and_refuse(
            src_split, get_shard_size(shape[0] // 3, mp_size), 0, 1
        )
        tp_fuseqkv_weight = torch.cat(split_fusedqkv, dim=0).reshape(shape[0], -1)
        return tp_fuseqkv_weight[gpu_index * dst_shape : (gpu_index + 1) * dst_shape]

    def _glm_type_transpose(input, mp_size):
        # input : [3*hidden_dim, hidden_dim](weight) or [3*hidden_dim](bias)
        shape = input.shape
        src_split = torch.split(input, shape[0] // 3, dim=0)
        split_fusedqkv = split_by_qkvlist_and_refuse(
            src_split, get_shard_size_list(shape[0] // 3, mp_size)
        )
        return split_fusedqkv[gpu_index]

    def _bloom_type_transpose(input, mp_size):
        shape = input.shape
        split_fusedqkv = input.split(get_shard_size_list(shape[0], mp_size), dim=0)
        return split_fusedqkv[gpu_index]

    def _qwen_type_transpose(input, mp_size, module):
        if not hasattr(module, "_ds_fusedqkv_entered"):
            # Adjust splitting absolute value variables
            setattr(module, "_ds_fusedqkv_entered", True)
            module.attn.split_size = get_shard_size(module.attn.split_size, mp_size)
        return _glm_type_transpose(input, mp_size)

    def _bigcode_type_transpose(input, mp_size):
        n_embd = get_n_embd()
        q = input[:n_embd]
        kv = input[n_embd:]
        shape = q.shape
        split_q = q.split(get_shard_size_list(shape[0], mp_size), dim=0)
        return torch.cat((split_q[gpu_index], kv), dim=0)

    def _transpose_fused_qkvw(src, mp_size, fused_qkv_type=None, module=None):
        # suppose num_heads=n, q(n)_w means the n-th q head linear weight, the weight format are as following
        # bloomtype: [q(1)_w,k(1)_w,v(1)_w,q(2)_w,k(2)_w,v(2)_w,...,q(n)_w,k(n)_w,v(n)_w]
        # glmtype: [q(1)_w, q(2)_w,...,q(n)_w,k(1)_w,k(2)_w,...,k(n)_w,v(1)_w,v(2)_w,...,v(n)_w]
        # codegentype: [q(1)_w,q(2)_w,...,q(n/t)_w,k(1)_w,k(2)_w,...,k(n/t)_w,v(1)_2,v(2)_w,...v(n/t)_w,q(n/t+1)_w,...], where t is a const defined in model file.
        if fused_qkv_type == "bloomtype":
            return _bloom_type_transpose(src, mp_size)
        elif fused_qkv_type == "codegentype":
            return _codegen_type_transpose(src, mp_size)
        elif fused_qkv_type == "glmtype":
            return _glm_type_transpose(src, mp_size)
        elif fused_qkv_type == "qwentype":
            return _qwen_type_transpose(src, mp_size, module)
        elif fused_qkv_type == "bigcodetype":
            return _bigcode_type_transpose(src, mp_size)
        raise ValueError("unknown fused_qkv_type")

    for module_name, fused_type in fused_type_dict.items():
        if re.search(module_name, module_str):
            return _transpose_fused_qkvw(src, mp_size, fused_type, module)
    warning_once(
        f"Unrecognized fusedkqv weight type, default to using bloom type,"
        f"please check in prepare_tp_fused_qkvw() to avoid potential calculation errors"
    )
    return _bloom_type_transpose(src, mp_size)
```

file: `deepspeed/deepspeed/module_inject/fusedqkv_utils.py`, lines 30–122 · imports: `import torch; from deepspeed.utils.logging import warning_once; from deepspeed.module_inject.tp_shard import get_shard_size, get_shard_size_list, get_num_kv_heads, get_n_embd; import re` · total_objects 7, num_imports 4, num_functions 3
label `node_id 3` (label_desc_len 377, index 102,805): "Use image node_id 3 for calling a global function with example usage: prepare_tp_fused_qkvw(module, src, mp_size, gpu_index) and returns: _bloom_type_transpose, tp_fuseqkv_weight, split_fusedqkv, split_fusedqkv, _glm_type_transpose, torch, _bloom_type_transpose, _transpose_fused_qkvw, _codegen_type_transpose, _glm_type_transpose, _qwen_type_transpose, _bigcode_type_transpose"
**`test_en_lex_attrs_capitals`** · global function · args: `word`

```python
def test_en_lex_attrs_capitals(word):
    assert like_num(word)
    assert like_num(word.upper())
```

file: `spacy/spacy/tests/lang/en/test_text.py`, lines 68–70 · imports: `import pytest; from spacy.lang.en.lex_attrs import like_num` · total_objects 7, num_imports 2, num_functions 5
label `node_id 5` (label_desc_len 123, index 388,124): "Use image node_id 5 for calling a global function with example usage: test_en_lex_attrs_capitals(word) without return types"
**`test_en_lex_attrs_like_number_for_ordinal`** · global function · args: `word`

```python
def test_en_lex_attrs_like_number_for_ordinal(word):
    assert like_num(word)
```

file: `spacy/spacy/tests/lang/en/test_text.py`, lines 63–64 · imports: `import pytest; from spacy.lang.en.lex_attrs import like_num` · total_objects 7, num_imports 2, num_functions 5
label `node_id 4` (label_desc_len 138, index 388,123): "Use image node_id 4 for calling a global function with example usage: test_en_lex_attrs_like_number_for_ordinal(word) without return types"
**`test_lex_attrs_like_number`** · global function · args: `en_tokenizer, text, match`

```python
def test_lex_attrs_like_number(en_tokenizer, text, match):
    tokens = en_tokenizer(text)
    assert len(tokens) == 1
    assert tokens[0].like_num == match
```

file: `spacy/spacy/tests/lang/en/test_text.py`, lines 54–57 · imports: `import pytest; from spacy.lang.en.lex_attrs import like_num` · total_objects 7, num_imports 2, num_functions 5
label `node_id 3` (label_desc_len 144, index 388,122): "Use image node_id 3 for calling a global function with example usage: test_lex_attrs_like_number(en_tokenizer, text, match) without return types"
**`test_en_tokenizer_handles_cnts`** · global function · args: `en_tokenizer, text, length`

```python
def test_en_tokenizer_handles_cnts(en_tokenizer, text, length):
    tokens = en_tokenizer(text)
    assert len(tokens) == length
```

file: `spacy/spacy/tests/lang/en/test_text.py`, lines 33–35 · imports: `import pytest; from spacy.lang.en.lex_attrs import like_num` · total_objects 7, num_imports 2, num_functions 5
label `node_id 2` (label_desc_len 149, index 388,121): "Use image node_id 2 for calling a global function with example usage: test_en_tokenizer_handles_cnts(en_tokenizer, text, length) without return types"
**`test_en_tokenizer_handles_long_text`** · global function · args: `en_tokenizer`

```python
def test_en_tokenizer_handles_long_text(en_tokenizer):
    text = """Tributes pour in for late British Labour Party leader

Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.

In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.

"Mr. Smith, throughout his distinguished"""
    tokens = en_tokenizer(text)
    assert len(tokens) == 76
```

file: `spacy/spacy/tests/lang/en/test_text.py`, lines 6–18 · imports: `import pytest; from spacy.lang.en.lex_attrs import like_num` · total_objects 7, num_imports 2, num_functions 5
label `node_id 1` (label_desc_len 140, index 388,120): "Use image node_id 1 for calling a global function with example usage: test_en_tokenizer_handles_long_text(en_tokenizer) without return types"
**`display_voxels`** · global function · args: `volume`

```python
def display_voxels(volume):
    """
    volume: (N,M,P) array
    Represents a binary set of pixels: objects are marked with 1,
    complementary (porosities) with 0.

    The voxels are actually represented with blue transparent surfaces.
    Inner porosities are represented in red.
    """

    # define colors
    red = "#ff0000ff"
    blue = "#1f77b410"

    # upscale the above voxel image, leaving gaps
    filled = explode(np.ones(volume.shape))
    fcolors = explode(np.where(volume, blue, red))

    # Shrink the gaps
    x, y, z = expand_coordinates(np.indices(np.array(filled.shape) + 1))

    # Define 3D figure and place voxels
    ax = make_ax()
    ax.voxels(x, y, z, filled, facecolors=fcolors)

    # Compute Euler number in 6 and 26 neighborhood configuration, that
    # correspond to 1 and 3 connectivity, respectively
    e26 = euler_number(volume, connectivity=3)
    e6 = euler_number(volume, connectivity=1)
    plt.title(f"Euler number for N26: {e26}, for N6: {e6}")
    plt.show()
```

file: `scikit-image/doc/examples/segmentation/plot_euler_number.py`, lines 109–139 · imports: `from skimage.measure import euler_number, label; import matplotlib.pyplot; import numpy` · total_objects 7, num_imports 3, num_functions 4
label `node_id 4` (label_desc_len 113, index 344,196): "Use image node_id 4 for calling a global function with example usage: display_voxels(volume) without return types"
**`__init__`** · method of `C2SimpleNet` · args: `self, op_name, num_inputs, debug` · returns: `C2SimpleNet`
class_docstr: "This module constructs a net with 'op_name' operator. The net consist a series of such operator. It initializes the workspace with input blob equal to the number of parameters needed for the op. Provides forward method to run the net niter times."

```python
def __init__(self, op_name, num_inputs=1, debug=False):
    self.input_names = []
    self.net = core.Net("framework_benchmark_net")
    self.input_names = [f"in_{i}" for i in range(num_inputs)]
    for i in range(num_inputs):
        add_blob(workspace, self.input_names[i], [1])
    self.net.AddExternalInputs(self.input_names)
    op_constructor = getattr(self.net, op_name)
    op_constructor(self.input_names)
    self.output_name = self.net._net.op[-1].output
    print(f"Benchmarking op {op_name}:")
    for _ in range(NUM_LOOP_ITERS):
        output_name = self.net._net.op[-1].output
        self.input_names[-1] = output_name[0]
        assert len(self.input_names) == num_inputs
        op_constructor(self.input_names)
    workspace.CreateNet(self.net)
    if debug:
        print(self.net._net)
```

file: `pytorch/benchmarks/framework_overhead_benchmark/C2Module.py`, lines 23–41 · imports: `import numpy; from caffe2.python import core, workspace; from utils import NUM_LOOP_ITERS` · total_objects 7, num_classes 1, num_imports 3, num_all_bases 0, num_functions 1, num_methods 2
label `node_id 1` (label_desc_len 115, index 271,494): "Use image node_id 1 to create a new C2SimpleNet object with example: obj = C2SimpleNet(op_name, num_inputs, debug)"
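
The class docstring above describes how the benchmark net is built; a short, hedged usage sketch (the operator name "Relu" is illustrative, and any Caffe2 operator exposed as a `core.Net` constructor method should work the same way):

```python
# Build a net that chains NUM_LOOP_ITERS copies of the "Relu" op over one input blob;
# the constructor registers the net with the Caffe2 workspace so it can be run later.
net = C2SimpleNet("Relu", num_inputs=1)
```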
**`__init__`** · method of `Retry` · args: `self, backoff, retries, supported_errors` · returns: `Retry`
class_docstr: "Retry a specific number of times after a failure"
docstr: "Initialize a `Retry` object with a `Backoff` object that retries a maximum of `retries` times. `retries` can be negative to retry forever. You can specify the types of supported errors which trigger a retry with the `supported_errors` parameter."

```python
def __init__(
    self,
    backoff,
    retries,
    supported_errors=(ConnectionError, TimeoutError, socket.timeout),
):
    """
    Initialize a `Retry` object with a `Backoff` object
    that retries a maximum of `retries` times.
    `retries` can be negative to retry forever.
    You can specify the types of supported errors which trigger
    a retry with the `supported_errors` parameter.
    """
    self._backoff = backoff
    self._retries = retries
    self._supported_errors = supported_errors
```

file: `redis-py/redis/retry.py`, lines 10–25 · imports: `import socket; from time import sleep; from redis.exceptions import ConnectionError, TimeoutError` · total_objects 7, num_classes 1, num_imports 3, num_all_bases 0, num_functions 0, num_methods 3
label `node_id 1` (label_desc_len 111, index 339,330): "Use image node_id 1 to create a new Retry object with example: obj = Retry(backoff, retries, supported_errors)"
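
A brief usage sketch for the `Retry` class above, wired into a client the way redis-py documents it; treat the exact constructor arguments as illustrative:

```python
from redis import Redis
from redis.backoff import ExponentialBackoff
from redis.exceptions import ConnectionError, TimeoutError
from redis.retry import Retry

# Retry failed commands up to 3 times, sleeping per the backoff policy between attempts.
client = Redis(
    host="localhost",
    retry=Retry(ExponentialBackoff(), 3),
    retry_on_error=[ConnectionError, TimeoutError],
)
```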
**`test_polygon_area`** · global function

```python
def test_polygon_area():
    x = [0, 0, 1, 1]
    y = [0, 1, 1, 0]

    assert_almost_equal(polygon_area(y, x), 1)

    x = [0, 0, 1]
    y = [0, 1, 1]

    assert_almost_equal(polygon_area(y, x), 0.5)

    x = [0, 0, 0.5, 1, 1, 0.5]
    y = [0, 1, 0.5, 1, 0, 0.5]

    assert_almost_equal(polygon_area(y, x), 0.5)
```

file: `scikit-image/skimage/_shared/tests/test_geometry.py`, lines 38–52 · imports: `import pytest; from skimage._shared._geometry import polygon_clip, polygon_area; import numpy; from numpy.testing import assert_equal, assert_almost_equal` · total_objects 7, num_imports 4, num_functions 3
label `node_id 1` (label_desc_len 110, index 344,274): "Use image node_id 1 for calling a global function with example usage: test_polygon_area() without return types"
**`test_return_sequences`** · method of `Seq2seqTrainerTester(TestCasePlus)` · args: `self` · returns: `model_inputs`

```python
def test_return_sequences(self):
    # Tests that the number of generated sequences is correct when num_return_sequences > 1
    # and essentially ensuring that `accelerator.gather()` is used instead of `gather_for_metrics`
    INPUT_COLUMN = "question"
    TARGET_COLUMN = "answer"
    MAX_INPUT_LENGTH = 256
    MAX_TARGET_LENGTH = 256

    dataset = datasets.load_dataset("gsm8k", "main", split="train[:38]")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    data_collator = DataCollatorForSeq2Seq(
        tokenizer, model=model, return_tensors="pt", padding="longest"
    )
    gen_config = GenerationConfig.from_pretrained(
        "t5-small",
        max_length=None,
        min_length=None,
        max_new_tokens=256,
        min_new_tokens=1,
        num_beams=5,
    )
    training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True)
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=lambda x: {"samples": x[0].shape[0]},
    )

    def prepare_data(examples):
        # Remove pairs where at least one record is none
        inputs = examples[INPUT_COLUMN]
        targets = examples[TARGET_COLUMN]
        model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True)
        labels = tokenizer(
            text_target=targets,
            max_length=MAX_TARGET_LENGTH,
            truncation=True,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs

    prepared_dataset = dataset.map(
        prepare_data,
        batched=True,
        remove_columns=[INPUT_COLUMN, TARGET_COLUMN],
    )
    dataset_len = len(prepared_dataset)  # 38

    for num_return_sequences in range(3, 0, -1):
        gen_config.num_return_sequences = num_return_sequences
        metrics = trainer.evaluate(
            eval_dataset=prepared_dataset,
            generation_config=gen_config,
        )
        assert (
            metrics["eval_samples"] == dataset_len * num_return_sequences
        ), f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}"
```

file: `transformers/tests/trainer/test_trainer_seq2seq.py`, lines 138–183 · imports: `from transformers import AutoModelForSeq2SeqLM, BertTokenizer, DataCollatorForSeq2Seq, EncoderDecoderModel, GenerationConfig, Seq2SeqTrainer, Seq2SeqTrainingArguments, T5Tokenizer; from transformers.testing_utils import TestCasePlus, require_torch, slow; from transformers.utils import is_datasets_available` · total_objects 7, num_classes 1, num_imports 3, num_all_bases 1, num_bases 1, num_functions 0, num_methods 2
label `node_id 2` (label_desc_len 166, index 486,561): "Use image node_id 2 for calling the Seq2seqTrainerTester obj's underlying member method code with example usage: obj.test_return_sequences() and returns: model_inputs"
**`main`** · global function

```python
def main():
    bench("eager ", add1)
    bench("compiled", torch.compile(add1))
```

file: `pytorch/benchmarks/dynamo/microbenchmarks/overheads.py`, lines 24–26 · imports: `import time; import timeit; import numpy; import torch` · total_objects 7, num_imports 4, num_functions 3
label `node_id 3` (label_desc_len 97, index 271,273): "Use image node_id 3 for calling a global function with example usage: main() without return types"
**`test_hooks_unpickle`** · global function · returns: `str`

```python
def test_hooks_unpickle():
    def inner_func(d1, d2):
        return "hello!"

    nlp = Language()
    doc = nlp("Hello")
    doc.user_hooks["similarity"] = inner_func
    b = pickle.dumps(doc)
    doc2 = pickle.loads(b)
    assert doc2.similarity(None) == "hello!"
```

file: `spacy/spacy/tests/doc/test_pickle_doc.py`, lines 45–54 · imports: `from spacy.compat import pickle; from spacy.language import Language` · total_objects 7, num_imports 2, num_functions 5
label `node_id 5` (label_desc_len 108, index 387,845): "Use image node_id 5 for calling a global function with example usage: test_hooks_unpickle() and returns: str"
|
tir_call | global | null | false | ib,extern,name | null | null | null | null | buf,tvm,tvm | def tir_call(ib: tvm.tir.ir_builder, extern: bool, name: str, *args):
"""
ib: ir_builder
extern: bool
True --> tvm.tir.call_extern
False --> tvm.tir.call_packed
name: str
function name
*args:
arguments for function call
"""
def buf_from_array(ib, arr, dtype):
# Allocate enough memory to store the whole array
var = ib.allocate("int32", (len(arr),), scope="global")
for i, v in enumerate(arr):
var[i] = v
# Declare a buffer, which is basically a view on the chunk of memory that we allocated
buf = tvm.tir.decl_buffer(
(len(arr),), dtype, data=var, scope="global"
)
return buf
if extern:
args = [
i.data if isinstance(i, tvm.tir.Buffer) else i
for i in args
]
return tvm.tir.call_extern("int32", name, *args)
else:
args = [
buf_from_array(ib, i, "int32")
if isinstance(i, (tuple, list, tvm.ir.container.Array))
else i
for i in args
]
return tvm.tir.call_packed(name, *args)
| ["def","tir_call","(","ib",":","tvm.tir.ir_builder",",","extern",":","bool",",","name",":","str",",","*","args",")",":","``","''","''","ib",":","ir_builder","extern",":","bool","True","--",">","tvm.tir.call_extern","False","--",">","tvm.tir.call_packed","name",":","str","function","name","*","args",":","arguments","for","function","call","``","''","''","def","buf_from_array","(","ib",",","arr",",","dtype",")",":","#","Allocate","enough","memory","to","store","the","whole","array","var","=","ib.allocate","(","``","int32","''",",","(","len","(","arr",")",",",")",",","scope=","''","global","''",")","for","i",",","v","in","enumerate","(","arr",")",":","var","[","i","]","=","v","#","Declare","a","buffer",",","which","is","basically","a","view","on","the","chunk","of","memory","that","we","allocated","buf","=","tvm.tir.decl_buffer","(","(","len","(","arr",")",",",")",",","dtype",",","data=var",",","scope=","''","global","''",")","return","buf","if","extern",":","args","=","[","i.data","if","isinstance","(","i",",","tvm.tir.Buffer",")","else","i","for","i","in","args","]","return","tvm.tir.call_extern","(","``","int32","''",",","name",",","*","args",")","else",":","args","=","[","buf_from_array","(","ib",",","i",",","``","int32","''",")","if","isinstance","(","i",",","(","tuple",",","list",",","tvm.ir.container.Array",")",")","else","i","for","i","in","args","]","return","tvm.tir.call_packed","(","name",",","*","args",")"] | 105 | 136 | null | passes.py | tvm/apps/uma/_template/passes.py | import tvm
from tvm import tir
from tvm.relay.backend.contrib.uma.api.utils import add_llvm_to_block | 7 | null | 3 | null | null | 1 | null | Use image node_id 1 for calling a global function with example usage: tir_call(ib, extern, name) and returns: buf, tvm, tvm | 123 | node_id 1 | 487,914 |
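A possible call site for `tir_call` above, assuming the function is in scope: emit an extern call that takes two buffer pointers and a scalar. Buffer shapes and the extern name are placeholders, not values from the repository:

```python
import tvm

ib = tvm.tir.ir_builder.create()
a = tvm.tir.decl_buffer((16,), "int32", name="a")
b = tvm.tir.decl_buffer((16,), "int32", name="b")

# extern=True: tir_call lowers each Buffer to its data pointer and routes
# the arguments through tvm.tir.call_extern.
ib.emit(tir_call(ib, True, "my_extern_kernel", a, b, 16))
print(ib.get())
```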
_my_ai_hw_conv2d_pass | MyAiHwConv2dPass | null | true | cls,func,mod,ctx | null | null | null | null | r,unknown,op,func,func,irb_result,op | def _my_ai_hw_conv2d_pass(cls, func, mod, ctx):
_loops = dict()
_handles = []
_entry_node = None
def _has_block(name: str, func: tvm.tir.PrimFunc) -> bool:
"""
        Determine if a tir.block with `name` exists in `func`
"""
def _hb(op):
if isinstance(op, tvm.tir.Block):
_found_blocks.append(op.name_hint)
_found_blocks = []
tvm.tir.stmt_functor.post_order_visit(func.body, _hb)
return name in _found_blocks
def _detect_and_replace_conv2d(
func: tvm.tir.PrimFunc,
mod: tvm.ir.IRModule,
ctx: tvm.ir.transform.PassContext,
) -> tvm.tir.PrimFunc:
def _replace_conv2d(op):
if op == _entry_node:
irb = tvm.tir.ir_builder.create()
# Collection of buffer address
buffers = [b[1].data for b in _handles]
# extraction of loop offsets
for k, v in _loops.items():
assert v.min.value == 0
offset_order = ["co", "w", "h", "ci", "kh", "kw"]
offsets = [
_loops[i].extent.value for i in offset_order
]
args = buffers + offsets
irb.emit(
tir_call(
irb, True, cls._EXTERNAL_FUNCTION_NAME, *args
)
)
irb_result = irb.get()
return irb_result
elif isinstance(op, tvm.tir.SeqStmt):
# Remove that pad block of TOPI's conv2DNCHW by only returning the 2nd statement
return op.seq[1]
return op
sch = tir.Schedule(func)
if _has_block(cls._TVM_BLOCK_MATCH_NAME, func):
conv2d_block = sch.get_block(cls._TVM_BLOCK_MATCH_NAME)
rv_loops = sch.get_loops(conv2d_block)
assert len(rv_loops) == 7
loops = dict(
n=rv_loops[0],
co=rv_loops[1],
h=rv_loops[2],
w=rv_loops[3],
ci=rv_loops[4],
kh=rv_loops[5],
kw=rv_loops[6],
)
_entry_node = sch.get(rv_loops[1])
_loops = {k: sch.get(v) for k, v in loops.items()}
_handles = func.buffer_map.items()
x = tvm.tir.stmt_functor.ir_transform(
func.body,
None,
_replace_conv2d,
["tir.For", "tir.SeqStmt"],
)
return func.with_body(x)
else:
return func
r = _detect_and_replace_conv2d(func, mod, ctx)
return r
| ["def","_my_ai_hw_conv2d_pass","(","cls",",","func",",","mod",",","ctx",")",":","_loops","=","dict","(",")","_handles","=","[","]","_entry_node","=","None","def","_has_block","(","name",":","str",",","func",":","tvm.tir.PrimFunc",")","-",">","bool",":","``","''","''","Determine","of","a","tir.block","with","`","name","`","exists","in","`","func","`","``","''","''","def","_hb","(","op",")",":","if","isinstance","(","op",",","tvm.tir.Block",")",":","_found_blocks.append","(","op.name_hint",")","_found_blocks","=","[","]","tvm.tir.stmt_functor.post_order_visit","(","func.body",",","_hb",")","return","name","in","_found_blocks","def","_detect_and_replace_conv2d","(","func",":","tvm.tir.PrimFunc",",","mod",":","tvm.ir.IRModule",",","ctx",":","tvm.ir.transform.PassContext",",",")","-",">","tvm.tir.PrimFunc",":","def","_replace_conv2d","(","op",")",":","if","op","==","_entry_node",":","irb","=","tvm.tir.ir_builder.create","(",")","#","Collection","of","buffer","address","buffers","=","[","b","[","1","]",".data","for","b","in","_handles","]","#","extraction","of","loop","offsets","for","k",",","v","in","_loops.items","(",")",":","assert","v.min.value","==","0","offset_order","=","[","``","co","''",",","``","w","''",",","``","h","''",",","``","ci","''",",","``","kh","''",",","``","kw","''","]","offsets","=","[","_loops","[","i","]",".extent.value","for","i","in","offset_order","]","args","=","buffers","+","offsets","irb.emit","(","tir_call","(","irb",",","True",",","cls._EXTERNAL_FUNCTION_NAME",",","*","args",")",")","irb_result","=","irb.get","(",")","return","irb_result","elif","isinstance","(","op",",","tvm.tir.SeqStmt",")",":","#","Remove","that","pad","block","of","TOPI","'s","conv2DNCHW","by","only","returning","the","2nd","statement","return","op.seq","[","1","]","return","op","sch","=","tir.Schedule","(","func",")","if","_has_block","(","cls._TVM_BLOCK_MATCH_NAME",",","func",")",":","conv2d_block","=","sch.get_block","(","cls._TVM_BLOCK_MATCH_NAME",")","rv_loops","=","sch.get_loops","(","conv2d_block",")","assert","len","(","rv_loops",")","==","7","loops","=","dict","(","n=rv_loops","[","0","]",",","co=rv_loops","[","1","]",",","h=rv_loops","[","2","]",",","w=rv_loops","[","3","]",",","ci=rv_loops","[","4","]",",","kh=rv_loops","[","5","]",",","kw=rv_loops","[","6","]",",",")","_entry_node","=","sch.get","(","rv_loops","[","1","]",")","_loops","=","{","k",":","sch.get","(","v",")","for","k",",","v","in","loops.items","(",")","}","_handles","=","func.buffer_map.items","(",")","x","=","tvm.tir.stmt_functor.ir_transform","(","func.body",",","None",",","_replace_conv2d",",","[","``","tir.For","''",",","``","tir.SeqStmt","''","]",",",")","return","func.with_body","(","x",")","else",":","return","func","r","=","_detect_and_replace_conv2d","(","func",",","mod",",","ctx",")","return","r"] | 35 | 102 | null | passes.py | tvm/apps/uma/_template/passes.py | import tvm
from tvm import tir
from tvm.relay.backend.contrib.uma.api.utils import add_llvm_to_block | 7 | 1 | 3 | 0 | null | 1 | 2 | Use image node_id 2 for calling the MyAiHwConv2dPass obj's underlying member method code with example usage: obj._my_ai_hw_conv2d_pass(cls, func, mod, ctx) and returns: r, unknown, op, func, func, irb_result, op | 211 | node_id 2 | 487,913 |
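The `_has_block` helper above walks a `PrimFunc` body with `post_order_visit`; the same idiom works standalone. A sketch on a trivial elementwise function (shapes and names are illustrative):

```python
import tvm
from tvm import te

A = te.placeholder((8,), name="A", dtype="float32")
B = te.compute((8,), lambda i: A[i] + 1.0, name="B")
func = te.create_prim_func([A, B])

# Collect block name hints, mirroring _hb in the pass above.
found_blocks = []
tvm.tir.stmt_functor.post_order_visit(
    func.body,
    lambda op: found_blocks.append(op.name_hint)
    if isinstance(op, tvm.tir.Block)
    else None,
)
print(found_blocks)  # expected to contain "B" plus the implicit root block
```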
transform_function | MyAiHwConv2dPass | null | true | self,func,mod,ctx | null | null | null | null | self | def transform_function(
self,
func: tvm.tir.PrimFunc,
mod: tvm.ir.IRModule,
ctx: tvm.ir.transform.PassContext,
) -> tvm.tir.PrimFunc:
return self._my_ai_hw_conv2d_pass(func, mod, ctx)
| ["def","transform_function","(","self",",","func",":","tvm.tir.PrimFunc",",","mod",":","tvm.ir.IRModule",",","ctx",":","tvm.ir.transform.PassContext",",",")","-",">","tvm.tir.PrimFunc",":","return","self._my_ai_hw_conv2d_pass","(","func",",","mod",",","ctx",")"] | 29 | 32 | null | passes.py | tvm/apps/uma/_template/passes.py | import tvm
from tvm import tir
from tvm.relay.backend.contrib.uma.api.utils import add_llvm_to_block | 7 | 1 | 3 | 0 | null | 1 | 2 | Use image node_id 1 for calling the MyAiHwConv2dPass obj's underlying member method code with example usage: obj.transform_function(func, mod, ctx) and returns: self | 165 | node_id 1 | 487,912 |
pytest_cmdline_preparse | global | null | false | config,args | null | null | null | null | null | def pytest_cmdline_preparse(config, args):
if "--unskip-missing-tokenizer" not in args:
return
global unskip_missing_tokenizer
unskip_missing_tokenizer = True
| ["def","pytest_cmdline_preparse","(","config",",","args",")",":","if","``","--","unskip-missing-tokenizer","''","not","in","args",":","return","global","unskip_missing_tokenizer","unskip_missing_tokenizer","=","True"] | 46 | 50 | null | conftest.py | llama-recipes/tests/conftest.py | import pytest
from transformers import LlamaTokenizer | 7 | null | 2 | null | null | 5 | null | Use image node_id 5 for calling a global function with example usage: pytest_cmdline_preparse(config, args) without return types | 128 | node_id 5 | 166,997 |
downgrade | global | null | false | null | null | null | null | null | def downgrade():
bind = op.get_bind()
session = Session(bind=bind)
# Add the old permissions on the migration itself
add_pvms(session, get_reversed_new_pvms(PVM_MAP))
migrate_roles(session, get_reversed_pvm_map(PVM_MAP))
try:
session.commit()
except SQLAlchemyError as ex:
print(
f"An error occurred while downgrading permissions: {ex}"
)
session.rollback()
pass
| ["def","downgrade","(",")",":","bind","=","op.get_bind","(",")","session","=","Session","(","bind=bind",")","#","Add","the","old","permissions","on","the","migration","itself","add_pvms","(","session",",","get_reversed_new_pvms","(","PVM_MAP",")",")","migrate_roles","(","session",",","get_reversed_pvm_map","(","PVM_MAP",")",")","try",":","session.commit","(",")","except","SQLAlchemyError","as","ex",":","print","(","f","''","An","error","occurred","while","downgrading","permissions",":","{","ex","}","''",")","session.rollback","(",")","pass"] | 96 | 108 | null | 2020-12-14_10-49_42b4c9e01447_security_converge_databases.py | superset/superset/migrations/versions/2020-12-14_10-49_42b4c9e01447_security_converge_databases.py | import sqlalchemy
from alembic import op
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import Session
from superset.migrations.shared.security_converge import add_pvms, get_reversed_new_pvms, get_reversed_pvm_map, migrate_roles, Pvm | 7 | null | 5 | null | null | 2 | null | Use image node_id 2 for calling a global function with example usage: downgrade() without return types | 102 | node_id 2 | 437,212 |
test_dtype | global | null | false | null | null | null | null | null | def test_dtype():
dtype = tvm.DataType("handle")
assert dtype.type_code == tvm.DataTypeCode.HANDLE
| ["def","test_dtype","(",")",":","dtype","=","tvm.DataType","(","``","handle","''",")","assert","dtype.type_code","==","tvm.DataTypeCode.HANDLE"] | 63 | 65 | null | test_runtime_ndarray.py | tvm/tests/python/all-platform-minimal-test/test_runtime_ndarray.py | import tvm
from tvm import te
import numpy
import tvm.testing | 7 | null | 4 | null | null | 3 | null | Use image node_id 3 for calling a global function with example usage: test_dtype() without return types | 103 | node_id 3 | 498,113 |
upgrade | global | null | false | null | null | null | null | null | def upgrade():
bind = op.get_bind()
session = Session(bind=bind)
# Add the new permissions on the migration itself
add_pvms(session, NEW_PVMS)
migrate_roles(session, PVM_MAP)
try:
session.commit()
except SQLAlchemyError as ex:
print(f"An error occurred while upgrading permissions: {ex}")
session.rollback()
| ["def","upgrade","(",")",":","bind","=","op.get_bind","(",")","session","=","Session","(","bind=bind",")","#","Add","the","new","permissions","on","the","migration","itself","add_pvms","(","session",",","NEW_PVMS",")","migrate_roles","(","session",",","PVM_MAP",")","try",":","session.commit","(",")","except","SQLAlchemyError","as","ex",":","print","(","f","''","An","error","occurred","while","upgrading","permissions",":","{","ex","}","''",")","session.rollback","(",")"] | 82 | 93 | null | 2020-12-14_10-49_42b4c9e01447_security_converge_databases.py | superset/superset/migrations/versions/2020-12-14_10-49_42b4c9e01447_security_converge_databases.py | import sqlalchemy
from alembic import op
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import Session
from superset.migrations.shared.security_converge import add_pvms, get_reversed_new_pvms, get_reversed_pvm_map, migrate_roles, Pvm | 7 | null | 5 | null | null | 2 | null | Use image node_id 1 for calling a global function with example usage: upgrade() without return types | 100 | node_id 1 | 437,211 |
test_pod_log_reader_rolebinding | TestPodReader | null | true | self,triggerer,webserver,expected | Tests RBAC Pod Reader. | ["Tests","RBAC","Pod","Reader","."] | null | null | null | def test_pod_log_reader_rolebinding(
self, triggerer, webserver, expected
):
docs = render_chart(
values={
"triggerer": {"enabled": triggerer},
"webserver": {"allowPodLogReading": webserver},
},
show_only=["templates/rbac/pod-log-reader-rolebinding.yaml"],
)
actual = (
jmespath.search("subjects[*].name", docs[0]) if docs else []
)
assert actual == expected
| ["def","test_pod_log_reader_rolebinding","(","self",",","triggerer",",","webserver",",","expected",")",":","docs","=","render_chart","(","values=","{","``","triggerer","''",":","{","``","enabled","''",":","triggerer","}",",","``","webserver","''",":","{","``","allowPodLogReading","''",":","webserver","}",",","}",",","show_only=","[","``","templates\/rbac\/pod-log-reader-rolebinding.yaml","''","]",",",")","actual","=","(","jmespath.search","(","``","subjects","[","*","]",".name","''",",","docs","[","0","]",")","if","docs","else","[","]",")","assert","actual","==","expected"] | 37 | 46 | null | test_rbac_pod_log_reader.py | airflow/helm_tests/security/test_rbac_pod_log_reader.py | from __future__ import annotations
import jmespath
import pytest
from tests.charts.helm_template_generator import render_chart | 7 | 1 | 4 | 0 | null | 0 | 2 | Use image node_id 1 for calling the TestPodReader obj's underlying member method code with example usage: obj.test_pod_log_reader_rolebinding(triggerer, webserver, expected) without return types | 194 | node_id 1 | 13,850 |
test_pod_log_reader_role | TestPodReader | null | true | self,triggerer,webserver,expected | Tests RBAC Pod Reader. | ["Tests","RBAC","Pod","Reader","."] | null | null | null | def test_pod_log_reader_role(self, triggerer, webserver, expected):
docs = render_chart(
values={
"triggerer": {"enabled": triggerer},
"webserver": {"allowPodLogReading": webserver},
},
show_only=["templates/rbac/pod-log-reader-role.yaml"],
)
actual = (
jmespath.search("metadata.name", docs[0]) if docs else None
)
assert actual == expected
| ["def","test_pod_log_reader_role","(","self",",","triggerer",",","webserver",",","expected",")",":","docs","=","render_chart","(","values=","{","``","triggerer","''",":","{","``","enabled","''",":","triggerer","}",",","``","webserver","''",":","{","``","allowPodLogReading","''",":","webserver","}",",","}",",","show_only=","[","``","templates\/rbac\/pod-log-reader-role.yaml","''","]",",",")","actual","=","(","jmespath.search","(","``","metadata.name","''",",","docs","[","0","]",")","if","docs","else","None",")","assert","actual","==","expected"] | 57 | 66 | null | test_rbac_pod_log_reader.py | airflow/helm_tests/security/test_rbac_pod_log_reader.py | from __future__ import annotations
import jmespath
import pytest
from tests.charts.helm_template_generator import render_chart | 7 | 1 | 4 | 0 | null | 0 | 2 | Use image node_id 2 for calling the TestPodReader obj's underlying member method code with example usage: obj.test_pod_log_reader_role(triggerer, webserver, expected) without return types | 187 | node_id 2 | 13,851 |
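The two helm-chart tests above reduce rendered Kubernetes manifests with `jmespath`; the query shape is easy to show in isolation. The manifest dict below is a made-up stand-in for a rendered template:

```python
import jmespath

doc = {
    "metadata": {"name": "release-name-pod-log-reader-role"},
    "subjects": [{"name": "release-name-airflow-webserver"}],
}

print(jmespath.search("metadata.name", doc))      # scalar lookup
print(jmespath.search("subjects[*].name", doc))   # projection over a list
```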
test_env_after_learn | global | null | false | algo | null | null | null | null | env | def test_env_after_learn(algo):
def make_env():
# acktr requires too much RAM, fails on travis
env = gym.make(
"CartPole-v1" if algo == "acktr" else "PongNoFrameskip-v4"
)
return env
make_session(make_default=True, graph=tf.Graph())
env = SubprocVecEnv([make_env])
learn = get_learn_function(algo)
# Commenting out the following line resolves the issue, though crash happens at env.reset().
learn(
network="mlp",
env=env,
total_timesteps=0,
load_path=None,
seed=None,
)
env.reset()
env.close()
| ["def","test_env_after_learn","(","algo",")",":","def","make_env","(",")",":","#","acktr","requires","too","much","RAM",",","fails","on","travis","env","=","gym.make","(","``","CartPole-v1","''","if","algo","==","``","acktr","''","else","``","PongNoFrameskip-v4","''",")","return","env","make_session","(","make_default=True",",","graph=tf.Graph","(",")",")","env","=","SubprocVecEnv","(","[","make_env","]",")","learn","=","get_learn_function","(","algo",")","#","Commenting","out","the","following","line","resolves","the","issue",",","though","crash","happens","at","env.reset","(",")",".","learn","(","network=","''","mlp","''",",","env=env",",","total_timesteps=0",",","load_path=None",",","seed=None",",",")","env.reset","(",")","env.close","(",")"] | 12 | 27 | null | test_env_after_learn.py | baselines/baselines/common/tests/test_env_after_learn.py | import pytest
import gym
import tensorflow
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.run import get_learn_function
from baselines.common.tf_util import make_session | 7 | null | 6 | null | null | 1 | null | Use image node_id 1 for calling a global function with example usage: test_env_after_learn(algo) and returns: env | 113 | node_id 1 | 35,143 |
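The `SubprocVecEnv` round trip at the heart of the test above can run on its own; the test merely inserts a zero-timestep `learn()` between construction and `reset()`. A minimal sketch (the env id is illustrative):

```python
import gym

from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv

# One worker process running CartPole; reset() returns stacked observations.
venv = SubprocVecEnv([lambda: gym.make("CartPole-v1")])
obs = venv.reset()
print(obs.shape)  # (1, 4) for CartPole-v1
venv.close()
```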
find_constraint_name | global | null | false | upgrade | null | null | null | null | generic_find_constraint_name | def find_constraint_name(upgrade=True):
cols = {"column_name"} if upgrade else {"datasource_name"}
return generic_find_constraint_name(
table="columns",
columns=cols,
referenced="datasources",
database=db,
)
| ["def","find_constraint_name","(","upgrade=True",")",":","cols","=","{","``","column_name","''","}","if","upgrade","else","{","``","datasource_name","''","}","return","generic_find_constraint_name","(","table=","''","columns","''",",","columns=cols",",","referenced=","''","datasources","''",",","database=db",",",")"] | 41 | 45 | null | 2016-05-27_15-03_1226819ee0e3_fix_wrong_constraint_on_table_columns.py | superset/superset/migrations/versions/2016-05-27_15-03_1226819ee0e3_fix_wrong_constraint_on_table_columns.py | import logging
from alembic import op
from superset import db
from superset.utils.core import generic_find_constraint_name | 7 | null | 4 | null | null | 3 | null | Use image node_id 1 for calling a global function with example usage: find_constraint_name(upgrade) and returns: generic_find_constraint_name | 141 | node_id 1 | 436,850 |
test_strict_warning_false | global | null | false | setup,strictness | null | null | null | null | null | def test_strict_warning_false(setup, strictness):
# If the user doesn't wish to be strict about warnings
# the following shouldn't raise any error
os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = strictness
with expected_warnings(["some warnings"]):
pass
| ["def","test_strict_warning_false","(","setup",",","strictness",")",":","#","If","the","user","does","n't","wish","to","be","strict","about","warnings","#","the","following","should","n't","raise","any","error","os.environ","[","``","SKIMAGE_TEST_STRICT_WARNINGS","''","]","=","strictness","with","expected_warnings","(","[","``","some","warnings","''","]",")",":","pass"] | 32 | 37 | null | test_warnings.py | scikit-image/skimage/_shared/tests/test_warnings.py | import os
from skimage._shared._warnings import expected_warnings
import pytest | 7 | null | 3 | null | null | 4 | null | Use image node_id 4 for calling a global function with example usage: test_strict_warning_false(setup, strictness) without return types | 135 | node_id 4 | 344,315 |
test_strict_warning_true | global | null | false | setup,strictness | null | null | null | null | null | def test_strict_warning_true(setup, strictness):
os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = strictness
with pytest.raises(ValueError):
with expected_warnings(["some warnings"]):
pass
| ["def","test_strict_warning_true","(","setup",",","strictness",")",":","os.environ","[","``","SKIMAGE_TEST_STRICT_WARNINGS","''","]","=","strictness","with","pytest.raises","(","ValueError",")",":","with","expected_warnings","(","[","``","some","warnings","''","]",")",":","pass"] | 24 | 28 | null | test_warnings.py | scikit-image/skimage/_shared/tests/test_warnings.py | import os
from skimage._shared._warnings import expected_warnings
import pytest | 7 | null | 3 | null | null | 4 | null | Use image node_id 3 for calling a global function with example usage: test_strict_warning_true(setup, strictness) without return types | 134 | node_id 3 | 344,314 |
test_strict_warnigns_default | global | null | false | setup | null | null | null | null | null | def test_strict_warnigns_default(setup):
# By default we should fail on missing expected warnings
with pytest.raises(ValueError):
with expected_warnings(["some warnings"]):
pass
| ["def","test_strict_warnigns_default","(","setup",")",":","#","By","default","we","should","fail","on","missing","expected","warnings","with","pytest.raises","(","ValueError",")",":","with","expected_warnings","(","[","``","some","warnings","''","]",")",":","pass"] | 16 | 20 | null | test_warnings.py | scikit-image/skimage/_shared/tests/test_warnings.py | import os
from skimage._shared._warnings import expected_warnings
import pytest | 7 | null | 3 | null | null | 4 | null | Use image node_id 2 for calling a global function with example usage: test_strict_warnigns_default(setup) without return types | 126 | node_id 2 | 344,313 |
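To complement the three failure-mode tests above, here is a passing use of `expected_warnings`: the strings are regex patterns matched against warnings actually raised inside the block (the warning text below is made up):

```python
import warnings

from skimage._shared._warnings import expected_warnings

with expected_warnings(["deprecated"]):
    warnings.warn("this helper is deprecated", UserWarning)
```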
test_poly_clip | global | null | false | null | null | null | null | null | def test_poly_clip():
x = [0, 1, 2, 1]
y = [0, -1, 0, 1]
yc, xc = polygon_clip(y, x, 0, 0, 1, 1)
assert_equal(polygon_area(yc, xc), 0.5)
x = [-1, 1.5, 1.5, -1]
y = [0.5, 0.5, 1.5, 1.5]
yc, xc = polygon_clip(y, x, 0, 0, 1, 1)
assert_equal(polygon_area(yc, xc), 0.5)
| ["def","test_poly_clip","(",")",":","x","=","[","0",",","1",",","2",",","1","]","y","=","[","0",",","-1",",","0",",","1","]","yc",",","xc","=","polygon_clip","(","y",",","x",",","0",",","0",",","1",",","1",")","assert_equal","(","polygon_area","(","yc",",","xc",")",",","0.5",")","x","=","[","-1",",","1.5",",","1.5",",","-1","]","y","=","[","0.5",",","0.5",",","1.5",",","1.5","]","yc",",","xc","=","polygon_clip","(","y",",","x",",","0",",","0",",","1",",","1",")","assert_equal","(","polygon_area","(","yc",",","xc",")",",","0.5",")"] | 55 | 65 | null | test_geometry.py | scikit-image/skimage/_shared/tests/test_geometry.py | import pytest
from skimage._shared._geometry import polygon_clip, polygon_area
import numpy
from numpy.testing import assert_equal, assert_almost_equal | 7 | null | 4 | null | null | 3 | null | Use image node_id 2 for calling a global function with example usage: test_poly_clip() without return types | 107 | node_id 2 | 344,275 |
test_hand_clip | global | null | false | null | null | null | null | null | def test_hand_clip():
(r0, c0, r1, c1) = (1.0, 1.5, 2.1, 2.5)
clip_r, clip_c = polygon_clip(
hand[:, 1], hand[:, 0], r0, c0, r1, c1
)
assert_equal(clip_r.size, 19)
assert_equal(clip_r[0], clip_r[-1])
assert_equal(clip_c[0], clip_c[-1])
(r0, c0, r1, c1) = (1.0, 1.5, 1.7, 2.5)
clip_r, clip_c = polygon_clip(
hand[:, 1], hand[:, 0], r0, c0, r1, c1
)
assert_equal(clip_r.size, 6)
(r0, c0, r1, c1) = (1.0, 1.5, 1.5, 2.5)
clip_r, clip_c = polygon_clip(
hand[:, 1], hand[:, 0], r0, c0, r1, c1
)
assert_equal(clip_r.size, 5)
| ["def","test_hand_clip","(",")",":","(","r0",",","c0",",","r1",",","c1",")","=","(","1.0",",","1.5",",","2.1",",","2.5",")","clip_r",",","clip_c","=","polygon_clip","(","hand","[",":",",","1","]",",","hand","[",":",",","0","]",",","r0",",","c0",",","r1",",","c1",")","assert_equal","(","clip_r.size",",","19",")","assert_equal","(","clip_r","[","0","]",",","clip_r","[","-1","]",")","assert_equal","(","clip_c","[","0","]",",","clip_c","[","-1","]",")","(","r0",",","c0",",","r1",",","c1",")","=","(","1.0",",","1.5",",","1.7",",","2.5",")","clip_r",",","clip_c","=","polygon_clip","(","hand","[",":",",","1","]",",","hand","[",":",",","0","]",",","r0",",","c0",",","r1",",","c1",")","assert_equal","(","clip_r.size",",","6",")","(","r0",",","c0",",","r1",",","c1",")","=","(","1.0",",","1.5",",","1.5",",","2.5",")","clip_r",",","clip_c","=","polygon_clip","(","hand","[",":",",","1","]",",","hand","[",":",",","0","]",",","r0",",","c0",",","r1",",","c1",")","assert_equal","(","clip_r.size",",","5",")"] | 68 | 81 | null | test_geometry.py | scikit-image/skimage/_shared/tests/test_geometry.py | import pytest
from skimage._shared._geometry import polygon_clip, polygon_area
import numpy
from numpy.testing import assert_equal, assert_almost_equal | 7 | null | 4 | null | null | 3 | null | Use image node_id 3 for calling a global function with example usage: test_hand_clip() without return types | 107 | node_id 3 | 344,276 |
test_call_works_when_ngram_size_is_1 | TestNGramRepeatBlockProcessor | null | true | self | null | null | null | null | null | def test_call_works_when_ngram_size_is_1(self) -> None:
seq1 = torch.tensor([1, 3, 0], device=device)
seq2 = torch.tensor([2, 1, 1], device=device)
seqs = torch.stack([seq1, seq2])
probs1 = torch.tensor([0.1, 0.1, 0.1, 0.1], device=device)
probs2 = torch.tensor([0.1, 0.1, 0.1, 0.1], device=device)
probs = torch.stack([probs1, probs2])
processor = NGramRepeatBlockProcessor(ngram_size=1)
processor(seqs, probs)
assert_close(probs[0], [0.0, 0.0, 0.1, 0.0])
assert_close(probs[1], [0.1, 0.0, 0.0, 0.1])
| ["def","test_call_works_when_ngram_size_is_1","(","self",")","-",">","None",":","seq1","=","torch.tensor","(","[","1",",","3",",","0","]",",","device=device",")","seq2","=","torch.tensor","(","[","2",",","1",",","1","]",",","device=device",")","seqs","=","torch.stack","(","[","seq1",",","seq2","]",")","probs1","=","torch.tensor","(","[","0.1",",","0.1",",","0.1",",","0.1","]",",","device=device",")","probs2","=","torch.tensor","(","[","0.1",",","0.1",",","0.1",",","0.1","]",",","device=device",")","probs","=","torch.stack","(","[","probs1",",","probs2","]",")","processor","=","NGramRepeatBlockProcessor","(","ngram_size=1",")","processor","(","seqs",",","probs",")","assert_close","(","probs","[","0","]",",","[","0.0",",","0.0",",","0.1",",","0.0","]",")","assert_close","(","probs","[","1","]",",","[","0.1",",","0.0",",","0.0",",","0.1","]",")"] | 35 | 51 | null | test_step_processor.py | fairseq2/tests/unit/generation/test_step_processor.py | import torch
from fairseq2.generation import NGramRepeatBlockProcessor
from tests.common import assert_close, device | 7 | 1 | 3 | 0 | null | 0 | 3 | Use image node_id 2 for calling the TestNGramRepeatBlockProcessor obj's underlying member method code with example usage: obj.test_call_works_when_ngram_size_is_1() without return types | 185 | node_id 2 | 133,804 |
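The first assertion in the row above can be replayed on CPU: the processor mutates `probs` in place, zeroing every token that would repeat a unigram already present in its sequence.

```python
import torch

from fairseq2.generation import NGramRepeatBlockProcessor

seqs = torch.tensor([[1, 3, 0], [2, 1, 1]])
probs = torch.full((2, 4), 0.1)

NGramRepeatBlockProcessor(ngram_size=1)(seqs, probs)
print(probs)  # row 0 keeps only index 2; row 1 keeps indices 0 and 3
```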
get_credentials | UsernamePasswordCredentialProvider | CredentialProvider | true | self | Simple implementation of CredentialProvider that just wraps static
username and password. | ["Simple","implementation","of","CredentialProvider","that","just","wraps","static","username","and","password","."] | null | null | self,self, self | def get_credentials(self):
if self.username:
return self.username, self.password
return (self.password,)
| ["def","get_credentials","(","self",")",":","if","self.username",":","return","self.username",",","self.password","return","(","self.password",",",")"] | 23 | 26 | null | credentials.py | redis-py/redis/credentials.py | from typing import Optional, Tuple, Union | 7 | 2 | 1 | 1 | 1 | 0 | 2 | Use image node_id 2 for calling the UsernamePasswordCredentialProvider obj's underlying member method code with example usage: obj.get_credentials() and returns: self, self, self | 179 | node_id 2 | 339,298 |
__init__ | UsernamePasswordCredentialProvider | CredentialProvider | true | self,username,password | Simple implementation of CredentialProvider that just wraps static
username and password. | ["Simple","implementation","of","CredentialProvider","that","just","wraps","static","username","and","password","."] | null | null | UsernamePasswordCredentialProvider | def __init__(
self,
username: Optional[str] = None,
password: Optional[str] = None,
):
self.username = username or ""
self.password = password or ""
| ["def","__init__","(","self",",","username",":","Optional","[","str","]","=","None",",","password",":","Optional","[","str","]","=","None",",",")",":","self.username","=","username","or","``","''","self.password","=","password","or","``","''"] | 19 | 21 | null | credentials.py | redis-py/redis/credentials.py | from typing import Optional, Tuple, Union | 7 | 2 | 1 | 1 | 1 | 0 | 2 | Use image node_id 1 to create a new UsernamePasswordCredentialProvider object from inherited base classes: CredentialProvider with example: obj = UsernamePasswordCredentialProvider(username, password) | 200 | node_id 1 | 339,297 |
get_credentials | CredentialProvider | null | true | self | Credentials Provider. | ["Credentials","Provider","."] | null | null | null | def get_credentials(self) -> Union[Tuple[str], Tuple[str, str]]:
raise NotImplementedError("get_credentials must be implemented")
| ["def","get_credentials","(","self",")","-",">","Union","[","Tuple","[","str","]",",","Tuple","[","str",",","str","]","]",":","raise","NotImplementedError","(","``","get_credentials","must","be","implemented","''",")"] | 9 | 10 | credentials.py | redis-py/redis/credentials.py | from typing import Optional, Tuple, Union | 7 | 2 | 1 | 1 | null | 0 | 1 | Use image node_id 1 for calling the CredentialProvider obj's underlying member method code with example usage: obj.get_credentials() without return types | 153 | node_id 1 | 339,296 |
fake_decode_token | global | null | false | token | null | null | null | null | User | def fake_decode_token(token):
return User(
username=token + "fakedecoded",
email="[email protected]",
full_name="John Doe",
)
| ["def","fake_decode_token","(","token",")",":","return","User","(","username=token","+","``","fakedecoded","''",",","email=","''","john","@","example.com","''",",","full_name=","''","John","Doe","''",",",")"] | 19 | 22 | null | tutorial002.py | fastapi/docs_src/security/tutorial002.py | from typing import Union
from fastapi import Depends, FastAPI
from fastapi.security import OAuth2PasswordBearer
from pydantic import BaseModel | 7 | null | 4 | null | null | 1 | null | Use image node_id 1 for calling a global function with example usage: fake_decode_token(token) and returns: User | 112 | node_id 1 | 135,730 |
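The surrounding tutorial wires `fake_decode_token` behind an `OAuth2PasswordBearer` dependency; a compressed sketch of that wiring follows (the route body is simplified relative to the tutorial):

```python
from fastapi import Depends, FastAPI
from fastapi.security import OAuth2PasswordBearer

app = FastAPI()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")


@app.get("/users/me")
async def read_users_me(token: str = Depends(oauth2_scheme)):
    # In the tutorial this calls fake_decode_token(token) to build a User.
    return {"token": token}
```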
downgrade | global | null | false | null | null | null | null | null | def downgrade():
constraint = (
find_constraint_name(False)
or "fk_columns_datasource_name_datasources"
)
with op.batch_alter_table(
"columns", naming_convention=naming_convention
) as batch_op:
batch_op.drop_constraint(constraint, type_="foreignkey")
batch_op.create_foreign_key(
"fk_columns_column_name_datasources",
"datasources",
["column_name"],
["datasource_name"],
)
| ["def","downgrade","(",")",":","constraint","=","(","find_constraint_name","(","False",")","or","``","fk_columns_datasource_name_datasources","''",")","with","op.batch_alter_table","(","``","columns","''",",","naming_convention=naming_convention",")","as","batch_op",":","batch_op.drop_constraint","(","constraint",",","type_=","''","foreignkey","''",")","batch_op.create_foreign_key","(","``","fk_columns_column_name_datasources","''",",","``","datasources","''",",","[","``","column_name","''","]",",","[","``","datasource_name","''","]",",",")"] | 66 | 77 | null | 2016-05-27_15-03_1226819ee0e3_fix_wrong_constraint_on_table_columns.py | superset/superset/migrations/versions/2016-05-27_15-03_1226819ee0e3_fix_wrong_constraint_on_table_columns.py | import logging
from alembic import op
from superset import db
from superset.utils.core import generic_find_constraint_name | 7 | null | 4 | null | null | 3 | null | Use image node_id 3 for calling a global function with example usage: downgrade() without return types | 102 | node_id 3 | 436,852 |
llama_tokenizer | global | null | false | null | null | null | null | None,LlamaTokenizer | def llama_tokenizer():
try:
return LlamaTokenizer.from_pretrained(
"meta-llama/Llama-2-7b-hf"
)
except OSError as e:
if unskip_missing_tokenizer:
raise e
return None
| ["def","llama_tokenizer","(",")",":","try",":","return","LlamaTokenizer.from_pretrained","(","``","meta-llama\/Llama-2-7b-hf","''",")","except","OSError","as","e",":","if","unskip_missing_tokenizer",":","raise","e","return","None"] | 13 | 19 | null | conftest.py | llama-recipes/tests/conftest.py | import pytest
from transformers import LlamaTokenizer | 7 | null | 2 | null | null | 5 | null | Use image node_id 1 for calling a global function with example usage: llama_tokenizer() and returns: None, LlamaTokenizer | 121 | node_id 1 | 166,993 |
setup_tokenizer | global | null | false | llama_tokenizer | null | null | null | null | _helper | def setup_tokenizer(llama_tokenizer):
def _helper(tokenizer_mock):
# Align with Llama 2 tokenizer
tokenizer_mock.from_pretrained.return_value = llama_tokenizer
return _helper
| ["def","setup_tokenizer","(","llama_tokenizer",")",":","def","_helper","(","tokenizer_mock",")",":","#","Align","with","Llama","2","tokenizer","tokenizer_mock.from_pretrained.return_value","=","llama_tokenizer","return","_helper"] | 23 | 28 | null | conftest.py | llama-recipes/tests/conftest.py | import pytest
from transformers import LlamaTokenizer | 7 | null | 2 | null | null | 5 | null | Use image node_id 2 for calling a global function with example usage: setup_tokenizer(llama_tokenizer) and returns: _helper | 123 | node_id 2 | 166,994 |
skip_if_tokenizer_is_missing | global | null | false | request,llama_tokenizer | null | null | null | null | null | def skip_if_tokenizer_is_missing(request, llama_tokenizer):
if (
request.node.get_closest_marker("skip_missing_tokenizer")
and not unskip_missing_tokenizer
):
if llama_tokenizer is None:
pytest.skip(ACCESS_ERROR_MSG)
| ["def","skip_if_tokenizer_is_missing","(","request",",","llama_tokenizer",")",":","if","(","request.node.get_closest_marker","(","``","skip_missing_tokenizer","''",")","and","not","unskip_missing_tokenizer",")",":","if","llama_tokenizer","is","None",":","pytest.skip","(","ACCESS_ERROR_MSG",")"] | 32 | 35 | null | conftest.py | llama-recipes/tests/conftest.py | import pytest
from transformers import LlamaTokenizer | 7 | null | 2 | null | null | 5 | null | Use image node_id 3 for calling a global function with example usage: skip_if_tokenizer_is_missing(request, llama_tokenizer) without return types | 145 | node_id 3 | 166,995 |
pytest_addoption | global | null | false | parser | null | null | null | null | null | def pytest_addoption(parser):
parser.addoption(
"--unskip-missing-tokenizer",
action="store_true",
default=False,
help="disable skip missing tokenizer",
)
| ["def","pytest_addoption","(","parser",")",":","parser.addoption","(","``","--","unskip-missing-tokenizer","''",",","action=","''","store_true","''",",","default=False",",","help=","''","disable","skip","missing","tokenizer","''",",",")"] | 38 | 42 | null | conftest.py | llama-recipes/tests/conftest.py | import pytest
from transformers import LlamaTokenizer | 7 | null | 2 | null | null | 5 | null | Use image node_id 4 for calling a global function with example usage: pytest_addoption(parser) without return types | 115 | node_id 4 | 166,996 |
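Taken together, the four conftest hooks above form one switch: a CLI flag flips a module-level global that fixtures consult before skipping. A condensed sketch of that flow (simplified; the real file also defines the tokenizer fixtures):

```python
unskip_missing_tokenizer = False


def pytest_addoption(parser):
    parser.addoption("--unskip-missing-tokenizer", action="store_true",
                     default=False, help="disable skip missing tokenizer")


def pytest_cmdline_preparse(config, args):
    # Runs before option parsing, hence the raw membership test on args.
    global unskip_missing_tokenizer
    unskip_missing_tokenizer = "--unskip-missing-tokenizer" in args
```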
upgrade | global | null | false | null | null | null | null | null | def upgrade():
try:
constraint = find_constraint_name()
with op.batch_alter_table(
"columns", naming_convention=naming_convention
) as batch_op:
if constraint:
batch_op.drop_constraint(
constraint, type_="foreignkey"
)
batch_op.create_foreign_key(
"fk_columns_datasource_name_datasources",
"datasources",
["datasource_name"],
["datasource_name"],
)
except:
logging.warning(
"Could not find or drop constraint on `columns`"
)
| ["def","upgrade","(",")",":","try",":","constraint","=","find_constraint_name","(",")","with","op.batch_alter_table","(","``","columns","''",",","naming_convention=naming_convention",")","as","batch_op",":","if","constraint",":","batch_op.drop_constraint","(","constraint",",","type_=","''","foreignkey","''",")","batch_op.create_foreign_key","(","``","fk_columns_datasource_name_datasources","''",",","``","datasources","''",",","[","``","datasource_name","''","]",",","[","``","datasource_name","''","]",",",")","except",":","logging.warning","(","``","Could","not","find","or","drop","constraint","on","`","columns","`","``",")"] | 48 | 63 | null | 2016-05-27_15-03_1226819ee0e3_fix_wrong_constraint_on_table_columns.py | superset/superset/migrations/versions/2016-05-27_15-03_1226819ee0e3_fix_wrong_constraint_on_table_columns.py | import logging
from alembic import op
from superset import db
from superset.utils.core import generic_find_constraint_name | 7 | null | 4 | null | null | 3 | null | Use image node_id 2 for calling a global function with example usage: upgrade() without return types | 100 | node_id 2 | 436,851 |
setup | global | null | false | null | null | null | null | null | def setup():
# Remove any environment variable if it exists
old_strictness = os.environ.pop(
"SKIMAGE_TEST_STRICT_WARNINGS", None
)
yield
# Add the user's desired strictness
if old_strictness is not None:
os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = old_strictness
| ["def","setup","(",")",":","#","Remove","any","environment","variable","if","it","exists","old_strictness","=","os.environ.pop","(","``","SKIMAGE_TEST_STRICT_WARNINGS","''",",","None",")","yield","#","Add","the","user","'s","desired","strictness","if","old_strictness","is","not","None",":","os.environ","[","``","SKIMAGE_TEST_STRICT_WARNINGS","''","]","=","old_strictness"] | 7 | 13 | null | test_warnings.py | scikit-image/skimage/_shared/tests/test_warnings.py | import os
from skimage._shared._warnings import expected_warnings
import pytest | 7 | null | 3 | null | null | 4 | null | Use image node_id 1 for calling a global function with example usage: setup() without return types | 98 | node_id 1 | 344,312 |
test_benchmark_infra_runs | TestDynamoBenchmark | unittest | true | self | null | null | Basic smoke test that TorchBench runs.
This test is mainly meant to check that our setup in fbcode
doesn't break.
If you see a failure here related to missing CPP headers, then
you likely need to update the resources list in:
//caffe2:inductor | ["Basic","smoke","test","that","TorchBench","runs",".","This","test","is","mainly","meant","to","check","that","our","setup","in","fbcode","does","n't","break",".","If","you","see","a","failure","here","related","to","missing","CPP","headers",",","then","you","likely","need","to","update","the","resources","list","in",":","\/\/caffe2",":","inductor"] | null | def test_benchmark_infra_runs(self) -> None:
"""
Basic smoke test that TorchBench runs.
This test is mainly meant to check that our setup in fbcode
doesn't break.
If you see a failure here related to missing CPP headers, then
you likely need to update the resources list in:
//caffe2:inductor
"""
original_dir = setup_torchbench_cwd()
try:
args = parse_args(
[
"-dcpu",
"--inductor",
"--performance",
"--only=BERT_pytorch",
"-n1",
"--batch-size=1",
]
)
run(TorchBenchmarkRunner(), args, original_dir)
finally:
os.chdir(original_dir)
| ["def","test_benchmark_infra_runs","(","self",")","-",">","None",":","``","''","''","Basic","smoke","test","that","TorchBench","runs",".","This","test","is","mainly","meant","to","check","that","our","setup","in","fbcode","does","n't","break",".","If","you","see","a","failure","here","related","to","missing","CPP","headers",",","then","you","likely","need","to","update","the","resources","list","in",":","\/\/caffe2",":","inductor","``","''","''","original_dir","=","setup_torchbench_cwd","(",")","try",":","args","=","parse_args","(","[","``","-dcpu","''",",","``","--","inductor","''",",","``","--","performance","''",",","``","--","only=BERT_pytorch","''",",","``","-n1","''",",","``","--","batch-size=1","''",",","]",")","run","(","TorchBenchmarkRunner","(",")",",","args",",","original_dir",")","finally",":","os.chdir","(","original_dir",")"] | 19 | 44 | null | test.py | pytorch/benchmarks/dynamo/test.py | import os
import unittest
from .common import parse_args, run
from .torchbench import setup_torchbench_cwd, TorchBenchmarkRunner | 7 | 1 | 4 | 1 | 1 | 0 | 1 | Use image node_id 1 for calling the TestDynamoBenchmark obj's underlying member method code with example usage: obj.test_benchmark_infra_runs() without return types | 164 | node_id 1 | 271,340 |
forward | C2SimpleNet | null | true | self,niters | This module constructs a net with 'op_name' operator. The net consist
a series of such operator.
It initializes the workspace with input blob equal to the number of parameters
needed for the op.
Provides forward method to run the net niter times. | ["This","module","constructs","a","net","with","'op_name","'","operator",".","The","net","consist","a","series","of","such","operator",".","It","initializes","the","workspace","with","input","blob","equal","to","the","number","of","parameters","needed","for","the","op",".","Provides","forward","method","to","run","the","net","niter","times","."] | null | null | null | def forward(self, niters):
workspace.RunNet(self.net, niters, False)
| ["def","forward","(","self",",","niters",")",":","workspace.RunNet","(","self.net",",","niters",",","False",")"] | 43 | 44 | null | C2Module.py | pytorch/benchmarks/framework_overhead_benchmark/C2Module.py | import numpy
from caffe2.python import core, workspace
from utils import NUM_LOOP_ITERS | 7 | 1 | 3 | 0 | null | 1 | 2 | Use image node_id 2 for calling the C2SimpleNet obj's underlying member method code with example usage: obj.forward(niters) without return types | 144 | node_id 2 | 271,495 |
read_items | global | null | false | null | null | null | null | list | def read_items() -> List[Item]:
return [
Item(
name="Portal Gun",
description="Device to travel through the multi-rick-verse",
),
Item(name="Plumbus"),
]
| ["def","read_items","(",")","-",">","List","[","Item","]",":","return","[","Item","(","name=","''","Portal","Gun","''",",","description=","''","Device","to","travel","through","the","multi-rick-verse","''",",",")",",","Item","(","name=","''","Plumbus","''",")",",","]"] | 21 | 28 | null | tutorial001.py | fastapi/docs_src/separate_openapi_schemas/tutorial001.py | from typing import List, Union
from fastapi import FastAPI
from pydantic import BaseModel | 7 | null | 3 | null | null | 2 | null | Use image node_id 2 for calling a global function with example usage: read_items() and returns: list | 100 | node_id 2 | 135,815 |
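The `Item` model assumed by `read_items` above is defined earlier in the tutorial file; it is a two-field Pydantic model whose optional `description` is what makes the input and output OpenAPI schemas differ. A sketch consistent with the row's usage (assuming Pydantic v2 for `model_dump`):

```python
from typing import Union

from pydantic import BaseModel


class Item(BaseModel):
    name: str
    description: Union[str, None] = None


print(Item(name="Plumbus").model_dump())  # description defaults to None
```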
upgrade | global | null | false | null | null | null | null | null | def upgrade():
"""Apply Add ``RenderedTaskInstanceFields`` table"""
json_type = sa.JSON
conn = op.get_bind()
if conn.dialect.name != "postgresql":
# Mysql 5.7+/MariaDB 10.2.3 has JSON support. Rather than checking for
# versions, check for the function existing.
try:
conn.execute(text("SELECT JSON_VALID(1)")).fetchone()
except (sa.exc.OperationalError, sa.exc.ProgrammingError):
json_type = sa.Text
op.create_table(
TABLE_NAME,
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("task_id", StringID(), nullable=False),
sa.Column(
"execution_date",
sa.TIMESTAMP(timezone=True),
nullable=False,
),
sa.Column("rendered_fields", json_type(), nullable=False),
sa.PrimaryKeyConstraint(
"dag_id", "task_id", "execution_date"
),
)
| ["def","upgrade","(",")",":","``","''","''","Apply","Add","``","RenderedTaskInstanceFields","``","table","''","''","''","json_type","=","sa.JSON","conn","=","op.get_bind","(",")","if","conn.dialect.name","!","=","``","postgresql","''",":","#","Mysql","5.7+\/MariaDB","10.2.3","has","JSON","support",".","Rather","than","checking","for","#","versions",",","check","for","the","function","existing",".","try",":","conn.execute","(","text","(","``","SELECT","JSON_VALID","(","1",")","''",")",")",".fetchone","(",")","except","(","sa.exc.OperationalError",",","sa.exc.ProgrammingError",")",":","json_type","=","sa.Text","op.create_table","(","TABLE_NAME",",","sa.Column","(","``","dag_id","''",",","StringID","(",")",",","nullable=False",")",",","sa.Column","(","``","task_id","''",",","StringID","(",")",",","nullable=False",")",",","sa.Column","(","``","execution_date","''",",","sa.TIMESTAMP","(","timezone=True",")",",","nullable=False",",",")",",","sa.Column","(","``","rendered_fields","''",",","json_type","(",")",",","nullable=False",")",",","sa.PrimaryKeyConstraint","(","``","dag_id","''",",","``","task_id","''",",","``","execution_date","''",")",",",")"] | 43 | 63 | null | 0053_1_10_10_add_rendered_task_instance_fields_table.py | airflow/airflow/migrations/versions/0053_1_10_10_add_rendered_task_instance_fields_table.py | from __future__ import annotations
import sqlalchemy
from alembic import op
from sqlalchemy import text
from airflow.migrations.db_types import StringID | 7 | null | 5 | null | null | 2 | null | Use image node_id 1 for calling a global function with example usage: upgrade() without return types | 100 | node_id 1 | 2,595 |
has_unique_constraint | global | null | false | constraint_name,table_name | null | null | null | null | unknown | def has_unique_constraint(
constraint_name: str, table_name: str
) -> bool:
bind = op.get_bind()
inspector = Inspector.from_engine(bind)
unique_constraints = inspector.get_unique_constraints(table_name)
return constraint_name in {
constraint["name"] for constraint in unique_constraints
}
| ["def","has_unique_constraint","(","constraint_name",":","str",",","table_name",":","str",")","-",">","bool",":","bind","=","op.get_bind","(",")","inspector","=","Inspector.from_engine","(","bind",")","unique_constraints","=","inspector.get_unique_constraints","(","table_name",")","return","constraint_name","in","{","constraint","[","``","name","''","]","for","constraint","in","unique_constraints","}"] | 129 | 133 | null | 2020-11-04_11-06_49b5a32daba5_add_report_schedules.py | superset/superset/migrations/versions/2020-11-04_11-06_49b5a32daba5_add_report_schedules.py | import sqlalchemy
from alembic import op
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.exc import OperationalError | 7 | null | 4 | null | null | 3 | null | Use image node_id 2 for calling a global function with example usage: has_unique_constraint(constraint_name, table_name) and returns: unknown | 141 | node_id 2 | 437,189 |
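The probe in `has_unique_constraint` above can be exercised against an in-memory SQLite database: reflection returns named unique constraints parsed from the DDL. The table and constraint names mirror the migration; the `inspect()` call is the modern SQLAlchemy 1.4/2.0 spelling of the row's `Inspector.from_engine`:

```python
from sqlalchemy import create_engine, inspect, text

engine = create_engine("sqlite://")
with engine.begin() as conn:
    conn.execute(text(
        "CREATE TABLE report_schedule ("
        "id INTEGER PRIMARY KEY, name TEXT, "
        "CONSTRAINT uq_report_schedule_name UNIQUE (name))"
    ))

names = {
    c["name"]
    for c in inspect(engine).get_unique_constraints("report_schedule")
}
print("uq_report_schedule_name" in names)  # True
```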
upgrade | global | null | false | null | null | null | null | null | def upgrade():
op.create_table(
"report_schedule",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("type", sa.String(length=50), nullable=False),
sa.Column(
"name", sa.String(length=150), nullable=False, unique=True
),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("context_markdown", sa.Text(), nullable=True),
sa.Column(
"active", sa.Boolean(), default=True, nullable=True
),
sa.Column("crontab", sa.String(length=50), nullable=False),
sa.Column("sql", sa.Text(), nullable=True),
sa.Column("chart_id", sa.Integer(), nullable=True),
sa.Column("dashboard_id", sa.Integer(), nullable=True),
sa.Column("database_id", sa.Integer(), nullable=True),
sa.Column("last_eval_dttm", sa.DateTime(), nullable=True),
sa.Column("last_state", sa.String(length=50), nullable=True),
sa.Column("last_value", sa.Float(), nullable=True),
sa.Column("last_value_row_json", sa.Text(), nullable=True),
sa.Column(
"validator_type", sa.String(length=100), nullable=True
),
sa.Column(
"validator_config_json",
sa.Text(),
default="{}",
nullable=True,
),
sa.Column(
"log_retention", sa.Integer(), nullable=True, default=90
),
sa.Column(
"grace_period",
sa.Integer(),
nullable=True,
default=60 * 60 * 4,
),
# Audit Mixin
sa.Column("created_on", sa.DateTime(), nullable=True),
sa.Column("changed_on", sa.DateTime(), nullable=True),
sa.Column("created_by_fk", sa.Integer(), nullable=True),
sa.Column("changed_by_fk", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["chart_id"], ["slices.id"]),
sa.ForeignKeyConstraint(["dashboard_id"], ["dashboards.id"]),
sa.ForeignKeyConstraint(["database_id"], ["dbs.id"]),
sa.ForeignKeyConstraint(["changed_by_fk"], ["ab_user.id"]),
sa.ForeignKeyConstraint(["created_by_fk"], ["ab_user.id"]),
sa.PrimaryKeyConstraint("id"),
)
try:
op.create_unique_constraint(
"uq_report_schedule_name", "report_schedule", ["name"]
)
except Exception:
# Expected to fail on SQLite
pass
op.create_index(
op.f("ix_report_schedule_active"),
"report_schedule",
["active"],
unique=False,
)
op.create_table(
"report_execution_log",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("scheduled_dttm", sa.DateTime(), nullable=False),
sa.Column("start_dttm", sa.DateTime(), nullable=True),
sa.Column("end_dttm", sa.DateTime(), nullable=True),
sa.Column("value", sa.Float(), nullable=True),
sa.Column("value_row_json", sa.Text(), nullable=True),
sa.Column("state", sa.String(length=50), nullable=False),
sa.Column("error_message", sa.Text(), nullable=True),
sa.Column("report_schedule_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["report_schedule_id"], ["report_schedule.id"]
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"report_recipient",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("type", sa.String(length=50), nullable=False),
sa.Column(
"recipient_config_json",
sa.Text(),
default="{}",
nullable=True,
),
sa.Column("report_schedule_id", sa.Integer(), nullable=False),
# Audit Mixin
sa.Column("created_on", sa.DateTime(), nullable=True),
sa.Column("changed_on", sa.DateTime(), nullable=True),
sa.Column("created_by_fk", sa.Integer(), nullable=True),
sa.Column("changed_by_fk", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["report_schedule_id"], ["report_schedule.id"]
),
sa.ForeignKeyConstraint(["changed_by_fk"], ["ab_user.id"]),
sa.ForeignKeyConstraint(["created_by_fk"], ["ab_user.id"]),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"report_schedule_user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=False),
sa.Column("report_schedule_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["report_schedule_id"],
["report_schedule.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["ab_user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
| ["def","upgrade","(",")",":","op.create_table","(","``","report_schedule","''",",","sa.Column","(","``","id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.Column","(","``","type","''",",","sa.String","(","length=50",")",",","nullable=False",")",",","sa.Column","(","``","name","''",",","sa.String","(","length=150",")",",","nullable=False",",","unique=True",")",",","sa.Column","(","``","description","''",",","sa.Text","(",")",",","nullable=True",")",",","sa.Column","(","``","context_markdown","''",",","sa.Text","(",")",",","nullable=True",")",",","sa.Column","(","``","active","''",",","sa.Boolean","(",")",",","default=True",",","nullable=True",")",",","sa.Column","(","``","crontab","''",",","sa.String","(","length=50",")",",","nullable=False",")",",","sa.Column","(","``","sql","''",",","sa.Text","(",")",",","nullable=True",")",",","sa.Column","(","``","chart_id","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.Column","(","``","dashboard_id","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.Column","(","``","database_id","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.Column","(","``","last_eval_dttm","''",",","sa.DateTime","(",")",",","nullable=True",")",",","sa.Column","(","``","last_state","''",",","sa.String","(","length=50",")",",","nullable=True",")",",","sa.Column","(","``","last_value","''",",","sa.Float","(",")",",","nullable=True",")",",","sa.Column","(","``","last_value_row_json","''",",","sa.Text","(",")",",","nullable=True",")",",","sa.Column","(","``","validator_type","''",",","sa.String","(","length=100",")",",","nullable=True",")",",","sa.Column","(","``","validator_config_json","''",",","sa.Text","(",")",",","default=","''","{","}","''",",","nullable=True",",",")",",","sa.Column","(","``","log_retention","''",",","sa.Integer","(",")",",","nullable=True",",","default=90",")",",","sa.Column","(","``","grace_period","''",",","sa.Integer","(",")",",","nullable=True",",","default=60","*","60","*","4",",",")",",","#","Audit","Mixin","sa.Column","(","``","created_on","''",",","sa.DateTime","(",")",",","nullable=True",")",",","sa.Column","(","``","changed_on","''",",","sa.DateTime","(",")",",","nullable=True",")",",","sa.Column","(","``","created_by_fk","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.Column","(","``","changed_by_fk","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.ForeignKeyConstraint","(","[","``","chart_id","''","]",",","[","``","slices.id","''","]",")",",","sa.ForeignKeyConstraint","(","[","``","dashboard_id","''","]",",","[","``","dashboards.id","''","]",")",",","sa.ForeignKeyConstraint","(","[","``","database_id","''","]",",","[","``","dbs.id","''","]",")",",","sa.ForeignKeyConstraint","(","[","``","changed_by_fk","''","]",",","[","``","ab_user.id","''","]",")",",","sa.ForeignKeyConstraint","(","[","``","created_by_fk","''","]",",","[","``","ab_user.id","''","]",")",",","sa.PrimaryKeyConstraint","(","``","id","''",")",",",")","try",":","op.create_unique_constraint","(","``","uq_report_schedule_name","''",",","``","report_schedule","''",",","[","``","name","''","]",")","except","Exception",":","#","Expected","to","fail","on","SQLite","pass","op.create_index","(","op.f","(","``","ix_report_schedule_active","''",")",",","``","report_schedule","''",",","[","``","active","''","]",",","unique=False",",",")","op.create_table","(","``","report_execution_log","''",",","sa.Column","(","``","id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.Column","(","``","scheduled_dttm
","''",",","sa.DateTime","(",")",",","nullable=False",")",",","sa.Column","(","``","start_dttm","''",",","sa.DateTime","(",")",",","nullable=True",")",",","sa.Column","(","``","end_dttm","''",",","sa.DateTime","(",")",",","nullable=True",")",",","sa.Column","(","``","value","''",",","sa.Float","(",")",",","nullable=True",")",",","sa.Column","(","``","value_row_json","''",",","sa.Text","(",")",",","nullable=True",")",",","sa.Column","(","``","state","''",",","sa.String","(","length=50",")",",","nullable=False",")",",","sa.Column","(","``","error_message","''",",","sa.Text","(",")",",","nullable=True",")",",","sa.Column","(","``","report_schedule_id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.ForeignKeyConstraint","(","[","``","report_schedule_id","''","]",",","[","``","report_schedule.id","''","]",")",",","sa.PrimaryKeyConstraint","(","``","id","''",")",",",")","op.create_table","(","``","report_recipient","''",",","sa.Column","(","``","id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.Column","(","``","type","''",",","sa.String","(","length=50",")",",","nullable=False",")",",","sa.Column","(","``","recipient_config_json","''",",","sa.Text","(",")",",","default=","''","{","}","''",",","nullable=True",",",")",",","sa.Column","(","``","report_schedule_id","''",",","sa.Integer","(",")",",","nullable=False",")",",","#","Audit","Mixin","sa.Column","(","``","created_on","''",",","sa.DateTime","(",")",",","nullable=True",")",",","sa.Column","(","``","changed_on","''",",","sa.DateTime","(",")",",","nullable=True",")",",","sa.Column","(","``","created_by_fk","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.Column","(","``","changed_by_fk","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.ForeignKeyConstraint","(","[","``","report_schedule_id","''","]",",","[","``","report_schedule.id","''","]",")",",","sa.ForeignKeyConstraint","(","[","``","changed_by_fk","''","]",",","[","``","ab_user.id","''","]",")",",","sa.ForeignKeyConstraint","(","[","``","created_by_fk","''","]",",","[","``","ab_user.id","''","]",")",",","sa.PrimaryKeyConstraint","(","``","id","''",")",",",")","op.create_table","(","``","report_schedule_user","''",",","sa.Column","(","``","id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.Column","(","``","user_id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.Column","(","``","report_schedule_id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.ForeignKeyConstraint","(","[","``","report_schedule_id","''","]",",","[","``","report_schedule.id","''","]",",",")",",","sa.ForeignKeyConstraint","(","[","``","user_id","''","]",",","[","``","ab_user.id","''","]",",",")",",","sa.PrimaryKeyConstraint","(","``","id","''",")",",",")"] | 35 | 126 | null | 2020-11-04_11-06_49b5a32daba5_add_report_schedules.py | superset/superset/migrations/versions/2020-11-04_11-06_49b5a32daba5_add_report_schedules.py | import sqlalchemy
from alembic import op
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.exc import OperationalError | 7 | null | 4 | null | null | 3 | null | Use image node_id 1 for calling a global function with example usage: upgrade() without return types | 100 | node_id 1 | 437,188 |
upgrade | global function
def upgrade():
# We previously had a KnownEvent's table, but we deleted the table without
# a down migration to remove it (so we didn't delete anyone's data if they
    # were happening to use the feature).
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if "known_event" in tables:
for fkey in inspector.get_foreign_keys(
table_name="known_event", referred_table="users"
):
if fkey["name"]:
with op.batch_alter_table(
table_name="known_event"
) as bop:
bop.drop_constraint(
fkey["name"], type_="foreignkey"
)
if "chart" in tables:
op.drop_table(
"chart",
)
if "users" in tables:
op.drop_table("users")
| ["def","upgrade","(",")",":","#","We","previously","had","a","KnownEvent","'s","table",",","but","we","deleted","the","table","without","#","a","down","migration","to","remove","it","(","so","we","did","n't","delete","anyone","'s","data","if","they","#","were","happening","to","use","the","feature",".","#","#","But","before","we","can","delete","the","users","table","we","need","to","drop","the","FK","conn","=","op.get_bind","(",")","inspector","=","inspect","(","conn",")","tables","=","inspector.get_table_names","(",")","if","``","known_event","''","in","tables",":","for","fkey","in","inspector.get_foreign_keys","(","table_name=","''","known_event","''",",","referred_table=","''","users","''",")",":","if","fkey","[","``","name","''","]",":","with","op.batch_alter_table","(","table_name=","''","known_event","''",")","as","bop",":","bop.drop_constraint","(","fkey","[","``","name","''","]",",","type_=","''","foreignkey","''",")","if","``","chart","''","in","tables",":","op.drop_table","(","``","chart","''",",",")","if","``","users","''","in","tables",":","op.drop_table","(","``","users","''",")"] | 39 | 62 | null | 0059_2_0_0_drop_user_and_chart.py | airflow/airflow/migrations/versions/0059_2_0_0_drop_user_and_chart.py | from __future__ import annotations
import sqlalchemy
from alembic import op
from sqlalchemy import inspect, text
from sqlalchemy.dialects import mysql | 7 | null | 5 | null | null | 2 | null | Use image node_id 1 for calling a global function with example usage: upgrade() without return types | 100 | node_id 1 | 2,607 |
downgrade | global function
def downgrade():
conn = op.get_bind()
op.create_table(
"users",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("username", sa.String(length=250), nullable=True),
sa.Column("email", sa.String(length=500), nullable=True),
sa.Column("password", sa.String(255)),
sa.Column("superuser", sa.Boolean(), default=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("username"),
)
op.create_table(
"chart",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("label", sa.String(length=200), nullable=True),
sa.Column("conn_id", sa.String(length=250), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("chart_type", sa.String(length=100), nullable=True),
sa.Column("sql_layout", sa.String(length=50), nullable=True),
sa.Column("sql", sa.Text(), nullable=True),
sa.Column("y_log_scale", sa.Boolean(), nullable=True),
sa.Column("show_datatable", sa.Boolean(), nullable=True),
sa.Column("show_sql", sa.Boolean(), nullable=True),
sa.Column("height", sa.Integer(), nullable=True),
sa.Column(
"default_params", sa.String(length=5000), nullable=True
),
sa.Column("x_is_date", sa.Boolean(), nullable=True),
sa.Column("iteration_no", sa.Integer(), nullable=True),
sa.Column("last_modified", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"],
["users.id"],
),
sa.PrimaryKeyConstraint("id"),
)
if conn.dialect.name == "mysql":
conn.execute(text("SET time_zone = '+00:00'"))
op.alter_column(
table_name="chart",
column_name="last_modified",
type_=mysql.TIMESTAMP(fsp=6),
)
else:
if conn.dialect.name in ("sqlite", "mssql"):
return
if conn.dialect.name == "postgresql":
conn.execute(text("set timezone=UTC"))
op.alter_column(
table_name="chart",
column_name="last_modified",
type_=sa.TIMESTAMP(timezone=True),
)
| ["def","downgrade","(",")",":","conn","=","op.get_bind","(",")","op.create_table","(","``","users","''",",","sa.Column","(","``","id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.Column","(","``","username","''",",","sa.String","(","length=250",")",",","nullable=True",")",",","sa.Column","(","``","email","''",",","sa.String","(","length=500",")",",","nullable=True",")",",","sa.Column","(","``","password","''",",","sa.String","(","255",")",")",",","sa.Column","(","``","superuser","''",",","sa.Boolean","(",")",",","default=False",")",",","sa.PrimaryKeyConstraint","(","``","id","''",")",",","sa.UniqueConstraint","(","``","username","''",")",",",")","op.create_table","(","``","chart","''",",","sa.Column","(","``","id","''",",","sa.Integer","(",")",",","nullable=False",")",",","sa.Column","(","``","label","''",",","sa.String","(","length=200",")",",","nullable=True",")",",","sa.Column","(","``","conn_id","''",",","sa.String","(","length=250",")",",","nullable=False",")",",","sa.Column","(","``","user_id","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.Column","(","``","chart_type","''",",","sa.String","(","length=100",")",",","nullable=True",")",",","sa.Column","(","``","sql_layout","''",",","sa.String","(","length=50",")",",","nullable=True",")",",","sa.Column","(","``","sql","''",",","sa.Text","(",")",",","nullable=True",")",",","sa.Column","(","``","y_log_scale","''",",","sa.Boolean","(",")",",","nullable=True",")",",","sa.Column","(","``","show_datatable","''",",","sa.Boolean","(",")",",","nullable=True",")",",","sa.Column","(","``","show_sql","''",",","sa.Boolean","(",")",",","nullable=True",")",",","sa.Column","(","``","height","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.Column","(","``","default_params","''",",","sa.String","(","length=5000",")",",","nullable=True",")",",","sa.Column","(","``","x_is_date","''",",","sa.Boolean","(",")",",","nullable=True",")",",","sa.Column","(","``","iteration_no","''",",","sa.Integer","(",")",",","nullable=True",")",",","sa.Column","(","``","last_modified","''",",","sa.DateTime","(",")",",","nullable=True",")",",","sa.ForeignKeyConstraint","(","[","``","user_id","''","]",",","[","``","users.id","''","]",",",")",",","sa.PrimaryKeyConstraint","(","``","id","''",")",",",")","if","conn.dialect.name","==","``","mysql","''",":","conn.execute","(","text","(","``","SET","time_zone","=","'+00:00","'","''",")",")","op.alter_column","(","table_name=","''","chart","''",",","column_name=","''","last_modified","''",",","type_=mysql.TIMESTAMP","(","fsp=6",")",",",")","else",":","if","conn.dialect.name","in","(","``","sqlite","''",",","``","mssql","''",")",":","return","if","conn.dialect.name","==","``","postgresql","''",":","conn.execute","(","text","(","``","set","timezone=UTC","''",")",")","op.alter_column","(","table_name=","''","chart","''",",","column_name=","''","last_modified","''",",","type_=sa.TIMESTAMP","(","timezone=True",")",",",")"] | 65 | 113 | null | 0059_2_0_0_drop_user_and_chart.py | airflow/airflow/migrations/versions/0059_2_0_0_drop_user_and_chart.py | from __future__ import annotations
import sqlalchemy
from alembic import op
from sqlalchemy import inspect, text
from sqlalchemy.dialects import mysql | 7 | null | 5 | null | null | 2 | null | Use image node_id 2 for calling a global function with example usage: downgrade() without return types | 102 | node_id 2 | 2,608 |
evaluation | global function | args: X_selected, n_clusters, y | returns: nmi, acc
def evaluation(X_selected, n_clusters, y):
"""
This function calculates ARI, ACC and NMI of clustering results
Input
-----
X_selected: {numpy array}, shape (n_samples, n_selected_features}
input data on the selected features
n_clusters: {int}
number of clusters
y: {numpy array}, shape (n_samples,)
true labels
Output
------
nmi: {float}
Normalized Mutual Information
acc: {float}
Accuracy
"""
k_means = KMeans(
n_clusters=n_clusters,
init="k-means++",
n_init=10,
max_iter=300,
tol=0.0001,
precompute_distances=True,
verbose=0,
random_state=None,
copy_x=True,
n_jobs=1,
)
k_means.fit(X_selected)
y_predict = k_means.labels_
# calculate NMI
nmi = normalized_mutual_info_score(y, y_predict)
# calculate ACC
y_permuted_predict = best_map(y, y_predict)
acc = accuracy_score(y, y_permuted_predict)
return nmi, acc
| ["def","evaluation","(","X_selected",",","n_clusters",",","y",")",":","``","''","''","This","function","calculates","ARI",",","ACC","and","NMI","of","clustering","results","Input","--","--","-","X_selected",":","{","numpy","array","}",",","shape","(","n_samples",",","n_selected_features","}","input","data","on","the","selected","features","n_clusters",":","{","int","}","number","of","clusters","y",":","{","numpy","array","}",",","shape","(","n_samples",",",")","true","labels","Output","--","--","--","nmi",":","{","float","}","Normalized","Mutual","Information","acc",":","{","float","}","Accuracy","``","''","''","k_means","=","KMeans","(","n_clusters=n_clusters",",","init=","''","k-means++","''",",","n_init=10",",","max_iter=300",",","tol=0.0001",",","precompute_distances=True",",","verbose=0",",","random_state=None",",","copy_x=True",",","n_jobs=1",",",")","k_means.fit","(","X_selected",")","y_predict","=","k_means.labels_","#","calculate","NMI","nmi","=","normalized_mutual_info_score","(","y",",","y_predict",")","#","calculate","ACC","y_permuted_predict","=","best_map","(","y",",","y_predict",")","acc","=","accuracy_score","(","y",",","y_permuted_predict",")","return","nmi",",","acc"] | 39 | 73 | null | unsupervised_evaluation.py | scikit-feature/skfeature/utility/unsupervised_evaluation.py | import numpy
import sklearn.utils.linear_assignment_
from sklearn.metrics import accuracy_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.cluster import KMeans | 7 | null | 5 | null | null | 2 | null | Use image node_id 2 for calling a global function with example usage: evaluation(X_selected, n_clusters, y) and returns: nmi, acc | 130 | node_id 2 | 343,991 |
best_map | global function | args: l1, l2 | returns: new_l2
def best_map(l1, l2):
"""
Permute labels of l2 to match l1 as much as possible
"""
if len(l1) != len(l2):
print("L1.shape must == L2.shape")
exit(0)
label1 = np.unique(l1)
n_class1 = len(label1)
label2 = np.unique(l2)
n_class2 = len(label2)
n_class = max(n_class1, n_class2)
G = np.zeros((n_class, n_class))
for i in range(0, n_class1):
for j in range(0, n_class2):
ss = l1 == label1[i]
tt = l2 == label2[j]
G[i, j] = np.count_nonzero(ss & tt)
A = la.linear_assignment(-G)
new_l2 = np.zeros(l2.shape)
for i in range(0, n_class2):
new_l2[l2 == label2[A[i][1]]] = label1[A[i][0]]
return new_l2.astype(int)
| ["def","best_map","(","l1",",","l2",")",":","``","''","''","Permute","labels","of","l2","to","match","l1","as","much","as","possible","``","''","''","if","len","(","l1",")","!","=","len","(","l2",")",":","print","(","``","L1.shape","must","==","L2.shape","''",")","exit","(","0",")","label1","=","np.unique","(","l1",")","n_class1","=","len","(","label1",")","label2","=","np.unique","(","l2",")","n_class2","=","len","(","label2",")","n_class","=","max","(","n_class1",",","n_class2",")","G","=","np.zeros","(","(","n_class",",","n_class",")",")","for","i","in","range","(","0",",","n_class1",")",":","for","j","in","range","(","0",",","n_class2",")",":","ss","=","l1","==","label1","[","i","]","tt","=","l2","==","label2","[","j","]","G","[","i",",","j","]","=","np.count_nonzero","(","ss","&","tt",")","A","=","la.linear_assignment","(","-G",")","new_l2","=","np.zeros","(","l2.shape",")","for","i","in","range","(","0",",","n_class2",")",":","new_l2","[","l2","==","label2","[","A","[","i","]","[","1","]","]","]","=","label1","[","A","[","i","]","[","0","]","]","return","new_l2.astype","(","int",")"] | 8 | 36 | null | unsupervised_evaluation.py | scikit-feature/skfeature/utility/unsupervised_evaluation.py | import numpy
import sklearn.utils.linear_assignment_
from sklearn.metrics import accuracy_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.cluster import KMeans | 7 | null | 5 | null | null | 2 | null | Use image node_id 1 for calling a global function with example usage: best_map(l1, l2) and returns: new_l2 | 106 | node_id 1 | 343,990 |
encode_base64 | global function | args: msg
def encode_base64(msg):
"""Encode the message's payload in Base64.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload(decode=True)
encdata = str(_bencode(orig), "ascii")
msg.set_payload(encdata)
msg["Content-Transfer-Encoding"] = "base64"
| ["def","encode_base64","(","msg",")",":","``","''","''","Encode","the","message","'s","payload","in","Base64",".","Also",",","add","an","appropriate","Content-Transfer-Encoding","header.","``","''","''","orig","=","msg.get_payload","(","decode=True",")","encdata","=","str","(","_bencode","(","orig",")",",","``","ascii","''",")","msg.set_payload","(","encdata",")","msg","[","``","Content-Transfer-Encoding","''","]","=","``","base64","''"] | 25 | 33 | null | encoders.py | cpython/Lib/email/encoders.py | from base64 import encodebytes
from quopri import encodestring | 7 | null | 2 | null | null | 5 | null | Use image node_id 2 for calling a global function with example usage: encode_base64(msg) without return types | 109 | node_id 2 | 52,445 |
encode_quopri | global function | args: msg
def encode_quopri(msg):
"""Encode the message's payload in quoted-printable.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload(decode=True)
encdata = _qencode(orig)
msg.set_payload(encdata)
msg["Content-Transfer-Encoding"] = "quoted-printable"
| ["def","encode_quopri","(","msg",")",":","``","''","''","Encode","the","message","'s","payload","in","quoted-printable",".","Also",",","add","an","appropriate","Content-Transfer-Encoding","header.","``","''","''","orig","=","msg.get_payload","(","decode=True",")","encdata","=","_qencode","(","orig",")","msg.set_payload","(","encdata",")","msg","[","``","Content-Transfer-Encoding","''","]","=","``","quoted-printable","''"] | 36 | 44 | null | encoders.py | cpython/Lib/email/encoders.py | from base64 import encodebytes
from quopri import encodestring | 7 | null | 2 | null | null | 5 | null | Use image node_id 3 for calling a global function with example usage: encode_quopri(msg) without return types | 109 | node_id 3 | 52,446 |
encode_7or8bit | global function | args: msg
def encode_7or8bit(msg):
"""Set the Content-Transfer-Encoding header to 7bit or 8bit."""
orig = msg.get_payload(decode=True)
if orig is None:
# There's no payload. For backwards compatibility we use 7bit
msg["Content-Transfer-Encoding"] = "7bit"
return
# We play a trick to make this go fast. If decoding from ASCII succeeds,
# we know the data must be 7bit, otherwise treat it as 8bit.
try:
orig.decode("ascii")
except UnicodeError:
msg["Content-Transfer-Encoding"] = "8bit"
else:
msg["Content-Transfer-Encoding"] = "7bit"
| ["def","encode_7or8bit","(","msg",")",":","``","''","''","Set","the","Content-Transfer-Encoding","header","to","7bit","or","8bit",".","''","''","''","orig","=","msg.get_payload","(","decode=True",")","if","orig","is","None",":","#","There","'s","no","payload",".","For","backwards","compatibility","we","use","7bit","msg","[","``","Content-Transfer-Encoding","''","]","=","``","7bit","''","return","#","We","play","a","trick","to","make","this","go","fast",".","If","decoding","from","ASCII","succeeds",",","#","we","know","the","data","must","be","7bit",",","otherwise","treat","it","as","8bit",".","try",":","orig.decode","(","``","ascii","''",")","except","UnicodeError",":","msg","[","``","Content-Transfer-Encoding","''","]","=","``","8bit","''","else",":","msg","[","``","Content-Transfer-Encoding","''","]","=","``","7bit","''"] | 47 | 61 | null | encoders.py | cpython/Lib/email/encoders.py | from base64 import encodebytes
from quopri import encodestring | 7 | null | 2 | null | null | 5 | null | Use image node_id 4 for calling a global function with example usage: encode_7or8bit(msg) without return types | 110 | node_id 4 | 52,447 |
encode_noop | global function | args: msg
def encode_noop(msg):
"""Do nothing."""
| ["def","encode_noop","(","msg",")",":","``","''","''","Do","nothing",".","''","''","''"] | 64 | 65 | null | encoders.py | cpython/Lib/email/encoders.py | from base64 import encodebytes
from quopri import encodestring | 7 | null | 2 | null | null | 5 | null | Use image node_id 5 for calling a global function with example usage: encode_noop(msg) without return types | 107 | node_id 5 | 52,448 |
__init__ | method of class Foo | is_member: true | args: self | returns: Foo | class_docstr: Docstring for class Foo. Example from Sphinx docs.
def __init__(self):
#: Doc comment for instance attribute qux.
self.qux = 3
self.spam = 4
"""Docstring for instance attribute spam."""
| ["def","__init__","(","self",")",":","#",":","Doc","comment","for","instance","attribute","qux",".","self.qux","=","3","self.spam","=","4","``","''","''","Docstring","for","instance","attribute","spam",".","''","''","''"] | 70 | 75 | null | comments.py | black/tests/data/cases/comments.py | import os.path
import sys
import a
from b.c import X | 7 | 1 | 4 | 0 | null | 1 | 1 | Use image node_id 1 to create a new Foo object with example: obj = Foo() | 73 | node_id 1 | 37,107 |
torch_nn_func_batchnorm | global function | args: input, running_mean, running_var, weight, bias, training, momentum, eps | returns: flops, macs
def torch_nn_func_batchnorm(
input: torch.Tensor,
running_mean: Optional[torch.Tensor],
running_var: Optional[torch.Tensor],
weight: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
training: bool = False,
momentum: float = 0.1,
eps: float = 1e-5,
) -> Tuple[int, int]:
has_affine = weight is not None
if training:
flops = input.numel() * (2 if has_affine else 1)
else:
flops = input.numel() * (5 if has_affine else 4)
macs = 0
return flops, macs
| ["def","torch_nn_func_batchnorm","(","input",":","torch.Tensor",",","running_mean",":","Optional","[","torch.Tensor","]",",","running_var",":","Optional","[","torch.Tensor","]",",","weight",":","Optional","[","torch.Tensor","]","=","None",",","bias",":","Optional","[","torch.Tensor","]","=","None",",","training",":","bool","=","False",",","momentum",":","float","=","0.1",",","eps",":","float","=","1e-5",",",")","-",">","Tuple","[","int",",","int","]",":","has_affine","=","weight","is","not","None","if","training",":","flops","=","input.numel","(",")","*","(","2","if","has_affine","else","1",")","else",":","flops","=","input.numel","(",")","*","(","5","if","has_affine","else","4",")","macs","=","0","return","flops",",","macs"] | 54 | 70 | null | normalization.py | colossalai/colossalai/fx/profiler/experimental/profiler_function/normalization.py | from typing import List, Optional, Tuple
import torch
from ..registry import meta_profiler_function | 7 | null | 3 | null | null | 4 | null | Use image node_id 4 for calling a global function with example usage: torch_nn_func_batchnorm(input, running_mean, running_var, weight, bias, training, momentum, eps) and returns: flops, macs | 192 | node_id 4 | 41,339 |
torch_nn_func_layernorm | global function | args: input, normalized_shape, weight, bias, eps | returns: flops, macs
def torch_nn_func_layernorm(
input: torch.Tensor,
normalized_shape: List[int],
weight: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
eps: float = 1e-5,
) -> Tuple[int, int]:
has_affine = weight is not None
flops = input.numel() * (5 if has_affine else 4)
macs = 0
return flops, macs
| ["def","torch_nn_func_layernorm","(","input",":","torch.Tensor",",","normalized_shape",":","List","[","int","]",",","weight",":","Optional","[","torch.Tensor","]","=","None",",","bias",":","Optional","[","torch.Tensor","]","=","None",",","eps",":","float","=","1e-5",",",")","-",">","Tuple","[","int",",","int","]",":","has_affine","=","weight","is","not","None","flops","=","input.numel","(",")","*","(","5","if","has_affine","else","4",")","macs","=","0","return","flops",",","macs"] | 40 | 50 | null | normalization.py | colossalai/colossalai/fx/profiler/experimental/profiler_function/normalization.py | from typing import List, Optional, Tuple
import torch
from ..registry import meta_profiler_function | 7 | null | 3 | null | null | 4 | null | Use image node_id 3 for calling a global function with example usage: torch_nn_func_layernorm(input, normalized_shape, weight, bias, eps) and returns: flops, macs | 163 | node_id 3 | 41,338 |
test_model_dump_json | global function
def test_model_dump_json() -> None:
class A(BaseModel):
a: str
b: int = 2
assert (
model_dump_json(A(a="aaa")).replace(" ", "")
== '{"a":"aaa","b":2}'
)
| ["def","test_model_dump_json","(",")","-",">","None",":","class","A","(","BaseModel",")",":","a",":","str","b",":","int","=","2","assert","(","model_dump_json","(","A","(","a=","''","aaa","''",")",")",".replace","(","``","``",",","``","''",")","==","'","{","``","a","''",":","''","aaa","''",",","''","b","''",":2","}","'",")"] | 36 | 41 | null | test_pydantic.py | autogen/test/test_pydantic.py | from typing import Dict, List, Optional, Tuple, Union
from pydantic import BaseModel, Field
from typing_extensions import Annotated
from autogen._pydantic import model_dump, model_dump_json, type2schema | 7 | null | 4 | null | null | 3 | null | Use image node_id 3 for calling a global function with example usage: test_model_dump_json() without return types | 113 | node_id 3 | 33,747 |
test_complex_file | global function | args: test_case
def test_complex_file(test_case):
"""Run domain-specific tests from test.py file."""
case_module = __import__(
f"test_cases.{test_case}",
globals=globals(),
locals=locals(),
fromlist=["expectation"],
level=1,
)
test_case_dir = os.path.normcase(os.path.dirname(__file__))
test_case_file = os.path.join(
test_case_dir, "test_cases", test_case + ".py"
)
results = list(
check(
[test_case_file],
select=set(ErrorRegistry.get_error_codes()),
ignore_decorators=re.compile("wraps|ignored_decorator"),
property_decorators=DEFAULT_PROPERTY_DECORATORS,
)
)
for error in results:
assert isinstance(error, Error)
results = {(e.definition.name, e.message) for e in results}
assert case_module.expectation.expected == results
| ["def","test_complex_file","(","test_case",")",":","``","''","''","Run","domain-specific","tests","from","test.py","file",".","''","''","''","case_module","=","__import__","(","f","''","test_cases",".","{","test_case","}","''",",","globals=globals","(",")",",","locals=locals","(",")",",","fromlist=","[","``","expectation","''","]",",","level=1",",",")","test_case_dir","=","os.path.normcase","(","os.path.dirname","(","__file__",")",")","test_case_file","=","os.path.join","(","test_case_dir",",","``","test_cases","''",",","test_case","+","``",".py","''",")","results","=","list","(","check","(","[","test_case_file","]",",","select=set","(","ErrorRegistry.get_error_codes","(",")",")",",","ignore_decorators=re.compile","(","``","wraps|ignored_decorator","''",")",",","property_decorators=DEFAULT_PROPERTY_DECORATORS",",",")",")","for","error","in","results",":","assert","isinstance","(","error",",","Error",")","results","=","{","(","e.definition.name",",","e.message",")","for","e","in","results","}","assert","case_module.expectation.expected","==","results"] | 30 | 52 | null | test_definitions.py | pydocstyle/src/tests/test_definitions.py | import os
import re
import pytest
from pydocstyle.violations import Error, ErrorRegistry
from pydocstyle.checker import check
from pydocstyle.config import ConfigurationParser | 7 | null | 6 | null | null | 1 | null | Use image node_id 1 for calling a global function with example usage: test_complex_file(test_case) without return types | 119 | node_id 1 | 258,436 |
This dataset is a subset of the matlok Python copilot datasets. Please refer to the Multimodal Python Copilot Training Overview for more details on how to use this dataset.
Each row contains Python code for either a class method or a global function, along with the module's imports, base classes (if any), exceptions, return values, and arguments (each ordered as they appear in the code), and more.
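The full schema, with field names and their dtypes, is: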
{
"args": "string",
"class_bases": "string",
"class_docstr": "string",
"class_docstr_tok": "string",
"class_name": "string",
"code": "string",
"code_tok": "string",
"docstr": "string",
"docstr_tok": "string",
"file_path": "string",
"filename": "string",
"imports": "string",
"is_member": "bool",
"label_desc": "string",
"label_desc_len": "int64",
"label_id": "string",
"lend": "int64",
"lstart": "int64",
"name": "string",
"num_all_bases": "float64",
"num_bases": "float64",
"num_classes": "float64",
"num_functions": "int64",
"num_imports": "int64",
"num_methods": "float64",
"raises": "string",
"returns": "string",
"total_objects": "int64"
}
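For concreteness, here is a sketch of how one preview row above (encode_base64 from cpython/Lib/email/encoders.py) maps onto that schema. The token fields are elided, and the exact representation of empty values is an assumption; treat this as illustrative, not authoritative:

# A sketch of a single row, built from the encode_base64 preview above.
# Token fields (code_tok, docstr_tok, class_docstr_tok) are elided.
row = {
    "name": "encode_base64",
    "class_name": None,   # global function, not a class member
    "is_member": False,
    "args": "msg",
    "returns": None,      # the function has no return value
    "code": 'def encode_base64(msg):\n    """Encode the message\'s payload in Base64...',
    "lstart": 25,
    "lend": 33,
    "filename": "encoders.py",
    "file_path": "cpython/Lib/email/encoders.py",
    "imports": "from base64 import encodebytes; from quopri import encodestring",
    "total_objects": 7,
    "num_imports": 2,
    "num_functions": 5,
    "label_desc": "Use image node_id 2 for calling a global function with "
                  "example usage: encode_base64(msg) without return types",
    "label_desc_len": 109,
    "label_id": "node_id 2",
}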
from datasets import load_dataset
ds = load_dataset("matlok/python-copilot-training-on-ai-research-repos", data_dir="files")