text
stringlengths 89
104k
| code_tokens
sequence | avg_line_len
float64 7.91
980
| score
float64 0
630
|
---|---|---|---|
def get_parent_label(self, treepos):
    """Return the label of the parent of the node at ``treepos``.

    Returns ``None`` when the node has no parent (i.e. it is the root).
    """
    parent_position = self.get_parent_treepos(treepos)
    if parent_position is None:
        return None
    return self.dgtree[parent_position].label()
"def",
"get_parent_label",
"(",
"self",
",",
"treepos",
")",
":",
"parent_pos",
"=",
"self",
".",
"get_parent_treepos",
"(",
"treepos",
")",
"if",
"parent_pos",
"is",
"not",
"None",
":",
"parent",
"=",
"self",
".",
"dgtree",
"[",
"parent_pos",
"]",
"return",
"parent",
".",
"label",
"(",
")",
"else",
":",
"return",
"None"
] | 37.1 | 8.6 |
def get_go2nt(self, usr_go2nt):
    """Combine user namedtuple fields, GO object fields, and format_txt."""
    gos_all = self.get_gos_all()
    # Minimum set of namedtuple fields usable with Sorter on grouped GO IDs.
    flds_all = get_hdridx_flds() + self.gosubdag.prt_attr['flds']
    if not usr_go2nt:
        return self.__init_go2nt_dflt(gos_all, flds_all)
    user_flds = next(iter(usr_go2nt.values()))._fields
    # When the user namedtuple already carries every available field,
    # just augment and return it.
    if not set(flds_all).difference(user_flds):
        return self._init_go2nt_aug(usr_go2nt)
    # Otherwise merge the user's fields with the default Sorter fields.
    return self.__init_go2nt_w_usr(gos_all, usr_go2nt, flds_all)
"def",
"get_go2nt",
"(",
"self",
",",
"usr_go2nt",
")",
":",
"gos_all",
"=",
"self",
".",
"get_gos_all",
"(",
")",
"# Minimum set of namedtuple fields available for use with Sorter on grouped GO IDs",
"prt_flds_all",
"=",
"get_hdridx_flds",
"(",
")",
"+",
"self",
".",
"gosubdag",
".",
"prt_attr",
"[",
"'flds'",
"]",
"if",
"not",
"usr_go2nt",
":",
"return",
"self",
".",
"__init_go2nt_dflt",
"(",
"gos_all",
",",
"prt_flds_all",
")",
"usr_nt_flds",
"=",
"next",
"(",
"iter",
"(",
"usr_go2nt",
".",
"values",
"(",
")",
")",
")",
".",
"_fields",
"# If user namedtuple already contains all fields available, then return usr_go2nt",
"if",
"len",
"(",
"set",
"(",
"prt_flds_all",
")",
".",
"difference",
"(",
"usr_nt_flds",
")",
")",
"==",
"0",
":",
"return",
"self",
".",
"_init_go2nt_aug",
"(",
"usr_go2nt",
")",
"# Otherwise, combine user fields and default Sorter fields",
"return",
"self",
".",
"__init_go2nt_w_usr",
"(",
"gos_all",
",",
"usr_go2nt",
",",
"prt_flds_all",
")"
] | 61.384615 | 22.692308 |
def create_logger(app):
    """Create a logger for the given application.

    Works like a regular Python logger, except that the effective level
    tracks the application's ``debug`` flag. Any handlers previously
    attached under the same logger name are removed first.
    """
    base = getLoggerClass()

    class DebugLogger(base):
        def getEffectiveLevel(self):
            # Unset level while the app is debugging -> force DEBUG.
            if self.level == 0 and app.debug:
                return DEBUG
            return base.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        def emit(self, record):
            # Only emit while the app is in debug mode.
            if app.debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # The logger may already exist: drop any handlers attached to it.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
"def",
"create_logger",
"(",
"app",
")",
":",
"Logger",
"=",
"getLoggerClass",
"(",
")",
"class",
"DebugLogger",
"(",
"Logger",
")",
":",
"def",
"getEffectiveLevel",
"(",
"x",
")",
":",
"if",
"x",
".",
"level",
"==",
"0",
"and",
"app",
".",
"debug",
":",
"return",
"DEBUG",
"return",
"Logger",
".",
"getEffectiveLevel",
"(",
"x",
")",
"class",
"DebugHandler",
"(",
"StreamHandler",
")",
":",
"def",
"emit",
"(",
"x",
",",
"record",
")",
":",
"StreamHandler",
".",
"emit",
"(",
"x",
",",
"record",
")",
"if",
"app",
".",
"debug",
"else",
"None",
"handler",
"=",
"DebugHandler",
"(",
")",
"handler",
".",
"setLevel",
"(",
"DEBUG",
")",
"handler",
".",
"setFormatter",
"(",
"Formatter",
"(",
"app",
".",
"debug_log_format",
")",
")",
"logger",
"=",
"getLogger",
"(",
"app",
".",
"logger_name",
")",
"# just in case that was not a new logger, get rid of all the handlers",
"# already attached to it.",
"del",
"logger",
".",
"handlers",
"[",
":",
"]",
"logger",
".",
"__class__",
"=",
"DebugLogger",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"logger"
] | 35.793103 | 15.586207 |
def ipv6_reassembly(packet, *, count=NotImplemented):
    """Make data for IPv6 reassembly.

    Args:
        packet: scapy packet to inspect.
        count: original packet range number (keyword-only; defaults to
            ``NotImplemented`` when the caller does not track it).

    Returns:
        tuple: ``(True, data)`` when *packet* carries a fragmented IPv6
        datagram (``data`` is a dict of reassembly parameters);
        ``(False, None)`` otherwise.

    Raises:
        ModuleNotFound: if scapy is not installed.
    """
    # scapy is an optional dependency; bail out early when missing.
    if scapy_all is None:
        raise ModuleNotFound("No module named 'scapy'", name='scapy')
    if 'IPv6' in packet:
        ipv6 = packet['IPv6']
        if scapy_all.IPv6ExtHdrFragment not in ipv6:  # pylint: disable=E1101
            return False, None  # dismiss not fragmented packet
        ipv6_frag = ipv6['IPv6ExtHdrFragment']
        data = dict(
            bufid=(
                ipaddress.ip_address(ipv6.src),  # source IP address
                ipaddress.ip_address(ipv6.dst),  # destination IP address
                ipv6.fl,  # label
                TP_PROTO.get(ipv6_frag.nh).name,  # next header field in IPv6 Fragment Header
            ),
            num=count,  # original packet range number
            fo=ipv6_frag.offset,  # fragment offset
            ihl=len(ipv6) - len(ipv6_frag),  # header length, only headers before IPv6-Frag
            mf=bool(ipv6_frag.m),  # more fragment flag
            tl=len(ipv6),  # total length, header includes
            header=bytearray(bytes(ipv6)[:-len(ipv6_frag)]),  # raw bytearray type header before IPv6-Frag
            payload=bytearray(bytes(ipv6_frag.payload)),  # raw bytearray type payload after IPv6-Frag
        )
        return True, data
    return False, None
"def",
"ipv6_reassembly",
"(",
"packet",
",",
"*",
",",
"count",
"=",
"NotImplemented",
")",
":",
"if",
"scapy_all",
"is",
"None",
":",
"raise",
"ModuleNotFound",
"(",
"\"No module named 'scapy'\"",
",",
"name",
"=",
"'scapy'",
")",
"if",
"'IPv6'",
"in",
"packet",
":",
"ipv6",
"=",
"packet",
"[",
"'IPv6'",
"]",
"if",
"scapy_all",
".",
"IPv6ExtHdrFragment",
"not",
"in",
"ipv6",
":",
"# pylint: disable=E1101",
"return",
"False",
",",
"None",
"# dismiss not fragmented packet",
"ipv6_frag",
"=",
"ipv6",
"[",
"'IPv6ExtHdrFragment'",
"]",
"data",
"=",
"dict",
"(",
"bufid",
"=",
"(",
"ipaddress",
".",
"ip_address",
"(",
"ipv6",
".",
"src",
")",
",",
"# source IP address",
"ipaddress",
".",
"ip_address",
"(",
"ipv6",
".",
"dst",
")",
",",
"# destination IP address",
"ipv6",
".",
"fl",
",",
"# label",
"TP_PROTO",
".",
"get",
"(",
"ipv6_frag",
".",
"nh",
")",
".",
"name",
",",
"# next header field in IPv6 Fragment Header",
")",
",",
"num",
"=",
"count",
",",
"# original packet range number",
"fo",
"=",
"ipv6_frag",
".",
"offset",
",",
"# fragment offset",
"ihl",
"=",
"len",
"(",
"ipv6",
")",
"-",
"len",
"(",
"ipv6_frag",
")",
",",
"# header length, only headers before IPv6-Frag",
"mf",
"=",
"bool",
"(",
"ipv6_frag",
".",
"m",
")",
",",
"# more fragment flag",
"tl",
"=",
"len",
"(",
"ipv6",
")",
",",
"# total length, header includes",
"header",
"=",
"bytearray",
"(",
"bytes",
"(",
"ipv6",
")",
"[",
":",
"-",
"len",
"(",
"ipv6_frag",
")",
"]",
")",
",",
"# raw bytearray type header before IPv6-Frag",
"payload",
"=",
"bytearray",
"(",
"bytes",
"(",
"ipv6_frag",
".",
"payload",
")",
")",
",",
"# raw bytearray type payload after IPv6-Frag",
")",
"return",
"True",
",",
"data",
"return",
"False",
",",
"None"
] | 60.923077 | 34.230769 |
def write(self, filename):
    """
    Write detector to a file - uses HDF5 file format.

    Meta-data are stored alongside numpy data arrays. See h5py.org for
    details of the methods.

    :type filename: str
    :param filename: Filename to save the detector to.

    :returns: this detector (allows chaining).
    """
    def _write_arrays(group, prefix, arrays):
        # One dataset per array, named "<prefix>_<i>", plus a 'length'
        # attribute so readers know how many datasets to expect.
        for i, arr in enumerate(arrays):
            dset = group.create_dataset(name=prefix + "_" + str(i),
                                        shape=arr.shape, dtype=arr.dtype)
            dset[...] = arr
        group.attrs['length'] = len(arrays)

    f = h5py.File(filename, "w")
    try:
        # Must store eqcorrscan version number, username would be useful too.
        data_group = f.create_group(name="data")
        _write_arrays(data_group, "data", self.data)
        data_group.attrs['name'] = self.name.encode("ascii", "ignore")
        data_group.attrs['sampling_rate'] = self.sampling_rate
        data_group.attrs['multiplex'] = self.multiplex
        data_group.attrs['lowcut'] = self.lowcut
        data_group.attrs['highcut'] = self.highcut
        data_group.attrs['filt_order'] = self.filt_order
        data_group.attrs['dimension'] = self.dimension
        data_group.attrs['user'] = getpass.getuser()
        data_group.attrs['eqcorrscan_version'] = str(eqcorrscan.__version__)
        # Convert station-channel list to something writable
        ascii_stachans = ['.'.join(stachan).encode("ascii", "ignore")
                          for stachan in self.stachans]
        stachans = f.create_dataset(name="stachans",
                                    shape=(len(ascii_stachans),),
                                    dtype='S10')
        stachans[...] = ascii_stachans
        # The SVD components u, sigma and v all share the same layout.
        _write_arrays(f.create_group("u"), "u", self.u)
        _write_arrays(f.create_group("sigma"), "sigma", self.sigma)
        _write_arrays(f.create_group("v"), "v", self.v)
        f.flush()
    finally:
        # Close the HDF5 handle even if a write fails part-way through.
        f.close()
    return self
"def",
"write",
"(",
"self",
",",
"filename",
")",
":",
"f",
"=",
"h5py",
".",
"File",
"(",
"filename",
",",
"\"w\"",
")",
"# Must store eqcorrscan version number, username would be useful too.",
"data_group",
"=",
"f",
".",
"create_group",
"(",
"name",
"=",
"\"data\"",
")",
"for",
"i",
",",
"data",
"in",
"enumerate",
"(",
"self",
".",
"data",
")",
":",
"dset",
"=",
"data_group",
".",
"create_dataset",
"(",
"name",
"=",
"\"data_\"",
"+",
"str",
"(",
"i",
")",
",",
"shape",
"=",
"data",
".",
"shape",
",",
"dtype",
"=",
"data",
".",
"dtype",
")",
"dset",
"[",
"...",
"]",
"=",
"data",
"data_group",
".",
"attrs",
"[",
"'length'",
"]",
"=",
"len",
"(",
"self",
".",
"data",
")",
"data_group",
".",
"attrs",
"[",
"'name'",
"]",
"=",
"self",
".",
"name",
".",
"encode",
"(",
"\"ascii\"",
",",
"\"ignore\"",
")",
"data_group",
".",
"attrs",
"[",
"'sampling_rate'",
"]",
"=",
"self",
".",
"sampling_rate",
"data_group",
".",
"attrs",
"[",
"'multiplex'",
"]",
"=",
"self",
".",
"multiplex",
"data_group",
".",
"attrs",
"[",
"'lowcut'",
"]",
"=",
"self",
".",
"lowcut",
"data_group",
".",
"attrs",
"[",
"'highcut'",
"]",
"=",
"self",
".",
"highcut",
"data_group",
".",
"attrs",
"[",
"'filt_order'",
"]",
"=",
"self",
".",
"filt_order",
"data_group",
".",
"attrs",
"[",
"'dimension'",
"]",
"=",
"self",
".",
"dimension",
"data_group",
".",
"attrs",
"[",
"'user'",
"]",
"=",
"getpass",
".",
"getuser",
"(",
")",
"data_group",
".",
"attrs",
"[",
"'eqcorrscan_version'",
"]",
"=",
"str",
"(",
"eqcorrscan",
".",
"__version__",
")",
"# Convert station-channel list to something writable",
"ascii_stachans",
"=",
"[",
"'.'",
".",
"join",
"(",
"stachan",
")",
".",
"encode",
"(",
"\"ascii\"",
",",
"\"ignore\"",
")",
"for",
"stachan",
"in",
"self",
".",
"stachans",
"]",
"stachans",
"=",
"f",
".",
"create_dataset",
"(",
"name",
"=",
"\"stachans\"",
",",
"shape",
"=",
"(",
"len",
"(",
"ascii_stachans",
")",
",",
")",
",",
"dtype",
"=",
"'S10'",
")",
"stachans",
"[",
"...",
"]",
"=",
"ascii_stachans",
"u_group",
"=",
"f",
".",
"create_group",
"(",
"\"u\"",
")",
"for",
"i",
",",
"u",
"in",
"enumerate",
"(",
"self",
".",
"u",
")",
":",
"uset",
"=",
"u_group",
".",
"create_dataset",
"(",
"name",
"=",
"\"u_\"",
"+",
"str",
"(",
"i",
")",
",",
"shape",
"=",
"u",
".",
"shape",
",",
"dtype",
"=",
"u",
".",
"dtype",
")",
"uset",
"[",
"...",
"]",
"=",
"u",
"u_group",
".",
"attrs",
"[",
"'length'",
"]",
"=",
"len",
"(",
"self",
".",
"u",
")",
"sigma_group",
"=",
"f",
".",
"create_group",
"(",
"\"sigma\"",
")",
"for",
"i",
",",
"sigma",
"in",
"enumerate",
"(",
"self",
".",
"sigma",
")",
":",
"sigmaset",
"=",
"sigma_group",
".",
"create_dataset",
"(",
"name",
"=",
"\"sigma_\"",
"+",
"str",
"(",
"i",
")",
",",
"shape",
"=",
"sigma",
".",
"shape",
",",
"dtype",
"=",
"sigma",
".",
"dtype",
")",
"sigmaset",
"[",
"...",
"]",
"=",
"sigma",
"sigma_group",
".",
"attrs",
"[",
"'length'",
"]",
"=",
"len",
"(",
"self",
".",
"sigma",
")",
"v_group",
"=",
"f",
".",
"create_group",
"(",
"\"v\"",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"self",
".",
"v",
")",
":",
"vset",
"=",
"v_group",
".",
"create_dataset",
"(",
"name",
"=",
"\"v_\"",
"+",
"str",
"(",
"i",
")",
",",
"shape",
"=",
"v",
".",
"shape",
",",
"dtype",
"=",
"v",
".",
"dtype",
")",
"vset",
"[",
"...",
"]",
"=",
"v",
"v_group",
".",
"attrs",
"[",
"'length'",
"]",
"=",
"len",
"(",
"self",
".",
"v",
")",
"f",
".",
"flush",
"(",
")",
"f",
".",
"close",
"(",
")",
"return",
"self"
] | 46.912281 | 16.631579 |
def group_by_key(dirnames, key):
    """Group a set of output directories according to a model parameter.

    Parameters
    ----------
    dirnames: list[str]
        Output directories
    key: various
        A field of a :class:`Model` instance.

    Returns
    -------
    groups: dict[various: list[str]]
        For each value of `key` that is found at least once in the models, a
        list of the output directories where `key` is that value.
    """
    grouped = defaultdict(list)
    for dirname in dirnames:
        model = get_recent_model(dirname)
        grouped[model.__dict__[key]].append(dirname)
    return dict(grouped)
"def",
"group_by_key",
"(",
"dirnames",
",",
"key",
")",
":",
"groups",
"=",
"defaultdict",
"(",
"lambda",
":",
"[",
"]",
")",
"for",
"dirname",
"in",
"dirnames",
":",
"m",
"=",
"get_recent_model",
"(",
"dirname",
")",
"groups",
"[",
"m",
".",
"__dict__",
"[",
"key",
"]",
"]",
".",
"append",
"(",
"dirname",
")",
"return",
"dict",
"(",
"groups",
")"
] | 29.47619 | 17.428571 |
def get_noconflict_metaclass(bases, left_metas, right_metas):
    """
    Not intended to be used outside of this module, unless you know what you are doing.

    Resolve a single metaclass compatible with *bases* plus the explicitly
    requested *left_metas*/*right_metas*, memoizing the result in
    ``memoized_metaclasses_map``.

    :param bases: tuple of base classes for the class under construction
    :param left_metas: metaclasses taking priority over the bases' metaclasses
    :param right_metas: metaclasses appended after the bases' metaclasses
    :return: a metaclass usable for a class with the given bases
    :raises TypeError: when resolution recurses (``needed_metas == bases``)
    """
    # make tuple of needed metaclasses in specified priority order
    metas = left_metas + tuple(map(type, bases)) + right_metas
    needed_metas = remove_redundant(metas)
    # return existing confict-solving meta, if any
    if needed_metas in memoized_metaclasses_map:
        return memoized_metaclasses_map[needed_metas]
    # nope: compute, memoize and return needed conflict-solving meta
    elif not needed_metas:  # wee, a trivial case, happy us
        meta = type
    elif len(needed_metas) == 1:  # another trivial case
        meta = needed_metas[0]
    # check for recursion, can happen i.e. for Zope ExtensionClasses
    elif needed_metas == bases:
        raise TypeError("Incompatible root metatypes", needed_metas)
    else:  # gotta work ...
        # Synthesize a new metaclass deriving from every needed metaclass;
        # its name concatenates their names (e.g. "_MetaAMetaB").
        metaname = '_' + ''.join([m.__name__ for m in needed_metas])
        meta = classmaker()(metaname, needed_metas, {})
    memoized_metaclasses_map[needed_metas] = meta
    return meta
"def",
"get_noconflict_metaclass",
"(",
"bases",
",",
"left_metas",
",",
"right_metas",
")",
":",
"# make tuple of needed metaclasses in specified priority order",
"metas",
"=",
"left_metas",
"+",
"tuple",
"(",
"map",
"(",
"type",
",",
"bases",
")",
")",
"+",
"right_metas",
"needed_metas",
"=",
"remove_redundant",
"(",
"metas",
")",
"# return existing confict-solving meta, if any",
"if",
"needed_metas",
"in",
"memoized_metaclasses_map",
":",
"return",
"memoized_metaclasses_map",
"[",
"needed_metas",
"]",
"# nope: compute, memoize and return needed conflict-solving meta",
"elif",
"not",
"needed_metas",
":",
"# wee, a trivial case, happy us",
"meta",
"=",
"type",
"elif",
"len",
"(",
"needed_metas",
")",
"==",
"1",
":",
"# another trivial case",
"meta",
"=",
"needed_metas",
"[",
"0",
"]",
"# check for recursion, can happen i.e. for Zope ExtensionClasses",
"elif",
"needed_metas",
"==",
"bases",
":",
"raise",
"TypeError",
"(",
"\"Incompatible root metatypes\"",
",",
"needed_metas",
")",
"else",
":",
"# gotta work ...",
"metaname",
"=",
"'_'",
"+",
"''",
".",
"join",
"(",
"[",
"m",
".",
"__name__",
"for",
"m",
"in",
"needed_metas",
"]",
")",
"meta",
"=",
"classmaker",
"(",
")",
"(",
"metaname",
",",
"needed_metas",
",",
"{",
"}",
")",
"memoized_metaclasses_map",
"[",
"needed_metas",
"]",
"=",
"meta",
"return",
"meta"
] | 45.958333 | 18.541667 |
def get_wd_search_results(search_string='', mediawiki_api_url='https://www.wikidata.org/w/api.php',
                          user_agent=config['USER_AGENT_DEFAULT'],
                          max_results=500, language='en'):
    """
    Performs a search in WD for a certain WD search string

    :param search_string: a string which should be searched for in WD
    :type search_string: str
    :param mediawiki_api_url: Specify the mediawiki_api_url.
    :type mediawiki_api_url: str
    :param user_agent: The user agent string transmitted in the http header
    :type user_agent: str
    :param max_results: The maximum number of search results returned. Default 500
    :type max_results: int
    :param language: The language in which to perform the search. Default 'en'
    :type language: str
    :return: returns a list of QIDs found in the search
    :raises WDSearchError: if the API reports the search as unsuccessful
    """
    params = {
        'action': 'wbsearchentities',
        'language': language,
        'search': search_string,
        'format': 'json',
        'limit': 50  # API page size; pagination is driven by 'search-continue'
    }
    headers = {
        'User-Agent': user_agent
    }
    cont_count = 1
    id_list = []
    while cont_count > 0:
        params['continue'] = 0 if cont_count == 1 else cont_count
        reply = requests.get(mediawiki_api_url, params=params, headers=headers)
        reply.raise_for_status()
        search_results = reply.json()
        if search_results['success'] != 1:
            raise WDSearchError('WD search failed')
        # NOTE(review): labels (i['label']) were previously collected into a
        # second list that was never returned; dropped as dead code, and the
        # docstring no longer claims a label list is returned.
        for i in search_results['search']:
            id_list.append(i['id'])
        # Continue paging until the API stops returning a continuation offset
        # or we exceed the caller's result cap.
        cont_count = search_results.get('search-continue', 0)
        if cont_count > max_results:
            break
    return id_list
"def",
"get_wd_search_results",
"(",
"search_string",
"=",
"''",
",",
"mediawiki_api_url",
"=",
"'https://www.wikidata.org/w/api.php'",
",",
"user_agent",
"=",
"config",
"[",
"'USER_AGENT_DEFAULT'",
"]",
",",
"max_results",
"=",
"500",
",",
"language",
"=",
"'en'",
")",
":",
"params",
"=",
"{",
"'action'",
":",
"'wbsearchentities'",
",",
"'language'",
":",
"language",
",",
"'search'",
":",
"search_string",
",",
"'format'",
":",
"'json'",
",",
"'limit'",
":",
"50",
"}",
"headers",
"=",
"{",
"'User-Agent'",
":",
"user_agent",
"}",
"cont_count",
"=",
"1",
"id_list",
"=",
"[",
"]",
"id_labels",
"=",
"[",
"]",
"while",
"cont_count",
">",
"0",
":",
"params",
".",
"update",
"(",
"{",
"'continue'",
":",
"0",
"if",
"cont_count",
"==",
"1",
"else",
"cont_count",
"}",
")",
"reply",
"=",
"requests",
".",
"get",
"(",
"mediawiki_api_url",
",",
"params",
"=",
"params",
",",
"headers",
"=",
"headers",
")",
"reply",
".",
"raise_for_status",
"(",
")",
"search_results",
"=",
"reply",
".",
"json",
"(",
")",
"if",
"search_results",
"[",
"'success'",
"]",
"!=",
"1",
":",
"raise",
"WDSearchError",
"(",
"'WD search failed'",
")",
"else",
":",
"for",
"i",
"in",
"search_results",
"[",
"'search'",
"]",
":",
"id_list",
".",
"append",
"(",
"i",
"[",
"'id'",
"]",
")",
"id_labels",
".",
"append",
"(",
"i",
"[",
"'label'",
"]",
")",
"if",
"'search-continue'",
"not",
"in",
"search_results",
":",
"cont_count",
"=",
"0",
"else",
":",
"cont_count",
"=",
"search_results",
"[",
"'search-continue'",
"]",
"if",
"cont_count",
">",
"max_results",
":",
"break",
"return",
"id_list"
] | 36.571429 | 21.821429 |
def to_dict(self, **kwargs):
    """
    Serialize the search into the dictionary that will be sent over as the
    request's body.

    All additional keyword arguments will be included into the dictionary.
    """
    body = {}
    if self.query:
        body["query"] = self.query.to_dict()
    if self._script:
        body['script'] = self._script
    body.update(self._extra)
    body.update(kwargs)
    return body
"def",
"to_dict",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"d",
"=",
"{",
"}",
"if",
"self",
".",
"query",
":",
"d",
"[",
"\"query\"",
"]",
"=",
"self",
".",
"query",
".",
"to_dict",
"(",
")",
"if",
"self",
".",
"_script",
":",
"d",
"[",
"'script'",
"]",
"=",
"self",
".",
"_script",
"d",
".",
"update",
"(",
"self",
".",
"_extra",
")",
"d",
".",
"update",
"(",
"kwargs",
")",
"return",
"d"
] | 24.611111 | 21.166667 |
def broadcast(self, channel, event, data):
    """Send *event* with *data* to every socket subscribed to *channel*."""
    payload = self._server.serialize_event(event, data)
    for sid in self.subscriptions.get(channel, ()):
        entry = self._server.sockets.get(sid)
        if entry is None:
            # Subscriber disappeared since subscribing; skip it.
            continue
        entry.socket.send(payload)
"def",
"broadcast",
"(",
"self",
",",
"channel",
",",
"event",
",",
"data",
")",
":",
"payload",
"=",
"self",
".",
"_server",
".",
"serialize_event",
"(",
"event",
",",
"data",
")",
"for",
"socket_id",
"in",
"self",
".",
"subscriptions",
".",
"get",
"(",
"channel",
",",
"(",
")",
")",
":",
"rv",
"=",
"self",
".",
"_server",
".",
"sockets",
".",
"get",
"(",
"socket_id",
")",
"if",
"rv",
"is",
"not",
"None",
":",
"rv",
".",
"socket",
".",
"send",
"(",
"payload",
")"
] | 50.714286 | 9.285714 |
def work(self):
    """
    Start ternya work.

    First, import customer's service modules.
    Second, init openstack mq.
    Third, keep a ternya connection that can auto-reconnect.
    """
    self.init_modules()
    mq_connection = self.init_mq()
    ternya_conn = TernyaConnection(self, mq_connection)
    ternya_conn.connect()
"def",
"work",
"(",
"self",
")",
":",
"self",
".",
"init_modules",
"(",
")",
"connection",
"=",
"self",
".",
"init_mq",
"(",
")",
"TernyaConnection",
"(",
"self",
",",
"connection",
")",
".",
"connect",
"(",
")"
] | 29.454545 | 13.454545 |
def detrended_price_oscillator(data, period):
    """
    Detrended Price Oscillator.

    Formula:
    DPO = DATA[i] - Avg(DATA[period/2 + 1])
    """
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    # Each value is the price minus the mean of the trailing window of
    # period/2 + 1 samples ending at that index.
    window = int(period / 2) + 1
    dop = []
    for idx in range(period - 1, len(data)):
        dop.append(data[idx] - np.mean(data[idx + 1 - window:idx + 1]))
    dop = fill_for_noncomputable_vals(data, dop)
    return dop
"def",
"detrended_price_oscillator",
"(",
"data",
",",
"period",
")",
":",
"catch_errors",
".",
"check_for_period_error",
"(",
"data",
",",
"period",
")",
"period",
"=",
"int",
"(",
"period",
")",
"dop",
"=",
"[",
"data",
"[",
"idx",
"]",
"-",
"np",
".",
"mean",
"(",
"data",
"[",
"idx",
"+",
"1",
"-",
"(",
"int",
"(",
"period",
"/",
"2",
")",
"+",
"1",
")",
":",
"idx",
"+",
"1",
"]",
")",
"for",
"idx",
"in",
"range",
"(",
"period",
"-",
"1",
",",
"len",
"(",
"data",
")",
")",
"]",
"dop",
"=",
"fill_for_noncomputable_vals",
"(",
"data",
",",
"dop",
")",
"return",
"dop"
] | 32.5 | 17.833333 |
def liste_parametres(self, parametre=None):
    """List the measurement parameters.

    Parameters:
        parametre: when given, return only the entry for that parameter.
    """
    condition = ""
    if parametre:
        # NOTE(review): value is interpolated directly into the SQL text;
        # acceptable for trusted callers but not injection-safe.
        condition = "WHERE CCHIM='%s'" % parametre
    _sql = """SELECT CCHIM AS PARAMETRE,
    NCON AS LIBELLE,
    NOPOL AS CODE
    FROM NOM_MESURE %s ORDER BY CCHIM""" % condition
    return psql.read_sql(_sql, self.conn)
"def",
"liste_parametres",
"(",
"self",
",",
"parametre",
"=",
"None",
")",
":",
"condition",
"=",
"\"\"",
"if",
"parametre",
":",
"condition",
"=",
"\"WHERE CCHIM='%s'\"",
"%",
"parametre",
"_sql",
"=",
"\"\"\"SELECT CCHIM AS PARAMETRE,\n NCON AS LIBELLE,\n NOPOL AS CODE\n FROM NOM_MESURE %s ORDER BY CCHIM\"\"\"",
"%",
"condition",
"return",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")"
] | 29.6875 | 15.1875 |
def write_pdb(self, custom_name='', out_suffix='', out_dir=None, custom_selection=None, force_rerun=False):
    """Write a new PDB file for the Structure's FIRST MODEL.

    Set custom_selection to a PDB.Select class for custom SMCRA selections.

    Args:
        custom_name: Filename of the new file (without extension)
        out_suffix: Optional string to append to new PDB file
        out_dir: Optional directory to output the file
        custom_selection: Optional custom selection class
        force_rerun: If existing file should be overwritten

    Returns:
        out_file: filepath of new PDB file

    Raises:
        TypeError: re-raised when the structure cannot be serialized in PDB
            format (e.g. structures only representable as mmCIF).
    """
    if not custom_selection:
        # Default to saving only model 0 (the "first model").
        custom_selection = ModelSelection([0])
    # If no output directory, custom name, or suffix is specified, add a suffix "_new"
    # NOTE(review): the condition checks out_dir/custom_name but only sets the
    # suffix — confirm this matches the intended naming policy.
    if not out_dir or not custom_name:
        if not out_suffix:
            out_suffix = '_new'
    # Prepare the output file path
    outfile = ssbio.utils.outfile_maker(inname=self.structure_file,
                                        outname=custom_name,
                                        append_to_name=out_suffix,
                                        outdir=out_dir,
                                        outext='.pdb')
    try:
        # Skip the write when the file already exists, unless forced.
        if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
            self.save(outfile, custom_selection)
    except TypeError as e:
        # If trying to save something that can't be saved as a PDB (example: 5iqr.cif), log an error and return None
        # The error thrown by PDBIO.py is "TypeError: %c requires int or char"
        log.error('{}: unable to save structure in PDB file format'.format(self.structure_file))
        raise TypeError(e)
    return outfile
"def",
"write_pdb",
"(",
"self",
",",
"custom_name",
"=",
"''",
",",
"out_suffix",
"=",
"''",
",",
"out_dir",
"=",
"None",
",",
"custom_selection",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"not",
"custom_selection",
":",
"custom_selection",
"=",
"ModelSelection",
"(",
"[",
"0",
"]",
")",
"# If no output directory, custom name, or suffix is specified, add a suffix \"_new\"",
"if",
"not",
"out_dir",
"or",
"not",
"custom_name",
":",
"if",
"not",
"out_suffix",
":",
"out_suffix",
"=",
"'_new'",
"# Prepare the output file path",
"outfile",
"=",
"ssbio",
".",
"utils",
".",
"outfile_maker",
"(",
"inname",
"=",
"self",
".",
"structure_file",
",",
"outname",
"=",
"custom_name",
",",
"append_to_name",
"=",
"out_suffix",
",",
"outdir",
"=",
"out_dir",
",",
"outext",
"=",
"'.pdb'",
")",
"try",
":",
"if",
"ssbio",
".",
"utils",
".",
"force_rerun",
"(",
"flag",
"=",
"force_rerun",
",",
"outfile",
"=",
"outfile",
")",
":",
"self",
".",
"save",
"(",
"outfile",
",",
"custom_selection",
")",
"except",
"TypeError",
"as",
"e",
":",
"# If trying to save something that can't be saved as a PDB (example: 5iqr.cif), log an error and return None",
"# The error thrown by PDBIO.py is \"TypeError: %c requires int or char\"",
"log",
".",
"error",
"(",
"'{}: unable to save structure in PDB file format'",
".",
"format",
"(",
"self",
".",
"structure_file",
")",
")",
"raise",
"TypeError",
"(",
"e",
")",
"return",
"outfile"
] | 45.325 | 26.55 |
def _establish_authenticated_session(self, kik_node):
    """
    Updates the kik node and creates a new connection to kik servers.

    This new connection will be initiated with another payload which proves
    we have the credentials for a specific user. This is how authentication is done.

    :param kik_node: The user's kik node (everything before '@' in JID).
    """
    # Remember the node so the reconnect handshake can authenticate as it.
    self.kik_node = kik_node
    log.info("[+] Closing current connection and creating a new authenticated one.")
    self.disconnect()
    self._connect()
"def",
"_establish_authenticated_session",
"(",
"self",
",",
"kik_node",
")",
":",
"self",
".",
"kik_node",
"=",
"kik_node",
"log",
".",
"info",
"(",
"\"[+] Closing current connection and creating a new authenticated one.\"",
")",
"self",
".",
"disconnect",
"(",
")",
"self",
".",
"_connect",
"(",
")"
] | 43 | 25.923077 |
def __update_binding(
    self, dependency, service, reference, old_properties, new_value
):
    # type: (Any, Any, ServiceReference, dict, bool) -> None
    """
    Calls back component binding and field binding methods when the
    properties of an injected dependency have been updated.

    :param dependency: The dependency handler
    :param service: The injected service
    :param reference: The reference of the injected service
    :param old_properties: Previous properties of the dependency
    :param new_value: If True, inject the new value of the handler
    """
    if new_value:
        # Re-inject the dependency's current value into the component field
        setattr(
            self.instance, dependency.get_field(), dependency.get_value()
        )
    # Notify the field-level callback first, then the component-level one
    self.__safe_field_callback(
        dependency.get_field(),
        constants.IPOPO_CALLBACK_UPDATE_FIELD,
        service,
        reference,
        old_properties,
    )
    self.safe_callback(
        constants.IPOPO_CALLBACK_UPDATE, service, reference, old_properties
    )
"def",
"__update_binding",
"(",
"self",
",",
"dependency",
",",
"service",
",",
"reference",
",",
"old_properties",
",",
"new_value",
")",
":",
"# type: (Any, Any, ServiceReference, dict, bool) -> None",
"if",
"new_value",
":",
"# Set the value",
"setattr",
"(",
"self",
".",
"instance",
",",
"dependency",
".",
"get_field",
"(",
")",
",",
"dependency",
".",
"get_value",
"(",
")",
")",
"# Call the component back",
"self",
".",
"__safe_field_callback",
"(",
"dependency",
".",
"get_field",
"(",
")",
",",
"constants",
".",
"IPOPO_CALLBACK_UPDATE_FIELD",
",",
"service",
",",
"reference",
",",
"old_properties",
",",
")",
"self",
".",
"safe_callback",
"(",
"constants",
".",
"IPOPO_CALLBACK_UPDATE",
",",
"service",
",",
"reference",
",",
"old_properties",
")"
] | 34.875 | 21.375 |
def _column_width(self, index=None, name=None, max_width=300, **kwargs):
"""
:param index: int of the column index
:param name: str of the name of the column
:param max_width: int of the max size of characters in the width
:return: int of the width of this column
"""
assert name is not None or index is not None
if name and name not in self._column_index:
return min(max_width, name)
if index is not None:
name = self.columns[index]
else:
index = self._column_index[name]
values_width = [len(name)]
if isinstance(self._parameters.get(name, None), list):
values_width += [len(self._safe_str(p, **kwargs))
for p in self._parameters[name]]
values_width += [len(self._safe_str(row[index], **kwargs))
for row in self.table]
ret = max(values_width)
return min(max_width, ret) if max_width else ret | [
"def",
"_column_width",
"(",
"self",
",",
"index",
"=",
"None",
",",
"name",
"=",
"None",
",",
"max_width",
"=",
"300",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"name",
"is",
"not",
"None",
"or",
"index",
"is",
"not",
"None",
"if",
"name",
"and",
"name",
"not",
"in",
"self",
".",
"_column_index",
":",
"return",
"min",
"(",
"max_width",
",",
"name",
")",
"if",
"index",
"is",
"not",
"None",
":",
"name",
"=",
"self",
".",
"columns",
"[",
"index",
"]",
"else",
":",
"index",
"=",
"self",
".",
"_column_index",
"[",
"name",
"]",
"values_width",
"=",
"[",
"len",
"(",
"name",
")",
"]",
"if",
"isinstance",
"(",
"self",
".",
"_parameters",
".",
"get",
"(",
"name",
",",
"None",
")",
",",
"list",
")",
":",
"values_width",
"+=",
"[",
"len",
"(",
"self",
".",
"_safe_str",
"(",
"p",
",",
"*",
"*",
"kwargs",
")",
")",
"for",
"p",
"in",
"self",
".",
"_parameters",
"[",
"name",
"]",
"]",
"values_width",
"+=",
"[",
"len",
"(",
"self",
".",
"_safe_str",
"(",
"row",
"[",
"index",
"]",
",",
"*",
"*",
"kwargs",
")",
")",
"for",
"row",
"in",
"self",
".",
"table",
"]",
"ret",
"=",
"max",
"(",
"values_width",
")",
"return",
"min",
"(",
"max_width",
",",
"ret",
")",
"if",
"max_width",
"else",
"ret"
] | 38.192308 | 17.038462 |
def normalize_tuple_slice(node):
    """
    Normalize an ast.Tuple node representing the internals of a slice.

    Returns the node wrapped in an ast.Index when no element is a slice;
    otherwise returns an ast.ExtSlice built from the tuple elements, with
    every non-Slice element wrapped in an ast.Index.
    """
    has_slice = any(isinstance(elt, ast.Slice) for elt in node.elts)
    if not has_slice:
        return ast.Index(value=node)
    dims = []
    for elt in node.elts:
        # Slice elements pass through; everything else becomes an Index.
        if isinstance(elt, ast.Slice):
            dims.append(elt)
        else:
            dims.append(ast.Index(value=elt))
    return ast.ExtSlice(dims)
"def",
"normalize_tuple_slice",
"(",
"node",
")",
":",
"if",
"not",
"any",
"(",
"isinstance",
"(",
"elt",
",",
"ast",
".",
"Slice",
")",
"for",
"elt",
"in",
"node",
".",
"elts",
")",
":",
"return",
"ast",
".",
"Index",
"(",
"value",
"=",
"node",
")",
"return",
"ast",
".",
"ExtSlice",
"(",
"[",
"# Wrap non-Slice nodes in Index nodes.",
"elt",
"if",
"isinstance",
"(",
"elt",
",",
"ast",
".",
"Slice",
")",
"else",
"ast",
".",
"Index",
"(",
"value",
"=",
"elt",
")",
"for",
"elt",
"in",
"node",
".",
"elts",
"]",
")"
] | 30.388889 | 20.944444 |
def list_memberships(self, subject_descriptor, direction=None, depth=None):
"""ListMemberships.
[Preview API] Get all the memberships where this descriptor is a member in the relationship.
:param str subject_descriptor: Fetch all direct memberships of this descriptor.
:param str direction: Defaults to Up.
:param int depth: The maximum number of edges to traverse up or down the membership tree. Currently the only supported value is '1'.
:rtype: [GraphMembership]
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
query_parameters = {}
if direction is not None:
query_parameters['direction'] = self._serialize.query('direction', direction, 'str')
if depth is not None:
query_parameters['depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='e34b6394-6b30-4435-94a9-409a5eef3e31',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GraphMembership]', self._unwrap_collection(response)) | [
"def",
"list_memberships",
"(",
"self",
",",
"subject_descriptor",
",",
"direction",
"=",
"None",
",",
"depth",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"subject_descriptor",
"is",
"not",
"None",
":",
"route_values",
"[",
"'subjectDescriptor'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'subject_descriptor'",
",",
"subject_descriptor",
",",
"'str'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"direction",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'direction'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'direction'",
",",
"direction",
",",
"'str'",
")",
"if",
"depth",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'depth'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'depth'",
",",
"depth",
",",
"'int'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'e34b6394-6b30-4435-94a9-409a5eef3e31'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[GraphMembership]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] | 61.954545 | 28.5 |
def headerHTML(header,fname):
"""given the bytestring ABF header, make and launch HTML."""
html="<html><body><code>"
html+="<h2>%s</h2>"%(fname)
html+=pprint.pformat(header, indent=1)
html=html.replace("\n",'<br>').replace(" "," ")
html=html.replace(r"\x00","")
html+="</code></body></html>"
print("saving header file:",fname)
f=open(fname,'w')
f.write(html)
f.close()
webbrowser.open(fname) | [
"def",
"headerHTML",
"(",
"header",
",",
"fname",
")",
":",
"html",
"=",
"\"<html><body><code>\"",
"html",
"+=",
"\"<h2>%s</h2>\"",
"%",
"(",
"fname",
")",
"html",
"+=",
"pprint",
".",
"pformat",
"(",
"header",
",",
"indent",
"=",
"1",
")",
"html",
"=",
"html",
".",
"replace",
"(",
"\"\\n\"",
",",
"'<br>'",
")",
".",
"replace",
"(",
"\" \"",
",",
"\" \"",
")",
"html",
"=",
"html",
".",
"replace",
"(",
"r\"\\x00\"",
",",
"\"\"",
")",
"html",
"+=",
"\"</code></body></html>\"",
"print",
"(",
"\"saving header file:\"",
",",
"fname",
")",
"f",
"=",
"open",
"(",
"fname",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"html",
")",
"f",
".",
"close",
"(",
")",
"webbrowser",
".",
"open",
"(",
"fname",
")"
] | 36.923077 | 9.538462 |
def getParam(xmlelement):
"""Converts an mzML xml element to a param tuple.
:param xmlelement: #TODO docstring
:returns: a param tuple or False if the xmlelement is not a parameter
('userParam', 'cvParam' or 'referenceableParamGroupRef')
"""
elementTag = clearTag(xmlelement.tag)
if elementTag in ['userParam', 'cvParam', 'referenceableParamGroupRef']:
if elementTag == 'cvParam':
param = cvParamFromDict(xmlelement.attrib)
elif elementTag == 'userParam':
param = userParamFromDict(xmlelement.attrib)
else:
param = refParamGroupFromDict(xmlelement.attrib)
else:
param = False
return param | [
"def",
"getParam",
"(",
"xmlelement",
")",
":",
"elementTag",
"=",
"clearTag",
"(",
"xmlelement",
".",
"tag",
")",
"if",
"elementTag",
"in",
"[",
"'userParam'",
",",
"'cvParam'",
",",
"'referenceableParamGroupRef'",
"]",
":",
"if",
"elementTag",
"==",
"'cvParam'",
":",
"param",
"=",
"cvParamFromDict",
"(",
"xmlelement",
".",
"attrib",
")",
"elif",
"elementTag",
"==",
"'userParam'",
":",
"param",
"=",
"userParamFromDict",
"(",
"xmlelement",
".",
"attrib",
")",
"else",
":",
"param",
"=",
"refParamGroupFromDict",
"(",
"xmlelement",
".",
"attrib",
")",
"else",
":",
"param",
"=",
"False",
"return",
"param"
] | 35.789474 | 18.315789 |
def _plot_thermo(self, func, temperatures, factor=1, ax=None, ylabel=None, label=None, ylim=None, **kwargs):
"""
Plots a thermodynamic property for a generic function from a PhononDos instance.
Args:
func: the thermodynamic function to be used to calculate the property
temperatures: a list of temperatures
factor: a multiplicative factor applied to the thermodynamic property calculated. Used to change
the units.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
ylabel: label for the y axis
label: label of the plot
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
values = []
for t in temperatures:
values.append(func(t, structure=self.structure) * factor)
ax.plot(temperatures, values, label=label, **kwargs)
if ylim:
ax.set_ylim(ylim)
ax.set_xlim((np.min(temperatures), np.max(temperatures)))
ylim = plt.ylim()
if ylim[0] < 0 < ylim[1]:
plt.plot(plt.xlim(), [0, 0], 'k-', linewidth=1)
ax.set_xlabel(r"$T$ (K)")
if ylabel:
ax.set_ylabel(ylabel)
return fig | [
"def",
"_plot_thermo",
"(",
"self",
",",
"func",
",",
"temperatures",
",",
"factor",
"=",
"1",
",",
"ax",
"=",
"None",
",",
"ylabel",
"=",
"None",
",",
"label",
"=",
"None",
",",
"ylim",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
",",
"fig",
",",
"plt",
"=",
"get_ax_fig_plt",
"(",
"ax",
")",
"values",
"=",
"[",
"]",
"for",
"t",
"in",
"temperatures",
":",
"values",
".",
"append",
"(",
"func",
"(",
"t",
",",
"structure",
"=",
"self",
".",
"structure",
")",
"*",
"factor",
")",
"ax",
".",
"plot",
"(",
"temperatures",
",",
"values",
",",
"label",
"=",
"label",
",",
"*",
"*",
"kwargs",
")",
"if",
"ylim",
":",
"ax",
".",
"set_ylim",
"(",
"ylim",
")",
"ax",
".",
"set_xlim",
"(",
"(",
"np",
".",
"min",
"(",
"temperatures",
")",
",",
"np",
".",
"max",
"(",
"temperatures",
")",
")",
")",
"ylim",
"=",
"plt",
".",
"ylim",
"(",
")",
"if",
"ylim",
"[",
"0",
"]",
"<",
"0",
"<",
"ylim",
"[",
"1",
"]",
":",
"plt",
".",
"plot",
"(",
"plt",
".",
"xlim",
"(",
")",
",",
"[",
"0",
",",
"0",
"]",
",",
"'k-'",
",",
"linewidth",
"=",
"1",
")",
"ax",
".",
"set_xlabel",
"(",
"r\"$T$ (K)\"",
")",
"if",
"ylabel",
":",
"ax",
".",
"set_ylabel",
"(",
"ylabel",
")",
"return",
"fig"
] | 34.175 | 24.925 |
def check_vtech(text):
"""Suggest the correct name.
source: Virginia Tech Division of Student Affairs
source_url: http://bit.ly/2en1zbv
"""
err = "institution.vtech"
msg = "Incorrect name. Use '{}' instead of '{}'."
institution = [
["Virginia Polytechnic Institute and State University",
["Virginia Polytechnic and State University"]],
]
return preferred_forms_check(text, institution, err, msg) | [
"def",
"check_vtech",
"(",
"text",
")",
":",
"err",
"=",
"\"institution.vtech\"",
"msg",
"=",
"\"Incorrect name. Use '{}' instead of '{}'.\"",
"institution",
"=",
"[",
"[",
"\"Virginia Polytechnic Institute and State University\"",
",",
"[",
"\"Virginia Polytechnic and State University\"",
"]",
"]",
",",
"]",
"return",
"preferred_forms_check",
"(",
"text",
",",
"institution",
",",
"err",
",",
"msg",
")"
] | 31.214286 | 18.142857 |
def esc_ansicolor (color):
"""convert a named color definition to an escaped ANSI color"""
control = ''
if ";" in color:
control, color = color.split(";", 1)
control = AnsiControl.get(control, '')+";"
cnum = AnsiColor.get(color, '0')
return AnsiEsc % (control+cnum) | [
"def",
"esc_ansicolor",
"(",
"color",
")",
":",
"control",
"=",
"''",
"if",
"\";\"",
"in",
"color",
":",
"control",
",",
"color",
"=",
"color",
".",
"split",
"(",
"\";\"",
",",
"1",
")",
"control",
"=",
"AnsiControl",
".",
"get",
"(",
"control",
",",
"''",
")",
"+",
"\";\"",
"cnum",
"=",
"AnsiColor",
".",
"get",
"(",
"color",
",",
"'0'",
")",
"return",
"AnsiEsc",
"%",
"(",
"control",
"+",
"cnum",
")"
] | 36.75 | 10.125 |
def sd(series):
"""
Returns the standard deviation of values in a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.std()
else:
return np.nan | [
"def",
"sd",
"(",
"series",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"series",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"return",
"series",
".",
"std",
"(",
")",
"else",
":",
"return",
"np",
".",
"nan"
] | 20.833333 | 19.5 |
def purge_object(self, pid, log_message=None):
"""
Purge an object from Fedora. Calls :meth:`ApiFacade.purgeObject`.
:param pid: pid of the object to be purged
:param log_message: optional log message
:rtype: boolean
"""
kwargs = {'pid': pid}
if log_message:
kwargs['logMessage'] = log_message
response = self.api.purgeObject(**kwargs)
return response.status_code == requests.codes.ok | [
"def",
"purge_object",
"(",
"self",
",",
"pid",
",",
"log_message",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"'pid'",
":",
"pid",
"}",
"if",
"log_message",
":",
"kwargs",
"[",
"'logMessage'",
"]",
"=",
"log_message",
"response",
"=",
"self",
".",
"api",
".",
"purgeObject",
"(",
"*",
"*",
"kwargs",
")",
"return",
"response",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"ok"
] | 35.846154 | 13.384615 |
def read_plink(file_prefix, verbose=True):
r"""Read PLINK files into Pandas data frames.
Parameters
----------
file_prefix : str
Path prefix to the set of PLINK files. It supports loading many BED
files at once using globstrings wildcard.
verbose : bool
``True`` for progress information; ``False`` otherwise.
Returns
-------
:class:`pandas.DataFrame`
Alleles.
:class:`pandas.DataFrame`
Samples.
:class:`numpy.ndarray`
Genotype.
Examples
--------
We have shipped this package with an example so can load and inspect by
doing
.. doctest::
>>> from pandas_plink import read_plink
>>> from pandas_plink import example_file_prefix
>>> (bim, fam, bed) = read_plink(example_file_prefix(), verbose=False)
>>> print(bim.head()) #doctest: +NORMALIZE_WHITESPACE
chrom snp cm pos a0 a1 i
0 1 rs10399749 0.0 45162 G C 0
1 1 rs2949420 0.0 45257 C T 1
2 1 rs2949421 0.0 45413 0 0 2
3 1 rs2691310 0.0 46844 A T 3
4 1 rs4030303 0.0 72434 0 G 4
>>> print(fam.head()) #doctest: +NORMALIZE_WHITESPACE
fid iid father mother gender trait i
0 Sample_1 Sample_1 0 0 1 -9 0
1 Sample_2 Sample_2 0 0 2 -9 1
2 Sample_3 Sample_3 Sample_1 Sample_2 2 -9 2
>>> print(bed.compute()) #doctest: +NORMALIZE_WHITESPACE
[[ 2. 2. 1.]
[ 2. 1. 2.]
[nan nan nan]
[nan nan 1.]
[ 2. 2. 2.]
[ 2. 2. 2.]
[ 2. 1. 0.]
[ 2. 2. 2.]
[ 1. 2. 2.]
[ 2. 1. 2.]]
The values of the ``bed`` matrix denote how many alleles ``a1`` (see
output of data frame ``bim``) are in the corresponding position and
individual. Notice the column ``i`` in ``bim`` and ``fam`` data frames.
It maps to the corresponding position of the bed matrix:
.. doctest::
>>> chrom1 = bim.query("chrom=='1'")
>>> X = bed[chrom1.i.values, :].compute()
>>> print(X) #doctest: +NORMALIZE_WHITESPACE
[[ 2. 2. 1.]
[ 2. 1. 2.]
[nan nan nan]
[nan nan 1.]
[ 2. 2. 2.]
[ 2. 2. 2.]
[ 2. 1. 0.]
[ 2. 2. 2.]
[ 1. 2. 2.]
[ 2. 1. 2.]]
It also allows the use of the wildcard character ``*`` for mapping
multiple BED files at
once: ``(bim, fam, bed) = read_plink("chrom*")``.
In this case, only one of the FAM files will be used to define
sample information. Data from BIM and BED files are concatenated to
provide a single view of the files.
"""
from dask.array import concatenate
file_prefixes = sorted(glob(file_prefix))
if len(file_prefixes) == 0:
file_prefixes = [file_prefix.replace("*", "")]
file_prefixes = sorted(_clean_prefixes(file_prefixes))
fn = []
for fp in file_prefixes:
fn.append({s: "%s.%s" % (fp, s) for s in ["bed", "bim", "fam"]})
pbar = tqdm(desc="Mapping files", total=3 * len(fn), disable=not verbose)
msg = "Reading bim file(s)..."
bim = _read_file(fn, msg, lambda fn: _read_bim(fn["bim"]), pbar)
if len(file_prefixes) > 1:
if verbose:
msg = "Multiple files read in this order: {}"
print(msg.format([basename(f) for f in file_prefixes]))
nmarkers = dict()
index_offset = 0
for i, bi in enumerate(bim):
nmarkers[fn[i]["bed"]] = bi.shape[0]
bi["i"] += index_offset
index_offset += bi.shape[0]
bim = pd.concat(bim, axis=0, ignore_index=True)
msg = "Reading fam file(s)..."
fam = _read_file([fn[0]], msg, lambda fn: _read_fam(fn["fam"]), pbar)[0]
nsamples = fam.shape[0]
bed = _read_file(
fn,
"Reading bed file(s)...",
lambda fn: _read_bed(fn["bed"], nsamples, nmarkers[fn["bed"]]),
pbar,
)
bed = concatenate(bed, axis=0)
pbar.close()
return (bim, fam, bed) | [
"def",
"read_plink",
"(",
"file_prefix",
",",
"verbose",
"=",
"True",
")",
":",
"from",
"dask",
".",
"array",
"import",
"concatenate",
"file_prefixes",
"=",
"sorted",
"(",
"glob",
"(",
"file_prefix",
")",
")",
"if",
"len",
"(",
"file_prefixes",
")",
"==",
"0",
":",
"file_prefixes",
"=",
"[",
"file_prefix",
".",
"replace",
"(",
"\"*\"",
",",
"\"\"",
")",
"]",
"file_prefixes",
"=",
"sorted",
"(",
"_clean_prefixes",
"(",
"file_prefixes",
")",
")",
"fn",
"=",
"[",
"]",
"for",
"fp",
"in",
"file_prefixes",
":",
"fn",
".",
"append",
"(",
"{",
"s",
":",
"\"%s.%s\"",
"%",
"(",
"fp",
",",
"s",
")",
"for",
"s",
"in",
"[",
"\"bed\"",
",",
"\"bim\"",
",",
"\"fam\"",
"]",
"}",
")",
"pbar",
"=",
"tqdm",
"(",
"desc",
"=",
"\"Mapping files\"",
",",
"total",
"=",
"3",
"*",
"len",
"(",
"fn",
")",
",",
"disable",
"=",
"not",
"verbose",
")",
"msg",
"=",
"\"Reading bim file(s)...\"",
"bim",
"=",
"_read_file",
"(",
"fn",
",",
"msg",
",",
"lambda",
"fn",
":",
"_read_bim",
"(",
"fn",
"[",
"\"bim\"",
"]",
")",
",",
"pbar",
")",
"if",
"len",
"(",
"file_prefixes",
")",
">",
"1",
":",
"if",
"verbose",
":",
"msg",
"=",
"\"Multiple files read in this order: {}\"",
"print",
"(",
"msg",
".",
"format",
"(",
"[",
"basename",
"(",
"f",
")",
"for",
"f",
"in",
"file_prefixes",
"]",
")",
")",
"nmarkers",
"=",
"dict",
"(",
")",
"index_offset",
"=",
"0",
"for",
"i",
",",
"bi",
"in",
"enumerate",
"(",
"bim",
")",
":",
"nmarkers",
"[",
"fn",
"[",
"i",
"]",
"[",
"\"bed\"",
"]",
"]",
"=",
"bi",
".",
"shape",
"[",
"0",
"]",
"bi",
"[",
"\"i\"",
"]",
"+=",
"index_offset",
"index_offset",
"+=",
"bi",
".",
"shape",
"[",
"0",
"]",
"bim",
"=",
"pd",
".",
"concat",
"(",
"bim",
",",
"axis",
"=",
"0",
",",
"ignore_index",
"=",
"True",
")",
"msg",
"=",
"\"Reading fam file(s)...\"",
"fam",
"=",
"_read_file",
"(",
"[",
"fn",
"[",
"0",
"]",
"]",
",",
"msg",
",",
"lambda",
"fn",
":",
"_read_fam",
"(",
"fn",
"[",
"\"fam\"",
"]",
")",
",",
"pbar",
")",
"[",
"0",
"]",
"nsamples",
"=",
"fam",
".",
"shape",
"[",
"0",
"]",
"bed",
"=",
"_read_file",
"(",
"fn",
",",
"\"Reading bed file(s)...\"",
",",
"lambda",
"fn",
":",
"_read_bed",
"(",
"fn",
"[",
"\"bed\"",
"]",
",",
"nsamples",
",",
"nmarkers",
"[",
"fn",
"[",
"\"bed\"",
"]",
"]",
")",
",",
"pbar",
",",
")",
"bed",
"=",
"concatenate",
"(",
"bed",
",",
"axis",
"=",
"0",
")",
"pbar",
".",
"close",
"(",
")",
"return",
"(",
"bim",
",",
"fam",
",",
"bed",
")"
] | 31.818898 | 21.88189 |
def get_services_uids(context=None, analyses_serv=None, values=None):
"""
This function returns a list of UIDs from analyses services from its
parameters.
:param analyses_serv: A list (or one object) of service-related info items.
see _resolve_items_to_service_uids() docstring.
:type analyses_serv: list
:param values: a dict, where keys are AR|Sample schema field names.
:type values: dict
:returns: a list of analyses services UIDs
"""
if not analyses_serv:
analyses_serv = []
if not values:
values = {}
if not context or (not analyses_serv and not values):
raise RuntimeError(
"get_services_uids: Missing or wrong parameters.")
# Merge analyses from analyses_serv and values into one list
analyses_services = analyses_serv + (values.get("Analyses", None) or [])
# It is possible to create analysis requests
# by JSON petitions and services, profiles or types aren't allways send.
# Sometimes we can get analyses and profiles that doesn't match and we
# should act in consequence.
# Getting the analyses profiles
analyses_profiles = values.get('Profiles', [])
if not isinstance(analyses_profiles, (list, tuple)):
# Plone converts the incoming form value to a list, if there are
# multiple values; but if not, it will send a string (a single UID).
analyses_profiles = [analyses_profiles]
if not analyses_services and not analyses_profiles:
return []
# Add analysis services UIDs from profiles to analyses_services variable.
if analyses_profiles:
uid_catalog = getToolByName(context, 'uid_catalog')
for brain in uid_catalog(UID=analyses_profiles):
profile = api.get_object(brain)
# Only services UIDs
services_uids = profile.getRawService()
# _resolve_items_to_service_uids() will remove duplicates
analyses_services += services_uids
return _resolve_items_to_service_uids(analyses_services) | [
"def",
"get_services_uids",
"(",
"context",
"=",
"None",
",",
"analyses_serv",
"=",
"None",
",",
"values",
"=",
"None",
")",
":",
"if",
"not",
"analyses_serv",
":",
"analyses_serv",
"=",
"[",
"]",
"if",
"not",
"values",
":",
"values",
"=",
"{",
"}",
"if",
"not",
"context",
"or",
"(",
"not",
"analyses_serv",
"and",
"not",
"values",
")",
":",
"raise",
"RuntimeError",
"(",
"\"get_services_uids: Missing or wrong parameters.\"",
")",
"# Merge analyses from analyses_serv and values into one list",
"analyses_services",
"=",
"analyses_serv",
"+",
"(",
"values",
".",
"get",
"(",
"\"Analyses\"",
",",
"None",
")",
"or",
"[",
"]",
")",
"# It is possible to create analysis requests",
"# by JSON petitions and services, profiles or types aren't allways send.",
"# Sometimes we can get analyses and profiles that doesn't match and we",
"# should act in consequence.",
"# Getting the analyses profiles",
"analyses_profiles",
"=",
"values",
".",
"get",
"(",
"'Profiles'",
",",
"[",
"]",
")",
"if",
"not",
"isinstance",
"(",
"analyses_profiles",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# Plone converts the incoming form value to a list, if there are",
"# multiple values; but if not, it will send a string (a single UID).",
"analyses_profiles",
"=",
"[",
"analyses_profiles",
"]",
"if",
"not",
"analyses_services",
"and",
"not",
"analyses_profiles",
":",
"return",
"[",
"]",
"# Add analysis services UIDs from profiles to analyses_services variable.",
"if",
"analyses_profiles",
":",
"uid_catalog",
"=",
"getToolByName",
"(",
"context",
",",
"'uid_catalog'",
")",
"for",
"brain",
"in",
"uid_catalog",
"(",
"UID",
"=",
"analyses_profiles",
")",
":",
"profile",
"=",
"api",
".",
"get_object",
"(",
"brain",
")",
"# Only services UIDs",
"services_uids",
"=",
"profile",
".",
"getRawService",
"(",
")",
"# _resolve_items_to_service_uids() will remove duplicates",
"analyses_services",
"+=",
"services_uids",
"return",
"_resolve_items_to_service_uids",
"(",
"analyses_services",
")"
] | 41.708333 | 21.333333 |
def ranking_metric(df, method, pos, neg, classes, ascending):
"""The main function to rank an expression table.
:param df: gene_expression DataFrame.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param bool ascending: bool or list of bool. Sort ascending vs. descending.
:return:
returns a pd.Series of correlation to class of each variable. Gene_name is index, and value is rankings.
visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
"""
# exclude any zero stds.
df_mean = df.groupby(by=classes, axis=1).mean()
df_std = df.groupby(by=classes, axis=1).std()
if method == 'signal_to_noise':
ser = (df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])
elif method == 't_test':
ser = (df_mean[pos] - df_mean[neg])/ np.sqrt(df_std[pos]**2/len(df_std)+df_std[neg]**2/len(df_std) )
elif method == 'ratio_of_classes':
ser = df_mean[pos] / df_mean[neg]
elif method == 'diff_of_classes':
ser = df_mean[pos] - df_mean[neg]
elif method == 'log2_ratio_of_classes':
ser = np.log2(df_mean[pos] / df_mean[neg])
else:
logging.error("Please provide correct method name!!!")
sys.exit(0)
ser = ser.sort_values(ascending=ascending)
return ser | [
"def",
"ranking_metric",
"(",
"df",
",",
"method",
",",
"pos",
",",
"neg",
",",
"classes",
",",
"ascending",
")",
":",
"# exclude any zero stds.",
"df_mean",
"=",
"df",
".",
"groupby",
"(",
"by",
"=",
"classes",
",",
"axis",
"=",
"1",
")",
".",
"mean",
"(",
")",
"df_std",
"=",
"df",
".",
"groupby",
"(",
"by",
"=",
"classes",
",",
"axis",
"=",
"1",
")",
".",
"std",
"(",
")",
"if",
"method",
"==",
"'signal_to_noise'",
":",
"ser",
"=",
"(",
"df_mean",
"[",
"pos",
"]",
"-",
"df_mean",
"[",
"neg",
"]",
")",
"/",
"(",
"df_std",
"[",
"pos",
"]",
"+",
"df_std",
"[",
"neg",
"]",
")",
"elif",
"method",
"==",
"'t_test'",
":",
"ser",
"=",
"(",
"df_mean",
"[",
"pos",
"]",
"-",
"df_mean",
"[",
"neg",
"]",
")",
"/",
"np",
".",
"sqrt",
"(",
"df_std",
"[",
"pos",
"]",
"**",
"2",
"/",
"len",
"(",
"df_std",
")",
"+",
"df_std",
"[",
"neg",
"]",
"**",
"2",
"/",
"len",
"(",
"df_std",
")",
")",
"elif",
"method",
"==",
"'ratio_of_classes'",
":",
"ser",
"=",
"df_mean",
"[",
"pos",
"]",
"/",
"df_mean",
"[",
"neg",
"]",
"elif",
"method",
"==",
"'diff_of_classes'",
":",
"ser",
"=",
"df_mean",
"[",
"pos",
"]",
"-",
"df_mean",
"[",
"neg",
"]",
"elif",
"method",
"==",
"'log2_ratio_of_classes'",
":",
"ser",
"=",
"np",
".",
"log2",
"(",
"df_mean",
"[",
"pos",
"]",
"/",
"df_mean",
"[",
"neg",
"]",
")",
"else",
":",
"logging",
".",
"error",
"(",
"\"Please provide correct method name!!!\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"ser",
"=",
"ser",
".",
"sort_values",
"(",
"ascending",
"=",
"ascending",
")",
"return",
"ser"
] | 47.333333 | 35 |
def compute_samples(self):
""" Sample from a Normal distribution with inferred mu and std """
eps = tf.random_normal([self.batch_size, self.eq_samples, self.iw_samples, self.num_latent])
z = tf.reshape(eps * self.std + self.mu, [-1, self.num_latent])
return z | [
"def",
"compute_samples",
"(",
"self",
")",
":",
"eps",
"=",
"tf",
".",
"random_normal",
"(",
"[",
"self",
".",
"batch_size",
",",
"self",
".",
"eq_samples",
",",
"self",
".",
"iw_samples",
",",
"self",
".",
"num_latent",
"]",
")",
"z",
"=",
"tf",
".",
"reshape",
"(",
"eps",
"*",
"self",
".",
"std",
"+",
"self",
".",
"mu",
",",
"[",
"-",
"1",
",",
"self",
".",
"num_latent",
"]",
")",
"return",
"z"
] | 57.4 | 25.8 |
def _get(self, text):
"""
Analyze the text to get the right function
Parameters
----------
text : str
The text that could call a function
"""
if self.strict:
match = self.prog.match(text)
if match:
cmd = match.group()
if cmd in self:
return cmd
else:
words = self.prog.findall(text)
for word in words:
if word in self:
return word | [
"def",
"_get",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"strict",
":",
"match",
"=",
"self",
".",
"prog",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"cmd",
"=",
"match",
".",
"group",
"(",
")",
"if",
"cmd",
"in",
"self",
":",
"return",
"cmd",
"else",
":",
"words",
"=",
"self",
".",
"prog",
".",
"findall",
"(",
"text",
")",
"for",
"word",
"in",
"words",
":",
"if",
"word",
"in",
"self",
":",
"return",
"word"
] | 26.4 | 13.2 |
def context(name=None):
"""Declare that a class defines a context.
Contexts are for use with HierarchicalShell for discovering
and using functionality from the command line.
Args:
name (str): Optional name for this context if you don't want
to just use the class name.
"""
def _context(cls):
annotated(cls, name)
cls.context = True
return cls
return _context | [
"def",
"context",
"(",
"name",
"=",
"None",
")",
":",
"def",
"_context",
"(",
"cls",
")",
":",
"annotated",
"(",
"cls",
",",
"name",
")",
"cls",
".",
"context",
"=",
"True",
"return",
"cls",
"return",
"_context"
] | 23.222222 | 22.055556 |
def data(self):
"""
Get the content for the response (lazily decoded).
"""
if self._data is None:
self._data = self.response.content.decode("utf-8")
return self._data | [
"def",
"data",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data",
"is",
"None",
":",
"self",
".",
"_data",
"=",
"self",
".",
"response",
".",
"content",
".",
"decode",
"(",
"\"utf-8\"",
")",
"return",
"self",
".",
"_data"
] | 30.285714 | 12.857143 |
def absent(name, region=None, key=None, keyid=None, profile=None):
'''
Ensure a pipeline with the service_name does not exist
name
Name of the service to ensure a data pipeline does not exist for.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
result_pipeline_id = __salt__['boto_datapipeline.pipeline_id_from_name'](
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' not in result_pipeline_id:
pipeline_id = result_pipeline_id['result']
if __opts__['test']:
ret['comment'] = 'Data pipeline {0} set to be deleted.'.format(name)
ret['result'] = None
return ret
else:
__salt__['boto_datapipeline.delete_pipeline'](
pipeline_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
ret['changes']['old'] = {'pipeline_id': pipeline_id}
ret['changes']['new'] = None
else:
ret['comment'] = 'AWS data pipeline {0} absent.'.format(name)
return ret | [
"def",
"absent",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"result_pipeline_id",
"=",
"__salt__",
"[",
"'boto_datapipeline.pipeline_id_from_name'",
"]",
"(",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
")",
"if",
"'error'",
"not",
"in",
"result_pipeline_id",
":",
"pipeline_id",
"=",
"result_pipeline_id",
"[",
"'result'",
"]",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Data pipeline {0} set to be deleted.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"else",
":",
"__salt__",
"[",
"'boto_datapipeline.delete_pipeline'",
"]",
"(",
"pipeline_id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"{",
"'pipeline_id'",
":",
"pipeline_id",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"None",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'AWS data pipeline {0} absent.'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
] | 28.367347 | 24.244898 |
def workspace_from_nothing(self, directory, mets_basename='mets.xml', clobber_mets=False):
"""
Create an empty workspace.
"""
if directory is None:
directory = tempfile.mkdtemp(prefix=TMP_PREFIX)
if not exists(directory):
makedirs(directory)
mets_fpath = join(directory, mets_basename)
if not clobber_mets and exists(mets_fpath):
raise Exception("Not clobbering existing mets.xml in '%s'." % directory)
mets = OcrdMets.empty_mets()
with open(mets_fpath, 'wb') as fmets:
log.info("Writing %s", mets_fpath)
fmets.write(mets.to_xml(xmllint=True))
return Workspace(self, directory, mets) | [
"def",
"workspace_from_nothing",
"(",
"self",
",",
"directory",
",",
"mets_basename",
"=",
"'mets.xml'",
",",
"clobber_mets",
"=",
"False",
")",
":",
"if",
"directory",
"is",
"None",
":",
"directory",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"TMP_PREFIX",
")",
"if",
"not",
"exists",
"(",
"directory",
")",
":",
"makedirs",
"(",
"directory",
")",
"mets_fpath",
"=",
"join",
"(",
"directory",
",",
"mets_basename",
")",
"if",
"not",
"clobber_mets",
"and",
"exists",
"(",
"mets_fpath",
")",
":",
"raise",
"Exception",
"(",
"\"Not clobbering existing mets.xml in '%s'.\"",
"%",
"directory",
")",
"mets",
"=",
"OcrdMets",
".",
"empty_mets",
"(",
")",
"with",
"open",
"(",
"mets_fpath",
",",
"'wb'",
")",
"as",
"fmets",
":",
"log",
".",
"info",
"(",
"\"Writing %s\"",
",",
"mets_fpath",
")",
"fmets",
".",
"write",
"(",
"mets",
".",
"to_xml",
"(",
"xmllint",
"=",
"True",
")",
")",
"return",
"Workspace",
"(",
"self",
",",
"directory",
",",
"mets",
")"
] | 39.333333 | 15.555556 |
def open_fileswitcher(self, symbol=False):
"""Open file list management dialog box."""
if self.fileswitcher is not None and \
self.fileswitcher.is_visible:
self.fileswitcher.hide()
self.fileswitcher.is_visible = False
return
if symbol:
self.fileswitcher.plugin = self.editor
self.fileswitcher.set_search_text('@')
else:
self.fileswitcher.set_search_text('')
self.fileswitcher.show()
self.fileswitcher.is_visible = True | [
"def",
"open_fileswitcher",
"(",
"self",
",",
"symbol",
"=",
"False",
")",
":",
"if",
"self",
".",
"fileswitcher",
"is",
"not",
"None",
"and",
"self",
".",
"fileswitcher",
".",
"is_visible",
":",
"self",
".",
"fileswitcher",
".",
"hide",
"(",
")",
"self",
".",
"fileswitcher",
".",
"is_visible",
"=",
"False",
"return",
"if",
"symbol",
":",
"self",
".",
"fileswitcher",
".",
"plugin",
"=",
"self",
".",
"editor",
"self",
".",
"fileswitcher",
".",
"set_search_text",
"(",
"'@'",
")",
"else",
":",
"self",
".",
"fileswitcher",
".",
"set_search_text",
"(",
"''",
")",
"self",
".",
"fileswitcher",
".",
"show",
"(",
")",
"self",
".",
"fileswitcher",
".",
"is_visible",
"=",
"True"
] | 39.142857 | 9.428571 |
def align(self):
"""Align to the next byte, returns the amount of bits skipped"""
bits = self._bits
self._buffer = 0
self._bits = 0
return bits | [
"def",
"align",
"(",
"self",
")",
":",
"bits",
"=",
"self",
".",
"_bits",
"self",
".",
"_buffer",
"=",
"0",
"self",
".",
"_bits",
"=",
"0",
"return",
"bits"
] | 25.428571 | 19.142857 |
def get_environment_tar(self):
'''return the environment.tar generated with the Singularity software.
We first try the Linux Filesystem expected location in /usr/libexec
If not found, we detect the system archicture
dirname $(singularity selftest 2>&1 | grep 'lib' | awk '{print $4}' | sed -e 's@\(.*/singularity\).*@\1@')
'''
from sregistry.utils import ( which, run_command )
# First attempt - look at File System Hierarchy Standard (FHS)
res = which('singularity')['message']
libexec = res.replace('/bin/singularity','')
envtar = '%s/libexec/singularity/bootstrap-scripts/environment.tar' %libexec
if os.path.exists(envtar):
return envtar
# Second attempt, debian distribution will identify folder
try:
res = which('dpkg-architecture')['message']
if res is not None:
cmd = ['dpkg-architecture', '-qDEB_HOST_MULTIARCH']
triplet = run_command(cmd)['message'].strip('\n')
envtar = '/usr/lib/%s/singularity/bootstrap-scripts/environment.tar' %triplet
if os.path.exists(envtar):
return envtar
except:
pass
# Final, return environment.tar provided in package
return "%s/environment.tar" %os.path.abspath(os.path.dirname(__file__)) | [
"def",
"get_environment_tar",
"(",
"self",
")",
":",
"from",
"sregistry",
".",
"utils",
"import",
"(",
"which",
",",
"run_command",
")",
"# First attempt - look at File System Hierarchy Standard (FHS)",
"res",
"=",
"which",
"(",
"'singularity'",
")",
"[",
"'message'",
"]",
"libexec",
"=",
"res",
".",
"replace",
"(",
"'/bin/singularity'",
",",
"''",
")",
"envtar",
"=",
"'%s/libexec/singularity/bootstrap-scripts/environment.tar'",
"%",
"libexec",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"envtar",
")",
":",
"return",
"envtar",
"# Second attempt, debian distribution will identify folder",
"try",
":",
"res",
"=",
"which",
"(",
"'dpkg-architecture'",
")",
"[",
"'message'",
"]",
"if",
"res",
"is",
"not",
"None",
":",
"cmd",
"=",
"[",
"'dpkg-architecture'",
",",
"'-qDEB_HOST_MULTIARCH'",
"]",
"triplet",
"=",
"run_command",
"(",
"cmd",
")",
"[",
"'message'",
"]",
".",
"strip",
"(",
"'\\n'",
")",
"envtar",
"=",
"'/usr/lib/%s/singularity/bootstrap-scripts/environment.tar'",
"%",
"triplet",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"envtar",
")",
":",
"return",
"envtar",
"except",
":",
"pass",
"# Final, return environment.tar provided in package",
"return",
"\"%s/environment.tar\"",
"%",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")"
] | 41 | 25.967742 |
def p_DictionaryMember(p):
"""DictionaryMember : Type IDENTIFIER Default ";"
"""
p[0] = model.DictionaryMember(type=p[1], name=p[2], default=p[3]) | [
"def",
"p_DictionaryMember",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"model",
".",
"DictionaryMember",
"(",
"type",
"=",
"p",
"[",
"1",
"]",
",",
"name",
"=",
"p",
"[",
"2",
"]",
",",
"default",
"=",
"p",
"[",
"3",
"]",
")"
] | 37.25 | 10.25 |
def read_memory(self, addr, transfer_size=32, now=True):
"""
read a memory location. By default, a word will
be read
"""
result = self.ap.read_memory(addr, transfer_size, now)
# Read callback returned for async reads.
def read_memory_cb():
return self.bp_manager.filter_memory(addr, transfer_size, result())
if now:
return self.bp_manager.filter_memory(addr, transfer_size, result)
else:
return read_memory_cb | [
"def",
"read_memory",
"(",
"self",
",",
"addr",
",",
"transfer_size",
"=",
"32",
",",
"now",
"=",
"True",
")",
":",
"result",
"=",
"self",
".",
"ap",
".",
"read_memory",
"(",
"addr",
",",
"transfer_size",
",",
"now",
")",
"# Read callback returned for async reads.",
"def",
"read_memory_cb",
"(",
")",
":",
"return",
"self",
".",
"bp_manager",
".",
"filter_memory",
"(",
"addr",
",",
"transfer_size",
",",
"result",
"(",
")",
")",
"if",
"now",
":",
"return",
"self",
".",
"bp_manager",
".",
"filter_memory",
"(",
"addr",
",",
"transfer_size",
",",
"result",
")",
"else",
":",
"return",
"read_memory_cb"
] | 33.666667 | 20.866667 |
def get_all_host_templates(resource_root, cluster_name="default"):
"""
Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3
"""
return call(resource_root.get,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, api_version=3) | [
"def",
"get_all_host_templates",
"(",
"resource_root",
",",
"cluster_name",
"=",
"\"default\"",
")",
":",
"return",
"call",
"(",
"resource_root",
".",
"get",
",",
"HOST_TEMPLATES_PATH",
"%",
"(",
"cluster_name",
",",
")",
",",
"ApiHostTemplate",
",",
"True",
",",
"api_version",
"=",
"3",
")"
] | 36.7 | 11.3 |
def xpathNextPreceding(self, ctxt):
"""Traversal function for the "preceding" direction the
preceding axis contains all nodes in the same document as
the context node that are before the context node in
document order, excluding any ancestors and excluding
attribute nodes and namespace nodes; the nodes are ordered
in reverse document order """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextPreceding(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextPreceding() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | [
"def",
"xpathNextPreceding",
"(",
"self",
",",
"ctxt",
")",
":",
"if",
"ctxt",
"is",
"None",
":",
"ctxt__o",
"=",
"None",
"else",
":",
"ctxt__o",
"=",
"ctxt",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlXPathNextPreceding",
"(",
"ctxt__o",
",",
"self",
".",
"_o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"xpathError",
"(",
"'xmlXPathNextPreceding() failed'",
")",
"__tmp",
"=",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | 50.615385 | 15.307692 |
def extract(self, url=None, raw_html=None):
''' Extract the most likely article content from the html page
Args:
url (str): URL to pull and parse
raw_html (str): String representation of the HTML page
Returns:
Article: Representation of the article contents \
including other parsed and extracted metadata '''
crawl_candidate = CrawlCandidate(self.config, url, raw_html)
return self.__crawl(crawl_candidate) | [
"def",
"extract",
"(",
"self",
",",
"url",
"=",
"None",
",",
"raw_html",
"=",
"None",
")",
":",
"crawl_candidate",
"=",
"CrawlCandidate",
"(",
"self",
".",
"config",
",",
"url",
",",
"raw_html",
")",
"return",
"self",
".",
"__crawl",
"(",
"crawl_candidate",
")"
] | 46.363636 | 21.454545 |
def from_statement(cls, statement, filename='<expr>'):
"""A helper to construct a PythonFile from a triple-quoted string, for testing.
:param statement: Python file contents
:return: Instance of PythonFile
"""
lines = textwrap.dedent(statement).split('\n')
if lines and not lines[0]: # Remove the initial empty line, which is an artifact of dedent.
lines = lines[1:]
blob = '\n'.join(lines).encode('utf-8')
tree = cls._parse(blob, filename)
return cls(blob=blob, tree=tree, root=None, filename=filename) | [
"def",
"from_statement",
"(",
"cls",
",",
"statement",
",",
"filename",
"=",
"'<expr>'",
")",
":",
"lines",
"=",
"textwrap",
".",
"dedent",
"(",
"statement",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"lines",
"and",
"not",
"lines",
"[",
"0",
"]",
":",
"# Remove the initial empty line, which is an artifact of dedent.",
"lines",
"=",
"lines",
"[",
"1",
":",
"]",
"blob",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"tree",
"=",
"cls",
".",
"_parse",
"(",
"blob",
",",
"filename",
")",
"return",
"cls",
"(",
"blob",
"=",
"blob",
",",
"tree",
"=",
"tree",
",",
"root",
"=",
"None",
",",
"filename",
"=",
"filename",
")"
] | 44.666667 | 14.666667 |
def _process_response(cls, response):
"""
Examine the response and raise an error is something is off
"""
if len(response) != 1:
raise BadResponseError("Malformed response: {}".format(response))
stats = list(itervalues(response))[0]
if not len(stats):
raise BadResponseError("Malformed response for host: {}".format(stats))
return stats | [
"def",
"_process_response",
"(",
"cls",
",",
"response",
")",
":",
"if",
"len",
"(",
"response",
")",
"!=",
"1",
":",
"raise",
"BadResponseError",
"(",
"\"Malformed response: {}\"",
".",
"format",
"(",
"response",
")",
")",
"stats",
"=",
"list",
"(",
"itervalues",
"(",
"response",
")",
")",
"[",
"0",
"]",
"if",
"not",
"len",
"(",
"stats",
")",
":",
"raise",
"BadResponseError",
"(",
"\"Malformed response for host: {}\"",
".",
"format",
"(",
"stats",
")",
")",
"return",
"stats"
] | 33.916667 | 19.916667 |
def list(self, **kwds):
"""
Endpoint: /tags/list.json
Returns a list of Tag objects.
"""
tags = self._client.get("/tags/list.json", **kwds)["result"]
tags = self._result_to_list(tags)
return [Tag(self._client, tag) for tag in tags] | [
"def",
"list",
"(",
"self",
",",
"*",
"*",
"kwds",
")",
":",
"tags",
"=",
"self",
".",
"_client",
".",
"get",
"(",
"\"/tags/list.json\"",
",",
"*",
"*",
"kwds",
")",
"[",
"\"result\"",
"]",
"tags",
"=",
"self",
".",
"_result_to_list",
"(",
"tags",
")",
"return",
"[",
"Tag",
"(",
"self",
".",
"_client",
",",
"tag",
")",
"for",
"tag",
"in",
"tags",
"]"
] | 31.111111 | 12.222222 |
def is_import_exception(mod):
"""Check module name to see if import has been whitelisted.
Import based rules should not run on any whitelisted module
"""
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS)) | [
"def",
"is_import_exception",
"(",
"mod",
")",
":",
"return",
"(",
"mod",
"in",
"IMPORT_EXCEPTIONS",
"or",
"any",
"(",
"mod",
".",
"startswith",
"(",
"m",
"+",
"'.'",
")",
"for",
"m",
"in",
"IMPORT_EXCEPTIONS",
")",
")"
] | 39.285714 | 15.142857 |
def covariance_points(self,x0,y0,xother,yother):
""" get the covariance between base point x0,y0 and
other points xother,yother implied by Vario2d
Parameters
----------
x0 : (float)
x-coordinate of base point
y0 : (float)
y-coordinate of base point
xother : (float or numpy.ndarray)
x-coordinates of other points
yother : (float or numpy.ndarray)
y-coordinates of other points
Returns
-------
cov : numpy.ndarray
covariance between base point and other points implied by
Vario2d.
Note
----
len(cov) = len(xother) = len(yother)
"""
dx = x0 - xother
dy = y0 - yother
dxx,dyy = self._apply_rotation(dx,dy)
h = np.sqrt(dxx*dxx + dyy*dyy)
return self._h_function(h) | [
"def",
"covariance_points",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"xother",
",",
"yother",
")",
":",
"dx",
"=",
"x0",
"-",
"xother",
"dy",
"=",
"y0",
"-",
"yother",
"dxx",
",",
"dyy",
"=",
"self",
".",
"_apply_rotation",
"(",
"dx",
",",
"dy",
")",
"h",
"=",
"np",
".",
"sqrt",
"(",
"dxx",
"*",
"dxx",
"+",
"dyy",
"*",
"dyy",
")",
"return",
"self",
".",
"_h_function",
"(",
"h",
")"
] | 27.935484 | 15.806452 |
def get_security_zones_activation(self) -> (bool, bool):
""" returns the value of the security zones if they are armed or not
Returns
internal
True if the internal zone is armed
external
True if the external zone is armed
"""
internal_active = False
external_active = False
for g in self.groups:
if isinstance(g, SecurityZoneGroup):
if g.label == "EXTERNAL":
external_active = g.active
elif g.label == "INTERNAL":
internal_active = g.active
return internal_active, external_active | [
"def",
"get_security_zones_activation",
"(",
"self",
")",
"->",
"(",
"bool",
",",
"bool",
")",
":",
"internal_active",
"=",
"False",
"external_active",
"=",
"False",
"for",
"g",
"in",
"self",
".",
"groups",
":",
"if",
"isinstance",
"(",
"g",
",",
"SecurityZoneGroup",
")",
":",
"if",
"g",
".",
"label",
"==",
"\"EXTERNAL\"",
":",
"external_active",
"=",
"g",
".",
"active",
"elif",
"g",
".",
"label",
"==",
"\"INTERNAL\"",
":",
"internal_active",
"=",
"g",
".",
"active",
"return",
"internal_active",
",",
"external_active"
] | 37.833333 | 10.555556 |
def get_perm_name(cls, action, full=True):
"""
Return the name of the permission for a given model and action.
By default it returns the full permission name `app_label.perm_codename`. If `full=False`, it returns only the
`perm_codename`.
"""
codename = "{}_{}".format(action, cls.__name__.lower())
if full:
return "{}.{}".format(cls._meta.app_label, codename)
return codename | [
"def",
"get_perm_name",
"(",
"cls",
",",
"action",
",",
"full",
"=",
"True",
")",
":",
"codename",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"action",
",",
"cls",
".",
"__name__",
".",
"lower",
"(",
")",
")",
"if",
"full",
":",
"return",
"\"{}.{}\"",
".",
"format",
"(",
"cls",
".",
"_meta",
".",
"app_label",
",",
"codename",
")",
"return",
"codename"
] | 37 | 22.818182 |
def get_peptable_headerfields(headertypes, lookup=False, poolnames=False):
"""Called by driver to generate headerfields object"""
field_defs = {'isoquant': get_isoquant_fields,
'precursorquant': get_precursorquant_fields,
'peptidefdr': get_peptidefdr_fields,
'peptidepep': get_peptidepep_fields,
'proteindata': get_proteininfo_fields,
}
return generate_headerfields(headertypes, field_defs, poolnames, lookup) | [
"def",
"get_peptable_headerfields",
"(",
"headertypes",
",",
"lookup",
"=",
"False",
",",
"poolnames",
"=",
"False",
")",
":",
"field_defs",
"=",
"{",
"'isoquant'",
":",
"get_isoquant_fields",
",",
"'precursorquant'",
":",
"get_precursorquant_fields",
",",
"'peptidefdr'",
":",
"get_peptidefdr_fields",
",",
"'peptidepep'",
":",
"get_peptidepep_fields",
",",
"'proteindata'",
":",
"get_proteininfo_fields",
",",
"}",
"return",
"generate_headerfields",
"(",
"headertypes",
",",
"field_defs",
",",
"poolnames",
",",
"lookup",
")"
] | 55.888889 | 18.555556 |
def pick_peaks(nc, L=16, offset_denom=0.1):
"""Obtain peaks from a novelty curve using an adaptive threshold."""
offset = nc.mean() * float(offset_denom)
th = filters.median_filter(nc, size=L) + offset
#th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset
#import pylab as plt
#plt.plot(nc)
#plt.plot(th)
#plt.show()
# th = np.ones(nc.shape[0]) * nc.mean() - 0.08
peaks = []
for i in range(1, nc.shape[0] - 1):
# is it a peak?
if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
# is it above the threshold?
if nc[i] > th[i]:
peaks.append(i)
return peaks | [
"def",
"pick_peaks",
"(",
"nc",
",",
"L",
"=",
"16",
",",
"offset_denom",
"=",
"0.1",
")",
":",
"offset",
"=",
"nc",
".",
"mean",
"(",
")",
"*",
"float",
"(",
"offset_denom",
")",
"th",
"=",
"filters",
".",
"median_filter",
"(",
"nc",
",",
"size",
"=",
"L",
")",
"+",
"offset",
"#th = filters.gaussian_filter(nc, sigma=L/2., mode=\"nearest\") + offset",
"#import pylab as plt",
"#plt.plot(nc)",
"#plt.plot(th)",
"#plt.show()",
"# th = np.ones(nc.shape[0]) * nc.mean() - 0.08",
"peaks",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"nc",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
":",
"# is it a peak?",
"if",
"nc",
"[",
"i",
"-",
"1",
"]",
"<",
"nc",
"[",
"i",
"]",
"and",
"nc",
"[",
"i",
"]",
">",
"nc",
"[",
"i",
"+",
"1",
"]",
":",
"# is it above the threshold?",
"if",
"nc",
"[",
"i",
"]",
">",
"th",
"[",
"i",
"]",
":",
"peaks",
".",
"append",
"(",
"i",
")",
"return",
"peaks"
] | 36.111111 | 13.777778 |
def reset(self, total_size=None):
"""Remove all file system contents and reset the root."""
self.root = FakeDirectory(self.path_separator, filesystem=self)
self.cwd = self.root.name
self.open_files = []
self._free_fd_heap = []
self._last_ino = 0
self._last_dev = 0
self.mount_points = {}
self.add_mount_point(self.root.name, total_size)
self._add_standard_streams() | [
"def",
"reset",
"(",
"self",
",",
"total_size",
"=",
"None",
")",
":",
"self",
".",
"root",
"=",
"FakeDirectory",
"(",
"self",
".",
"path_separator",
",",
"filesystem",
"=",
"self",
")",
"self",
".",
"cwd",
"=",
"self",
".",
"root",
".",
"name",
"self",
".",
"open_files",
"=",
"[",
"]",
"self",
".",
"_free_fd_heap",
"=",
"[",
"]",
"self",
".",
"_last_ino",
"=",
"0",
"self",
".",
"_last_dev",
"=",
"0",
"self",
".",
"mount_points",
"=",
"{",
"}",
"self",
".",
"add_mount_point",
"(",
"self",
".",
"root",
".",
"name",
",",
"total_size",
")",
"self",
".",
"_add_standard_streams",
"(",
")"
] | 36.25 | 13.666667 |
def list_firmware_manifests(self, **kwargs):
"""List all manifests.
:param int limit: number of manifests to retrieve
:param str order: sort direction of manifests when ordered by time. 'desc' or 'asc'
:param str after: get manifests after given `image_id`
:param dict filters: Dictionary of filters to apply
:return: list of :py:class:`FirmwareManifest` objects
:rtype: PaginatedResponse
"""
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, FirmwareManifest, True)
api = self._get_api(update_service.DefaultApi)
return PaginatedResponse(api.firmware_manifest_list, lwrap_type=FirmwareManifest, **kwargs) | [
"def",
"list_firmware_manifests",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"self",
".",
"_verify_sort_options",
"(",
"kwargs",
")",
"kwargs",
"=",
"self",
".",
"_verify_filters",
"(",
"kwargs",
",",
"FirmwareManifest",
",",
"True",
")",
"api",
"=",
"self",
".",
"_get_api",
"(",
"update_service",
".",
"DefaultApi",
")",
"return",
"PaginatedResponse",
"(",
"api",
".",
"firmware_manifest_list",
",",
"lwrap_type",
"=",
"FirmwareManifest",
",",
"*",
"*",
"kwargs",
")"
] | 51.428571 | 20.928571 |
def connect(self):
"""
Creates a new KazooClient and establishes a connection.
Passes the client the `handle_connection_change` method as a callback
to fire when the Zookeeper connection changes state.
"""
self.client = client.KazooClient(hosts=",".join(self.hosts))
self.client.add_listener(self.handle_connection_change)
self.client.start_async() | [
"def",
"connect",
"(",
"self",
")",
":",
"self",
".",
"client",
"=",
"client",
".",
"KazooClient",
"(",
"hosts",
"=",
"\",\"",
".",
"join",
"(",
"self",
".",
"hosts",
")",
")",
"self",
".",
"client",
".",
"add_listener",
"(",
"self",
".",
"handle_connection_change",
")",
"self",
".",
"client",
".",
"start_async",
"(",
")"
] | 36.727273 | 21.818182 |
def emit(self, data_frame):
"""Use this function in emit data into the store.
:param data_frame: DataFrame to be recorded.
"""
if self.result is not None:
raise MultipleEmitsError()
data_frame.columns = [self.prefix + '__' + c
for c in data_frame.columns]
self.result = data_frame | [
"def",
"emit",
"(",
"self",
",",
"data_frame",
")",
":",
"if",
"self",
".",
"result",
"is",
"not",
"None",
":",
"raise",
"MultipleEmitsError",
"(",
")",
"data_frame",
".",
"columns",
"=",
"[",
"self",
".",
"prefix",
"+",
"'__'",
"+",
"c",
"for",
"c",
"in",
"data_frame",
".",
"columns",
"]",
"self",
".",
"result",
"=",
"data_frame"
] | 36.2 | 11 |
def _decode(rawstr):
"""Convert a raw string to a Message.
"""
# Check for the magick word.
try:
rawstr = rawstr.decode('utf-8')
except (AttributeError, UnicodeEncodeError):
pass
except (UnicodeDecodeError):
try:
rawstr = rawstr.decode('iso-8859-1')
except (UnicodeDecodeError):
rawstr = rawstr.decode('utf-8', 'ignore')
if not rawstr.startswith(_MAGICK):
raise MessageError("This is not a '%s' message (wrong magick word)"
% _MAGICK)
rawstr = rawstr[len(_MAGICK):]
# Check for element count and version
raw = re.split(r"\s+", rawstr, maxsplit=6)
if len(raw) < 5:
raise MessageError("Could node decode raw string: '%s ...'"
% str(rawstr[:36]))
version = raw[4][:len(_VERSION)]
if not _is_valid_version(version):
raise MessageError("Invalid Message version: '%s'" % str(version))
# Start to build message
msg = dict((('subject', raw[0].strip()),
('type', raw[1].strip()),
('sender', raw[2].strip()),
('time', strp_isoformat(raw[3].strip())),
('version', version)))
# Data part
try:
mimetype = raw[5].lower()
data = raw[6]
except IndexError:
mimetype = None
if mimetype is None:
msg['data'] = ''
msg['binary'] = False
elif mimetype == 'application/json':
try:
msg['data'] = json.loads(raw[6], object_hook=datetime_decoder)
msg['binary'] = False
except ValueError:
raise MessageError("JSON decode failed on '%s ...'" % raw[6][:36])
elif mimetype == 'text/ascii':
msg['data'] = str(data)
msg['binary'] = False
elif mimetype == 'binary/octet-stream':
msg['data'] = data
msg['binary'] = True
else:
raise MessageError("Unknown mime-type '%s'" % mimetype)
return msg | [
"def",
"_decode",
"(",
"rawstr",
")",
":",
"# Check for the magick word.",
"try",
":",
"rawstr",
"=",
"rawstr",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"(",
"AttributeError",
",",
"UnicodeEncodeError",
")",
":",
"pass",
"except",
"(",
"UnicodeDecodeError",
")",
":",
"try",
":",
"rawstr",
"=",
"rawstr",
".",
"decode",
"(",
"'iso-8859-1'",
")",
"except",
"(",
"UnicodeDecodeError",
")",
":",
"rawstr",
"=",
"rawstr",
".",
"decode",
"(",
"'utf-8'",
",",
"'ignore'",
")",
"if",
"not",
"rawstr",
".",
"startswith",
"(",
"_MAGICK",
")",
":",
"raise",
"MessageError",
"(",
"\"This is not a '%s' message (wrong magick word)\"",
"%",
"_MAGICK",
")",
"rawstr",
"=",
"rawstr",
"[",
"len",
"(",
"_MAGICK",
")",
":",
"]",
"# Check for element count and version",
"raw",
"=",
"re",
".",
"split",
"(",
"r\"\\s+\"",
",",
"rawstr",
",",
"maxsplit",
"=",
"6",
")",
"if",
"len",
"(",
"raw",
")",
"<",
"5",
":",
"raise",
"MessageError",
"(",
"\"Could node decode raw string: '%s ...'\"",
"%",
"str",
"(",
"rawstr",
"[",
":",
"36",
"]",
")",
")",
"version",
"=",
"raw",
"[",
"4",
"]",
"[",
":",
"len",
"(",
"_VERSION",
")",
"]",
"if",
"not",
"_is_valid_version",
"(",
"version",
")",
":",
"raise",
"MessageError",
"(",
"\"Invalid Message version: '%s'\"",
"%",
"str",
"(",
"version",
")",
")",
"# Start to build message",
"msg",
"=",
"dict",
"(",
"(",
"(",
"'subject'",
",",
"raw",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
",",
"(",
"'type'",
",",
"raw",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
",",
"(",
"'sender'",
",",
"raw",
"[",
"2",
"]",
".",
"strip",
"(",
")",
")",
",",
"(",
"'time'",
",",
"strp_isoformat",
"(",
"raw",
"[",
"3",
"]",
".",
"strip",
"(",
")",
")",
")",
",",
"(",
"'version'",
",",
"version",
")",
")",
")",
"# Data part",
"try",
":",
"mimetype",
"=",
"raw",
"[",
"5",
"]",
".",
"lower",
"(",
")",
"data",
"=",
"raw",
"[",
"6",
"]",
"except",
"IndexError",
":",
"mimetype",
"=",
"None",
"if",
"mimetype",
"is",
"None",
":",
"msg",
"[",
"'data'",
"]",
"=",
"''",
"msg",
"[",
"'binary'",
"]",
"=",
"False",
"elif",
"mimetype",
"==",
"'application/json'",
":",
"try",
":",
"msg",
"[",
"'data'",
"]",
"=",
"json",
".",
"loads",
"(",
"raw",
"[",
"6",
"]",
",",
"object_hook",
"=",
"datetime_decoder",
")",
"msg",
"[",
"'binary'",
"]",
"=",
"False",
"except",
"ValueError",
":",
"raise",
"MessageError",
"(",
"\"JSON decode failed on '%s ...'\"",
"%",
"raw",
"[",
"6",
"]",
"[",
":",
"36",
"]",
")",
"elif",
"mimetype",
"==",
"'text/ascii'",
":",
"msg",
"[",
"'data'",
"]",
"=",
"str",
"(",
"data",
")",
"msg",
"[",
"'binary'",
"]",
"=",
"False",
"elif",
"mimetype",
"==",
"'binary/octet-stream'",
":",
"msg",
"[",
"'data'",
"]",
"=",
"data",
"msg",
"[",
"'binary'",
"]",
"=",
"True",
"else",
":",
"raise",
"MessageError",
"(",
"\"Unknown mime-type '%s'\"",
"%",
"mimetype",
")",
"return",
"msg"
] | 32.166667 | 16 |
def get_hist(rfile, histname, get_overflow=False):
"""Read a 1D Histogram."""
import root_numpy as rnp
rfile = open_rfile(rfile)
hist = rfile[histname]
xlims = np.array(list(hist.xedges()))
bin_values = rnp.hist2array(hist, include_overflow=get_overflow)
rfile.close()
return bin_values, xlims | [
"def",
"get_hist",
"(",
"rfile",
",",
"histname",
",",
"get_overflow",
"=",
"False",
")",
":",
"import",
"root_numpy",
"as",
"rnp",
"rfile",
"=",
"open_rfile",
"(",
"rfile",
")",
"hist",
"=",
"rfile",
"[",
"histname",
"]",
"xlims",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"hist",
".",
"xedges",
"(",
")",
")",
")",
"bin_values",
"=",
"rnp",
".",
"hist2array",
"(",
"hist",
",",
"include_overflow",
"=",
"get_overflow",
")",
"rfile",
".",
"close",
"(",
")",
"return",
"bin_values",
",",
"xlims"
] | 31.7 | 15.1 |
def execute(handlers):
'''
Run the command
:return:
'''
# verify if the environment variables are correctly set
check_environment()
# create the argument parser
parser = create_parser(handlers)
# if no argument is provided, print help and exit
if len(sys.argv[1:]) == 0:
parser.print_help()
return 0
# insert the boolean values for some of the options
sys.argv = config.insert_bool_values(sys.argv)
try:
# parse the args
args, unknown_args = parser.parse_known_args()
except ValueError as ex:
Log.error("Error while parsing arguments: %s", str(ex))
Log.debug(traceback.format_exc())
sys.exit(1)
command_line_args = vars(args)
# set log level
log.set_logging_level(command_line_args)
Log.debug("Input Command Line Args: %s", command_line_args)
# command to be execute
command = command_line_args['subcommand']
# print the input parameters, if verbose is enabled
Log.debug("Processed Command Line Args: %s", command_line_args)
results = run(handlers, command, parser, command_line_args, unknown_args)
return 0 if result.is_successful(results) else 1 | [
"def",
"execute",
"(",
"handlers",
")",
":",
"# verify if the environment variables are correctly set",
"check_environment",
"(",
")",
"# create the argument parser",
"parser",
"=",
"create_parser",
"(",
"handlers",
")",
"# if no argument is provided, print help and exit",
"if",
"len",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"==",
"0",
":",
"parser",
".",
"print_help",
"(",
")",
"return",
"0",
"# insert the boolean values for some of the options",
"sys",
".",
"argv",
"=",
"config",
".",
"insert_bool_values",
"(",
"sys",
".",
"argv",
")",
"try",
":",
"# parse the args",
"args",
",",
"unknown_args",
"=",
"parser",
".",
"parse_known_args",
"(",
")",
"except",
"ValueError",
"as",
"ex",
":",
"Log",
".",
"error",
"(",
"\"Error while parsing arguments: %s\"",
",",
"str",
"(",
"ex",
")",
")",
"Log",
".",
"debug",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"command_line_args",
"=",
"vars",
"(",
"args",
")",
"# set log level",
"log",
".",
"set_logging_level",
"(",
"command_line_args",
")",
"Log",
".",
"debug",
"(",
"\"Input Command Line Args: %s\"",
",",
"command_line_args",
")",
"# command to be execute",
"command",
"=",
"command_line_args",
"[",
"'subcommand'",
"]",
"# print the input parameters, if verbose is enabled",
"Log",
".",
"debug",
"(",
"\"Processed Command Line Args: %s\"",
",",
"command_line_args",
")",
"results",
"=",
"run",
"(",
"handlers",
",",
"command",
",",
"parser",
",",
"command_line_args",
",",
"unknown_args",
")",
"return",
"0",
"if",
"result",
".",
"is_successful",
"(",
"results",
")",
"else",
"1"
] | 26 | 22.904762 |
def create(cls, name, enabled=True, superuser=True):
"""
Create a new API Client. Once client is created,
you can create a new password by::
>>> client = ApiClient.create('myclient')
>>> print(client)
ApiClient(name=myclient)
>>> client.change_password('mynewpassword')
:param str name: name of client
:param bool enabled: enable client
:param bool superuser: is superuser account
:raises CreateElementFailed: failure creating element with reason
:return: instance with meta
:rtype: ApiClient
"""
json = {
'enabled': enabled,
'name': name,
'superuser': superuser}
return ElementCreator(cls, json) | [
"def",
"create",
"(",
"cls",
",",
"name",
",",
"enabled",
"=",
"True",
",",
"superuser",
"=",
"True",
")",
":",
"json",
"=",
"{",
"'enabled'",
":",
"enabled",
",",
"'name'",
":",
"name",
",",
"'superuser'",
":",
"superuser",
"}",
"return",
"ElementCreator",
"(",
"cls",
",",
"json",
")"
] | 32.913043 | 13.608696 |
def bakery_client_for_controller(self, controller_name):
'''Make a copy of the bakery client with a the appropriate controller's
cookiejar in it.
'''
bakery_client = self.bakery_client
if bakery_client:
bakery_client = copy.copy(bakery_client)
else:
bakery_client = httpbakery.Client()
bakery_client.cookies = self.jujudata.cookies_for_controller(
controller_name)
return bakery_client | [
"def",
"bakery_client_for_controller",
"(",
"self",
",",
"controller_name",
")",
":",
"bakery_client",
"=",
"self",
".",
"bakery_client",
"if",
"bakery_client",
":",
"bakery_client",
"=",
"copy",
".",
"copy",
"(",
"bakery_client",
")",
"else",
":",
"bakery_client",
"=",
"httpbakery",
".",
"Client",
"(",
")",
"bakery_client",
".",
"cookies",
"=",
"self",
".",
"jujudata",
".",
"cookies_for_controller",
"(",
"controller_name",
")",
"return",
"bakery_client"
] | 39.5 | 18 |
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml | [
"def",
"aloha_to_etree",
"(",
"html_source",
")",
":",
"xml",
"=",
"_tidy2xhtml5",
"(",
"html_source",
")",
"for",
"i",
",",
"transform",
"in",
"enumerate",
"(",
"ALOHA2HTML_TRANSFORM_PIPELINE",
")",
":",
"xml",
"=",
"transform",
"(",
"xml",
")",
"return",
"xml"
] | 40.333333 | 12.666667 |
def _build_latex_array(self, aliases=None):
"""Returns an array of strings containing \\LaTeX for this circuit.
If aliases is not None, aliases contains a dict mapping
the current qubits in the circuit to new qubit names.
We will deduce the register names and sizes from aliases.
"""
columns = 1
# Rename qregs if necessary
if aliases:
qregdata = {}
for q in aliases.values():
if q[0] not in qregdata:
qregdata[q[0]] = q[1] + 1
elif qregdata[q[0]] < q[1] + 1:
qregdata[q[0]] = q[1] + 1
else:
qregdata = self.qregs
for column, layer in enumerate(self.ops, 1):
for op in layer:
if op.condition:
mask = self._get_mask(op.condition[0])
cl_reg = self.clbit_list[self._ffs(mask)]
if_reg = cl_reg[0]
pos_2 = self.img_regs[cl_reg]
if_value = format(op.condition[1],
'b').zfill(self.cregs[if_reg])[::-1]
if op.name not in ['measure', 'barrier', 'snapshot', 'load',
'save', 'noise']:
nm = op.name
qarglist = op.qargs
if aliases is not None:
qarglist = map(lambda x: aliases[x], qarglist)
if len(qarglist) == 1:
pos_1 = self.img_regs[(qarglist[0][0],
qarglist[0][1])]
if op.condition:
mask = self._get_mask(op.condition[0])
cl_reg = self.clbit_list[self._ffs(mask)]
if_reg = cl_reg[0]
pos_2 = self.img_regs[cl_reg]
if nm == "x":
self._latex[pos_1][column] = "\\gate{X}"
elif nm == "y":
self._latex[pos_1][column] = "\\gate{Y}"
elif nm == "z":
self._latex[pos_1][column] = "\\gate{Z}"
elif nm == "h":
self._latex[pos_1][column] = "\\gate{H}"
elif nm == "s":
self._latex[pos_1][column] = "\\gate{S}"
elif nm == "sdg":
self._latex[pos_1][column] = "\\gate{S^\\dag}"
elif nm == "t":
self._latex[pos_1][column] = "\\gate{T}"
elif nm == "tdg":
self._latex[pos_1][column] = "\\gate{T^\\dag}"
elif nm == "u0":
self._latex[pos_1][column] = "\\gate{U_0(%s)}" % (
op.op.params[0])
elif nm == "u1":
self._latex[pos_1][column] = "\\gate{U_1(%s)}" % (
op.op.params[0])
elif nm == "u2":
self._latex[pos_1][column] = \
"\\gate{U_2\\left(%s,%s\\right)}" % (
op.op.params[0], op.op.params[1])
elif nm == "u3":
self._latex[pos_1][column] = ("\\gate{U_3(%s,%s,%s)}" % (
op.op.params[0],
op.op.params[1],
op.op.params[2]))
elif nm == "rx":
self._latex[pos_1][column] = "\\gate{R_x(%s)}" % (
op.op.params[0])
elif nm == "ry":
self._latex[pos_1][column] = "\\gate{R_y(%s)}" % (
op.op.params[0])
elif nm == "rz":
self._latex[pos_1][column] = "\\gate{R_z(%s)}" % (
op.op.params[0])
else:
self._latex[pos_1][columns] = "\\gate{%s}" % nm
gap = pos_2 - pos_1
for i in range(self.cregs[if_reg]):
if if_value[i] == '1':
self._latex[pos_2 + i][column] = \
"\\control \\cw \\cwx[-" + str(gap) + "]"
gap = 1
else:
self._latex[pos_2 + i][column] = \
"\\controlo \\cw \\cwx[-" + str(gap) + "]"
gap = 1
else:
if nm == "x":
self._latex[pos_1][column] = "\\gate{X}"
elif nm == "y":
self._latex[pos_1][column] = "\\gate{Y}"
elif nm == "z":
self._latex[pos_1][column] = "\\gate{Z}"
elif nm == "h":
self._latex[pos_1][column] = "\\gate{H}"
elif nm == "s":
self._latex[pos_1][column] = "\\gate{S}"
elif nm == "sdg":
self._latex[pos_1][column] = "\\gate{S^\\dag}"
elif nm == "t":
self._latex[pos_1][column] = "\\gate{T}"
elif nm == "tdg":
self._latex[pos_1][column] = "\\gate{T^\\dag}"
elif nm == "u0":
self._latex[pos_1][column] = "\\gate{U_0(%s)}" % (
op.op.params[0])
elif nm == "u1":
self._latex[pos_1][column] = "\\gate{U_1(%s)}" % (
op.op.params[0])
elif nm == "u2":
self._latex[pos_1][column] = \
"\\gate{U_2\\left(%s,%s\\right)}" % (
op.op.params[0], op.op.params[1])
elif nm == "u3":
self._latex[pos_1][column] = ("\\gate{U_3(%s,%s,%s)}" % (
op.op.params[0],
op.op.params[1],
op.op.params[2]))
elif nm == "rx":
self._latex[pos_1][column] = "\\gate{R_x(%s)}" % (
op.op.params[0])
elif nm == "ry":
self._latex[pos_1][column] = "\\gate{R_y(%s)}" % (
op.op.params[0])
elif nm == "rz":
self._latex[pos_1][column] = "\\gate{R_z(%s)}" % (
op.op.params[0])
elif nm == "reset":
self._latex[pos_1][column] = (
"\\push{\\rule{.6em}{0em}\\ket{0}\\"
"rule{.2em}{0em}} \\qw")
else:
self._latex[pos_1][columns] = "\\gate{%s}" % nm
elif len(qarglist) == 2:
pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])]
pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])]
if op.condition:
pos_3 = self.img_regs[(if_reg, 0)]
temp = [pos_1, pos_2, pos_3]
temp.sort(key=int)
bottom = temp[1]
gap = pos_3 - bottom
for i in range(self.cregs[if_reg]):
if if_value[i] == '1':
self._latex[pos_3 + i][column] = \
"\\control \\cw \\cwx[-" + str(gap) + "]"
gap = 1
else:
self._latex[pos_3 + i][column] = \
"\\controlo \\cw \\cwx[-" + str(gap) + "]"
gap = 1
if nm == "cx":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\targ"
elif nm == "cz":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\control\\qw"
elif nm == "cy":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\gate{Y}"
elif nm == "ch":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\gate{H}"
elif nm == "swap":
self._latex[pos_1][column] = "\\qswap"
self._latex[pos_2][column] = \
"\\qswap \\qwx[" + str(pos_1 - pos_2) + "]"
elif nm == "crz":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = \
"\\gate{R_z(%s)}" % (op.op.params[0])
elif nm == "cu1":
self._latex[pos_1][column - 1] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column - 1] = "\\control\\qw"
self._latex[min(pos_1, pos_2)][column] = \
"\\dstick{%s}\\qw" % (op.op.params[0])
self._latex[max(pos_1, pos_2)][column] = "\\qw"
elif nm == "cu3":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = \
"\\gate{U_3(%s,%s,%s)}" % (op.op.params[0],
op.op.params[1],
op.op.params[2])
else:
temp = [pos_1, pos_2]
temp.sort(key=int)
if nm == "cx":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\targ"
elif nm == "cz":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\control\\qw"
elif nm == "cy":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\gate{Y}"
elif nm == "ch":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\gate{H}"
elif nm == "swap":
self._latex[pos_1][column] = "\\qswap"
self._latex[pos_2][column] = \
"\\qswap \\qwx[" + str(pos_1 - pos_2) + "]"
elif nm == "crz":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = \
"\\gate{R_z(%s)}" % (op.op.params[0])
elif nm == "cu1":
self._latex[pos_1][column - 1] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column - 1] = "\\control\\qw"
self._latex[min(pos_1, pos_2)][column] = \
"\\dstick{%s}\\qw" % (op.op.params[0])
self._latex[max(pos_1, pos_2)][column] = "\\qw"
elif nm == "cu3":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = ("\\gate{U_3(%s,%s,%s)}" % (
op.op.params[0],
op.op.params[1],
op.op.params[2]))
else:
start_pos = min([pos_1, pos_2])
stop_pos = max([pos_1, pos_2])
if stop_pos - start_pos >= 2:
delta = stop_pos - start_pos
self._latex[start_pos][columns] = (
"\\multigate{%s}{%s}" % (delta, nm))
for i_pos in range(start_pos + 1, stop_pos + 1):
self._latex[i_pos][columns] = "\\ghost{%s}" % nm
else:
self._latex[start_pos][columns] = (
"\\multigate{1}{%s}" % nm)
self._latex[stop_pos][columns] = "\\ghost{%s}" % nm
elif len(qarglist) == 3:
pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])]
pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])]
pos_3 = self.img_regs[(qarglist[2][0], qarglist[2][1])]
if op.condition:
pos_4 = self.img_regs[(if_reg, 0)]
temp = [pos_1, pos_2, pos_3, pos_4]
temp.sort(key=int)
bottom = temp[2]
prev_column = [x[column - 1] for x in self._latex]
for item, prev_entry in enumerate(prev_column):
if 'barrier' in prev_entry:
span = re.search('barrier{(.*)}', prev_entry)
if span and any(i in temp for i in range(
item, int(span.group(1)))):
self._latex[item][column - 1] = \
prev_entry.replace(
'\\barrier{',
'\\barrier[-0.65em]{')
gap = pos_4 - bottom
for i in range(self.cregs[if_reg]):
if if_value[i] == '1':
self._latex[pos_4 + i][column] = \
"\\control \\cw \\cwx[-" + str(gap) + "]"
gap = 1
else:
self._latex[pos_4 + i][column] = \
"\\controlo \\cw \\cwx[-" + str(gap) + "]"
gap = 1
if nm == "ccx":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\ctrl{" + str(
pos_3 - pos_2) + "}"
self._latex[pos_3][column] = "\\targ"
if nm == "cswap":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\qswap"
self._latex[pos_3][column] = \
"\\qswap \\qwx[" + str(pos_2 - pos_3) + "]"
else:
temp = [pos_1, pos_2, pos_3]
temp.sort(key=int)
prev_column = [x[column - 1] for x in self._latex]
for item, prev_entry in enumerate(prev_column):
if 'barrier' in prev_entry:
span = re.search('barrier{(.*)}', prev_entry)
if span and any(i in temp for i in range(
item, int(span.group(1)))):
self._latex[item][column - 1] = \
prev_entry.replace(
'\\barrier{',
'\\barrier[-0.65em]{')
if nm == "ccx":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\ctrl{" + str(
pos_3 - pos_2) + "}"
self._latex[pos_3][column] = "\\targ"
elif nm == "cswap":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\qswap"
self._latex[pos_3][column] = \
"\\qswap \\qwx[" + str(pos_2 - pos_3) + "]"
else:
start_pos = min([pos_1, pos_2, pos_3])
stop_pos = max([pos_1, pos_2, pos_3])
if stop_pos - start_pos >= 3:
delta = stop_pos - start_pos
self._latex[start_pos][columns] = (
"\\multigate{%s}{%s}" % (delta, nm))
for i_pos in range(start_pos + 1, stop_pos + 1):
self._latex[i_pos][columns] = "\\ghost{%s}" % nm
else:
self._latex[pos_1][columns] = (
"\\multigate{2}{%s}" % nm)
self._latex[pos_2][columns] = "\\ghost{%s}" % nm
self._latex[pos_3][columns] = "\\ghost{%s}" % nm
elif len(qarglist) > 3:
nbits = len(qarglist)
pos_array = [self.img_regs[(qarglist[0][0],
qarglist[0][1])]]
for i in range(1, nbits):
pos_array.append(self.img_regs[(qarglist[i][0],
qarglist[i][1])])
pos_start = min(pos_array)
pos_stop = max(pos_array)
delta = pos_stop - pos_start
self._latex[pos_start][columns] = (
"\\multigate{%s}{%s}" % (nbits - 1, nm))
for pos in range(pos_start + 1, pos_stop + 1):
self._latex[pos][columns] = "\\ghost{%s}" % nm
elif op.name == "measure":
if (len(op.cargs) != 1
or len(op.qargs) != 1
or op.op.params):
raise exceptions.VisualizationError("bad operation record")
if op.condition:
raise exceptions.VisualizationError(
"If controlled measures currently not supported.")
qname, qindex = op.qargs[0]
cname, cindex = op.cargs[0]
if aliases:
newq = aliases[(qname, qindex)]
qname = newq[0]
qindex = newq[1]
pos_1 = self.img_regs[(qname, qindex)]
pos_2 = self.img_regs[(cname, cindex)]
try:
self._latex[pos_1][column] = "\\meter"
prev_column = [x[column - 1] for x in self._latex]
for item, prev_entry in enumerate(prev_column):
if 'barrier' in prev_entry:
span = re.search('barrier{(.*)}', prev_entry)
if span and (
item + int(span.group(1))) - pos_1 >= 0:
self._latex[item][column - 1] = \
prev_entry.replace(
'\\barrier{',
'\\barrier[-1.15em]{')
self._latex[pos_2][column] = \
"\\cw \\cwx[-" + str(pos_2 - pos_1) + "]"
except Exception as e:
raise exceptions.VisualizationError(
'Error during Latex building: %s' % str(e))
elif op.name in ['barrier', 'snapshot', 'load', 'save',
'noise']:
if self.plot_barriers:
qarglist = op.qargs
indexes = [self._get_qubit_index(x) for x in qarglist]
start_bit = self.qubit_list[min(indexes)]
if aliases is not None:
qarglist = map(lambda x: aliases[x], qarglist)
start = self.img_regs[start_bit]
span = len(op.qargs) - 1
self._latex[start][column] = "\\qw \\barrier{" + str(
span) + "}"
else:
raise exceptions.VisualizationError("bad node data") | [
"def",
"_build_latex_array",
"(",
"self",
",",
"aliases",
"=",
"None",
")",
":",
"columns",
"=",
"1",
"# Rename qregs if necessary",
"if",
"aliases",
":",
"qregdata",
"=",
"{",
"}",
"for",
"q",
"in",
"aliases",
".",
"values",
"(",
")",
":",
"if",
"q",
"[",
"0",
"]",
"not",
"in",
"qregdata",
":",
"qregdata",
"[",
"q",
"[",
"0",
"]",
"]",
"=",
"q",
"[",
"1",
"]",
"+",
"1",
"elif",
"qregdata",
"[",
"q",
"[",
"0",
"]",
"]",
"<",
"q",
"[",
"1",
"]",
"+",
"1",
":",
"qregdata",
"[",
"q",
"[",
"0",
"]",
"]",
"=",
"q",
"[",
"1",
"]",
"+",
"1",
"else",
":",
"qregdata",
"=",
"self",
".",
"qregs",
"for",
"column",
",",
"layer",
"in",
"enumerate",
"(",
"self",
".",
"ops",
",",
"1",
")",
":",
"for",
"op",
"in",
"layer",
":",
"if",
"op",
".",
"condition",
":",
"mask",
"=",
"self",
".",
"_get_mask",
"(",
"op",
".",
"condition",
"[",
"0",
"]",
")",
"cl_reg",
"=",
"self",
".",
"clbit_list",
"[",
"self",
".",
"_ffs",
"(",
"mask",
")",
"]",
"if_reg",
"=",
"cl_reg",
"[",
"0",
"]",
"pos_2",
"=",
"self",
".",
"img_regs",
"[",
"cl_reg",
"]",
"if_value",
"=",
"format",
"(",
"op",
".",
"condition",
"[",
"1",
"]",
",",
"'b'",
")",
".",
"zfill",
"(",
"self",
".",
"cregs",
"[",
"if_reg",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"if",
"op",
".",
"name",
"not",
"in",
"[",
"'measure'",
",",
"'barrier'",
",",
"'snapshot'",
",",
"'load'",
",",
"'save'",
",",
"'noise'",
"]",
":",
"nm",
"=",
"op",
".",
"name",
"qarglist",
"=",
"op",
".",
"qargs",
"if",
"aliases",
"is",
"not",
"None",
":",
"qarglist",
"=",
"map",
"(",
"lambda",
"x",
":",
"aliases",
"[",
"x",
"]",
",",
"qarglist",
")",
"if",
"len",
"(",
"qarglist",
")",
"==",
"1",
":",
"pos_1",
"=",
"self",
".",
"img_regs",
"[",
"(",
"qarglist",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"qarglist",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"]",
"if",
"op",
".",
"condition",
":",
"mask",
"=",
"self",
".",
"_get_mask",
"(",
"op",
".",
"condition",
"[",
"0",
"]",
")",
"cl_reg",
"=",
"self",
".",
"clbit_list",
"[",
"self",
".",
"_ffs",
"(",
"mask",
")",
"]",
"if_reg",
"=",
"cl_reg",
"[",
"0",
"]",
"pos_2",
"=",
"self",
".",
"img_regs",
"[",
"cl_reg",
"]",
"if",
"nm",
"==",
"\"x\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{X}\"",
"elif",
"nm",
"==",
"\"y\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{Y}\"",
"elif",
"nm",
"==",
"\"z\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{Z}\"",
"elif",
"nm",
"==",
"\"h\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{H}\"",
"elif",
"nm",
"==",
"\"s\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{S}\"",
"elif",
"nm",
"==",
"\"sdg\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{S^\\\\dag}\"",
"elif",
"nm",
"==",
"\"t\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{T}\"",
"elif",
"nm",
"==",
"\"tdg\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{T^\\\\dag}\"",
"elif",
"nm",
"==",
"\"u0\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{U_0(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"u1\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{U_1(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"u2\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{U_2\\\\left(%s,%s\\\\right)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"1",
"]",
")",
"elif",
"nm",
"==",
"\"u3\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"(",
"\"\\\\gate{U_3(%s,%s,%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"1",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"2",
"]",
")",
")",
"elif",
"nm",
"==",
"\"rx\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{R_x(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"ry\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{R_y(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"rz\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{R_z(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"else",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"columns",
"]",
"=",
"\"\\\\gate{%s}\"",
"%",
"nm",
"gap",
"=",
"pos_2",
"-",
"pos_1",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"cregs",
"[",
"if_reg",
"]",
")",
":",
"if",
"if_value",
"[",
"i",
"]",
"==",
"'1'",
":",
"self",
".",
"_latex",
"[",
"pos_2",
"+",
"i",
"]",
"[",
"column",
"]",
"=",
"\"\\\\control \\\\cw \\\\cwx[-\"",
"+",
"str",
"(",
"gap",
")",
"+",
"\"]\"",
"gap",
"=",
"1",
"else",
":",
"self",
".",
"_latex",
"[",
"pos_2",
"+",
"i",
"]",
"[",
"column",
"]",
"=",
"\"\\\\controlo \\\\cw \\\\cwx[-\"",
"+",
"str",
"(",
"gap",
")",
"+",
"\"]\"",
"gap",
"=",
"1",
"else",
":",
"if",
"nm",
"==",
"\"x\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{X}\"",
"elif",
"nm",
"==",
"\"y\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{Y}\"",
"elif",
"nm",
"==",
"\"z\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{Z}\"",
"elif",
"nm",
"==",
"\"h\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{H}\"",
"elif",
"nm",
"==",
"\"s\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{S}\"",
"elif",
"nm",
"==",
"\"sdg\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{S^\\\\dag}\"",
"elif",
"nm",
"==",
"\"t\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{T}\"",
"elif",
"nm",
"==",
"\"tdg\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{T^\\\\dag}\"",
"elif",
"nm",
"==",
"\"u0\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{U_0(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"u1\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{U_1(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"u2\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{U_2\\\\left(%s,%s\\\\right)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"1",
"]",
")",
"elif",
"nm",
"==",
"\"u3\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"(",
"\"\\\\gate{U_3(%s,%s,%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"1",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"2",
"]",
")",
")",
"elif",
"nm",
"==",
"\"rx\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{R_x(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"ry\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{R_y(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"rz\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{R_z(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"reset\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"(",
"\"\\\\push{\\\\rule{.6em}{0em}\\\\ket{0}\\\\\"",
"\"rule{.2em}{0em}} \\\\qw\"",
")",
"else",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"columns",
"]",
"=",
"\"\\\\gate{%s}\"",
"%",
"nm",
"elif",
"len",
"(",
"qarglist",
")",
"==",
"2",
":",
"pos_1",
"=",
"self",
".",
"img_regs",
"[",
"(",
"qarglist",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"qarglist",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"]",
"pos_2",
"=",
"self",
".",
"img_regs",
"[",
"(",
"qarglist",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"qarglist",
"[",
"1",
"]",
"[",
"1",
"]",
")",
"]",
"if",
"op",
".",
"condition",
":",
"pos_3",
"=",
"self",
".",
"img_regs",
"[",
"(",
"if_reg",
",",
"0",
")",
"]",
"temp",
"=",
"[",
"pos_1",
",",
"pos_2",
",",
"pos_3",
"]",
"temp",
".",
"sort",
"(",
"key",
"=",
"int",
")",
"bottom",
"=",
"temp",
"[",
"1",
"]",
"gap",
"=",
"pos_3",
"-",
"bottom",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"cregs",
"[",
"if_reg",
"]",
")",
":",
"if",
"if_value",
"[",
"i",
"]",
"==",
"'1'",
":",
"self",
".",
"_latex",
"[",
"pos_3",
"+",
"i",
"]",
"[",
"column",
"]",
"=",
"\"\\\\control \\\\cw \\\\cwx[-\"",
"+",
"str",
"(",
"gap",
")",
"+",
"\"]\"",
"gap",
"=",
"1",
"else",
":",
"self",
".",
"_latex",
"[",
"pos_3",
"+",
"i",
"]",
"[",
"column",
"]",
"=",
"\"\\\\controlo \\\\cw \\\\cwx[-\"",
"+",
"str",
"(",
"gap",
")",
"+",
"\"]\"",
"gap",
"=",
"1",
"if",
"nm",
"==",
"\"cx\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\targ\"",
"elif",
"nm",
"==",
"\"cz\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\control\\\\qw\"",
"elif",
"nm",
"==",
"\"cy\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{Y}\"",
"elif",
"nm",
"==",
"\"ch\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{H}\"",
"elif",
"nm",
"==",
"\"swap\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qswap\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qswap \\\\qwx[\"",
"+",
"str",
"(",
"pos_1",
"-",
"pos_2",
")",
"+",
"\"]\"",
"elif",
"nm",
"==",
"\"crz\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{R_z(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"cu1\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"-",
"1",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"-",
"1",
"]",
"=",
"\"\\\\control\\\\qw\"",
"self",
".",
"_latex",
"[",
"min",
"(",
"pos_1",
",",
"pos_2",
")",
"]",
"[",
"column",
"]",
"=",
"\"\\\\dstick{%s}\\\\qw\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"self",
".",
"_latex",
"[",
"max",
"(",
"pos_1",
",",
"pos_2",
")",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qw\"",
"elif",
"nm",
"==",
"\"cu3\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{U_3(%s,%s,%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"1",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"2",
"]",
")",
"else",
":",
"temp",
"=",
"[",
"pos_1",
",",
"pos_2",
"]",
"temp",
".",
"sort",
"(",
"key",
"=",
"int",
")",
"if",
"nm",
"==",
"\"cx\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\targ\"",
"elif",
"nm",
"==",
"\"cz\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\control\\\\qw\"",
"elif",
"nm",
"==",
"\"cy\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{Y}\"",
"elif",
"nm",
"==",
"\"ch\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{H}\"",
"elif",
"nm",
"==",
"\"swap\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qswap\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qswap \\\\qwx[\"",
"+",
"str",
"(",
"pos_1",
"-",
"pos_2",
")",
"+",
"\"]\"",
"elif",
"nm",
"==",
"\"crz\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\gate{R_z(%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"elif",
"nm",
"==",
"\"cu1\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"-",
"1",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"-",
"1",
"]",
"=",
"\"\\\\control\\\\qw\"",
"self",
".",
"_latex",
"[",
"min",
"(",
"pos_1",
",",
"pos_2",
")",
"]",
"[",
"column",
"]",
"=",
"\"\\\\dstick{%s}\\\\qw\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
")",
"self",
".",
"_latex",
"[",
"max",
"(",
"pos_1",
",",
"pos_2",
")",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qw\"",
"elif",
"nm",
"==",
"\"cu3\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"(",
"\"\\\\gate{U_3(%s,%s,%s)}\"",
"%",
"(",
"op",
".",
"op",
".",
"params",
"[",
"0",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"1",
"]",
",",
"op",
".",
"op",
".",
"params",
"[",
"2",
"]",
")",
")",
"else",
":",
"start_pos",
"=",
"min",
"(",
"[",
"pos_1",
",",
"pos_2",
"]",
")",
"stop_pos",
"=",
"max",
"(",
"[",
"pos_1",
",",
"pos_2",
"]",
")",
"if",
"stop_pos",
"-",
"start_pos",
">=",
"2",
":",
"delta",
"=",
"stop_pos",
"-",
"start_pos",
"self",
".",
"_latex",
"[",
"start_pos",
"]",
"[",
"columns",
"]",
"=",
"(",
"\"\\\\multigate{%s}{%s}\"",
"%",
"(",
"delta",
",",
"nm",
")",
")",
"for",
"i_pos",
"in",
"range",
"(",
"start_pos",
"+",
"1",
",",
"stop_pos",
"+",
"1",
")",
":",
"self",
".",
"_latex",
"[",
"i_pos",
"]",
"[",
"columns",
"]",
"=",
"\"\\\\ghost{%s}\"",
"%",
"nm",
"else",
":",
"self",
".",
"_latex",
"[",
"start_pos",
"]",
"[",
"columns",
"]",
"=",
"(",
"\"\\\\multigate{1}{%s}\"",
"%",
"nm",
")",
"self",
".",
"_latex",
"[",
"stop_pos",
"]",
"[",
"columns",
"]",
"=",
"\"\\\\ghost{%s}\"",
"%",
"nm",
"elif",
"len",
"(",
"qarglist",
")",
"==",
"3",
":",
"pos_1",
"=",
"self",
".",
"img_regs",
"[",
"(",
"qarglist",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"qarglist",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"]",
"pos_2",
"=",
"self",
".",
"img_regs",
"[",
"(",
"qarglist",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"qarglist",
"[",
"1",
"]",
"[",
"1",
"]",
")",
"]",
"pos_3",
"=",
"self",
".",
"img_regs",
"[",
"(",
"qarglist",
"[",
"2",
"]",
"[",
"0",
"]",
",",
"qarglist",
"[",
"2",
"]",
"[",
"1",
"]",
")",
"]",
"if",
"op",
".",
"condition",
":",
"pos_4",
"=",
"self",
".",
"img_regs",
"[",
"(",
"if_reg",
",",
"0",
")",
"]",
"temp",
"=",
"[",
"pos_1",
",",
"pos_2",
",",
"pos_3",
",",
"pos_4",
"]",
"temp",
".",
"sort",
"(",
"key",
"=",
"int",
")",
"bottom",
"=",
"temp",
"[",
"2",
"]",
"prev_column",
"=",
"[",
"x",
"[",
"column",
"-",
"1",
"]",
"for",
"x",
"in",
"self",
".",
"_latex",
"]",
"for",
"item",
",",
"prev_entry",
"in",
"enumerate",
"(",
"prev_column",
")",
":",
"if",
"'barrier'",
"in",
"prev_entry",
":",
"span",
"=",
"re",
".",
"search",
"(",
"'barrier{(.*)}'",
",",
"prev_entry",
")",
"if",
"span",
"and",
"any",
"(",
"i",
"in",
"temp",
"for",
"i",
"in",
"range",
"(",
"item",
",",
"int",
"(",
"span",
".",
"group",
"(",
"1",
")",
")",
")",
")",
":",
"self",
".",
"_latex",
"[",
"item",
"]",
"[",
"column",
"-",
"1",
"]",
"=",
"prev_entry",
".",
"replace",
"(",
"'\\\\barrier{'",
",",
"'\\\\barrier[-0.65em]{'",
")",
"gap",
"=",
"pos_4",
"-",
"bottom",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"cregs",
"[",
"if_reg",
"]",
")",
":",
"if",
"if_value",
"[",
"i",
"]",
"==",
"'1'",
":",
"self",
".",
"_latex",
"[",
"pos_4",
"+",
"i",
"]",
"[",
"column",
"]",
"=",
"\"\\\\control \\\\cw \\\\cwx[-\"",
"+",
"str",
"(",
"gap",
")",
"+",
"\"]\"",
"gap",
"=",
"1",
"else",
":",
"self",
".",
"_latex",
"[",
"pos_4",
"+",
"i",
"]",
"[",
"column",
"]",
"=",
"\"\\\\controlo \\\\cw \\\\cwx[-\"",
"+",
"str",
"(",
"gap",
")",
"+",
"\"]\"",
"gap",
"=",
"1",
"if",
"nm",
"==",
"\"ccx\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_3",
"-",
"pos_2",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_3",
"]",
"[",
"column",
"]",
"=",
"\"\\\\targ\"",
"if",
"nm",
"==",
"\"cswap\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qswap\"",
"self",
".",
"_latex",
"[",
"pos_3",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qswap \\\\qwx[\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_3",
")",
"+",
"\"]\"",
"else",
":",
"temp",
"=",
"[",
"pos_1",
",",
"pos_2",
",",
"pos_3",
"]",
"temp",
".",
"sort",
"(",
"key",
"=",
"int",
")",
"prev_column",
"=",
"[",
"x",
"[",
"column",
"-",
"1",
"]",
"for",
"x",
"in",
"self",
".",
"_latex",
"]",
"for",
"item",
",",
"prev_entry",
"in",
"enumerate",
"(",
"prev_column",
")",
":",
"if",
"'barrier'",
"in",
"prev_entry",
":",
"span",
"=",
"re",
".",
"search",
"(",
"'barrier{(.*)}'",
",",
"prev_entry",
")",
"if",
"span",
"and",
"any",
"(",
"i",
"in",
"temp",
"for",
"i",
"in",
"range",
"(",
"item",
",",
"int",
"(",
"span",
".",
"group",
"(",
"1",
")",
")",
")",
")",
":",
"self",
".",
"_latex",
"[",
"item",
"]",
"[",
"column",
"-",
"1",
"]",
"=",
"prev_entry",
".",
"replace",
"(",
"'\\\\barrier{'",
",",
"'\\\\barrier[-0.65em]{'",
")",
"if",
"nm",
"==",
"\"ccx\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_3",
"-",
"pos_2",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_3",
"]",
"[",
"column",
"]",
"=",
"\"\\\\targ\"",
"elif",
"nm",
"==",
"\"cswap\"",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\ctrl{\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"}\"",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qswap\"",
"self",
".",
"_latex",
"[",
"pos_3",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qswap \\\\qwx[\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_3",
")",
"+",
"\"]\"",
"else",
":",
"start_pos",
"=",
"min",
"(",
"[",
"pos_1",
",",
"pos_2",
",",
"pos_3",
"]",
")",
"stop_pos",
"=",
"max",
"(",
"[",
"pos_1",
",",
"pos_2",
",",
"pos_3",
"]",
")",
"if",
"stop_pos",
"-",
"start_pos",
">=",
"3",
":",
"delta",
"=",
"stop_pos",
"-",
"start_pos",
"self",
".",
"_latex",
"[",
"start_pos",
"]",
"[",
"columns",
"]",
"=",
"(",
"\"\\\\multigate{%s}{%s}\"",
"%",
"(",
"delta",
",",
"nm",
")",
")",
"for",
"i_pos",
"in",
"range",
"(",
"start_pos",
"+",
"1",
",",
"stop_pos",
"+",
"1",
")",
":",
"self",
".",
"_latex",
"[",
"i_pos",
"]",
"[",
"columns",
"]",
"=",
"\"\\\\ghost{%s}\"",
"%",
"nm",
"else",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"columns",
"]",
"=",
"(",
"\"\\\\multigate{2}{%s}\"",
"%",
"nm",
")",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"columns",
"]",
"=",
"\"\\\\ghost{%s}\"",
"%",
"nm",
"self",
".",
"_latex",
"[",
"pos_3",
"]",
"[",
"columns",
"]",
"=",
"\"\\\\ghost{%s}\"",
"%",
"nm",
"elif",
"len",
"(",
"qarglist",
")",
">",
"3",
":",
"nbits",
"=",
"len",
"(",
"qarglist",
")",
"pos_array",
"=",
"[",
"self",
".",
"img_regs",
"[",
"(",
"qarglist",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"qarglist",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"nbits",
")",
":",
"pos_array",
".",
"append",
"(",
"self",
".",
"img_regs",
"[",
"(",
"qarglist",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"qarglist",
"[",
"i",
"]",
"[",
"1",
"]",
")",
"]",
")",
"pos_start",
"=",
"min",
"(",
"pos_array",
")",
"pos_stop",
"=",
"max",
"(",
"pos_array",
")",
"delta",
"=",
"pos_stop",
"-",
"pos_start",
"self",
".",
"_latex",
"[",
"pos_start",
"]",
"[",
"columns",
"]",
"=",
"(",
"\"\\\\multigate{%s}{%s}\"",
"%",
"(",
"nbits",
"-",
"1",
",",
"nm",
")",
")",
"for",
"pos",
"in",
"range",
"(",
"pos_start",
"+",
"1",
",",
"pos_stop",
"+",
"1",
")",
":",
"self",
".",
"_latex",
"[",
"pos",
"]",
"[",
"columns",
"]",
"=",
"\"\\\\ghost{%s}\"",
"%",
"nm",
"elif",
"op",
".",
"name",
"==",
"\"measure\"",
":",
"if",
"(",
"len",
"(",
"op",
".",
"cargs",
")",
"!=",
"1",
"or",
"len",
"(",
"op",
".",
"qargs",
")",
"!=",
"1",
"or",
"op",
".",
"op",
".",
"params",
")",
":",
"raise",
"exceptions",
".",
"VisualizationError",
"(",
"\"bad operation record\"",
")",
"if",
"op",
".",
"condition",
":",
"raise",
"exceptions",
".",
"VisualizationError",
"(",
"\"If controlled measures currently not supported.\"",
")",
"qname",
",",
"qindex",
"=",
"op",
".",
"qargs",
"[",
"0",
"]",
"cname",
",",
"cindex",
"=",
"op",
".",
"cargs",
"[",
"0",
"]",
"if",
"aliases",
":",
"newq",
"=",
"aliases",
"[",
"(",
"qname",
",",
"qindex",
")",
"]",
"qname",
"=",
"newq",
"[",
"0",
"]",
"qindex",
"=",
"newq",
"[",
"1",
"]",
"pos_1",
"=",
"self",
".",
"img_regs",
"[",
"(",
"qname",
",",
"qindex",
")",
"]",
"pos_2",
"=",
"self",
".",
"img_regs",
"[",
"(",
"cname",
",",
"cindex",
")",
"]",
"try",
":",
"self",
".",
"_latex",
"[",
"pos_1",
"]",
"[",
"column",
"]",
"=",
"\"\\\\meter\"",
"prev_column",
"=",
"[",
"x",
"[",
"column",
"-",
"1",
"]",
"for",
"x",
"in",
"self",
".",
"_latex",
"]",
"for",
"item",
",",
"prev_entry",
"in",
"enumerate",
"(",
"prev_column",
")",
":",
"if",
"'barrier'",
"in",
"prev_entry",
":",
"span",
"=",
"re",
".",
"search",
"(",
"'barrier{(.*)}'",
",",
"prev_entry",
")",
"if",
"span",
"and",
"(",
"item",
"+",
"int",
"(",
"span",
".",
"group",
"(",
"1",
")",
")",
")",
"-",
"pos_1",
">=",
"0",
":",
"self",
".",
"_latex",
"[",
"item",
"]",
"[",
"column",
"-",
"1",
"]",
"=",
"prev_entry",
".",
"replace",
"(",
"'\\\\barrier{'",
",",
"'\\\\barrier[-1.15em]{'",
")",
"self",
".",
"_latex",
"[",
"pos_2",
"]",
"[",
"column",
"]",
"=",
"\"\\\\cw \\\\cwx[-\"",
"+",
"str",
"(",
"pos_2",
"-",
"pos_1",
")",
"+",
"\"]\"",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"VisualizationError",
"(",
"'Error during Latex building: %s'",
"%",
"str",
"(",
"e",
")",
")",
"elif",
"op",
".",
"name",
"in",
"[",
"'barrier'",
",",
"'snapshot'",
",",
"'load'",
",",
"'save'",
",",
"'noise'",
"]",
":",
"if",
"self",
".",
"plot_barriers",
":",
"qarglist",
"=",
"op",
".",
"qargs",
"indexes",
"=",
"[",
"self",
".",
"_get_qubit_index",
"(",
"x",
")",
"for",
"x",
"in",
"qarglist",
"]",
"start_bit",
"=",
"self",
".",
"qubit_list",
"[",
"min",
"(",
"indexes",
")",
"]",
"if",
"aliases",
"is",
"not",
"None",
":",
"qarglist",
"=",
"map",
"(",
"lambda",
"x",
":",
"aliases",
"[",
"x",
"]",
",",
"qarglist",
")",
"start",
"=",
"self",
".",
"img_regs",
"[",
"start_bit",
"]",
"span",
"=",
"len",
"(",
"op",
".",
"qargs",
")",
"-",
"1",
"self",
".",
"_latex",
"[",
"start",
"]",
"[",
"column",
"]",
"=",
"\"\\\\qw \\\\barrier{\"",
"+",
"str",
"(",
"span",
")",
"+",
"\"}\"",
"else",
":",
"raise",
"exceptions",
".",
"VisualizationError",
"(",
"\"bad node data\"",
")"
] | 55.248227 | 22.3026 |
def youtube_meta(client, channel, nick, message, match):
""" Return meta information about a video """
if not API_KEY:
return 'You must set YOUTUBE_DATA_API_KEY in settings!'
identifier = match[0]
params = {
'id': identifier,
'key': API_KEY,
'part': 'snippet,statistics,contentDetails',
}
response = requests.get(API_ROOT, params=params)
if response.status_code != 200:
return 'Error in response, ' + str(response.status_code) + ' for identifier: ' + identifier
try:
data = response.json()['items'][0]
except:
print('Exception requesting info for identifier: ' + identifier)
traceback.print_exc()
response_dict = {
'title': data['snippet']['title'],
'poster': data['snippet']['channelTitle'],
'date': str(parse_date(data['snippet']['publishedAt'])),
'views': data['statistics']['viewCount'],
'likes': data['statistics']['likeCount'],
'dislikes': data['statistics']['dislikeCount'],
'duration': parse_duration(data['contentDetails']['duration']),
}
return RESPONSE_TEMPLATE.format(**response_dict).encode('utf-8').strip() | [
"def",
"youtube_meta",
"(",
"client",
",",
"channel",
",",
"nick",
",",
"message",
",",
"match",
")",
":",
"if",
"not",
"API_KEY",
":",
"return",
"'You must set YOUTUBE_DATA_API_KEY in settings!'",
"identifier",
"=",
"match",
"[",
"0",
"]",
"params",
"=",
"{",
"'id'",
":",
"identifier",
",",
"'key'",
":",
"API_KEY",
",",
"'part'",
":",
"'snippet,statistics,contentDetails'",
",",
"}",
"response",
"=",
"requests",
".",
"get",
"(",
"API_ROOT",
",",
"params",
"=",
"params",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"return",
"'Error in response, '",
"+",
"str",
"(",
"response",
".",
"status_code",
")",
"+",
"' for identifier: '",
"+",
"identifier",
"try",
":",
"data",
"=",
"response",
".",
"json",
"(",
")",
"[",
"'items'",
"]",
"[",
"0",
"]",
"except",
":",
"print",
"(",
"'Exception requesting info for identifier: '",
"+",
"identifier",
")",
"traceback",
".",
"print_exc",
"(",
")",
"response_dict",
"=",
"{",
"'title'",
":",
"data",
"[",
"'snippet'",
"]",
"[",
"'title'",
"]",
",",
"'poster'",
":",
"data",
"[",
"'snippet'",
"]",
"[",
"'channelTitle'",
"]",
",",
"'date'",
":",
"str",
"(",
"parse_date",
"(",
"data",
"[",
"'snippet'",
"]",
"[",
"'publishedAt'",
"]",
")",
")",
",",
"'views'",
":",
"data",
"[",
"'statistics'",
"]",
"[",
"'viewCount'",
"]",
",",
"'likes'",
":",
"data",
"[",
"'statistics'",
"]",
"[",
"'likeCount'",
"]",
",",
"'dislikes'",
":",
"data",
"[",
"'statistics'",
"]",
"[",
"'dislikeCount'",
"]",
",",
"'duration'",
":",
"parse_duration",
"(",
"data",
"[",
"'contentDetails'",
"]",
"[",
"'duration'",
"]",
")",
",",
"}",
"return",
"RESPONSE_TEMPLATE",
".",
"format",
"(",
"*",
"*",
"response_dict",
")",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")"
] | 37.451613 | 21.677419 |
def add_task_to_diagram(self, process_id, task_name="", node_id=None):
"""
Adds a Task element to BPMN diagram.
User-defined attributes:
- name
:param process_id: string object. ID of parent process,
:param task_name: string object. Name of task,
:param node_id: string object. ID of node. Default value - None.
:return: a tuple, where first value is task ID, second a reference to created object.
"""
return self.add_flow_node_to_diagram(process_id, consts.Consts.task, task_name, node_id) | [
"def",
"add_task_to_diagram",
"(",
"self",
",",
"process_id",
",",
"task_name",
"=",
"\"\"",
",",
"node_id",
"=",
"None",
")",
":",
"return",
"self",
".",
"add_flow_node_to_diagram",
"(",
"process_id",
",",
"consts",
".",
"Consts",
".",
"task",
",",
"task_name",
",",
"node_id",
")"
] | 40 | 26.142857 |
def submit_snl(self, snl):
"""
Submits a list of StructureNL to the Materials Project site.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
snl (StructureNL/[StructureNL]): A single StructureNL, or a list
of StructureNL objects
Returns:
A list of inserted submission ids.
Raises:
MPRestError
"""
try:
snl = snl if isinstance(snl, list) else [snl]
jsondata = [s.as_dict() for s in snl]
payload = {"snl": json.dumps(jsondata, cls=MontyEncoder)}
response = self.session.post("{}/snl/submit".format(self.preamble),
data=payload)
if response.status_code in [200, 400]:
resp = json.loads(response.text, cls=MontyDecoder)
if resp["valid_response"]:
if resp.get("warning"):
warnings.warn(resp["warning"])
return resp['inserted_ids']
else:
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}"
.format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex)) | [
"def",
"submit_snl",
"(",
"self",
",",
"snl",
")",
":",
"try",
":",
"snl",
"=",
"snl",
"if",
"isinstance",
"(",
"snl",
",",
"list",
")",
"else",
"[",
"snl",
"]",
"jsondata",
"=",
"[",
"s",
".",
"as_dict",
"(",
")",
"for",
"s",
"in",
"snl",
"]",
"payload",
"=",
"{",
"\"snl\"",
":",
"json",
".",
"dumps",
"(",
"jsondata",
",",
"cls",
"=",
"MontyEncoder",
")",
"}",
"response",
"=",
"self",
".",
"session",
".",
"post",
"(",
"\"{}/snl/submit\"",
".",
"format",
"(",
"self",
".",
"preamble",
")",
",",
"data",
"=",
"payload",
")",
"if",
"response",
".",
"status_code",
"in",
"[",
"200",
",",
"400",
"]",
":",
"resp",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"text",
",",
"cls",
"=",
"MontyDecoder",
")",
"if",
"resp",
"[",
"\"valid_response\"",
"]",
":",
"if",
"resp",
".",
"get",
"(",
"\"warning\"",
")",
":",
"warnings",
".",
"warn",
"(",
"resp",
"[",
"\"warning\"",
"]",
")",
"return",
"resp",
"[",
"'inserted_ids'",
"]",
"else",
":",
"raise",
"MPRestError",
"(",
"resp",
"[",
"\"error\"",
"]",
")",
"raise",
"MPRestError",
"(",
"\"REST error with status code {} and error {}\"",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
")",
"except",
"Exception",
"as",
"ex",
":",
"raise",
"MPRestError",
"(",
"str",
"(",
"ex",
")",
")"
] | 36.125 | 22.125 |
def response(self, component_id, component=None, **kwargs):
"""Add a response which can be referenced.
:param str component_id: ref_id to use as reference
:param dict component: response fields
:param dict kwargs: plugin-specific arguments
"""
if component_id in self._responses:
raise DuplicateComponentNameError(
'Another response with name "{}" is already registered.'.format(
component_id
)
)
component = component or {}
ret = component.copy()
# Execute all helpers from plugins
for plugin in self._plugins:
try:
ret.update(plugin.response_helper(component, **kwargs) or {})
except PluginMethodNotImplementedError:
continue
self._responses[component_id] = ret
return self | [
"def",
"response",
"(",
"self",
",",
"component_id",
",",
"component",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"component_id",
"in",
"self",
".",
"_responses",
":",
"raise",
"DuplicateComponentNameError",
"(",
"'Another response with name \"{}\" is already registered.'",
".",
"format",
"(",
"component_id",
")",
")",
"component",
"=",
"component",
"or",
"{",
"}",
"ret",
"=",
"component",
".",
"copy",
"(",
")",
"# Execute all helpers from plugins",
"for",
"plugin",
"in",
"self",
".",
"_plugins",
":",
"try",
":",
"ret",
".",
"update",
"(",
"plugin",
".",
"response_helper",
"(",
"component",
",",
"*",
"*",
"kwargs",
")",
"or",
"{",
"}",
")",
"except",
"PluginMethodNotImplementedError",
":",
"continue",
"self",
".",
"_responses",
"[",
"component_id",
"]",
"=",
"ret",
"return",
"self"
] | 38.347826 | 14.652174 |
def new_contact(cls, address_book, supported_private_objects, version,
localize_dates):
"""Use this to create a new and empty contact."""
return cls(address_book, None, supported_private_objects, version,
localize_dates) | [
"def",
"new_contact",
"(",
"cls",
",",
"address_book",
",",
"supported_private_objects",
",",
"version",
",",
"localize_dates",
")",
":",
"return",
"cls",
"(",
"address_book",
",",
"None",
",",
"supported_private_objects",
",",
"version",
",",
"localize_dates",
")"
] | 52 | 17 |
def _store_inferential_results(self,
value_array,
index_names,
attribute_name,
series_name=None,
column_names=None):
"""
Store the estimation results that relate to statistical inference, such
as parameter estimates, standard errors, p-values, etc.
Parameters
----------
value_array : 1D or 2D ndarray.
Contains the values that are to be stored on the model instance.
index_names : list of strings.
Contains the names that are to be displayed on the 'rows' for each
value being stored. There should be one element for each value of
`value_array.`
series_name : string or None, optional.
The name of the pandas series being created for `value_array.` This
kwarg should be None when `value_array` is a 1D ndarray.
attribute_name : string.
The attribute name that will be exposed on the model instance and
related to the passed `value_array.`
column_names : list of strings, or None, optional.
Same as `index_names` except that it pertains to the columns of a
2D ndarray. When `value_array` is a 2D ndarray, There should be one
element for each column of `value_array.` This kwarg should be None
otherwise.
Returns
-------
None. Stores a pandas series or dataframe on the model instance.
"""
if len(value_array.shape) == 1:
assert series_name is not None
new_attribute_value = pd.Series(value_array,
index=index_names,
name=series_name)
elif len(value_array.shape) == 2:
assert column_names is not None
new_attribute_value = pd.DataFrame(value_array,
index=index_names,
columns=column_names)
setattr(self, attribute_name, new_attribute_value)
return None | [
"def",
"_store_inferential_results",
"(",
"self",
",",
"value_array",
",",
"index_names",
",",
"attribute_name",
",",
"series_name",
"=",
"None",
",",
"column_names",
"=",
"None",
")",
":",
"if",
"len",
"(",
"value_array",
".",
"shape",
")",
"==",
"1",
":",
"assert",
"series_name",
"is",
"not",
"None",
"new_attribute_value",
"=",
"pd",
".",
"Series",
"(",
"value_array",
",",
"index",
"=",
"index_names",
",",
"name",
"=",
"series_name",
")",
"elif",
"len",
"(",
"value_array",
".",
"shape",
")",
"==",
"2",
":",
"assert",
"column_names",
"is",
"not",
"None",
"new_attribute_value",
"=",
"pd",
".",
"DataFrame",
"(",
"value_array",
",",
"index",
"=",
"index_names",
",",
"columns",
"=",
"column_names",
")",
"setattr",
"(",
"self",
",",
"attribute_name",
",",
"new_attribute_value",
")",
"return",
"None"
] | 45.854167 | 20.520833 |
def _calc_eta(self):
""" Calculates estimated time left until completion. """
elapsed = self._elapsed()
if self.cnt == 0 or elapsed < 0.001:
return None
rate = float(self.cnt) / elapsed
self.eta = (float(self.max_iter) - float(self.cnt)) / rate | [
"def",
"_calc_eta",
"(",
"self",
")",
":",
"elapsed",
"=",
"self",
".",
"_elapsed",
"(",
")",
"if",
"self",
".",
"cnt",
"==",
"0",
"or",
"elapsed",
"<",
"0.001",
":",
"return",
"None",
"rate",
"=",
"float",
"(",
"self",
".",
"cnt",
")",
"/",
"elapsed",
"self",
".",
"eta",
"=",
"(",
"float",
"(",
"self",
".",
"max_iter",
")",
"-",
"float",
"(",
"self",
".",
"cnt",
")",
")",
"/",
"rate"
] | 41.428571 | 10.571429 |
def get_s3_region_from_endpoint(endpoint):
"""
Extracts and returns an AWS S3 region from an endpoint
of form `s3-ap-southeast-1.amazonaws.com`
:param endpoint: Endpoint region to be extracted.
"""
# Extract region by regex search.
m = _EXTRACT_REGION_REGEX.search(endpoint)
if m:
# Regex matches, we have found a region.
region = m.group(1)
if region == 'external-1':
# Handle special scenario for us-east-1 URL.
return 'us-east-1'
if region.startswith('dualstack'):
# Handle special scenario for dualstack URL.
return region.split('.')[1]
return region
# No regex matches return None.
return None | [
"def",
"get_s3_region_from_endpoint",
"(",
"endpoint",
")",
":",
"# Extract region by regex search.",
"m",
"=",
"_EXTRACT_REGION_REGEX",
".",
"search",
"(",
"endpoint",
")",
"if",
"m",
":",
"# Regex matches, we have found a region.",
"region",
"=",
"m",
".",
"group",
"(",
"1",
")",
"if",
"region",
"==",
"'external-1'",
":",
"# Handle special scenario for us-east-1 URL.",
"return",
"'us-east-1'",
"if",
"region",
".",
"startswith",
"(",
"'dualstack'",
")",
":",
"# Handle special scenario for dualstack URL.",
"return",
"region",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"return",
"region",
"# No regex matches return None.",
"return",
"None"
] | 30.73913 | 13.869565 |
def zoom_in_pixel(self, curr_pixel):
""" return the curr_frag at a higher resolution"""
low_frag = curr_pixel[0]
high_frag = curr_pixel[1]
level = curr_pixel[2]
if level > 0:
str_level = str(level)
low_sub_low = self.spec_level[str_level]["fragments_dict"][
low_frag
]["sub_low_index"]
low_sub_high = self.spec_level[str_level]["fragments_dict"][
low_frag
]["sub_high_index"]
high_sub_low = self.spec_level[str_level]["fragments_dict"][
high_frag
]["sub_low_index"]
high_sub_high = self.spec_level[str_level]["fragments_dict"][
high_frag
]["sub_high_index"]
vect = [low_sub_low, low_sub_high, high_sub_low, high_sub_high]
new_pix_low = min(vect)
new_pix_high = max(vect)
new_level = level - 1
new_pixel = [new_pix_low, new_pix_high, new_level]
else:
new_pixel = curr_pixel
return new_pixel | [
"def",
"zoom_in_pixel",
"(",
"self",
",",
"curr_pixel",
")",
":",
"low_frag",
"=",
"curr_pixel",
"[",
"0",
"]",
"high_frag",
"=",
"curr_pixel",
"[",
"1",
"]",
"level",
"=",
"curr_pixel",
"[",
"2",
"]",
"if",
"level",
">",
"0",
":",
"str_level",
"=",
"str",
"(",
"level",
")",
"low_sub_low",
"=",
"self",
".",
"spec_level",
"[",
"str_level",
"]",
"[",
"\"fragments_dict\"",
"]",
"[",
"low_frag",
"]",
"[",
"\"sub_low_index\"",
"]",
"low_sub_high",
"=",
"self",
".",
"spec_level",
"[",
"str_level",
"]",
"[",
"\"fragments_dict\"",
"]",
"[",
"low_frag",
"]",
"[",
"\"sub_high_index\"",
"]",
"high_sub_low",
"=",
"self",
".",
"spec_level",
"[",
"str_level",
"]",
"[",
"\"fragments_dict\"",
"]",
"[",
"high_frag",
"]",
"[",
"\"sub_low_index\"",
"]",
"high_sub_high",
"=",
"self",
".",
"spec_level",
"[",
"str_level",
"]",
"[",
"\"fragments_dict\"",
"]",
"[",
"high_frag",
"]",
"[",
"\"sub_high_index\"",
"]",
"vect",
"=",
"[",
"low_sub_low",
",",
"low_sub_high",
",",
"high_sub_low",
",",
"high_sub_high",
"]",
"new_pix_low",
"=",
"min",
"(",
"vect",
")",
"new_pix_high",
"=",
"max",
"(",
"vect",
")",
"new_level",
"=",
"level",
"-",
"1",
"new_pixel",
"=",
"[",
"new_pix_low",
",",
"new_pix_high",
",",
"new_level",
"]",
"else",
":",
"new_pixel",
"=",
"curr_pixel",
"return",
"new_pixel"
] | 39.37037 | 15 |
def from_stream(cls, stream):
"""
Read the first occurrence of ScfCycle from stream.
Returns:
None if no `ScfCycle` entry is found.
"""
fields = _magic_parser(stream, magic=cls.MAGIC)
if fields:
fields.pop("iter")
return cls(fields)
else:
return None | [
"def",
"from_stream",
"(",
"cls",
",",
"stream",
")",
":",
"fields",
"=",
"_magic_parser",
"(",
"stream",
",",
"magic",
"=",
"cls",
".",
"MAGIC",
")",
"if",
"fields",
":",
"fields",
".",
"pop",
"(",
"\"iter\"",
")",
"return",
"cls",
"(",
"fields",
")",
"else",
":",
"return",
"None"
] | 24.5 | 17.357143 |
def status(sec):
"""Toolbar progressive status
"""
if _meta_.prg_bar in ["on", "ON"]:
syms = ["|", "/", "-", "\\"]
for sym in syms:
sys.stdout.write("\b{0}{1}{2}".format(_meta_.color["GREY"], sym,
_meta_.color["ENDC"]))
sys.stdout.flush()
time.sleep(float(sec)) | [
"def",
"status",
"(",
"sec",
")",
":",
"if",
"_meta_",
".",
"prg_bar",
"in",
"[",
"\"on\"",
",",
"\"ON\"",
"]",
":",
"syms",
"=",
"[",
"\"|\"",
",",
"\"/\"",
",",
"\"-\"",
",",
"\"\\\\\"",
"]",
"for",
"sym",
"in",
"syms",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\b{0}{1}{2}\"",
".",
"format",
"(",
"_meta_",
".",
"color",
"[",
"\"GREY\"",
"]",
",",
"sym",
",",
"_meta_",
".",
"color",
"[",
"\"ENDC\"",
"]",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"time",
".",
"sleep",
"(",
"float",
"(",
"sec",
")",
")"
] | 36.6 | 13 |
def auc_roc_score(input:Tensor, targ:Tensor):
"Using trapezoid method to calculate the area under roc curve"
fpr, tpr = roc_curve(input, targ)
d = fpr[1:] - fpr[:-1]
sl1, sl2 = [slice(None)], [slice(None)]
sl1[-1], sl2[-1] = slice(1, None), slice(None, -1)
return (d * (tpr[tuple(sl1)] + tpr[tuple(sl2)]) / 2.).sum(-1) | [
"def",
"auc_roc_score",
"(",
"input",
":",
"Tensor",
",",
"targ",
":",
"Tensor",
")",
":",
"fpr",
",",
"tpr",
"=",
"roc_curve",
"(",
"input",
",",
"targ",
")",
"d",
"=",
"fpr",
"[",
"1",
":",
"]",
"-",
"fpr",
"[",
":",
"-",
"1",
"]",
"sl1",
",",
"sl2",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
",",
"[",
"slice",
"(",
"None",
")",
"]",
"sl1",
"[",
"-",
"1",
"]",
",",
"sl2",
"[",
"-",
"1",
"]",
"=",
"slice",
"(",
"1",
",",
"None",
")",
",",
"slice",
"(",
"None",
",",
"-",
"1",
")",
"return",
"(",
"d",
"*",
"(",
"tpr",
"[",
"tuple",
"(",
"sl1",
")",
"]",
"+",
"tpr",
"[",
"tuple",
"(",
"sl2",
")",
"]",
")",
"/",
"2.",
")",
".",
"sum",
"(",
"-",
"1",
")"
] | 48 | 12.857143 |
def find_alliteration(self):
"""
Find alliterations in the complete verse.
:return:
"""
if len(self.phonological_features_text) == 0:
logger.error("No phonological transcription found")
raise ValueError
else:
first_sounds = []
for i, line in enumerate(self.phonological_features_text):
first_sounds.append([])
for j, short_line in enumerate(line):
first_sounds[i].append([])
for viisuord in short_line:
first_sounds[i][j].append(viisuord[0])
verse_alliterations = []
n_alliterations_lines = []
for i, first_sound_line in enumerate(first_sounds):
if isinstance(self.long_lines[i][0], ShortLine) and isinstance(self.long_lines[i][1], ShortLine):
self.long_lines[i][0].get_first_sounds()
self.long_lines[i][1].get_first_sounds()
alli, counter = self.long_lines[i][0].find_alliterations(self.long_lines[i][1])
verse_alliterations.append(alli)
n_alliterations_lines.append(counter)
elif isinstance(self.long_lines[i][0], LongLine):
self.long_lines[i][0].get_first_sounds()
alli, counter = self.long_lines[i][0].find_alliterations()
verse_alliterations.append(alli)
n_alliterations_lines.append(counter)
return verse_alliterations, n_alliterations_lines | [
"def",
"find_alliteration",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"phonological_features_text",
")",
"==",
"0",
":",
"logger",
".",
"error",
"(",
"\"No phonological transcription found\"",
")",
"raise",
"ValueError",
"else",
":",
"first_sounds",
"=",
"[",
"]",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"self",
".",
"phonological_features_text",
")",
":",
"first_sounds",
".",
"append",
"(",
"[",
"]",
")",
"for",
"j",
",",
"short_line",
"in",
"enumerate",
"(",
"line",
")",
":",
"first_sounds",
"[",
"i",
"]",
".",
"append",
"(",
"[",
"]",
")",
"for",
"viisuord",
"in",
"short_line",
":",
"first_sounds",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"append",
"(",
"viisuord",
"[",
"0",
"]",
")",
"verse_alliterations",
"=",
"[",
"]",
"n_alliterations_lines",
"=",
"[",
"]",
"for",
"i",
",",
"first_sound_line",
"in",
"enumerate",
"(",
"first_sounds",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"ShortLine",
")",
"and",
"isinstance",
"(",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"ShortLine",
")",
":",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"0",
"]",
".",
"get_first_sounds",
"(",
")",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"1",
"]",
".",
"get_first_sounds",
"(",
")",
"alli",
",",
"counter",
"=",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"0",
"]",
".",
"find_alliterations",
"(",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"1",
"]",
")",
"verse_alliterations",
".",
"append",
"(",
"alli",
")",
"n_alliterations_lines",
".",
"append",
"(",
"counter",
")",
"elif",
"isinstance",
"(",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"LongLine",
")",
":",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"0",
"]",
".",
"get_first_sounds",
"(",
")",
"alli",
",",
"counter",
"=",
"self",
".",
"long_lines",
"[",
"i",
"]",
"[",
"0",
"]",
".",
"find_alliterations",
"(",
")",
"verse_alliterations",
".",
"append",
"(",
"alli",
")",
"n_alliterations_lines",
".",
"append",
"(",
"counter",
")",
"return",
"verse_alliterations",
",",
"n_alliterations_lines"
] | 49.03125 | 19.15625 |
def translate_codons(sequence):
'''Return the translated protein from 'sequence' assuming +1 reading frame
Source - http://adamcoster.com/2011/01/13/python-clean-up-and-translate-nucleotide-sequences/
'''
return ''.join([gencode.get(sequence[3*i:3*i+3],'X') for i in range(len(sequence)//3)]) | [
"def",
"translate_codons",
"(",
"sequence",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"gencode",
".",
"get",
"(",
"sequence",
"[",
"3",
"*",
"i",
":",
"3",
"*",
"i",
"+",
"3",
"]",
",",
"'X'",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"sequence",
")",
"//",
"3",
")",
"]",
")"
] | 61.4 | 38.2 |
def finalize(self):
""" Is called before destruction. Can be used to clean-up resources
"""
logger.debug("Finalizing: {}".format(self))
self.plotItem.scene().sigMouseMoved.disconnect(self.mouseMoved)
self.plotItem.close()
self.graphicsLayoutWidget.close() | [
"def",
"finalize",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Finalizing: {}\"",
".",
"format",
"(",
"self",
")",
")",
"self",
".",
"plotItem",
".",
"scene",
"(",
")",
".",
"sigMouseMoved",
".",
"disconnect",
"(",
"self",
".",
"mouseMoved",
")",
"self",
".",
"plotItem",
".",
"close",
"(",
")",
"self",
".",
"graphicsLayoutWidget",
".",
"close",
"(",
")"
] | 42.428571 | 10.714286 |
def getextensibleindex(self, key, name):
"""
Get the index of the first extensible item.
Only for internal use. # TODO : hide this
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
name : str
The name of the object to fetch.
Returns
-------
int
"""
return getextensibleindex(
self.idfobjects, self.model, self.idd_info,
key, name) | [
"def",
"getextensibleindex",
"(",
"self",
",",
"key",
",",
"name",
")",
":",
"return",
"getextensibleindex",
"(",
"self",
".",
"idfobjects",
",",
"self",
".",
"model",
",",
"self",
".",
"idd_info",
",",
"key",
",",
"name",
")"
] | 23.333333 | 19.619048 |
def DEFINE_flag(flag, flag_values=_flagvalues.FLAGS, module_name=None): # pylint: disable=invalid-name
"""Registers a 'Flag' object with a 'FlagValues' object.
By default, the global FLAGS 'FlagValue' object is used.
Typical users will use one of the more specialized DEFINE_xxx
functions, such as DEFINE_string or DEFINE_integer. But developers
who need to create Flag objects themselves should use this function
to register their flags.
Args:
flag: Flag, a flag that is key to the module.
flag_values: FlagValues, the FlagValues instance with which the flag will
be registered. This should almost never need to be overridden.
module_name: str, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
"""
# Copying the reference to flag_values prevents pychecker warnings.
fv = flag_values
fv[flag.name] = flag
# Tell flag_values who's defining the flag.
if module_name:
module = sys.modules.get(module_name)
else:
module, module_name = _helpers.get_calling_module_object_and_name()
flag_values.register_flag_by_module(module_name, flag)
flag_values.register_flag_by_module_id(id(module), flag) | [
"def",
"DEFINE_flag",
"(",
"flag",
",",
"flag_values",
"=",
"_flagvalues",
".",
"FLAGS",
",",
"module_name",
"=",
"None",
")",
":",
"# pylint: disable=invalid-name",
"# Copying the reference to flag_values prevents pychecker warnings.",
"fv",
"=",
"flag_values",
"fv",
"[",
"flag",
".",
"name",
"]",
"=",
"flag",
"# Tell flag_values who's defining the flag.",
"if",
"module_name",
":",
"module",
"=",
"sys",
".",
"modules",
".",
"get",
"(",
"module_name",
")",
"else",
":",
"module",
",",
"module_name",
"=",
"_helpers",
".",
"get_calling_module_object_and_name",
"(",
")",
"flag_values",
".",
"register_flag_by_module",
"(",
"module_name",
",",
"flag",
")",
"flag_values",
".",
"register_flag_by_module_id",
"(",
"id",
"(",
"module",
")",
",",
"flag",
")"
] | 44.814815 | 24.925926 |
def dump_code(disassembly, pc = None,
bLowercase = True,
bits = None):
"""
Dump a disassembly. Optionally mark where the program counter is.
@type disassembly: list of tuple( int, int, str, str )
@param disassembly: Disassembly dump as returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type pc: int
@param pc: (Optional) Program counter.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if not disassembly:
return ''
table = Table(sep = ' | ')
for (addr, size, code, dump) in disassembly:
if bLowercase:
code = code.lower()
if addr == pc:
addr = ' * %s' % HexDump.address(addr, bits)
else:
addr = ' %s' % HexDump.address(addr, bits)
table.addRow(addr, dump, code)
table.justify(1, 1)
return table.getOutput() | [
"def",
"dump_code",
"(",
"disassembly",
",",
"pc",
"=",
"None",
",",
"bLowercase",
"=",
"True",
",",
"bits",
"=",
"None",
")",
":",
"if",
"not",
"disassembly",
":",
"return",
"''",
"table",
"=",
"Table",
"(",
"sep",
"=",
"' | '",
")",
"for",
"(",
"addr",
",",
"size",
",",
"code",
",",
"dump",
")",
"in",
"disassembly",
":",
"if",
"bLowercase",
":",
"code",
"=",
"code",
".",
"lower",
"(",
")",
"if",
"addr",
"==",
"pc",
":",
"addr",
"=",
"' * %s'",
"%",
"HexDump",
".",
"address",
"(",
"addr",
",",
"bits",
")",
"else",
":",
"addr",
"=",
"' %s'",
"%",
"HexDump",
".",
"address",
"(",
"addr",
",",
"bits",
")",
"table",
".",
"addRow",
"(",
"addr",
",",
"dump",
",",
"code",
")",
"table",
".",
"justify",
"(",
"1",
",",
"1",
")",
"return",
"table",
".",
"getOutput",
"(",
")"
] | 37.891892 | 20.972973 |
def _convert_choices(self, choices):
"""Auto create db values then call super method"""
final_choices = []
for choice in choices:
if isinstance(choice, ChoiceEntry):
final_choices.append(choice)
continue
original_choice = choice
if isinstance(choice, six.string_types):
if choice == _NO_SUBSET_NAME_:
continue
choice = [choice, ]
else:
choice = list(choice)
length = len(choice)
assert 1 <= length <= 4, 'Invalid number of entries in %s' % (original_choice,)
final_choice = []
# do we have attributes?
if length > 1 and isinstance(choice[-1], Mapping):
final_choice.append(choice.pop())
elif length == 4:
attributes = choice.pop()
assert attributes is None or isinstance(attributes, Mapping), 'Last argument must be a dict-like object in %s' % (original_choice,)
if attributes:
final_choice.append(attributes)
# the constant
final_choice.insert(0, choice.pop(0))
if len(choice):
# we were given a db value
final_choice.insert(1, choice.pop(0))
if len(choice):
# we were given a display value
final_choice.insert(2, choice.pop(0))
else:
# set None to compute it later
final_choice.insert(1, None)
if final_choice[1] is None:
# no db value, we compute it from the constant
final_choice[1] = self.value_transform(final_choice[0])
final_choices.append(final_choice)
return super(AutoChoices, self)._convert_choices(final_choices) | [
"def",
"_convert_choices",
"(",
"self",
",",
"choices",
")",
":",
"final_choices",
"=",
"[",
"]",
"for",
"choice",
"in",
"choices",
":",
"if",
"isinstance",
"(",
"choice",
",",
"ChoiceEntry",
")",
":",
"final_choices",
".",
"append",
"(",
"choice",
")",
"continue",
"original_choice",
"=",
"choice",
"if",
"isinstance",
"(",
"choice",
",",
"six",
".",
"string_types",
")",
":",
"if",
"choice",
"==",
"_NO_SUBSET_NAME_",
":",
"continue",
"choice",
"=",
"[",
"choice",
",",
"]",
"else",
":",
"choice",
"=",
"list",
"(",
"choice",
")",
"length",
"=",
"len",
"(",
"choice",
")",
"assert",
"1",
"<=",
"length",
"<=",
"4",
",",
"'Invalid number of entries in %s'",
"%",
"(",
"original_choice",
",",
")",
"final_choice",
"=",
"[",
"]",
"# do we have attributes?",
"if",
"length",
">",
"1",
"and",
"isinstance",
"(",
"choice",
"[",
"-",
"1",
"]",
",",
"Mapping",
")",
":",
"final_choice",
".",
"append",
"(",
"choice",
".",
"pop",
"(",
")",
")",
"elif",
"length",
"==",
"4",
":",
"attributes",
"=",
"choice",
".",
"pop",
"(",
")",
"assert",
"attributes",
"is",
"None",
"or",
"isinstance",
"(",
"attributes",
",",
"Mapping",
")",
",",
"'Last argument must be a dict-like object in %s'",
"%",
"(",
"original_choice",
",",
")",
"if",
"attributes",
":",
"final_choice",
".",
"append",
"(",
"attributes",
")",
"# the constant",
"final_choice",
".",
"insert",
"(",
"0",
",",
"choice",
".",
"pop",
"(",
"0",
")",
")",
"if",
"len",
"(",
"choice",
")",
":",
"# we were given a db value",
"final_choice",
".",
"insert",
"(",
"1",
",",
"choice",
".",
"pop",
"(",
"0",
")",
")",
"if",
"len",
"(",
"choice",
")",
":",
"# we were given a display value",
"final_choice",
".",
"insert",
"(",
"2",
",",
"choice",
".",
"pop",
"(",
"0",
")",
")",
"else",
":",
"# set None to compute it later",
"final_choice",
".",
"insert",
"(",
"1",
",",
"None",
")",
"if",
"final_choice",
"[",
"1",
"]",
"is",
"None",
":",
"# no db value, we compute it from the constant",
"final_choice",
"[",
"1",
"]",
"=",
"self",
".",
"value_transform",
"(",
"final_choice",
"[",
"0",
"]",
")",
"final_choices",
".",
"append",
"(",
"final_choice",
")",
"return",
"super",
"(",
"AutoChoices",
",",
"self",
")",
".",
"_convert_choices",
"(",
"final_choices",
")"
] | 34.811321 | 19.943396 |
def compute_taxes(self, precision=None):
'''
Returns the total amount of taxes of this group.
@param precision:int Total amount of discounts
@return: Decimal
'''
return sum([line.compute_taxes(precision) for line in self.__lines]) | [
"def",
"compute_taxes",
"(",
"self",
",",
"precision",
"=",
"None",
")",
":",
"return",
"sum",
"(",
"[",
"line",
".",
"compute_taxes",
"(",
"precision",
")",
"for",
"line",
"in",
"self",
".",
"__lines",
"]",
")"
] | 38.857143 | 20 |
def centerOn(self, point):
"""
Centers this node on the inputed point.
:param point | <QPointF>
"""
rect = self.rect()
x = point.x() - rect.width() / 2.0
y = point.y() - rect.height() / 2.0
self.setPos(x, y) | [
"def",
"centerOn",
"(",
"self",
",",
"point",
")",
":",
"rect",
"=",
"self",
".",
"rect",
"(",
")",
"x",
"=",
"point",
".",
"x",
"(",
")",
"-",
"rect",
".",
"width",
"(",
")",
"/",
"2.0",
"y",
"=",
"point",
".",
"y",
"(",
")",
"-",
"rect",
".",
"height",
"(",
")",
"/",
"2.0",
"self",
".",
"setPos",
"(",
"x",
",",
"y",
")"
] | 25.818182 | 11.090909 |
def get_history(self):
"""Get history of applied upgrades."""
self.load_history()
return map(lambda x: (x, self.history[x]), self.ordered_history) | [
"def",
"get_history",
"(",
"self",
")",
":",
"self",
".",
"load_history",
"(",
")",
"return",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
",",
"self",
".",
"history",
"[",
"x",
"]",
")",
",",
"self",
".",
"ordered_history",
")"
] | 41.75 | 15.75 |
def update(self, other: 'Language') -> 'Language':
"""
Update this Language with the fields of another Language.
"""
return Language.make(
language=other.language or self.language,
extlangs=other.extlangs or self.extlangs,
script=other.script or self.script,
region=other.region or self.region,
variants=other.variants or self.variants,
extensions=other.extensions or self.extensions,
private=other.private or self.private
) | [
"def",
"update",
"(",
"self",
",",
"other",
":",
"'Language'",
")",
"->",
"'Language'",
":",
"return",
"Language",
".",
"make",
"(",
"language",
"=",
"other",
".",
"language",
"or",
"self",
".",
"language",
",",
"extlangs",
"=",
"other",
".",
"extlangs",
"or",
"self",
".",
"extlangs",
",",
"script",
"=",
"other",
".",
"script",
"or",
"self",
".",
"script",
",",
"region",
"=",
"other",
".",
"region",
"or",
"self",
".",
"region",
",",
"variants",
"=",
"other",
".",
"variants",
"or",
"self",
".",
"variants",
",",
"extensions",
"=",
"other",
".",
"extensions",
"or",
"self",
".",
"extensions",
",",
"private",
"=",
"other",
".",
"private",
"or",
"self",
".",
"private",
")"
] | 41.230769 | 12.153846 |
def current_model(self, controller_name=None, model_only=False):
'''Return the current model, qualified by its controller name.
If controller_name is specified, the current model for
that controller will be returned.
If model_only is true, only the model name, not qualified by
its controller name, will be returned.
'''
# TODO respect JUJU_MODEL environment variable.
if not controller_name:
controller_name = self.current_controller()
if not controller_name:
raise JujuError('No current controller')
models = self.models()[controller_name]
if 'current-model' not in models:
return None
if model_only:
return models['current-model']
return controller_name + ':' + models['current-model'] | [
"def",
"current_model",
"(",
"self",
",",
"controller_name",
"=",
"None",
",",
"model_only",
"=",
"False",
")",
":",
"# TODO respect JUJU_MODEL environment variable.",
"if",
"not",
"controller_name",
":",
"controller_name",
"=",
"self",
".",
"current_controller",
"(",
")",
"if",
"not",
"controller_name",
":",
"raise",
"JujuError",
"(",
"'No current controller'",
")",
"models",
"=",
"self",
".",
"models",
"(",
")",
"[",
"controller_name",
"]",
"if",
"'current-model'",
"not",
"in",
"models",
":",
"return",
"None",
"if",
"model_only",
":",
"return",
"models",
"[",
"'current-model'",
"]",
"return",
"controller_name",
"+",
"':'",
"+",
"models",
"[",
"'current-model'",
"]"
] | 43.315789 | 16.157895 |
def patch(destination, name=None, settings=None):
"""Decorator to create a patch.
The object being decorated becomes the :attr:`~Patch.obj` attribute of the
patch.
Parameters
----------
destination : object
Patch destination.
name : str
Name of the attribute at the destination.
settings : gorilla.Settings
Settings.
Returns
-------
object
The decorated object.
See Also
--------
:class:`Patch`.
"""
def decorator(wrapped):
base = _get_base(wrapped)
name_ = base.__name__ if name is None else name
settings_ = copy.deepcopy(settings)
patch = Patch(destination, name_, wrapped, settings=settings_)
data = get_decorator_data(base, set_default=True)
data.patches.append(patch)
return wrapped
return decorator | [
"def",
"patch",
"(",
"destination",
",",
"name",
"=",
"None",
",",
"settings",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"wrapped",
")",
":",
"base",
"=",
"_get_base",
"(",
"wrapped",
")",
"name_",
"=",
"base",
".",
"__name__",
"if",
"name",
"is",
"None",
"else",
"name",
"settings_",
"=",
"copy",
".",
"deepcopy",
"(",
"settings",
")",
"patch",
"=",
"Patch",
"(",
"destination",
",",
"name_",
",",
"wrapped",
",",
"settings",
"=",
"settings_",
")",
"data",
"=",
"get_decorator_data",
"(",
"base",
",",
"set_default",
"=",
"True",
")",
"data",
".",
"patches",
".",
"append",
"(",
"patch",
")",
"return",
"wrapped",
"return",
"decorator"
] | 24.5 | 21.5 |
def parse_global_args(argv):
"""Parse all global iotile tool arguments.
Any flag based argument at the start of the command line is considered as
a global flag and parsed. The first non flag argument starts the commands
that are passed to the underlying hierarchical shell.
Args:
argv (list): The command line for this command
Returns:
Namespace: The parsed arguments, with all of the commands that should
be executed in an iotile shell as the attribute 'commands'
"""
parser = create_parser()
args = parser.parse_args(argv)
should_log = args.include or args.exclude or (args.verbose > 0)
verbosity = args.verbose
root = logging.getLogger()
if should_log:
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s',
'%y-%m-%d %H:%M:%S')
if args.logfile:
handler = logging.FileHandler(args.logfile)
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
if args.include and args.exclude:
print("You cannot combine whitelisted (-i) and blacklisted (-e) loggers, you must use one or the other.")
sys.exit(1)
loglevels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
if verbosity >= len(loglevels):
verbosity = len(loglevels) - 1
level = loglevels[verbosity]
if args.include:
for name in args.include:
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
root.addHandler(logging.NullHandler())
else:
# Disable propagation of log events from disabled loggers
for name in args.exclude:
logger = logging.getLogger(name)
logger.disabled = True
root.setLevel(level)
root.addHandler(handler)
else:
root.addHandler(logging.NullHandler())
return args | [
"def",
"parse_global_args",
"(",
"argv",
")",
":",
"parser",
"=",
"create_parser",
"(",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"should_log",
"=",
"args",
".",
"include",
"or",
"args",
".",
"exclude",
"or",
"(",
"args",
".",
"verbose",
">",
"0",
")",
"verbosity",
"=",
"args",
".",
"verbose",
"root",
"=",
"logging",
".",
"getLogger",
"(",
")",
"if",
"should_log",
":",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s'",
",",
"'%y-%m-%d %H:%M:%S'",
")",
"if",
"args",
".",
"logfile",
":",
"handler",
"=",
"logging",
".",
"FileHandler",
"(",
"args",
".",
"logfile",
")",
"else",
":",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"if",
"args",
".",
"include",
"and",
"args",
".",
"exclude",
":",
"print",
"(",
"\"You cannot combine whitelisted (-i) and blacklisted (-e) loggers, you must use one or the other.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"loglevels",
"=",
"[",
"logging",
".",
"ERROR",
",",
"logging",
".",
"WARNING",
",",
"logging",
".",
"INFO",
",",
"logging",
".",
"DEBUG",
"]",
"if",
"verbosity",
">=",
"len",
"(",
"loglevels",
")",
":",
"verbosity",
"=",
"len",
"(",
"loglevels",
")",
"-",
"1",
"level",
"=",
"loglevels",
"[",
"verbosity",
"]",
"if",
"args",
".",
"include",
":",
"for",
"name",
"in",
"args",
".",
"include",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"root",
".",
"addHandler",
"(",
"logging",
".",
"NullHandler",
"(",
")",
")",
"else",
":",
"# Disable propagation of log events from disabled loggers",
"for",
"name",
"in",
"args",
".",
"exclude",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"disabled",
"=",
"True",
"root",
".",
"setLevel",
"(",
"level",
")",
"root",
".",
"addHandler",
"(",
"handler",
")",
"else",
":",
"root",
".",
"addHandler",
"(",
"logging",
".",
"NullHandler",
"(",
")",
")",
"return",
"args"
] | 32.516129 | 22.629032 |
def add_rrset(self, section, rrset, **kw):
"""Add the rrset to the specified section.
Any keyword arguments are passed on to the rdataset's to_wire()
routine.
@param section: the section
@type section: int
@param rrset: the rrset
@type rrset: dns.rrset.RRset object
"""
self._set_section(section)
before = self.output.tell()
n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns.exception.TooBig
self.counts[section] += n | [
"def",
"add_rrset",
"(",
"self",
",",
"section",
",",
"rrset",
",",
"*",
"*",
"kw",
")",
":",
"self",
".",
"_set_section",
"(",
"section",
")",
"before",
"=",
"self",
".",
"output",
".",
"tell",
"(",
")",
"n",
"=",
"rrset",
".",
"to_wire",
"(",
"self",
".",
"output",
",",
"self",
".",
"compress",
",",
"self",
".",
"origin",
",",
"*",
"*",
"kw",
")",
"after",
"=",
"self",
".",
"output",
".",
"tell",
"(",
")",
"if",
"after",
">=",
"self",
".",
"max_size",
":",
"self",
".",
"_rollback",
"(",
"before",
")",
"raise",
"dns",
".",
"exception",
".",
"TooBig",
"self",
".",
"counts",
"[",
"section",
"]",
"+=",
"n"
] | 31.95 | 13.9 |
def predict(self, a, b):
""" Compute the test statistic
Args:
a (array-like): Variable 1
b (array-like): Variable 2
Returns:
float: test statistic
"""
a = np.array(a).reshape((-1, 1))
b = np.array(b).reshape((-1, 1))
return (mutual_info_regression(a, b.reshape((-1,))) + mutual_info_regression(b, a.reshape((-1,))))/2 | [
"def",
"predict",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"a",
"=",
"np",
".",
"array",
"(",
"a",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"b",
"=",
"np",
".",
"array",
"(",
"b",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"return",
"(",
"mutual_info_regression",
"(",
"a",
",",
"b",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
")",
"+",
"mutual_info_regression",
"(",
"b",
",",
"a",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
")",
")",
"/",
"2"
] | 30.692308 | 17.384615 |
def insert_tile(self, tile_info):
"""Add or replace an entry in the tile cache.
Args:
tile_info (TileInfo): The newly registered tile.
"""
for i, tile in enumerate(self.registered_tiles):
if tile.slot == tile_info.slot:
self.registered_tiles[i] = tile_info
return
self.registered_tiles.append(tile_info) | [
"def",
"insert_tile",
"(",
"self",
",",
"tile_info",
")",
":",
"for",
"i",
",",
"tile",
"in",
"enumerate",
"(",
"self",
".",
"registered_tiles",
")",
":",
"if",
"tile",
".",
"slot",
"==",
"tile_info",
".",
"slot",
":",
"self",
".",
"registered_tiles",
"[",
"i",
"]",
"=",
"tile_info",
"return",
"self",
".",
"registered_tiles",
".",
"append",
"(",
"tile_info",
")"
] | 30 | 17.692308 |
def add_upsert(self, value, criteria):
"""Add a tag or populator to the batch by value and criteria"""
value = value.strip()
v = value.lower()
self.lower_val_to_val[v] = value
criteria_array = self.upserts.get(v)
if criteria_array is None:
criteria_array = []
# start with # '{"value": "some_value", "criteria": []}, '
self.upserts_size[v] = 31 + len(value)
criteria_array.append(criteria.to_dict())
self.upserts[v] = criteria_array
self.upserts_size[v] += criteria.json_size() | [
"def",
"add_upsert",
"(",
"self",
",",
"value",
",",
"criteria",
")",
":",
"value",
"=",
"value",
".",
"strip",
"(",
")",
"v",
"=",
"value",
".",
"lower",
"(",
")",
"self",
".",
"lower_val_to_val",
"[",
"v",
"]",
"=",
"value",
"criteria_array",
"=",
"self",
".",
"upserts",
".",
"get",
"(",
"v",
")",
"if",
"criteria_array",
"is",
"None",
":",
"criteria_array",
"=",
"[",
"]",
"# start with # '{\"value\": \"some_value\", \"criteria\": []}, '",
"self",
".",
"upserts_size",
"[",
"v",
"]",
"=",
"31",
"+",
"len",
"(",
"value",
")",
"criteria_array",
".",
"append",
"(",
"criteria",
".",
"to_dict",
"(",
")",
")",
"self",
".",
"upserts",
"[",
"v",
"]",
"=",
"criteria_array",
"self",
".",
"upserts_size",
"[",
"v",
"]",
"+=",
"criteria",
".",
"json_size",
"(",
")"
] | 40.928571 | 10.571429 |
def _backtick_columns(cols):
"""
Quote the column names
"""
def bt(s):
b = '' if s == '*' or not s else '`'
return [_ for _ in [b + (s or '') + b] if _]
formatted = []
for c in cols:
if c[0] == '#':
formatted.append(c[1:])
elif c.startswith('(') and c.endswith(')'):
# WHERE (column_a, column_b) IN ((1,10), (1,20))
formatted.append(c)
else:
# backtick the former part when it meets the first dot, and then all the rest
formatted.append('.'.join(bt(c.split('.')[0]) + bt('.'.join(c.split('.')[1:]))))
return ', '.join(formatted) | [
"def",
"_backtick_columns",
"(",
"cols",
")",
":",
"def",
"bt",
"(",
"s",
")",
":",
"b",
"=",
"''",
"if",
"s",
"==",
"'*'",
"or",
"not",
"s",
"else",
"'`'",
"return",
"[",
"_",
"for",
"_",
"in",
"[",
"b",
"+",
"(",
"s",
"or",
"''",
")",
"+",
"b",
"]",
"if",
"_",
"]",
"formatted",
"=",
"[",
"]",
"for",
"c",
"in",
"cols",
":",
"if",
"c",
"[",
"0",
"]",
"==",
"'#'",
":",
"formatted",
".",
"append",
"(",
"c",
"[",
"1",
":",
"]",
")",
"elif",
"c",
".",
"startswith",
"(",
"'('",
")",
"and",
"c",
".",
"endswith",
"(",
"')'",
")",
":",
"# WHERE (column_a, column_b) IN ((1,10), (1,20))",
"formatted",
".",
"append",
"(",
"c",
")",
"else",
":",
"# backtick the former part when it meets the first dot, and then all the rest",
"formatted",
".",
"append",
"(",
"'.'",
".",
"join",
"(",
"bt",
"(",
"c",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"+",
"bt",
"(",
"'.'",
".",
"join",
"(",
"c",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
":",
"]",
")",
")",
")",
")",
"return",
"', '",
".",
"join",
"(",
"formatted",
")"
] | 35.35 | 18.95 |
def set_item_class_name_on_custom_generator_class(cls):
"""
Set the attribute `cls.__tohu_items_name__` to a string which defines the name
of the namedtuple class which will be used to produce items for the custom
generator.
By default this will be the first part of the class name (before '...Generator'),
for example:
FoobarGenerator -> Foobar
QuuxGenerator -> Quux
However, it can be set explicitly by the user by defining `__tohu_items_name__`
in the class definition, for example:
class Quux(CustomGenerator):
__tohu_items_name__ = 'MyQuuxItem'
"""
if '__tohu__items__name__' in cls.__dict__:
logger.debug(
f"Using item class name '{cls.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')")
else:
m = re.match('^(.*)Generator$', cls.__name__)
if m is not None:
cls.__tohu_items_name__ = m.group(1)
logger.debug(f"Using item class name '{cls.__tohu_items_name__}' (derived from custom generator name)")
else:
raise ValueError("Cannot derive class name for items to be produced by custom generator. "
"Please set '__tohu_items_name__' at the top of the custom generator's "
"definition or change its name so that it ends in '...Generator'") | [
"def",
"set_item_class_name_on_custom_generator_class",
"(",
"cls",
")",
":",
"if",
"'__tohu__items__name__'",
"in",
"cls",
".",
"__dict__",
":",
"logger",
".",
"debug",
"(",
"f\"Using item class name '{cls.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')\"",
")",
"else",
":",
"m",
"=",
"re",
".",
"match",
"(",
"'^(.*)Generator$'",
",",
"cls",
".",
"__name__",
")",
"if",
"m",
"is",
"not",
"None",
":",
"cls",
".",
"__tohu_items_name__",
"=",
"m",
".",
"group",
"(",
"1",
")",
"logger",
".",
"debug",
"(",
"f\"Using item class name '{cls.__tohu_items_name__}' (derived from custom generator name)\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot derive class name for items to be produced by custom generator. \"",
"\"Please set '__tohu_items_name__' at the top of the custom generator's \"",
"\"definition or change its name so that it ends in '...Generator'\"",
")"
] | 45.166667 | 28.833333 |
def size_on_disk(self):
"""
:return: size of data and indices in bytes on the storage device
"""
ret = self.connection.query(
'SHOW TABLE STATUS FROM `{database}` WHERE NAME="{table}"'.format(
database=self.database, table=self.table_name), as_dict=True).fetchone()
return ret['Data_length'] + ret['Index_length'] | [
"def",
"size_on_disk",
"(",
"self",
")",
":",
"ret",
"=",
"self",
".",
"connection",
".",
"query",
"(",
"'SHOW TABLE STATUS FROM `{database}` WHERE NAME=\"{table}\"'",
".",
"format",
"(",
"database",
"=",
"self",
".",
"database",
",",
"table",
"=",
"self",
".",
"table_name",
")",
",",
"as_dict",
"=",
"True",
")",
".",
"fetchone",
"(",
")",
"return",
"ret",
"[",
"'Data_length'",
"]",
"+",
"ret",
"[",
"'Index_length'",
"]"
] | 46.75 | 19.25 |
def sort_queryset(queryset, request, context=None):
""" Returns a sorted queryset
The context argument is only used in the template tag
"""
sort_by = request.GET.get('sort_by')
if sort_by:
if sort_by in [el.name for el in queryset.model._meta.fields]:
queryset = queryset.order_by(sort_by)
else:
if sort_by in request.session:
sort_by = request.session[sort_by]
try:
queryset = queryset.order_by(sort_by)
except:
raise
# added else to fix a bug when using changelist
# TODO: use less ifs and more standard sorting
elif context is not None:
# sorted ascending
if sort_by[0] != '-':
sort_by = context['cl'].list_display[int(sort_by) - 1]
# sorted descending
else:
sort_by = '-' + context['cl'].list_display[abs(int(sort_by)) - 1]
queryset = queryset.order_by(sort_by)
return queryset | [
"def",
"sort_queryset",
"(",
"queryset",
",",
"request",
",",
"context",
"=",
"None",
")",
":",
"sort_by",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'sort_by'",
")",
"if",
"sort_by",
":",
"if",
"sort_by",
"in",
"[",
"el",
".",
"name",
"for",
"el",
"in",
"queryset",
".",
"model",
".",
"_meta",
".",
"fields",
"]",
":",
"queryset",
"=",
"queryset",
".",
"order_by",
"(",
"sort_by",
")",
"else",
":",
"if",
"sort_by",
"in",
"request",
".",
"session",
":",
"sort_by",
"=",
"request",
".",
"session",
"[",
"sort_by",
"]",
"try",
":",
"queryset",
"=",
"queryset",
".",
"order_by",
"(",
"sort_by",
")",
"except",
":",
"raise",
"# added else to fix a bug when using changelist",
"# TODO: use less ifs and more standard sorting",
"elif",
"context",
"is",
"not",
"None",
":",
"# sorted ascending",
"if",
"sort_by",
"[",
"0",
"]",
"!=",
"'-'",
":",
"sort_by",
"=",
"context",
"[",
"'cl'",
"]",
".",
"list_display",
"[",
"int",
"(",
"sort_by",
")",
"-",
"1",
"]",
"# sorted descending",
"else",
":",
"sort_by",
"=",
"'-'",
"+",
"context",
"[",
"'cl'",
"]",
".",
"list_display",
"[",
"abs",
"(",
"int",
"(",
"sort_by",
")",
")",
"-",
"1",
"]",
"queryset",
"=",
"queryset",
".",
"order_by",
"(",
"sort_by",
")",
"return",
"queryset"
] | 40.961538 | 14.807692 |
def get_fixed_param_names(self) -> List[str]:
"""
Get the fixed params of the network.
:return: List of strings, names of the layers
"""
args = set(self.args.keys()) | set(self.auxs.keys())
return list(args & set(self.sym.list_arguments())) | [
"def",
"get_fixed_param_names",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"args",
"=",
"set",
"(",
"self",
".",
"args",
".",
"keys",
"(",
")",
")",
"|",
"set",
"(",
"self",
".",
"auxs",
".",
"keys",
"(",
")",
")",
"return",
"list",
"(",
"args",
"&",
"set",
"(",
"self",
".",
"sym",
".",
"list_arguments",
"(",
")",
")",
")"
] | 31.333333 | 15.555556 |
def longest_dimension_first(vector, start=(0, 0), width=None, height=None):
"""List the (x, y) steps on a longest-dimension first route.
Note that when multiple dimensions are the same magnitude, one will be
chosen at random with uniform probability.
Parameters
----------
vector : (x, y, z)
The vector which the path should cover.
start : (x, y)
The coordinates from which the path should start (note this is a 2D
coordinate).
width : int or None
The width of the topology beyond which we wrap around (0 <= x < width).
If None, no wrapping on the X axis will occur.
height : int or None
The height of the topology beyond which we wrap around (0 <= y <
height). If None, no wrapping on the Y axis will occur.
Returns
-------
[(:py:class:`~rig.links.Links`, (x, y)), ...]
Produces (in order) a (direction, (x, y)) pair for every hop along the
longest dimension first route. The direction gives the direction to
travel in from the previous step to reach the current step. Ties are
broken randomly. The first generated value is that of the first hop
after the starting position, the last generated value is the
destination position.
"""
x, y = start
out = []
for dimension, magnitude in sorted(enumerate(vector),
key=(lambda x:
abs(x[1]) + random.random()),
reverse=True):
if magnitude == 0:
break
# Advance in the specified direction
sign = 1 if magnitude > 0 else -1
for _ in range(abs(magnitude)):
if dimension == 0:
dx, dy = sign, 0
elif dimension == 1:
dx, dy = 0, sign
elif dimension == 2: # pragma: no branch
dx, dy = -sign, -sign
x += dx
y += dy
# Wrap-around if required
if width is not None:
x %= width
if height is not None:
y %= height
direction = Links.from_vector((dx, dy))
out.append((direction, (x, y)))
return out | [
"def",
"longest_dimension_first",
"(",
"vector",
",",
"start",
"=",
"(",
"0",
",",
"0",
")",
",",
"width",
"=",
"None",
",",
"height",
"=",
"None",
")",
":",
"x",
",",
"y",
"=",
"start",
"out",
"=",
"[",
"]",
"for",
"dimension",
",",
"magnitude",
"in",
"sorted",
"(",
"enumerate",
"(",
"vector",
")",
",",
"key",
"=",
"(",
"lambda",
"x",
":",
"abs",
"(",
"x",
"[",
"1",
"]",
")",
"+",
"random",
".",
"random",
"(",
")",
")",
",",
"reverse",
"=",
"True",
")",
":",
"if",
"magnitude",
"==",
"0",
":",
"break",
"# Advance in the specified direction",
"sign",
"=",
"1",
"if",
"magnitude",
">",
"0",
"else",
"-",
"1",
"for",
"_",
"in",
"range",
"(",
"abs",
"(",
"magnitude",
")",
")",
":",
"if",
"dimension",
"==",
"0",
":",
"dx",
",",
"dy",
"=",
"sign",
",",
"0",
"elif",
"dimension",
"==",
"1",
":",
"dx",
",",
"dy",
"=",
"0",
",",
"sign",
"elif",
"dimension",
"==",
"2",
":",
"# pragma: no branch",
"dx",
",",
"dy",
"=",
"-",
"sign",
",",
"-",
"sign",
"x",
"+=",
"dx",
"y",
"+=",
"dy",
"# Wrap-around if required",
"if",
"width",
"is",
"not",
"None",
":",
"x",
"%=",
"width",
"if",
"height",
"is",
"not",
"None",
":",
"y",
"%=",
"height",
"direction",
"=",
"Links",
".",
"from_vector",
"(",
"(",
"dx",
",",
"dy",
")",
")",
"out",
".",
"append",
"(",
"(",
"direction",
",",
"(",
"x",
",",
"y",
")",
")",
")",
"return",
"out"
] | 34.061538 | 21.646154 |
def _set_ipv6_track(self, v, load=False):
"""
Setter method for ipv6_track, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_track is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_track() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ipv6_track.ipv6_track, is_container='container', presence=False, yang_name="ipv6-track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track', u'alt-name': u'track'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_track must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ipv6_track.ipv6_track, is_container='container', presence=False, yang_name="ipv6-track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track', u'alt-name': u'track'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)""",
})
self.__ipv6_track = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_ipv6_track",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"ipv6_track",
".",
"ipv6_track",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"ipv6-track\"",
",",
"rest_name",
"=",
"\"track\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Track'",
",",
"u'alt-name'",
":",
"u'track'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-anycast-gateway'",
",",
"defining_module",
"=",
"'brocade-anycast-gateway'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"ipv6_track must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=ipv6_track.ipv6_track, is_container='container', presence=False, yang_name=\"ipv6-track\", rest_name=\"track\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track', u'alt-name': u'track'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__ipv6_track",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 76 | 36.045455 |