text
stringlengths 89
104k
| code_tokens
sequence | avg_line_len
float64 7.91
980
| score
float64 0
630
|
---|---|---|---|
def htmlNodeDumpOutput(self, buf, cur, encoding):
    """Dump an HTML node, recursive behaviour,children are printed
       too, and formatting returns/spaces are added. """
    # Unwrap the optional Python wrapper objects down to their raw
    # C-level handles before calling into the libxml2 binding.
    raw_buf = None if buf is None else buf._o
    raw_cur = None if cur is None else cur._o
    libxml2mod.htmlNodeDumpOutput(raw_buf, self._o, raw_cur, encoding)
"def",
"htmlNodeDumpOutput",
"(",
"self",
",",
"buf",
",",
"cur",
",",
"encoding",
")",
":",
"if",
"buf",
"is",
"None",
":",
"buf__o",
"=",
"None",
"else",
":",
"buf__o",
"=",
"buf",
".",
"_o",
"if",
"cur",
"is",
"None",
":",
"cur__o",
"=",
"None",
"else",
":",
"cur__o",
"=",
"cur",
".",
"_o",
"libxml2mod",
".",
"htmlNodeDumpOutput",
"(",
"buf__o",
",",
"self",
".",
"_o",
",",
"cur__o",
",",
"encoding",
")"
] | 47.875 | 8.625 |
def plot(args):
    """
    %prog plot workdir sample chr1,chr2
    Plot some chromosomes for visual proof. Separate multiple chromosomes with
    comma. Must contain folder workdir/sample-cn/.
    """
    from jcvi.graphics.base import savefig

    parser = OptionParser(plot.__doc__)
    opts, args, iopts = parser.set_image_options(args, figsize="8x7", format="png")
    if len(args) != 3:
        sys.exit(not parser.print_help())

    workdir, sample_key, chrs = args
    chromosomes = chrs.split(",")
    # Run the copy-number HMM over the requested chromosomes and save a plot.
    hmm = CopyNumberHMM(workdir=workdir)
    hmm.plot(sample_key, chrs=chromosomes)
    image_name = sample_key + "_cn." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
"def",
"plot",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"graphics",
".",
"base",
"import",
"savefig",
"p",
"=",
"OptionParser",
"(",
"plot",
".",
"__doc__",
")",
"opts",
",",
"args",
",",
"iopts",
"=",
"p",
".",
"set_image_options",
"(",
"args",
",",
"figsize",
"=",
"\"8x7\"",
",",
"format",
"=",
"\"png\"",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"3",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"workdir",
",",
"sample_key",
",",
"chrs",
"=",
"args",
"chrs",
"=",
"chrs",
".",
"split",
"(",
"\",\"",
")",
"hmm",
"=",
"CopyNumberHMM",
"(",
"workdir",
"=",
"workdir",
")",
"hmm",
".",
"plot",
"(",
"sample_key",
",",
"chrs",
"=",
"chrs",
")",
"image_name",
"=",
"sample_key",
"+",
"\"_cn.\"",
"+",
"iopts",
".",
"format",
"savefig",
"(",
"image_name",
",",
"dpi",
"=",
"iopts",
".",
"dpi",
",",
"iopts",
"=",
"iopts",
")"
] | 29.409091 | 17.590909 |
def update_reminder(self, reminder_id, reminder_dict):
    """
    Updates a reminder
    :param reminder_id: the reminder id
    :param reminder_dict: dict
    :return: dict
    """
    # Delegate to the generic PUT helper against the reminders resource.
    request_kwargs = dict(
        resource=REMINDERS,
        billomat_id=reminder_id,
        send_data=reminder_dict,
    )
    return self._create_put_request(**request_kwargs)
"def",
"update_reminder",
"(",
"self",
",",
"reminder_id",
",",
"reminder_dict",
")",
":",
"return",
"self",
".",
"_create_put_request",
"(",
"resource",
"=",
"REMINDERS",
",",
"billomat_id",
"=",
"reminder_id",
",",
"send_data",
"=",
"reminder_dict",
")"
] | 27 | 11.153846 |
def votes(ctx, account, type):
    """List an account's votes (committee, witness and/or worker).

    :param ctx: click context passed through by the CLI framework (unused)
    :param account: account name or id whose votes are listed
    :param type: vote category or list of categories to display
        ("committee", "witness", "worker")
    """
    if not isinstance(type, (list, tuple)):
        type = [type]
    account = Account(account, full=True)
    # Bucket the account's votes by their vote category.
    ret = {key: list() for key in Vote.types()}
    for vote in account["votes"]:
        t = Vote.vote_type_from_id(vote["id"])
        ret[t].append(vote)
    # FIX: a previous version unconditionally built a committee table here,
    # before checking which categories were requested; that table was always
    # discarded (overwritten below), so the dead work has been removed.
    if "committee" in type:
        t = [["id", "url", "account", "votes"]]
        for vote in ret["committee"]:
            t.append(
                [
                    vote["id"],
                    vote["url"],
                    Account(vote["committee_member_account"])["name"],
                    str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
                ]
            )
        print_table(t)
    if "witness" in type:
        t = [
            [
                "id",
                "account",
                "url",
                "votes",
                "last_confirmed_block_num",
                "total_missed",
                "vesting",  # FIX: header was misspelled "westing"
            ]
        ]
        for vote in ret["witness"]:
            t.append(
                [
                    vote["id"],
                    Account(vote["witness_account"])["name"],
                    vote["url"],
                    str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
                    vote["last_confirmed_block_num"],
                    vote["total_missed"],
                    # Only witnesses with a pay vesting balance have a claimable amount.
                    str(Vesting(vote.get("pay_vb")).claimable)
                    if vote.get("pay_vb")
                    else "",
                ]
            )
        print_table(t)
    if "worker" in type:
        t = [["id", "name/url", "daily_pay", "votes", "time", "account"]]
        for vote in ret["worker"]:
            # Renamed locals: the old names shadowed this function ("votes")
            # and were ambiguous ("amount").
            votes_for = Amount({"amount": vote["total_votes_for"], "asset_id": "1.3.0"})
            daily_pay = Amount({"amount": vote["daily_pay"], "asset_id": "1.3.0"})
            t.append(
                [
                    vote["id"],
                    "{name}\n{url}".format(**vote),
                    str(daily_pay),
                    str(votes_for),
                    "{work_begin_date}\n-\n{work_end_date}".format(**vote),
                    str(Account(vote["worker_account"])["name"]),
                ]
            )
        print_table(t)
"def",
"votes",
"(",
"ctx",
",",
"account",
",",
"type",
")",
":",
"if",
"not",
"isinstance",
"(",
"type",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"type",
"=",
"[",
"type",
"]",
"account",
"=",
"Account",
"(",
"account",
",",
"full",
"=",
"True",
")",
"ret",
"=",
"{",
"key",
":",
"list",
"(",
")",
"for",
"key",
"in",
"Vote",
".",
"types",
"(",
")",
"}",
"for",
"vote",
"in",
"account",
"[",
"\"votes\"",
"]",
":",
"t",
"=",
"Vote",
".",
"vote_type_from_id",
"(",
"vote",
"[",
"\"id\"",
"]",
")",
"ret",
"[",
"t",
"]",
".",
"append",
"(",
"vote",
")",
"t",
"=",
"[",
"[",
"\"id\"",
",",
"\"url\"",
",",
"\"account\"",
"]",
"]",
"for",
"vote",
"in",
"ret",
"[",
"\"committee\"",
"]",
":",
"t",
".",
"append",
"(",
"[",
"vote",
"[",
"\"id\"",
"]",
",",
"vote",
"[",
"\"url\"",
"]",
",",
"Account",
"(",
"vote",
"[",
"\"committee_member_account\"",
"]",
")",
"[",
"\"name\"",
"]",
"]",
")",
"if",
"\"committee\"",
"in",
"type",
":",
"t",
"=",
"[",
"[",
"\"id\"",
",",
"\"url\"",
",",
"\"account\"",
",",
"\"votes\"",
"]",
"]",
"for",
"vote",
"in",
"ret",
"[",
"\"committee\"",
"]",
":",
"t",
".",
"append",
"(",
"[",
"vote",
"[",
"\"id\"",
"]",
",",
"vote",
"[",
"\"url\"",
"]",
",",
"Account",
"(",
"vote",
"[",
"\"committee_member_account\"",
"]",
")",
"[",
"\"name\"",
"]",
",",
"str",
"(",
"Amount",
"(",
"{",
"\"amount\"",
":",
"vote",
"[",
"\"total_votes\"",
"]",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
")",
")",
",",
"]",
")",
"print_table",
"(",
"t",
")",
"if",
"\"witness\"",
"in",
"type",
":",
"t",
"=",
"[",
"[",
"\"id\"",
",",
"\"account\"",
",",
"\"url\"",
",",
"\"votes\"",
",",
"\"last_confirmed_block_num\"",
",",
"\"total_missed\"",
",",
"\"westing\"",
",",
"]",
"]",
"for",
"vote",
"in",
"ret",
"[",
"\"witness\"",
"]",
":",
"t",
".",
"append",
"(",
"[",
"vote",
"[",
"\"id\"",
"]",
",",
"Account",
"(",
"vote",
"[",
"\"witness_account\"",
"]",
")",
"[",
"\"name\"",
"]",
",",
"vote",
"[",
"\"url\"",
"]",
",",
"str",
"(",
"Amount",
"(",
"{",
"\"amount\"",
":",
"vote",
"[",
"\"total_votes\"",
"]",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
")",
")",
",",
"vote",
"[",
"\"last_confirmed_block_num\"",
"]",
",",
"vote",
"[",
"\"total_missed\"",
"]",
",",
"str",
"(",
"Vesting",
"(",
"vote",
".",
"get",
"(",
"\"pay_vb\"",
")",
")",
".",
"claimable",
")",
"if",
"vote",
".",
"get",
"(",
"\"pay_vb\"",
")",
"else",
"\"\"",
",",
"]",
")",
"print_table",
"(",
"t",
")",
"if",
"\"worker\"",
"in",
"type",
":",
"t",
"=",
"[",
"[",
"\"id\"",
",",
"\"name/url\"",
",",
"\"daily_pay\"",
",",
"\"votes\"",
",",
"\"time\"",
",",
"\"account\"",
"]",
"]",
"for",
"vote",
"in",
"ret",
"[",
"\"worker\"",
"]",
":",
"votes",
"=",
"Amount",
"(",
"{",
"\"amount\"",
":",
"vote",
"[",
"\"total_votes_for\"",
"]",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
")",
"amount",
"=",
"Amount",
"(",
"{",
"\"amount\"",
":",
"vote",
"[",
"\"daily_pay\"",
"]",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
")",
"t",
".",
"append",
"(",
"[",
"vote",
"[",
"\"id\"",
"]",
",",
"\"{name}\\n{url}\"",
".",
"format",
"(",
"*",
"*",
"vote",
")",
",",
"str",
"(",
"amount",
")",
",",
"str",
"(",
"votes",
")",
",",
"\"{work_begin_date}\\n-\\n{work_end_date}\"",
".",
"format",
"(",
"*",
"*",
"vote",
")",
",",
"str",
"(",
"Account",
"(",
"vote",
"[",
"\"worker_account\"",
"]",
")",
"[",
"\"name\"",
"]",
")",
",",
"]",
")",
"print_table",
"(",
"t",
")"
] | 32.824324 | 18.648649 |
def count(self, model_class, conditions=None):
    '''
    Counts the number of records in the model's table.
    - `model_class`: the model to count.
    - `conditions`: optional SQL conditions (contents of the WHERE clause).
    '''
    sql = 'SELECT count() FROM $table'
    if conditions:
        sql = sql + ' WHERE ' + conditions
    # Resolve $table for the model, run the query and parse the reply.
    response = self._send(self._substitute(sql, model_class))
    body = response.text
    return int(body) if body else 0
"def",
"count",
"(",
"self",
",",
"model_class",
",",
"conditions",
"=",
"None",
")",
":",
"query",
"=",
"'SELECT count() FROM $table'",
"if",
"conditions",
":",
"query",
"+=",
"' WHERE '",
"+",
"conditions",
"query",
"=",
"self",
".",
"_substitute",
"(",
"query",
",",
"model_class",
")",
"r",
"=",
"self",
".",
"_send",
"(",
"query",
")",
"return",
"int",
"(",
"r",
".",
"text",
")",
"if",
"r",
".",
"text",
"else",
"0"
] | 37.076923 | 16.615385 |
def get_session(self, redirect_url):
    """Create Session to store credentials.
    Parameters
        redirect_url (str)
            The full URL that the Uber server redirected to after
            the user authorized your app.
    Returns
        (Session)
            A Session object with OAuth 2.0 credentials.
    Raises
        UberIllegalState (APIError)
            Raised if redirect URL contains an error.
    """
    query_params = self._extract_query(redirect_url)
    error = query_params.get('error')
    if error:
        raise UberIllegalState(error)
    # The 'scope' parameter arrives space delimited; turn it into a set.
    scopes_set = set(query_params.get('scope').split())
    credential = OAuth2Credential(
        client_id=self.client_id,
        redirect_url=self.redirect_url,
        access_token=query_params.get('access_token'),
        expires_in_seconds=query_params.get('expires_in'),
        scopes=scopes_set,
        grant_type=auth.IMPLICIT_GRANT,
    )
    return Session(oauth2credential=credential)
"def",
"get_session",
"(",
"self",
",",
"redirect_url",
")",
":",
"query_params",
"=",
"self",
".",
"_extract_query",
"(",
"redirect_url",
")",
"error",
"=",
"query_params",
".",
"get",
"(",
"'error'",
")",
"if",
"error",
":",
"raise",
"UberIllegalState",
"(",
"error",
")",
"# convert space delimited string to set",
"scopes",
"=",
"query_params",
".",
"get",
"(",
"'scope'",
")",
"scopes_set",
"=",
"{",
"scope",
"for",
"scope",
"in",
"scopes",
".",
"split",
"(",
")",
"}",
"oauth2credential",
"=",
"OAuth2Credential",
"(",
"client_id",
"=",
"self",
".",
"client_id",
",",
"redirect_url",
"=",
"self",
".",
"redirect_url",
",",
"access_token",
"=",
"query_params",
".",
"get",
"(",
"'access_token'",
")",
",",
"expires_in_seconds",
"=",
"query_params",
".",
"get",
"(",
"'expires_in'",
")",
",",
"scopes",
"=",
"scopes_set",
",",
"grant_type",
"=",
"auth",
".",
"IMPLICIT_GRANT",
",",
")",
"return",
"Session",
"(",
"oauth2credential",
"=",
"oauth2credential",
")"
] | 31.805556 | 17.638889 |
def long2ip(l, rfc1924=False):
    """Convert a network byte order 128-bit integer to a canonical IPv6
    address.
    >>> long2ip(2130706433)
    '::7f00:1'
    >>> long2ip(42540766411282592856904266426630537217)
    '2001:db8::1:0:0:1'
    >>> long2ip(MIN_IP)
    '::'
    >>> long2ip(MAX_IP)
    'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2ip(ip2long('::'), rfc1924=True)
    '00000000000000000000'
    :param l: Network byte order 128-bit integer.
    :type l: int
    :param rfc1924: Encode in RFC 1924 notation (base 85)
    :type rfc1924: bool
    :returns: Canonical IPv6 address (eg. '::1').
    :raises: TypeError
    """
    # Reject values outside the representable 128-bit range.
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    if rfc1924:
        return long2rfc1924(l)
    # format as one big hex value
    hex_str = '%032x' % l
    # split into double octet chunks without padding zeros
    # (re-parsing each 4-hex-digit slice through int() drops leading zeros,
    # e.g. '00db' -> 'db', as the canonical form requires)
    hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
    # find and remove left most longest run of zeros
    # dc_* tracks the best (longest, leftmost) run seen so far; run_* tracks
    # the run currently being scanned. -1 means "no run in progress".
    dc_start, dc_len = (-1, 0)
    run_start, run_len = (-1, 0)
    for idx, hextet in enumerate(hextets):
        if '0' == hextet:
            run_len += 1
            if -1 == run_start:
                run_start = idx
            # A strictly longer run wins, so ties keep the leftmost run.
            if run_len > dc_len:
                dc_len, dc_start = (run_len, run_start)
        else:
            run_len, run_start = (0, -1)
    # end for
    # Only runs of two or more hextets are abbreviated with '::'.
    if dc_len > 1:
        dc_end = dc_start + dc_len
        # The empty-string placeholders below make ':'.join() emit the
        # doubled colon; edge runs need an extra placeholder so the
        # abbreviation still renders as '::' at the start/end.
        if dc_end == len(hextets):
            hextets += ['']
        hextets[dc_start:dc_end] = ['']
        if dc_start == 0:
            hextets = [''] + hextets
    # end if
    return ':'.join(hextets)
"def",
"long2ip",
"(",
"l",
",",
"rfc1924",
"=",
"False",
")",
":",
"if",
"MAX_IP",
"<",
"l",
"or",
"l",
"<",
"MIN_IP",
":",
"raise",
"TypeError",
"(",
"\"expected int between %d and %d inclusive\"",
"%",
"(",
"MIN_IP",
",",
"MAX_IP",
")",
")",
"if",
"rfc1924",
":",
"return",
"long2rfc1924",
"(",
"l",
")",
"# format as one big hex value",
"hex_str",
"=",
"'%032x'",
"%",
"l",
"# split into double octet chunks without padding zeros",
"hextets",
"=",
"[",
"'%x'",
"%",
"int",
"(",
"hex_str",
"[",
"x",
":",
"x",
"+",
"4",
"]",
",",
"16",
")",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"32",
",",
"4",
")",
"]",
"# find and remove left most longest run of zeros",
"dc_start",
",",
"dc_len",
"=",
"(",
"-",
"1",
",",
"0",
")",
"run_start",
",",
"run_len",
"=",
"(",
"-",
"1",
",",
"0",
")",
"for",
"idx",
",",
"hextet",
"in",
"enumerate",
"(",
"hextets",
")",
":",
"if",
"'0'",
"==",
"hextet",
":",
"run_len",
"+=",
"1",
"if",
"-",
"1",
"==",
"run_start",
":",
"run_start",
"=",
"idx",
"if",
"run_len",
">",
"dc_len",
":",
"dc_len",
",",
"dc_start",
"=",
"(",
"run_len",
",",
"run_start",
")",
"else",
":",
"run_len",
",",
"run_start",
"=",
"(",
"0",
",",
"-",
"1",
")",
"# end for",
"if",
"dc_len",
">",
"1",
":",
"dc_end",
"=",
"dc_start",
"+",
"dc_len",
"if",
"dc_end",
"==",
"len",
"(",
"hextets",
")",
":",
"hextets",
"+=",
"[",
"''",
"]",
"hextets",
"[",
"dc_start",
":",
"dc_end",
"]",
"=",
"[",
"''",
"]",
"if",
"dc_start",
"==",
"0",
":",
"hextets",
"=",
"[",
"''",
"]",
"+",
"hextets",
"# end if",
"return",
"':'",
".",
"join",
"(",
"hextets",
")"
] | 31.178082 | 17.972603 |
def buglist(self, from_date=DEFAULT_DATETIME):
    """Get a summary of bugs in CSV format.
    :param from_date: retrieve bugs that where updated from that date
    """
    # Fetch and cache the server version on first use.
    if not self.version:
        self.version = self.__fetch_version()
    # Older Bugzilla servers spell the sort key differently.
    order = ('Last+Changed'
             if self.version in self.OLD_STYLE_VERSIONS
             else 'changeddate')
    params = {
        self.PCHFIELD_FROM: from_date.strftime("%Y-%m-%d %H:%M:%S"),
        self.PCTYPE: self.CTYPE_CSV,
        self.PLIMIT: self.max_bugs_csv,
        self.PORDER: order,
    }
    return self.call(self.CGI_BUGLIST, params)
"def",
"buglist",
"(",
"self",
",",
"from_date",
"=",
"DEFAULT_DATETIME",
")",
":",
"if",
"not",
"self",
".",
"version",
":",
"self",
".",
"version",
"=",
"self",
".",
"__fetch_version",
"(",
")",
"if",
"self",
".",
"version",
"in",
"self",
".",
"OLD_STYLE_VERSIONS",
":",
"order",
"=",
"'Last+Changed'",
"else",
":",
"order",
"=",
"'changeddate'",
"date",
"=",
"from_date",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"params",
"=",
"{",
"self",
".",
"PCHFIELD_FROM",
":",
"date",
",",
"self",
".",
"PCTYPE",
":",
"self",
".",
"CTYPE_CSV",
",",
"self",
".",
"PLIMIT",
":",
"self",
".",
"max_bugs_csv",
",",
"self",
".",
"PORDER",
":",
"order",
"}",
"response",
"=",
"self",
".",
"call",
"(",
"self",
".",
"CGI_BUGLIST",
",",
"params",
")",
"return",
"response"
] | 27.72 | 18.6 |
def ensure_ceph_keyring(service, user=None, group=None,
                        relation='ceph', key=None):
    """Ensures a ceph keyring is created for a named service and optionally
    ensures user and group ownership.

    :param service: name of the service the keyring belongs to
    :param user: optional owner to apply to the keyring file
    :param group: optional group to apply with `user`
    :param relation: relation name searched for a key when none is supplied
    :param key: optional key material; looked up on the relation if omitted

    @returns boolean: Flag to indicate whether a key was successfully written
                      to disk based on either relation data or a supplied key
    """
    if not key:
        for rid in relation_ids(relation):
            for unit in related_units(rid):
                key = relation_get('key', rid=rid, unit=unit)
                if key:
                    break
            # FIX: also break the outer loop once a key is found; previously
            # only the inner loop broke, so a later relation id with no key
            # could overwrite an already-found key with None.
            if key:
                break
    if not key:
        return False
    add_key(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        # 'user.group' is the legacy GNU chown separator; kept for
        # compatibility with existing deployments.
        check_call(['chown', '%s.%s' % (user, group), keyring])
    return True
return True | [
"def",
"ensure_ceph_keyring",
"(",
"service",
",",
"user",
"=",
"None",
",",
"group",
"=",
"None",
",",
"relation",
"=",
"'ceph'",
",",
"key",
"=",
"None",
")",
":",
"if",
"not",
"key",
":",
"for",
"rid",
"in",
"relation_ids",
"(",
"relation",
")",
":",
"for",
"unit",
"in",
"related_units",
"(",
"rid",
")",
":",
"key",
"=",
"relation_get",
"(",
"'key'",
",",
"rid",
"=",
"rid",
",",
"unit",
"=",
"unit",
")",
"if",
"key",
":",
"break",
"if",
"not",
"key",
":",
"return",
"False",
"add_key",
"(",
"service",
"=",
"service",
",",
"key",
"=",
"key",
")",
"keyring",
"=",
"_keyring_path",
"(",
"service",
")",
"if",
"user",
"and",
"group",
":",
"check_call",
"(",
"[",
"'chown'",
",",
"'%s.%s'",
"%",
"(",
"user",
",",
"group",
")",
",",
"keyring",
"]",
")",
"return",
"True"
] | 33.166667 | 19.333333 |
def set_triggered_by_event(self, value):
    """
    Setter for 'triggered_by_event' field.
    :param value - a new value of 'triggered_by_event' field. Must be a boolean type. Does not accept None value.
    :raises TypeError - if value is not a bool (including None).
    """
    # isinstance(None, bool) is already False, so the previous explicit
    # `value is None` check was redundant; a single guard suffices.
    if not isinstance(value, bool):
        raise TypeError("TriggeredByEvent must be set to a bool")
    self.__triggered_by_event = value
"def",
"set_triggered_by_event",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"\"TriggeredByEvent must be set to a bool\"",
")",
"else",
":",
"self",
".",
"__triggered_by_event",
"=",
"value"
] | 45.333333 | 17.777778 |
def modification_time(self):
    """dfdatetime.DateTimeValues: modification time or None if not available."""
    # NTFS stores FILETIME integers; wrap the raw value in a Filetime object.
    raw_timestamp = self._fsntfs_file_entry.get_modification_time_as_integer()
    return dfdatetime_filetime.Filetime(timestamp=raw_timestamp)
"def",
"modification_time",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"_fsntfs_file_entry",
".",
"get_modification_time_as_integer",
"(",
")",
"return",
"dfdatetime_filetime",
".",
"Filetime",
"(",
"timestamp",
"=",
"timestamp",
")"
] | 60.5 | 16.5 |
def ci(python="python", codecov="codecov", coverage_file="coverage.xml", wheel=True):
    """
    Run the most common CI tasks
    """
    # pip moved its programmatic entry point into pip._internal in 10.0
    from pip import __version__ as pip_version
    if Version(pip_version) >= Version("10.0.0"):
        import pip._internal as pip
    else:
        import pip
    # Install everything the build and the test run need
    pip.main(["install"] + DEPENDENCIES + REQUIREMENTS + ["-U"])
    # Optionally build and install the wheel under test
    if wheel is True:
        build_and_install_wheel(python)
    # Drop non-essential files so the tests exercise the installed package
    for target in TO_DELETE:
        rmtree(target)
    # Run the test suite with coverage against the installed ttkthemes
    status = run_command("{} -m nose --with-coverage --cover-xml --cover-package=ttkthemes".format(python))
    if status != 0:
        print("Tests failed.")
        exit(status)
    print("Tests successful.")
    # Upload the coverage report
    status = run_command("{} -f {}".format(codecov, coverage_file))
    if status != 0:
        print("Codecov failed.")
        exit(status)
    # Successfully finished CI
    exit(0)
"def",
"ci",
"(",
"python",
"=",
"\"python\"",
",",
"codecov",
"=",
"\"codecov\"",
",",
"coverage_file",
"=",
"\"coverage.xml\"",
",",
"wheel",
"=",
"True",
")",
":",
"# Import pip",
"from",
"pip",
"import",
"__version__",
"as",
"pip_version",
"if",
"Version",
"(",
"pip_version",
")",
">=",
"Version",
"(",
"\"10.0.0\"",
")",
":",
"import",
"pip",
".",
"_internal",
"as",
"pip",
"else",
":",
"import",
"pip",
"# Install requirements with pip",
"pip",
".",
"main",
"(",
"[",
"\"install\"",
"]",
"+",
"DEPENDENCIES",
"+",
"REQUIREMENTS",
"+",
"[",
"\"-U\"",
"]",
")",
"# Build the installation wheel",
"if",
"wheel",
"is",
"True",
":",
"build_and_install_wheel",
"(",
"python",
")",
"# Remove all non-essential files",
"for",
"to_delete",
"in",
"TO_DELETE",
":",
"rmtree",
"(",
"to_delete",
")",
"# Run the tests on the installed ttkthemes",
"return_code",
"=",
"run_command",
"(",
"\"{} -m nose --with-coverage --cover-xml --cover-package=ttkthemes\"",
".",
"format",
"(",
"python",
")",
")",
"if",
"return_code",
"!=",
"0",
":",
"print",
"(",
"\"Tests failed.\"",
")",
"exit",
"(",
"return_code",
")",
"print",
"(",
"\"Tests successful.\"",
")",
"# Run codecov",
"return_code",
"=",
"run_command",
"(",
"\"{} -f {}\"",
".",
"format",
"(",
"codecov",
",",
"coverage_file",
")",
")",
"if",
"return_code",
"!=",
"0",
":",
"print",
"(",
"\"Codecov failed.\"",
")",
"exit",
"(",
"return_code",
")",
"# Successfully finished CI",
"exit",
"(",
"0",
")"
] | 32.69697 | 17.060606 |
def _find_indices(self, x):
"""Find indices and distances of the given nodes.
Can be overridden by subclasses to improve efficiency.
"""
# find relevant edges between which xi are situated
index_vecs = []
# compute distance to lower edge in unity units
norm_distances = []
# iterate through dimensions
for xi, cvec in zip(x, self.coord_vecs):
idcs = np.searchsorted(cvec, xi) - 1
idcs[idcs < 0] = 0
idcs[idcs > cvec.size - 2] = cvec.size - 2
index_vecs.append(idcs)
norm_distances.append((xi - cvec[idcs]) /
(cvec[idcs + 1] - cvec[idcs]))
return index_vecs, norm_distances | [
"def",
"_find_indices",
"(",
"self",
",",
"x",
")",
":",
"# find relevant edges between which xi are situated",
"index_vecs",
"=",
"[",
"]",
"# compute distance to lower edge in unity units",
"norm_distances",
"=",
"[",
"]",
"# iterate through dimensions",
"for",
"xi",
",",
"cvec",
"in",
"zip",
"(",
"x",
",",
"self",
".",
"coord_vecs",
")",
":",
"idcs",
"=",
"np",
".",
"searchsorted",
"(",
"cvec",
",",
"xi",
")",
"-",
"1",
"idcs",
"[",
"idcs",
"<",
"0",
"]",
"=",
"0",
"idcs",
"[",
"idcs",
">",
"cvec",
".",
"size",
"-",
"2",
"]",
"=",
"cvec",
".",
"size",
"-",
"2",
"index_vecs",
".",
"append",
"(",
"idcs",
")",
"norm_distances",
".",
"append",
"(",
"(",
"xi",
"-",
"cvec",
"[",
"idcs",
"]",
")",
"/",
"(",
"cvec",
"[",
"idcs",
"+",
"1",
"]",
"-",
"cvec",
"[",
"idcs",
"]",
")",
")",
"return",
"index_vecs",
",",
"norm_distances"
] | 33.181818 | 17.545455 |
def scoped(cls, optionable, removal_version=None, removal_hint=None):
    """Returns a dependency on this subsystem, scoped to `optionable`.
    :param removal_version: An optional deprecation version for this scoped Subsystem dependency.
    :param removal_hint: An optional hint to accompany a deprecation removal_version.
    Return value is suitable for use in SubsystemClientMixin.subsystem_dependencies().
    """
    scope = optionable.options_scope
    return SubsystemDependency(cls, scope, removal_version, removal_hint)
"def",
"scoped",
"(",
"cls",
",",
"optionable",
",",
"removal_version",
"=",
"None",
",",
"removal_hint",
"=",
"None",
")",
":",
"return",
"SubsystemDependency",
"(",
"cls",
",",
"optionable",
".",
"options_scope",
",",
"removal_version",
",",
"removal_hint",
")"
] | 56.222222 | 34.333333 |
def norm(field, vmin=0, vmax=255):
    """Clip `field` to [0, 1], then map it onto [vmin, vmax] as uint8.

    :param field: array-like of floats; values outside [0, 1] are truncated.
    :param vmin: output value mapped from a clipped value of 0 (default 0).
    :param vmax: output value mapped from a clipped value of 1 (default 255).
    :returns: numpy uint8 array.

    Fix: the original ignored `vmin`/`vmax` entirely; they are now honored.
    The defaults reproduce the previous behavior (scale onto [0, 255]).
    """
    scaled = vmin + (vmax - vmin) * np.clip(field, 0, 1)
    return scaled.astype('uint8')
"def",
"norm",
"(",
"field",
",",
"vmin",
"=",
"0",
",",
"vmax",
"=",
"255",
")",
":",
"field",
"=",
"255",
"*",
"np",
".",
"clip",
"(",
"field",
",",
"0",
",",
"1",
")",
"field",
"=",
"field",
".",
"astype",
"(",
"'uint8'",
")",
"return",
"field"
] | 37.8 | 8.2 |
async def series(self):
    '''list of series in the collection
    |force|
    |coro|
    Returns
    -------
    list
        of type :class:`embypy.objects.Series`
    '''
    collected = []
    for item in await self.items:
        if item.type == 'Series':
            collected.append(item)
        elif hasattr(item, 'series'):
            # Containers (seasons, boxsets, ...) expose their own
            # `series` coroutine; flatten its results in.
            collected.extend(await item.series)
    return collected
"async",
"def",
"series",
"(",
"self",
")",
":",
"items",
"=",
"[",
"]",
"for",
"i",
"in",
"await",
"self",
".",
"items",
":",
"if",
"i",
".",
"type",
"==",
"'Series'",
":",
"items",
".",
"append",
"(",
"i",
")",
"elif",
"hasattr",
"(",
"i",
",",
"'series'",
")",
":",
"items",
".",
"extend",
"(",
"await",
"i",
".",
"series",
")",
"return",
"items"
] | 18.052632 | 22.368421 |
def _safe_output(line):
'''
Looks for rabbitmqctl warning, or general formatting, strings that aren't
intended to be parsed as output.
Returns a boolean whether the line can be parsed as rabbitmqctl output.
'''
return not any([
line.startswith('Listing') and line.endswith('...'),
line.startswith('Listing') and '\t' not in line,
'...done' in line,
line.startswith('WARNING:')
]) | [
"def",
"_safe_output",
"(",
"line",
")",
":",
"return",
"not",
"any",
"(",
"[",
"line",
".",
"startswith",
"(",
"'Listing'",
")",
"and",
"line",
".",
"endswith",
"(",
"'...'",
")",
",",
"line",
".",
"startswith",
"(",
"'Listing'",
")",
"and",
"'\\t'",
"not",
"in",
"line",
",",
"'...done'",
"in",
"line",
",",
"line",
".",
"startswith",
"(",
"'WARNING:'",
")",
"]",
")"
] | 35.666667 | 22.333333 |
def _backup_pb_gui(self, dirs):
    """Create a zip backup with a GUI progress bar."""
    import PySimpleGUI as sg  # Legacy support
    total = len(dirs)
    with ZipFile(self.zip_filename, 'w') as archive:
        for index, path in enumerate(dirs):
            # Store entries relative to the backup source root.
            archive.write(path, path[len(self.source):len(path)])
            # The meter returns False when the user cancels; stop writing.
            if not sg.OneLineProgressMeter('Writing Zip Files', index + 1, total - 1, 'Files'):
                break
"def",
"_backup_pb_gui",
"(",
"self",
",",
"dirs",
")",
":",
"import",
"PySimpleGUI",
"as",
"sg",
"# Legacy support",
"with",
"ZipFile",
"(",
"self",
".",
"zip_filename",
",",
"'w'",
")",
"as",
"backup_zip",
":",
"for",
"count",
",",
"path",
"in",
"enumerate",
"(",
"dirs",
")",
":",
"backup_zip",
".",
"write",
"(",
"path",
",",
"path",
"[",
"len",
"(",
"self",
".",
"source",
")",
":",
"len",
"(",
"path",
")",
"]",
")",
"if",
"not",
"sg",
".",
"OneLineProgressMeter",
"(",
"'Writing Zip Files'",
",",
"count",
"+",
"1",
",",
"len",
"(",
"dirs",
")",
"-",
"1",
",",
"'Files'",
")",
":",
"break"
] | 50.111111 | 18.777778 |
def _shellcomplete(cli, prog_name, complete_var=None):
    """Internal handler for the bash completion support.
    Parameters
    ----------
    cli : click.Command
        The main click Command of the program
    prog_name : str
        The program name on the command line
    complete_var : str
        The environment variable name used to control the completion behavior (Default value = None)
    """
    if complete_var is None:
        complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
    complete_instr = os.environ.get(complete_var)
    if not complete_instr:
        return

    if complete_instr == 'source':
        echo(get_code(prog_name=prog_name, env_name=complete_var))
    elif complete_instr in ('source-bash', 'source-fish',
                            'source-powershell', 'source-zsh'):
        # 'source-<shell>' emits the activation code for that shell.
        echo(get_code(complete_instr[len('source-'):], prog_name, complete_var))
    elif complete_instr in ('complete', 'complete-bash'):
        # keep 'complete' for bash for backward compatibility
        do_bash_complete(cli, prog_name)
    elif complete_instr == 'complete-fish':
        do_fish_complete(cli, prog_name)
    elif complete_instr == 'complete-powershell':
        do_powershell_complete(cli, prog_name)
    elif complete_instr == 'complete-zsh':
        do_zsh_complete(cli, prog_name)
    elif complete_instr == 'install':
        shell, path = install(prog_name=prog_name, env_name=complete_var)
        click.echo('%s completion installed in %s' % (shell, path))
    elif complete_instr in ('install-bash', 'install-fish',
                            'install-zsh', 'install-powershell'):
        # 'install-<shell>' installs completion for that specific shell.
        shell, path = install(shell=complete_instr[len('install-'):],
                              prog_name=prog_name, env_name=complete_var)
        click.echo('%s completion installed in %s' % (shell, path))
    sys.exit()
"def",
"_shellcomplete",
"(",
"cli",
",",
"prog_name",
",",
"complete_var",
"=",
"None",
")",
":",
"if",
"complete_var",
"is",
"None",
":",
"complete_var",
"=",
"'_%s_COMPLETE'",
"%",
"(",
"prog_name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
")",
".",
"upper",
"(",
")",
"complete_instr",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"complete_var",
")",
"if",
"not",
"complete_instr",
":",
"return",
"if",
"complete_instr",
"==",
"'source'",
":",
"echo",
"(",
"get_code",
"(",
"prog_name",
"=",
"prog_name",
",",
"env_name",
"=",
"complete_var",
")",
")",
"elif",
"complete_instr",
"==",
"'source-bash'",
":",
"echo",
"(",
"get_code",
"(",
"'bash'",
",",
"prog_name",
",",
"complete_var",
")",
")",
"elif",
"complete_instr",
"==",
"'source-fish'",
":",
"echo",
"(",
"get_code",
"(",
"'fish'",
",",
"prog_name",
",",
"complete_var",
")",
")",
"elif",
"complete_instr",
"==",
"'source-powershell'",
":",
"echo",
"(",
"get_code",
"(",
"'powershell'",
",",
"prog_name",
",",
"complete_var",
")",
")",
"elif",
"complete_instr",
"==",
"'source-zsh'",
":",
"echo",
"(",
"get_code",
"(",
"'zsh'",
",",
"prog_name",
",",
"complete_var",
")",
")",
"elif",
"complete_instr",
"in",
"[",
"'complete'",
",",
"'complete-bash'",
"]",
":",
"# keep 'complete' for bash for backward compatibility",
"do_bash_complete",
"(",
"cli",
",",
"prog_name",
")",
"elif",
"complete_instr",
"==",
"'complete-fish'",
":",
"do_fish_complete",
"(",
"cli",
",",
"prog_name",
")",
"elif",
"complete_instr",
"==",
"'complete-powershell'",
":",
"do_powershell_complete",
"(",
"cli",
",",
"prog_name",
")",
"elif",
"complete_instr",
"==",
"'complete-zsh'",
":",
"do_zsh_complete",
"(",
"cli",
",",
"prog_name",
")",
"elif",
"complete_instr",
"==",
"'install'",
":",
"shell",
",",
"path",
"=",
"install",
"(",
"prog_name",
"=",
"prog_name",
",",
"env_name",
"=",
"complete_var",
")",
"click",
".",
"echo",
"(",
"'%s completion installed in %s'",
"%",
"(",
"shell",
",",
"path",
")",
")",
"elif",
"complete_instr",
"==",
"'install-bash'",
":",
"shell",
",",
"path",
"=",
"install",
"(",
"shell",
"=",
"'bash'",
",",
"prog_name",
"=",
"prog_name",
",",
"env_name",
"=",
"complete_var",
")",
"click",
".",
"echo",
"(",
"'%s completion installed in %s'",
"%",
"(",
"shell",
",",
"path",
")",
")",
"elif",
"complete_instr",
"==",
"'install-fish'",
":",
"shell",
",",
"path",
"=",
"install",
"(",
"shell",
"=",
"'fish'",
",",
"prog_name",
"=",
"prog_name",
",",
"env_name",
"=",
"complete_var",
")",
"click",
".",
"echo",
"(",
"'%s completion installed in %s'",
"%",
"(",
"shell",
",",
"path",
")",
")",
"elif",
"complete_instr",
"==",
"'install-zsh'",
":",
"shell",
",",
"path",
"=",
"install",
"(",
"shell",
"=",
"'zsh'",
",",
"prog_name",
"=",
"prog_name",
",",
"env_name",
"=",
"complete_var",
")",
"click",
".",
"echo",
"(",
"'%s completion installed in %s'",
"%",
"(",
"shell",
",",
"path",
")",
")",
"elif",
"complete_instr",
"==",
"'install-powershell'",
":",
"shell",
",",
"path",
"=",
"install",
"(",
"shell",
"=",
"'powershell'",
",",
"prog_name",
"=",
"prog_name",
",",
"env_name",
"=",
"complete_var",
")",
"click",
".",
"echo",
"(",
"'%s completion installed in %s'",
"%",
"(",
"shell",
",",
"path",
")",
")",
"sys",
".",
"exit",
"(",
")"
] | 46.943396 | 17.679245 |
def confusion_matrix(links_true, links_pred, total=None):
    """Compute the 2x2 confusion matrix.

    The matrix layout is::

        [[TP, FN],
         [FP, TN]]

    i.e. true positives at [0,0], false negatives at [0,1], false
    positives at [1,0] and true negatives at [1,1]. It is an informative
    way to analyse a prediction and can be used to derive measures such
    as precision and recall.

    Parameters
    ----------
    links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The true (or actual) links.
    links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The predicted links.
    total: int, pandas.MultiIndex
        The count of all record pairs (both links and non-links). When a
        pandas.MultiIndex is given, its length is used. When ``total`` is
        None, the number of true negatives is not computed. Default None.

    Returns
    -------
    numpy.array
        The confusion matrix with TP, TN, FN, FP values.

    Note
    ----
    The number of true negatives is computed from the ``total`` argument,
    which is the number of record pairs of the entire matrix.
    """
    true_index = _get_multiindex(links_true)
    pred_index = _get_multiindex(links_pred)

    tp = true_positives(true_index, pred_index)
    fp = false_positives(true_index, pred_index)
    fn = false_negatives(true_index, pred_index)
    # True negatives require knowing the size of the full pair space.
    tn = numpy.nan if total is None else true_negatives(true_index, pred_index, total)

    return numpy.array([[tp, fn], [fp, tn]])
"def",
"confusion_matrix",
"(",
"links_true",
",",
"links_pred",
",",
"total",
"=",
"None",
")",
":",
"links_true",
"=",
"_get_multiindex",
"(",
"links_true",
")",
"links_pred",
"=",
"_get_multiindex",
"(",
"links_pred",
")",
"tp",
"=",
"true_positives",
"(",
"links_true",
",",
"links_pred",
")",
"fp",
"=",
"false_positives",
"(",
"links_true",
",",
"links_pred",
")",
"fn",
"=",
"false_negatives",
"(",
"links_true",
",",
"links_pred",
")",
"if",
"total",
"is",
"None",
":",
"tn",
"=",
"numpy",
".",
"nan",
"else",
":",
"tn",
"=",
"true_negatives",
"(",
"links_true",
",",
"links_pred",
",",
"total",
")",
"return",
"numpy",
".",
"array",
"(",
"[",
"[",
"tp",
",",
"fn",
"]",
",",
"[",
"fp",
",",
"tn",
"]",
"]",
")"
] | 37.745455 | 25.690909 |
def style_from_dict(style_dict, include_defaults=True):
    """
    Create a ``Style`` instance from a dictionary or other mapping.

    The mapping is equivalent to the ``Style.styles`` dictionary from
    pygments, with a few additions: it supports 'reverse' and 'blink'.

    Usage::

        style_from_dict({
            Token: '#ff0000 bold underline',
            Token.Title: 'blink',
            Token.SomethingElse: 'reverse',
        })

    :param include_defaults: Include the defaults (built-in) styling for
        selected text, etc...)
    """
    assert isinstance(style_dict, Mapping)

    if include_defaults:
        merged = {}
        merged.update(DEFAULT_STYLE_EXTENSIONS)
        merged.update(style_dict)
        style_dict = merged

    # Style words that simply toggle a boolean Attrs field.
    # ('blink' and 'reverse' are prompt_toolkit extensions, not in Pygments.)
    flag_table = {
        'bold': ('bold', True), 'nobold': ('bold', False),
        'italic': ('italic', True), 'noitalic': ('italic', False),
        'underline': ('underline', True), 'nounderline': ('underline', False),
        'blink': ('blink', True), 'noblink': ('blink', False),
        'reverse': ('reverse', True), 'noreverse': ('reverse', False),
    }

    # Expand token inheritance and turn each style description into Attrs.
    token_to_attrs = {}

    # Sorted iteration guarantees a parent token is processed before any
    # of its children.
    for token, description in sorted(style_dict.items()):
        attrs = DEFAULT_ATTRS

        if 'noinherit' not in description:
            # Walk up the token hierarchy and start from the closest
            # ancestor that already has attributes.
            for depth in range(1, len(token) + 1):
                try:
                    attrs = token_to_attrs[token[:-depth]]
                except KeyError:
                    pass
                else:
                    break

        # Apply each word of the style description in order.
        for word in description.split():
            if word in flag_table:
                field, value = flag_table[word]
                attrs = attrs._replace(**{field: value})
            elif word == 'noinherit':
                pass
            elif word in ('roman', 'sans', 'mono'):
                pass  # Pygments properties that we ignore.
            elif word.startswith('border:'):
                pass  # Pygments property that we ignore.
            elif word.startswith('bg:'):
                attrs = attrs._replace(bgcolor=_colorformat(word[3:]))
            else:
                attrs = attrs._replace(color=_colorformat(word))

        token_to_attrs[token] = attrs

    return _StyleFromDict(token_to_attrs)
"def",
"style_from_dict",
"(",
"style_dict",
",",
"include_defaults",
"=",
"True",
")",
":",
"assert",
"isinstance",
"(",
"style_dict",
",",
"Mapping",
")",
"if",
"include_defaults",
":",
"s2",
"=",
"{",
"}",
"s2",
".",
"update",
"(",
"DEFAULT_STYLE_EXTENSIONS",
")",
"s2",
".",
"update",
"(",
"style_dict",
")",
"style_dict",
"=",
"s2",
"# Expand token inheritance and turn style description into Attrs.",
"token_to_attrs",
"=",
"{",
"}",
"# (Loop through the tokens in order. Sorting makes sure that",
"# we process the parent first.)",
"for",
"ttype",
",",
"styledef",
"in",
"sorted",
"(",
"style_dict",
".",
"items",
"(",
")",
")",
":",
"# Start from parent Attrs or default Attrs.",
"attrs",
"=",
"DEFAULT_ATTRS",
"if",
"'noinherit'",
"not",
"in",
"styledef",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"ttype",
")",
"+",
"1",
")",
":",
"try",
":",
"attrs",
"=",
"token_to_attrs",
"[",
"ttype",
"[",
":",
"-",
"i",
"]",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"break",
"# Now update with the given attributes.",
"for",
"part",
"in",
"styledef",
".",
"split",
"(",
")",
":",
"if",
"part",
"==",
"'noinherit'",
":",
"pass",
"elif",
"part",
"==",
"'bold'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"bold",
"=",
"True",
")",
"elif",
"part",
"==",
"'nobold'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"bold",
"=",
"False",
")",
"elif",
"part",
"==",
"'italic'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"italic",
"=",
"True",
")",
"elif",
"part",
"==",
"'noitalic'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"italic",
"=",
"False",
")",
"elif",
"part",
"==",
"'underline'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"underline",
"=",
"True",
")",
"elif",
"part",
"==",
"'nounderline'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"underline",
"=",
"False",
")",
"# prompt_toolkit extensions. Not in Pygments.",
"elif",
"part",
"==",
"'blink'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"blink",
"=",
"True",
")",
"elif",
"part",
"==",
"'noblink'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"blink",
"=",
"False",
")",
"elif",
"part",
"==",
"'reverse'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"reverse",
"=",
"True",
")",
"elif",
"part",
"==",
"'noreverse'",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"reverse",
"=",
"False",
")",
"# Pygments properties that we ignore.",
"elif",
"part",
"in",
"(",
"'roman'",
",",
"'sans'",
",",
"'mono'",
")",
":",
"pass",
"elif",
"part",
".",
"startswith",
"(",
"'border:'",
")",
":",
"pass",
"# Colors.",
"elif",
"part",
".",
"startswith",
"(",
"'bg:'",
")",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"bgcolor",
"=",
"_colorformat",
"(",
"part",
"[",
"3",
":",
"]",
")",
")",
"else",
":",
"attrs",
"=",
"attrs",
".",
"_replace",
"(",
"color",
"=",
"_colorformat",
"(",
"part",
")",
")",
"token_to_attrs",
"[",
"ttype",
"]",
"=",
"attrs",
"return",
"_StyleFromDict",
"(",
"token_to_attrs",
")"
] | 32.724138 | 17.229885 |
def on_open_output_tool_clicked(self):
"""Autoconnect slot activated when open output tool button is clicked.
"""
output_path = self.output_path.text()
if not output_path:
output_path = os.path.expanduser('~')
# noinspection PyCallByClass,PyTypeChecker
filename, __ = QFileDialog.getSaveFileName(
self, tr('Output file'), output_path, tr('Raster file (*.tif)'))
if filename:
self.output_path.setText(filename) | [
"def",
"on_open_output_tool_clicked",
"(",
"self",
")",
":",
"output_path",
"=",
"self",
".",
"output_path",
".",
"text",
"(",
")",
"if",
"not",
"output_path",
":",
"output_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
"# noinspection PyCallByClass,PyTypeChecker",
"filename",
",",
"__",
"=",
"QFileDialog",
".",
"getSaveFileName",
"(",
"self",
",",
"tr",
"(",
"'Output file'",
")",
",",
"output_path",
",",
"tr",
"(",
"'Raster file (*.tif)'",
")",
")",
"if",
"filename",
":",
"self",
".",
"output_path",
".",
"setText",
"(",
"filename",
")"
] | 44.636364 | 10.181818 |
    def OnRemoveCards(self, removedcards):
        """Called when a card is removed.
        Removes the card from the tree.

        :param removedcards: iterable of removed card objects; each one is
            expected to carry a ``reader`` attribute identifying the reader
            it was inserted in.
        """
        # NOTE(review): the mutex presumably guards against concurrent tree
        # updates from a card-monitoring thread — confirm against callers.
        self.mutex.acquire()
        try:
            parentnode = self.root
            for cardtoremove in removedcards:
                # Scan the reader nodes (children of the root) for the
                # reader that held the removed card.
                (childReader, cookie) = self.GetFirstChild(parentnode)
                found = False
                while childReader.IsOk() and not found:
                    if self.GetItemText(childReader) == \
                            str(cardtoremove.reader):
                        # Relabel the reader's card child rather than
                        # deleting the node.
                        (childCard, cookie2) = self.GetFirstChild(childReader)
                        self.SetItemText(childCard, 'no card inserted')
                        found = True
                    else:
                        (childReader, cookie) = \
                            self.GetNextChild(parentnode, cookie)
                self.Expand(self.root)
        finally:
            # Always release the lock, even if the tree walk raised.
            self.mutex.release()
        self.EnsureVisible(self.root)
        self.Repaint()
"def",
"OnRemoveCards",
"(",
"self",
",",
"removedcards",
")",
":",
"self",
".",
"mutex",
".",
"acquire",
"(",
")",
"try",
":",
"parentnode",
"=",
"self",
".",
"root",
"for",
"cardtoremove",
"in",
"removedcards",
":",
"(",
"childReader",
",",
"cookie",
")",
"=",
"self",
".",
"GetFirstChild",
"(",
"parentnode",
")",
"found",
"=",
"False",
"while",
"childReader",
".",
"IsOk",
"(",
")",
"and",
"not",
"found",
":",
"if",
"self",
".",
"GetItemText",
"(",
"childReader",
")",
"==",
"str",
"(",
"cardtoremove",
".",
"reader",
")",
":",
"(",
"childCard",
",",
"cookie2",
")",
"=",
"self",
".",
"GetFirstChild",
"(",
"childReader",
")",
"self",
".",
"SetItemText",
"(",
"childCard",
",",
"'no card inserted'",
")",
"found",
"=",
"True",
"else",
":",
"(",
"childReader",
",",
"cookie",
")",
"=",
"self",
".",
"GetNextChild",
"(",
"parentnode",
",",
"cookie",
")",
"self",
".",
"Expand",
"(",
"self",
".",
"root",
")",
"finally",
":",
"self",
".",
"mutex",
".",
"release",
"(",
")",
"self",
".",
"EnsureVisible",
"(",
"self",
".",
"root",
")",
"self",
".",
"Repaint",
"(",
")"
] | 42.130435 | 13.913043 |
def child_added(self, child):
""" Overwrite the content view """
view = child.widget
if view is not None:
self.dialog.setContentView(view) | [
"def",
"child_added",
"(",
"self",
",",
"child",
")",
":",
"view",
"=",
"child",
".",
"widget",
"if",
"view",
"is",
"not",
"None",
":",
"self",
".",
"dialog",
".",
"setContentView",
"(",
"view",
")"
] | 34 | 8 |
def main(bot):
    """
    Entry point for the command line launcher.

    Spawns the bot in a greenlet and blocks until it finishes or the
    user interrupts with Ctrl-C.

    :param bot: the IRC bot to run
    :type bot: :class:`fatbotslim.irc.bot.IRC`
    """
    greenlet = spawn(bot.run)
    try:
        # Block the launcher until the bot's greenlet terminates.
        greenlet.join()
    except KeyboardInterrupt:
        # Start the shutdown log message on a fresh line after the ^C.
        print ''  # cosmetics matters
        log.info("Killed by user, disconnecting...")
        bot.disconnect()
    finally:
        # Make sure the bot greenlet is stopped however we exit.
        greenlet.kill()
"def",
"main",
"(",
"bot",
")",
":",
"greenlet",
"=",
"spawn",
"(",
"bot",
".",
"run",
")",
"try",
":",
"greenlet",
".",
"join",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"''",
"# cosmetics matters",
"log",
".",
"info",
"(",
"\"Killed by user, disconnecting...\"",
")",
"bot",
".",
"disconnect",
"(",
")",
"finally",
":",
"greenlet",
".",
"kill",
"(",
")"
] | 24.4375 | 14.4375 |
def select_entry(self, core_element_id, by_cursor=True):
"""Selects the row entry belonging to the given core_element_id by cursor or tree selection"""
for row_num, element_row in enumerate(self.list_store):
# Compare data port ids
if element_row[self.ID_STORAGE_ID] == core_element_id:
if by_cursor:
self.tree_view.set_cursor(row_num)
else:
self.tree_view.get_selection().select_path((row_num, ))
break | [
"def",
"select_entry",
"(",
"self",
",",
"core_element_id",
",",
"by_cursor",
"=",
"True",
")",
":",
"for",
"row_num",
",",
"element_row",
"in",
"enumerate",
"(",
"self",
".",
"list_store",
")",
":",
"# Compare data port ids",
"if",
"element_row",
"[",
"self",
".",
"ID_STORAGE_ID",
"]",
"==",
"core_element_id",
":",
"if",
"by_cursor",
":",
"self",
".",
"tree_view",
".",
"set_cursor",
"(",
"row_num",
")",
"else",
":",
"self",
".",
"tree_view",
".",
"get_selection",
"(",
")",
".",
"select_path",
"(",
"(",
"row_num",
",",
")",
")",
"break"
] | 52.2 | 16.8 |
def create_tfs_core_client(url, token=None):
    """
    Create a core_client.py client for a Team Foundation Server Enterprise connection instance

    If token is not provided, will attempt to use the TFS_API_TOKEN
    environment variable if present.

    :param url: Base URL of the TFS Enterprise instance.
    :param token: Personal access token; falls back to the ``TFS_API_TOKEN``
        environment variable when omitted.
    :return: A connected ``CoreClient`` instance.
    :raise RuntimeError: If no client could be obtained with the given token.
    """
    if token is None:
        token = os.environ.get('TFS_API_TOKEN', None)
    tfs_connection = create_tfs_connection(url, token)
    tfs_client = tfs_connection.get_client('vsts.core.v4_1.core_client.CoreClient')
    if tfs_client is None:
        msg = 'Unable to connect to TFS Enterprise (%s) with provided token.'
        # Interpolate the URL into the message; passing it as a second
        # positional argument would leave the '%s' placeholder unfilled.
        raise RuntimeError(msg % url)
    return tfs_client
"def",
"create_tfs_core_client",
"(",
"url",
",",
"token",
"=",
"None",
")",
":",
"if",
"token",
"is",
"None",
":",
"token",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'TFS_API_TOKEN'",
",",
"None",
")",
"tfs_connection",
"=",
"create_tfs_connection",
"(",
"url",
",",
"token",
")",
"tfs_client",
"=",
"tfs_connection",
".",
"get_client",
"(",
"'vsts.core.v4_1.core_client.CoreClient'",
")",
"if",
"tfs_client",
"is",
"None",
":",
"msg",
"=",
"'Unable to connect to TFS Enterprise (%s) with provided token.'",
"raise",
"RuntimeError",
"(",
"msg",
",",
"url",
")",
"return",
"tfs_client"
] | 34.777778 | 22.888889 |
def get(self, *keys: str, default: Any = NOT_SET) -> Any:
""" Returns values from the settings in the order of keys, the first value encountered is used.
Example:
>>> settings = Settings({"ARCA_ONE": 1, "ARCA_TWO": 2})
>>> settings.get("one")
1
>>> settings.get("one", "two")
1
>>> settings.get("two", "one")
2
>>> settings.get("three", "one")
1
>>> settings.get("three", default=3)
3
>>> settings.get("three")
Traceback (most recent call last):
...
KeyError:
:param keys: One or more keys to get from settings. If multiple keys are provided, the value of the first key
that has a value is returned.
:param default: If none of the ``options`` aren't set, return this value.
:return: A value from the settings or the default.
:raise ValueError: If no keys are provided.
:raise KeyError: If none of the keys are set and no default is provided.
"""
if not len(keys):
raise ValueError("At least one key must be provided.")
for option in keys:
key = f"{self.PREFIX}_{option.upper()}"
if key in self._data:
return self._data[key]
if default is NOT_SET:
raise KeyError("None of the following key is present in settings and no default is set: {}".format(
", ".join(keys)
))
return default | [
"def",
"get",
"(",
"self",
",",
"*",
"keys",
":",
"str",
",",
"default",
":",
"Any",
"=",
"NOT_SET",
")",
"->",
"Any",
":",
"if",
"not",
"len",
"(",
"keys",
")",
":",
"raise",
"ValueError",
"(",
"\"At least one key must be provided.\"",
")",
"for",
"option",
"in",
"keys",
":",
"key",
"=",
"f\"{self.PREFIX}_{option.upper()}\"",
"if",
"key",
"in",
"self",
".",
"_data",
":",
"return",
"self",
".",
"_data",
"[",
"key",
"]",
"if",
"default",
"is",
"NOT_SET",
":",
"raise",
"KeyError",
"(",
"\"None of the following key is present in settings and no default is set: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"keys",
")",
")",
")",
"return",
"default"
] | 33.295455 | 23.022727 |
def _check_logged_user(self):
"""Check if a user is logged. Otherwise, an error is raised."""
if not self._env or not self._password or not self._login:
raise error.InternalError("Login required") | [
"def",
"_check_logged_user",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_env",
"or",
"not",
"self",
".",
"_password",
"or",
"not",
"self",
".",
"_login",
":",
"raise",
"error",
".",
"InternalError",
"(",
"\"Login required\"",
")"
] | 55.25 | 13 |
def _force(self,obj,objtype=None):
"""Force a new value to be generated, and return it."""
gen=super(Dynamic,self).__get__(obj,objtype)
if hasattr(gen,'_Dynamic_last'):
return self._produce_value(gen,force=True)
else:
return gen | [
"def",
"_force",
"(",
"self",
",",
"obj",
",",
"objtype",
"=",
"None",
")",
":",
"gen",
"=",
"super",
"(",
"Dynamic",
",",
"self",
")",
".",
"__get__",
"(",
"obj",
",",
"objtype",
")",
"if",
"hasattr",
"(",
"gen",
",",
"'_Dynamic_last'",
")",
":",
"return",
"self",
".",
"_produce_value",
"(",
"gen",
",",
"force",
"=",
"True",
")",
"else",
":",
"return",
"gen"
] | 34.75 | 14.625 |
def deinit(ctx):
    """
    Detach from the current cloud server
    """
    utils.check_for_cloud_server()
    # Stop replicating to the cloud before forgetting about it.
    local_url = config["local_server"]["url"]
    if local_url:
        utils.cancel_global_db_replication()
    # Log out any authenticated cloud user.
    cloud_user = config["cloud_server"]["username"]
    if cloud_user:
        ctx.invoke(logout_user)
    config["cloud_server"]["url"] = None
config["cloud_server"]["url"] = None | [
"def",
"deinit",
"(",
"ctx",
")",
":",
"utils",
".",
"check_for_cloud_server",
"(",
")",
"if",
"config",
"[",
"\"local_server\"",
"]",
"[",
"\"url\"",
"]",
":",
"utils",
".",
"cancel_global_db_replication",
"(",
")",
"if",
"config",
"[",
"\"cloud_server\"",
"]",
"[",
"\"username\"",
"]",
":",
"ctx",
".",
"invoke",
"(",
"logout_user",
")",
"config",
"[",
"\"cloud_server\"",
"]",
"[",
"\"url\"",
"]",
"=",
"None"
] | 29.8 | 4.8 |
def _hexvalue_to_rgb(hexvalue):
"""
Converts the hexvalue used by tuya for colour representation into
an RGB value.
Args:
hexvalue(string): The hex representation generated by BulbDevice._rgb_to_hexvalue()
"""
r = int(hexvalue[0:2], 16)
g = int(hexvalue[2:4], 16)
b = int(hexvalue[4:6], 16)
return (r, g, b) | [
"def",
"_hexvalue_to_rgb",
"(",
"hexvalue",
")",
":",
"r",
"=",
"int",
"(",
"hexvalue",
"[",
"0",
":",
"2",
"]",
",",
"16",
")",
"g",
"=",
"int",
"(",
"hexvalue",
"[",
"2",
":",
"4",
"]",
",",
"16",
")",
"b",
"=",
"int",
"(",
"hexvalue",
"[",
"4",
":",
"6",
"]",
",",
"16",
")",
"return",
"(",
"r",
",",
"g",
",",
"b",
")"
] | 29.923077 | 19.153846 |
def top3_reduced(votes):
    """
    Description:
        Top 3 alternatives 16 moment conditions values calculation
    Parameters:
        votes: ordinal preference data (numpy ndarray of integers)
    """
    # Moment index selected directly by the (first, second) ranked pair.
    pair_moment = {
        (0, 2): 4, (0, 3): 5,
        (1, 0): 6, (1, 3): 7,
        (2, 0): 8, (2, 1): 9,
        (3, 1): 10, (3, 2): 11,
    }
    # (first, second) -> (required third-ranked alternative, moment index)
    # for the conditions that also constrain the third position.
    triple_moment = {
        (0, 1): (2, 14),
        (1, 2): (3, 15),
        (2, 3): (0, 12),
        (3, 0): (1, 13),
    }

    res = np.zeros(16)
    for vote in votes:
        # Top-ranked alternative is vote[0][0], second-ranked vote[1][0].
        first, second = vote[0][0], vote[1][0]
        if first not in (0, 1, 2, 3):
            continue
        res[first] += 1
        pair = (first, second)
        if pair in pair_moment:
            res[pair_moment[pair]] += 1
        elif pair in triple_moment:
            required_third, moment = triple_moment[pair]
            if vote[2][0] == required_third:
                res[moment] += 1
    res /= len(votes)
    return res
"def",
"top3_reduced",
"(",
"votes",
")",
":",
"res",
"=",
"np",
".",
"zeros",
"(",
"16",
")",
"for",
"vote",
"in",
"votes",
":",
"# the top ranked alternative is in vote[0][0], second in vote[1][0]",
"if",
"vote",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"0",
":",
"# i.e. the first alt is ranked first",
"res",
"[",
"0",
"]",
"+=",
"1",
"if",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"2",
":",
"res",
"[",
"4",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"3",
":",
"res",
"[",
"5",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"1",
"and",
"vote",
"[",
"2",
"]",
"[",
"0",
"]",
"==",
"2",
":",
"res",
"[",
"14",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"1",
":",
"res",
"[",
"1",
"]",
"+=",
"1",
"if",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"0",
":",
"res",
"[",
"6",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"3",
":",
"res",
"[",
"7",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"2",
"and",
"vote",
"[",
"2",
"]",
"[",
"0",
"]",
"==",
"3",
":",
"res",
"[",
"15",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"2",
":",
"res",
"[",
"2",
"]",
"+=",
"1",
"if",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"0",
":",
"res",
"[",
"8",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"1",
":",
"res",
"[",
"9",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"3",
"and",
"vote",
"[",
"2",
"]",
"[",
"0",
"]",
"==",
"0",
":",
"res",
"[",
"12",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"3",
":",
"res",
"[",
"3",
"]",
"+=",
"1",
"if",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"1",
":",
"res",
"[",
"10",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"2",
":",
"res",
"[",
"11",
"]",
"+=",
"1",
"elif",
"vote",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"0",
"and",
"vote",
"[",
"2",
"]",
"[",
"0",
"]",
"==",
"1",
":",
"res",
"[",
"13",
"]",
"+=",
"1",
"res",
"/=",
"len",
"(",
"votes",
")",
"return",
"res"
] | 31.727273 | 14.181818 |
def do_display(self, arg):
"""
display expression
Add expression to the display list; expressions in this list
are evaluated at each step, and printed every time its value
changes.
WARNING: since the expressions is evaluated multiple time, pay
attention not to put expressions with side-effects in the
display list.
"""
try:
value = self._getval_or_undefined(arg)
except:
return
self._get_display_list()[arg] = value | [
"def",
"do_display",
"(",
"self",
",",
"arg",
")",
":",
"try",
":",
"value",
"=",
"self",
".",
"_getval_or_undefined",
"(",
"arg",
")",
"except",
":",
"return",
"self",
".",
"_get_display_list",
"(",
")",
"[",
"arg",
"]",
"=",
"value"
] | 30.705882 | 20.705882 |
def real_main():
    """The main program.

    Parses the command line (with RSHELL_* environment variables
    supplying per-user defaults), optionally connects to a MicroPython
    board, and runs the shell interactively, on a command string, or on
    a command file.
    """
    global RTS
    global DTR
    # Environment variables provide the defaults for most options.
    try:
        default_baud = int(os.getenv('RSHELL_BAUD'))
    except:
        # RSHELL_BAUD unset or not an integer.
        default_baud = 115200
    default_port = os.getenv('RSHELL_PORT')
    default_rts = os.getenv('RSHELL_RTS') or RTS
    default_dtr = os.getenv('RSHELL_DTR') or DTR
    default_user = os.getenv('RSHELL_USER') or 'micro'
    default_password = os.getenv('RSHELL_PASSWORD') or 'python'
    default_editor = os.getenv('RSHELL_EDITOR') or os.getenv('VISUAL') or os.getenv('EDITOR') or 'vi'
    global BUFFER_SIZE
    try:
        default_buffer_size = int(os.getenv('RSHELL_BUFFER_SIZE'))
    except:
        default_buffer_size = BUFFER_SIZE
    # NOTE(review): default_buffer_size is computed but never used below —
    # the --buffer-size option has no default, so RSHELL_BUFFER_SIZE is
    # currently ignored. Confirm whether it should be wired in.
    parser = argparse.ArgumentParser(
        prog="rshell",
        usage="%(prog)s [options] [command]",
        description="Remote Shell for a MicroPython board.",
        epilog=("You can specify the default serial port using the " +
                "RSHELL_PORT environment variable.")
    )
    parser.add_argument(
        "-b", "--baud",
        dest="baud",
        action="store",
        type=int,
        help="Set the baudrate used (default = %d)" % default_baud,
        default=default_baud
    )
    parser.add_argument(
        "--buffer-size",
        dest="buffer_size",
        action="store",
        type=int,
        help="Set the buffer size used for transfers "
             "(default = %d for USB, %d for UART)" %
             (USB_BUFFER_SIZE, UART_BUFFER_SIZE),
    )
    parser.add_argument(
        "-p", "--port",
        dest="port",
        help="Set the serial port to use (default '%s')" % default_port,
        default=default_port
    )
    parser.add_argument(
        "--rts",
        dest="rts",
        help="Set the RTS state (default '%s')" % default_rts,
        default=default_rts
    )
    parser.add_argument(
        "--dtr",
        dest="dtr",
        help="Set the DTR state (default '%s')" % default_dtr,
        default=default_dtr
    )
    parser.add_argument(
        "-u", "--user",
        dest="user",
        help="Set username to use (default '%s')" % default_user,
        default=default_user
    )
    parser.add_argument(
        "-w", "--password",
        dest="password",
        help="Set password to use (default '%s')" % default_password,
        default=default_password
    )
    parser.add_argument(
        "-e", "--editor",
        dest="editor",
        help="Set the editor to use (default '%s')" % default_editor,
        default=default_editor
    )
    parser.add_argument(
        "-f", "--file",
        dest="filename",
        help="Specifies a file of commands to process."
    )
    parser.add_argument(
        "-d", "--debug",
        dest="debug",
        action="store_true",
        help="Enable debug features",
        default=False
    )
    parser.add_argument(
        "-n", "--nocolor",
        dest="nocolor",
        action="store_true",
        help="Turn off colorized output",
        default=False
    )
    parser.add_argument(
        "-l", "--list",
        dest="list",
        action="store_true",
        help="Display serial ports",
        default=False
    )
    parser.add_argument(
        "-a", "--ascii",
        dest="ascii_xfer",
        action="store_true",
        help="ASCII encode binary files for transfer",
        default=False
    )
    parser.add_argument(
        "--wait",
        dest="wait",
        type=int,
        action="store",
        help="Seconds to wait for serial port",
        default=0
    )
    parser.add_argument(
        "--timing",
        dest="timing",
        action="store_true",
        help="Print timing information about each command",
        default=False
    )
    parser.add_argument(
        '-V', '--version',
        dest='version',
        action='store_true',
        help='Reports the version and exits.',
        default=False
    )
    parser.add_argument(
        "--quiet",
        dest="quiet",
        action="store_true",
        help="Turns off some output (useful for testing)",
        default=False
    )
    parser.add_argument(
        "cmd",
        nargs=argparse.REMAINDER,
        help="Optional command to execute"
    )
    args = parser.parse_args(sys.argv[1:])
    # An explicit --buffer-size always wins over the auto-selected value.
    if args.buffer_size is not None:
        BUFFER_SIZE = args.buffer_size
    if args.debug:
        print("Debug = %s" % args.debug)
        print("Port = %s" % args.port)
        print("Baud = %d" % args.baud)
        print("User = %s" % args.user)
        print("Password = %s" % args.password)
        print("Wait = %d" % args.wait)
        print("List = %d" % args.list)
        print("nocolor = %d" % args.nocolor)
        print("ascii = %d" % args.ascii_xfer)
        print("Timing = %d" % args.timing)
        print("Quiet = %d" % args.quiet)
        print("BUFFER_SIZE = %d" % BUFFER_SIZE)
        print("Cmd = [%s]" % ', '.join(args.cmd))
    if args.version:
        print(__version__)
        return
    # Publish the parsed options through the module-level globals used
    # by the rest of the program.
    global DEBUG
    DEBUG = args.debug
    global QUIET
    QUIET = args.quiet
    global EDITOR
    EDITOR = args.editor
    if args.nocolor:
        global DIR_COLOR, PROMPT_COLOR, PY_COLOR, END_COLOR
        DIR_COLOR = ''
        PROMPT_COLOR = ''
        PY_COLOR = ''
        END_COLOR = ''
    else:
        if sys.platform == 'darwin':
            # The readline that comes with OSX screws up colors in the prompt
            global FAKE_INPUT_PROMPT
            FAKE_INPUT_PROMPT = True
    global ASCII_XFER
    ASCII_XFER = args.ascii_xfer
    RTS = args.rts
    DTR = args.dtr
    if args.list:
        listports()
        return
    if args.port:
        # NOTE(review): this forces ASCII transfers whenever a port is
        # specified, overriding the --ascii flag — confirm intent.
        ASCII_XFER = True
        if args.buffer_size is None:
            # Auto-select: USB serial can take larger chunks than a UART.
            if is_micropython_usb_port(args.port):
                BUFFER_SIZE = USB_BUFFER_SIZE
            else:
                BUFFER_SIZE = UART_BUFFER_SIZE
        QUIET or print('Using buffer-size of', BUFFER_SIZE)
        try:
            connect(args.port, baud=args.baud, wait=args.wait, user=args.user, password=args.password)
        except DeviceError as err:
            print(err)
    else:
        # No port given: discover and attach boards automatically.
        autoscan()
        autoconnect()
    if args.filename:
        # Batch mode: run the commands from the given file.
        with open(args.filename) as cmd_file:
            shell = Shell(stdin=cmd_file, filename=args.filename, timing=args.timing)
            shell.cmdloop('')
    else:
        cmd_line = ' '.join(args.cmd)
        if cmd_line == '':
            print('Welcome to rshell.', EXIT_STR)
            if num_devices() == 0:
                print('')
                print('No MicroPython boards connected - use the connect command to add one')
                print('')
        shell = Shell(timing=args.timing)
        try:
            shell.cmdloop(cmd_line)
        except KeyboardInterrupt:
            # Leave the terminal on a fresh line after ^C.
            print('')
"def",
"real_main",
"(",
")",
":",
"global",
"RTS",
"global",
"DTR",
"try",
":",
"default_baud",
"=",
"int",
"(",
"os",
".",
"getenv",
"(",
"'RSHELL_BAUD'",
")",
")",
"except",
":",
"default_baud",
"=",
"115200",
"default_port",
"=",
"os",
".",
"getenv",
"(",
"'RSHELL_PORT'",
")",
"default_rts",
"=",
"os",
".",
"getenv",
"(",
"'RSHELL_RTS'",
")",
"or",
"RTS",
"default_dtr",
"=",
"os",
".",
"getenv",
"(",
"'RSHELL_DTR'",
")",
"or",
"DTR",
"default_user",
"=",
"os",
".",
"getenv",
"(",
"'RSHELL_USER'",
")",
"or",
"'micro'",
"default_password",
"=",
"os",
".",
"getenv",
"(",
"'RSHELL_PASSWORD'",
")",
"or",
"'python'",
"default_editor",
"=",
"os",
".",
"getenv",
"(",
"'RSHELL_EDITOR'",
")",
"or",
"os",
".",
"getenv",
"(",
"'VISUAL'",
")",
"or",
"os",
".",
"getenv",
"(",
"'EDITOR'",
")",
"or",
"'vi'",
"global",
"BUFFER_SIZE",
"try",
":",
"default_buffer_size",
"=",
"int",
"(",
"os",
".",
"getenv",
"(",
"'RSHELL_BUFFER_SIZE'",
")",
")",
"except",
":",
"default_buffer_size",
"=",
"BUFFER_SIZE",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"rshell\"",
",",
"usage",
"=",
"\"%(prog)s [options] [command]\"",
",",
"description",
"=",
"\"Remote Shell for a MicroPython board.\"",
",",
"epilog",
"=",
"(",
"\"You can specify the default serial port using the \"",
"+",
"\"RSHELL_PORT environment variable.\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"-b\"",
",",
"\"--baud\"",
",",
"dest",
"=",
"\"baud\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Set the baudrate used (default = %d)\"",
"%",
"default_baud",
",",
"default",
"=",
"default_baud",
")",
"parser",
".",
"add_argument",
"(",
"\"--buffer-size\"",
",",
"dest",
"=",
"\"buffer_size\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Set the buffer size used for transfers \"",
"\"(default = %d for USB, %d for UART)\"",
"%",
"(",
"USB_BUFFER_SIZE",
",",
"UART_BUFFER_SIZE",
")",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"-p\"",
",",
"\"--port\"",
",",
"dest",
"=",
"\"port\"",
",",
"help",
"=",
"\"Set the serial port to use (default '%s')\"",
"%",
"default_port",
",",
"default",
"=",
"default_port",
")",
"parser",
".",
"add_argument",
"(",
"\"--rts\"",
",",
"dest",
"=",
"\"rts\"",
",",
"help",
"=",
"\"Set the RTS state (default '%s')\"",
"%",
"default_rts",
",",
"default",
"=",
"default_rts",
")",
"parser",
".",
"add_argument",
"(",
"\"--dtr\"",
",",
"dest",
"=",
"\"dtr\"",
",",
"help",
"=",
"\"Set the DTR state (default '%s')\"",
"%",
"default_dtr",
",",
"default",
"=",
"default_dtr",
")",
"parser",
".",
"add_argument",
"(",
"\"-u\"",
",",
"\"--user\"",
",",
"dest",
"=",
"\"user\"",
",",
"help",
"=",
"\"Set username to use (default '%s')\"",
"%",
"default_user",
",",
"default",
"=",
"default_user",
")",
"parser",
".",
"add_argument",
"(",
"\"-w\"",
",",
"\"--password\"",
",",
"dest",
"=",
"\"password\"",
",",
"help",
"=",
"\"Set password to use (default '%s')\"",
"%",
"default_password",
",",
"default",
"=",
"default_password",
")",
"parser",
".",
"add_argument",
"(",
"\"-e\"",
",",
"\"--editor\"",
",",
"dest",
"=",
"\"editor\"",
",",
"help",
"=",
"\"Set the editor to use (default '%s')\"",
"%",
"default_editor",
",",
"default",
"=",
"default_editor",
")",
"parser",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--file\"",
",",
"dest",
"=",
"\"filename\"",
",",
"help",
"=",
"\"Specifies a file of commands to process.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--debug\"",
",",
"dest",
"=",
"\"debug\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Enable debug features\"",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--nocolor\"",
",",
"dest",
"=",
"\"nocolor\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Turn off colorized output\"",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"\"-l\"",
",",
"\"--list\"",
",",
"dest",
"=",
"\"list\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Display serial ports\"",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--ascii\"",
",",
"dest",
"=",
"\"ascii_xfer\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"ASCII encode binary files for transfer\"",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"\"--wait\"",
",",
"dest",
"=",
"\"wait\"",
",",
"type",
"=",
"int",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Seconds to wait for serial port\"",
",",
"default",
"=",
"0",
")",
"parser",
".",
"add_argument",
"(",
"\"--timing\"",
",",
"dest",
"=",
"\"timing\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Print timing information about each command\"",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'-V'",
",",
"'--version'",
",",
"dest",
"=",
"'version'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Reports the version and exits.'",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"\"--quiet\"",
",",
"dest",
"=",
"\"quiet\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Turns off some output (useful for testing)\"",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"\"cmd\"",
",",
"nargs",
"=",
"argparse",
".",
"REMAINDER",
",",
"help",
"=",
"\"Optional command to execute\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"if",
"args",
".",
"buffer_size",
"is",
"not",
"None",
":",
"BUFFER_SIZE",
"=",
"args",
".",
"buffer_size",
"if",
"args",
".",
"debug",
":",
"print",
"(",
"\"Debug = %s\"",
"%",
"args",
".",
"debug",
")",
"print",
"(",
"\"Port = %s\"",
"%",
"args",
".",
"port",
")",
"print",
"(",
"\"Baud = %d\"",
"%",
"args",
".",
"baud",
")",
"print",
"(",
"\"User = %s\"",
"%",
"args",
".",
"user",
")",
"print",
"(",
"\"Password = %s\"",
"%",
"args",
".",
"password",
")",
"print",
"(",
"\"Wait = %d\"",
"%",
"args",
".",
"wait",
")",
"print",
"(",
"\"List = %d\"",
"%",
"args",
".",
"list",
")",
"print",
"(",
"\"nocolor = %d\"",
"%",
"args",
".",
"nocolor",
")",
"print",
"(",
"\"ascii = %d\"",
"%",
"args",
".",
"ascii_xfer",
")",
"print",
"(",
"\"Timing = %d\"",
"%",
"args",
".",
"timing",
")",
"print",
"(",
"\"Quiet = %d\"",
"%",
"args",
".",
"quiet",
")",
"print",
"(",
"\"BUFFER_SIZE = %d\"",
"%",
"BUFFER_SIZE",
")",
"print",
"(",
"\"Cmd = [%s]\"",
"%",
"', '",
".",
"join",
"(",
"args",
".",
"cmd",
")",
")",
"if",
"args",
".",
"version",
":",
"print",
"(",
"__version__",
")",
"return",
"global",
"DEBUG",
"DEBUG",
"=",
"args",
".",
"debug",
"global",
"QUIET",
"QUIET",
"=",
"args",
".",
"quiet",
"global",
"EDITOR",
"EDITOR",
"=",
"args",
".",
"editor",
"if",
"args",
".",
"nocolor",
":",
"global",
"DIR_COLOR",
",",
"PROMPT_COLOR",
",",
"PY_COLOR",
",",
"END_COLOR",
"DIR_COLOR",
"=",
"''",
"PROMPT_COLOR",
"=",
"''",
"PY_COLOR",
"=",
"''",
"END_COLOR",
"=",
"''",
"else",
":",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"# The readline that comes with OSX screws up colors in the prompt",
"global",
"FAKE_INPUT_PROMPT",
"FAKE_INPUT_PROMPT",
"=",
"True",
"global",
"ASCII_XFER",
"ASCII_XFER",
"=",
"args",
".",
"ascii_xfer",
"RTS",
"=",
"args",
".",
"rts",
"DTR",
"=",
"args",
".",
"dtr",
"if",
"args",
".",
"list",
":",
"listports",
"(",
")",
"return",
"if",
"args",
".",
"port",
":",
"ASCII_XFER",
"=",
"True",
"if",
"args",
".",
"buffer_size",
"is",
"None",
":",
"if",
"is_micropython_usb_port",
"(",
"args",
".",
"port",
")",
":",
"BUFFER_SIZE",
"=",
"USB_BUFFER_SIZE",
"else",
":",
"BUFFER_SIZE",
"=",
"UART_BUFFER_SIZE",
"QUIET",
"or",
"print",
"(",
"'Using buffer-size of'",
",",
"BUFFER_SIZE",
")",
"try",
":",
"connect",
"(",
"args",
".",
"port",
",",
"baud",
"=",
"args",
".",
"baud",
",",
"wait",
"=",
"args",
".",
"wait",
",",
"user",
"=",
"args",
".",
"user",
",",
"password",
"=",
"args",
".",
"password",
")",
"except",
"DeviceError",
"as",
"err",
":",
"print",
"(",
"err",
")",
"else",
":",
"autoscan",
"(",
")",
"autoconnect",
"(",
")",
"if",
"args",
".",
"filename",
":",
"with",
"open",
"(",
"args",
".",
"filename",
")",
"as",
"cmd_file",
":",
"shell",
"=",
"Shell",
"(",
"stdin",
"=",
"cmd_file",
",",
"filename",
"=",
"args",
".",
"filename",
",",
"timing",
"=",
"args",
".",
"timing",
")",
"shell",
".",
"cmdloop",
"(",
"''",
")",
"else",
":",
"cmd_line",
"=",
"' '",
".",
"join",
"(",
"args",
".",
"cmd",
")",
"if",
"cmd_line",
"==",
"''",
":",
"print",
"(",
"'Welcome to rshell.'",
",",
"EXIT_STR",
")",
"if",
"num_devices",
"(",
")",
"==",
"0",
":",
"print",
"(",
"''",
")",
"print",
"(",
"'No MicroPython boards connected - use the connect command to add one'",
")",
"print",
"(",
"''",
")",
"shell",
"=",
"Shell",
"(",
"timing",
"=",
"args",
".",
"timing",
")",
"try",
":",
"shell",
".",
"cmdloop",
"(",
"cmd_line",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"''",
")"
] | 27.987124 | 19.012876 |
def substitution_set(string, indexes):
"""
for a string, return a set of all possible substitutions
"""
strlen = len(string)
return {mutate_string(string, x) for x in indexes if valid_substitution(strlen, x)} | [
"def",
"substitution_set",
"(",
"string",
",",
"indexes",
")",
":",
"strlen",
"=",
"len",
"(",
"string",
")",
"return",
"{",
"mutate_string",
"(",
"string",
",",
"x",
")",
"for",
"x",
"in",
"indexes",
"if",
"valid_substitution",
"(",
"strlen",
",",
"x",
")",
"}"
] | 37.166667 | 14.166667 |
def is_valid_int_param(param):
"""Verifica se o parâmetro é um valor inteiro válido.
:param param: Valor para ser validado.
:return: True se o parâmetro tem um valor inteiro válido, ou False, caso contrário.
"""
if param is None:
return False
try:
param = int(param)
if param < 0:
return False
except (TypeError, ValueError):
return False
return True | [
"def",
"is_valid_int_param",
"(",
"param",
")",
":",
"if",
"param",
"is",
"None",
":",
"return",
"False",
"try",
":",
"param",
"=",
"int",
"(",
"param",
")",
"if",
"param",
"<",
"0",
":",
"return",
"False",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"False",
"return",
"True"
] | 25.8125 | 19.3125 |
def is_closing(self) -> bool:
"""Return ``True`` if this connection is closing.
The connection is considered closing if either side has
initiated its closing handshake or if the stream has been
shut down uncleanly.
"""
return self.stream.closed() or self.client_terminated or self.server_terminated | [
"def",
"is_closing",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"self",
".",
"stream",
".",
"closed",
"(",
")",
"or",
"self",
".",
"client_terminated",
"or",
"self",
".",
"server_terminated"
] | 42.5 | 19.75 |
def _inv_key(list_keys, valid_keys):
"""
-----
Brief
-----
A sub-function of _filter_keywords function.
-----------
Description
-----------
Function used for identification when a list of keywords contains invalid keywords not present
in the valid list.
----------
Parameters
----------
list_keys : list
List of keywords that must be verified, i.e., all the inputs needs to be inside valid_keys
in order to a True boolean be returned.
valid_keys : list
List of valid keywords.
Returns
-------
out : boolean, list
Boolean indicating if all the inserted keywords are valid. If true a list with invalid
keywords will be returned.
"""
inv_keys = []
bool_out = True
for i in list_keys:
if i not in valid_keys:
bool_out = False
inv_keys.append(i)
return bool_out, inv_keys | [
"def",
"_inv_key",
"(",
"list_keys",
",",
"valid_keys",
")",
":",
"inv_keys",
"=",
"[",
"]",
"bool_out",
"=",
"True",
"for",
"i",
"in",
"list_keys",
":",
"if",
"i",
"not",
"in",
"valid_keys",
":",
"bool_out",
"=",
"False",
"inv_keys",
".",
"append",
"(",
"i",
")",
"return",
"bool_out",
",",
"inv_keys"
] | 23.657895 | 24.342105 |
def destandardize_variables(self, tv, blin, bvar, errBeta, nonmissing):
"""Destandardize betas and other components."""
return self.test_variables.destandardize(tv, blin, bvar, errBeta, nonmissing) | [
"def",
"destandardize_variables",
"(",
"self",
",",
"tv",
",",
"blin",
",",
"bvar",
",",
"errBeta",
",",
"nonmissing",
")",
":",
"return",
"self",
".",
"test_variables",
".",
"destandardize",
"(",
"tv",
",",
"blin",
",",
"bvar",
",",
"errBeta",
",",
"nonmissing",
")"
] | 70.333333 | 25.333333 |
def get_token_func():
"""
This function makes a call to AAD to fetch an OAuth token
:return: the OAuth token and the interval to wait before refreshing it
"""
print("{}: token updater was triggered".format(datetime.datetime.now()))
# in this example, the OAuth token is obtained using the ADAL library
# however, the user can use any preferred method
context = adal.AuthenticationContext(
str.format("https://login.microsoftonline.com/{}", settings.ACTIVE_DIRECTORY_TENANT_ID),
api_version=None, validate_authority=True)
oauth_token = context.acquire_token_with_client_credentials(
"https://storage.azure.com",
settings.ACTIVE_DIRECTORY_APPLICATION_ID,
settings.ACTIVE_DIRECTORY_APPLICATION_SECRET)
# return the token itself and the interval to wait before this function should be called again
# generally oauth_token['expiresIn'] - 180 is a good interval to give, as it tells the caller to
# refresh the token 3 minutes before it expires, so here we are assuming that the token expiration
# is at least longer than 3 minutes, the user should adjust it according to their AAD policy
return oauth_token['accessToken'], oauth_token['expiresIn'] - 180 | [
"def",
"get_token_func",
"(",
")",
":",
"print",
"(",
"\"{}: token updater was triggered\"",
".",
"format",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
")",
"# in this example, the OAuth token is obtained using the ADAL library",
"# however, the user can use any preferred method",
"context",
"=",
"adal",
".",
"AuthenticationContext",
"(",
"str",
".",
"format",
"(",
"\"https://login.microsoftonline.com/{}\"",
",",
"settings",
".",
"ACTIVE_DIRECTORY_TENANT_ID",
")",
",",
"api_version",
"=",
"None",
",",
"validate_authority",
"=",
"True",
")",
"oauth_token",
"=",
"context",
".",
"acquire_token_with_client_credentials",
"(",
"\"https://storage.azure.com\"",
",",
"settings",
".",
"ACTIVE_DIRECTORY_APPLICATION_ID",
",",
"settings",
".",
"ACTIVE_DIRECTORY_APPLICATION_SECRET",
")",
"# return the token itself and the interval to wait before this function should be called again",
"# generally oauth_token['expiresIn'] - 180 is a good interval to give, as it tells the caller to",
"# refresh the token 3 minutes before it expires, so here we are assuming that the token expiration",
"# is at least longer than 3 minutes, the user should adjust it according to their AAD policy",
"return",
"oauth_token",
"[",
"'accessToken'",
"]",
",",
"oauth_token",
"[",
"'expiresIn'",
"]",
"-",
"180"
] | 56.565217 | 31.173913 |
def rot_consts(geom, masses, units=_EURC.INV_INERTIA, on_tol=_DEF.ORTHONORM_TOL):
"""Rotational constants for a given molecular system.
Calculates the rotational constants for the provided system with numerical
value given in the units provided in `units`. The orthnormality tolerance
`on_tol` is required in order to be passed through to the
:func:`principals` function.
If the system is linear or a single atom, the effectively-zero principal
moments of inertia will be assigned values of
:data:`opan.const.PRM.ZERO_MOMENT_TOL`
before transformation into the appropriate rotational constant units.
The moments of inertia are always sorted in increasing order as
:math:`0 \\leq I_A \\leq I_B \\leq I_C`; the rotational constants
calculated from these will thus always be in **decreasing** order
as :math:`B_A \\geq B_B \\geq B_C`, retaining the
ordering and association with the three principal ``axes[:,i]`` generated
by :func:`principals`.
Parameters
----------
geom
length-3N |npfloat_| --
Coordinates of the atoms
masses
length-N OR length-3N |npfloat_| --
Atomic masses of the atoms. Length-3N option is to allow calculation of
a per-coordinate perturbed value.
units
:class:`~opan.const.EnumUnitsRotConst`, optional --
Enum value indicating the desired units of the output rotational
constants. Default is :data:`~opan.const.EnumUnitsRotConst.INV_INERTIA`
:math:`\\left(1\\over \\mathrm{uB^2}\\right)`
on_tol
|npfloat_|, optional --
Tolerance for deviation from unity/zero for principal axis dot
products, within which axes are considered orthonormal. Default is
:data:`opan.const.DEF.ORTHONORM_TOL`
Returns
-------
rc
length-3 |npfloat_| --
Vector of rotational constants in the indicated units
"""
# Imports
import numpy as np
from ..const import EnumTopType as ETT, EnumUnitsRotConst as EURC, PRM, PHYS
# Ensure units are valid
if not units in EURC:
raise ValueError("'{0}' is not a valid units value".format(units))
## end if
# Retrieve the moments, axes and top type. Geom and masses are proofed
# internally in this call.
mom, ax, top = principals(geom, masses, on_tol)
# Check for special cases
if top == ETT.ATOM:
# All moments are zero; set to zero-moment threshold
mom = np.repeat(PRM.ZERO_MOMENT_TOL, 3)
elif top == ETT.LINEAR:
# First moment is zero; set to zero-moment threshold
mom[0] = PRM.ZERO_MOMENT_TOL
## end if
# Calculate the values in the indicated units
if units == EURC.INV_INERTIA: # 1/(amu*B^2)
rc = 1.0 / (2.0 * mom)
elif units == EURC.ANGFREQ_ATOMIC: # 1/Ta
rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU)
elif units == EURC.ANGFREQ_SECS: # 1/s
rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA
elif units == EURC.CYCFREQ_ATOMIC: # cyc/Ta
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU)
elif units == EURC.CYCFREQ_HZ: # cyc/s
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \
PHYS.SEC_PER_TA
elif units == EURC.CYCFREQ_MHZ: # Mcyc/s
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \
PHYS.SEC_PER_TA / 1.0e6
elif units == EURC.WAVENUM_ATOMIC: # cyc/B
rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \
(8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED)
elif units == EURC.WAVENUM_CM: # cyc/cm
rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \
(8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED * PHYS.ANG_PER_BOHR) * 1.0e8
else: # pragma: no cover -- Valid units; not implemented
raise NotImplementedError("Units conversion not yet implemented.")
## end if
# Return the result
return rc | [
"def",
"rot_consts",
"(",
"geom",
",",
"masses",
",",
"units",
"=",
"_EURC",
".",
"INV_INERTIA",
",",
"on_tol",
"=",
"_DEF",
".",
"ORTHONORM_TOL",
")",
":",
"# Imports",
"import",
"numpy",
"as",
"np",
"from",
".",
".",
"const",
"import",
"EnumTopType",
"as",
"ETT",
",",
"EnumUnitsRotConst",
"as",
"EURC",
",",
"PRM",
",",
"PHYS",
"# Ensure units are valid",
"if",
"not",
"units",
"in",
"EURC",
":",
"raise",
"ValueError",
"(",
"\"'{0}' is not a valid units value\"",
".",
"format",
"(",
"units",
")",
")",
"## end if",
"# Retrieve the moments, axes and top type. Geom and masses are proofed",
"# internally in this call.",
"mom",
",",
"ax",
",",
"top",
"=",
"principals",
"(",
"geom",
",",
"masses",
",",
"on_tol",
")",
"# Check for special cases",
"if",
"top",
"==",
"ETT",
".",
"ATOM",
":",
"# All moments are zero; set to zero-moment threshold",
"mom",
"=",
"np",
".",
"repeat",
"(",
"PRM",
".",
"ZERO_MOMENT_TOL",
",",
"3",
")",
"elif",
"top",
"==",
"ETT",
".",
"LINEAR",
":",
"# First moment is zero; set to zero-moment threshold",
"mom",
"[",
"0",
"]",
"=",
"PRM",
".",
"ZERO_MOMENT_TOL",
"## end if",
"# Calculate the values in the indicated units",
"if",
"units",
"==",
"EURC",
".",
"INV_INERTIA",
":",
"# 1/(amu*B^2)",
"rc",
"=",
"1.0",
"/",
"(",
"2.0",
"*",
"mom",
")",
"elif",
"units",
"==",
"EURC",
".",
"ANGFREQ_ATOMIC",
":",
"# 1/Ta",
"rc",
"=",
"PHYS",
".",
"PLANCK_BAR",
"/",
"(",
"2.0",
"*",
"mom",
"*",
"PHYS",
".",
"ME_PER_AMU",
")",
"elif",
"units",
"==",
"EURC",
".",
"ANGFREQ_SECS",
":",
"# 1/s",
"rc",
"=",
"PHYS",
".",
"PLANCK_BAR",
"/",
"(",
"2.0",
"*",
"mom",
"*",
"PHYS",
".",
"ME_PER_AMU",
")",
"/",
"PHYS",
".",
"SEC_PER_TA",
"elif",
"units",
"==",
"EURC",
".",
"CYCFREQ_ATOMIC",
":",
"# cyc/Ta",
"rc",
"=",
"PHYS",
".",
"PLANCK_BAR",
"/",
"(",
"4.0",
"*",
"np",
".",
"pi",
"*",
"mom",
"*",
"PHYS",
".",
"ME_PER_AMU",
")",
"elif",
"units",
"==",
"EURC",
".",
"CYCFREQ_HZ",
":",
"# cyc/s",
"rc",
"=",
"PHYS",
".",
"PLANCK_BAR",
"/",
"(",
"4.0",
"*",
"np",
".",
"pi",
"*",
"mom",
"*",
"PHYS",
".",
"ME_PER_AMU",
")",
"/",
"PHYS",
".",
"SEC_PER_TA",
"elif",
"units",
"==",
"EURC",
".",
"CYCFREQ_MHZ",
":",
"# Mcyc/s",
"rc",
"=",
"PHYS",
".",
"PLANCK_BAR",
"/",
"(",
"4.0",
"*",
"np",
".",
"pi",
"*",
"mom",
"*",
"PHYS",
".",
"ME_PER_AMU",
")",
"/",
"PHYS",
".",
"SEC_PER_TA",
"/",
"1.0e6",
"elif",
"units",
"==",
"EURC",
".",
"WAVENUM_ATOMIC",
":",
"# cyc/B",
"rc",
"=",
"PHYS",
".",
"PLANCK",
"/",
"(",
"mom",
"*",
"PHYS",
".",
"ME_PER_AMU",
")",
"/",
"(",
"8.0",
"*",
"np",
".",
"pi",
"**",
"2.0",
"*",
"PHYS",
".",
"LIGHT_SPEED",
")",
"elif",
"units",
"==",
"EURC",
".",
"WAVENUM_CM",
":",
"# cyc/cm",
"rc",
"=",
"PHYS",
".",
"PLANCK",
"/",
"(",
"mom",
"*",
"PHYS",
".",
"ME_PER_AMU",
")",
"/",
"(",
"8.0",
"*",
"np",
".",
"pi",
"**",
"2.0",
"*",
"PHYS",
".",
"LIGHT_SPEED",
"*",
"PHYS",
".",
"ANG_PER_BOHR",
")",
"*",
"1.0e8",
"else",
":",
"# pragma: no cover -- Valid units; not implemented",
"raise",
"NotImplementedError",
"(",
"\"Units conversion not yet implemented.\"",
")",
"## end if",
"# Return the result",
"return",
"rc"
] | 40.14 | 24 |
def __get_event(self, block=True, timeout=1):
"""
Retrieves an event. If self._exceeding_event is not None, it'll be
returned. Otherwise, an event is dequeued from the event buffer. If
The event which was retrieved is bigger than the permitted batch size,
it'll be omitted, and the next event in the event buffer is returned
"""
while True:
if self._exceeding_event: # An event was omitted from last batch
event = self._exceeding_event
self._exceeding_event = None
else: # No omitted event, get an event from the queue
event = self._event_queue.get(block, timeout)
event_size = len(event)
# If the event is bigger than the permitted batch size, ignore it
# The ( - 2 ) accounts for the parentheses enclosing the batch
if event_size - 2 >= self._batch_max_size:
self._notify(logging.WARNING,
consts.LOG_MSG_OMITTED_OVERSIZED_EVENT
% event_size)
else: # Event is of valid size, return it
return event | [
"def",
"__get_event",
"(",
"self",
",",
"block",
"=",
"True",
",",
"timeout",
"=",
"1",
")",
":",
"while",
"True",
":",
"if",
"self",
".",
"_exceeding_event",
":",
"# An event was omitted from last batch",
"event",
"=",
"self",
".",
"_exceeding_event",
"self",
".",
"_exceeding_event",
"=",
"None",
"else",
":",
"# No omitted event, get an event from the queue",
"event",
"=",
"self",
".",
"_event_queue",
".",
"get",
"(",
"block",
",",
"timeout",
")",
"event_size",
"=",
"len",
"(",
"event",
")",
"# If the event is bigger than the permitted batch size, ignore it",
"# The ( - 2 ) accounts for the parentheses enclosing the batch",
"if",
"event_size",
"-",
"2",
">=",
"self",
".",
"_batch_max_size",
":",
"self",
".",
"_notify",
"(",
"logging",
".",
"WARNING",
",",
"consts",
".",
"LOG_MSG_OMITTED_OVERSIZED_EVENT",
"%",
"event_size",
")",
"else",
":",
"# Event is of valid size, return it",
"return",
"event"
] | 48.25 | 20.5 |
def isvalid(path, access=None, extensions=None, filetype=None, minsize=None):
"""Check whether file meets access, extension, size, and type criteria."""
return ((access is None or os.access(path, access)) and
(extensions is None or checkext(path, extensions)) and
(((filetype == 'all' and os.path.exists(path)) or
(filetype == 'dir' and os.path.isdir(path)) or
(filetype == 'file' and os.path.isfile(path))) or
filetype is None) and
(minsize is None or (not os.path.isfile(path) or
os.path.getsize(path) > minsize))) | [
"def",
"isvalid",
"(",
"path",
",",
"access",
"=",
"None",
",",
"extensions",
"=",
"None",
",",
"filetype",
"=",
"None",
",",
"minsize",
"=",
"None",
")",
":",
"return",
"(",
"(",
"access",
"is",
"None",
"or",
"os",
".",
"access",
"(",
"path",
",",
"access",
")",
")",
"and",
"(",
"extensions",
"is",
"None",
"or",
"checkext",
"(",
"path",
",",
"extensions",
")",
")",
"and",
"(",
"(",
"(",
"filetype",
"==",
"'all'",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
")",
"or",
"(",
"filetype",
"==",
"'dir'",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
")",
"or",
"(",
"filetype",
"==",
"'file'",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
")",
")",
"or",
"filetype",
"is",
"None",
")",
"and",
"(",
"minsize",
"is",
"None",
"or",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
"or",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
">",
"minsize",
")",
")",
")"
] | 62.5 | 19.9 |
def unix_time(dt=None, as_int=False):
"""Generate a unix style timestamp (in seconds)"""
if dt is None:
dt = datetime.datetime.utcnow()
if type(dt) is datetime.date:
dt = date_to_datetime(dt)
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
if as_int:
return int(delta.total_seconds())
return delta.total_seconds() | [
"def",
"unix_time",
"(",
"dt",
"=",
"None",
",",
"as_int",
"=",
"False",
")",
":",
"if",
"dt",
"is",
"None",
":",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"type",
"(",
"dt",
")",
"is",
"datetime",
".",
"date",
":",
"dt",
"=",
"date_to_datetime",
"(",
"dt",
")",
"epoch",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"0",
")",
"delta",
"=",
"dt",
"-",
"epoch",
"if",
"as_int",
":",
"return",
"int",
"(",
"delta",
".",
"total_seconds",
"(",
")",
")",
"return",
"delta",
".",
"total_seconds",
"(",
")"
] | 25.066667 | 17.2 |
def gp_size(self, _gp_size):
"""Store the new start address attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.set_gp_size(self._ptr, _gp_size) | [
"def",
"gp_size",
"(",
"self",
",",
"_gp_size",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"set_gp_size",
"(",
"self",
".",
"_ptr",
",",
"_gp_size",
")"
] | 29.444444 | 16.222222 |
def _get_bottom_line_coordinates(self):
"""Returns start and stop coordinates of bottom line"""
rect_x, rect_y, rect_width, rect_height = self.rect
start_point = rect_x, rect_y + rect_height
end_point = rect_x + rect_width, rect_y + rect_height
return start_point, end_point | [
"def",
"_get_bottom_line_coordinates",
"(",
"self",
")",
":",
"rect_x",
",",
"rect_y",
",",
"rect_width",
",",
"rect_height",
"=",
"self",
".",
"rect",
"start_point",
"=",
"rect_x",
",",
"rect_y",
"+",
"rect_height",
"end_point",
"=",
"rect_x",
"+",
"rect_width",
",",
"rect_y",
"+",
"rect_height",
"return",
"start_point",
",",
"end_point"
] | 34.333333 | 19.333333 |
def calc_qdgz_v1(self):
"""Aggregate the amount of total direct flow released by all HRUs.
Required control parameters:
|Lnk|
|NHRU|
|FHRU|
Required flux sequence:
|QDB|
|NKor|
|EvI|
Calculated flux sequence:
|QDGZ|
Basic equation:
:math:`QDGZ = \\Sigma(FHRU \\cdot QDB) +
\\Sigma(FHRU \\cdot (NKor_{FLUSS}-EvI_{FLUSS}))`
Examples:
The first example shows that |QDGZ| is the area weighted sum of
|QDB| from "land type" HRUs like arable land (|ACKER|) and sealed
surfaces (|VERS|) as well as of |NKor|-|EvI| from water areas of
type |FLUSS|. Water areas of type |WASSER| and |SEE| have no
impact on |QDGZ|:
>>> from hydpy.models.lland import *
>>> parameterstep()
>>> nhru(5)
>>> lnk(ACKER, VERS, WASSER, SEE, FLUSS)
>>> fhru(0.1, 0.2, 0.1, 0.2, 0.4)
>>> fluxes.qdb = 2., 4.0, 300.0, 300.0, 300.0
>>> fluxes.nkor = 200.0, 200.0, 200.0, 200.0, 20.0
>>> fluxes.evi = 100.0, 100.0, 100.0, 100.0, 10.0
>>> model.calc_qdgz_v1()
>>> fluxes.qdgz
qdgz(5.0)
The second example shows that large evaporation values above a
HRU of type |FLUSS| can result in negative values of |QDGZ|:
>>> fluxes.evi[4] = 30
>>> model.calc_qdgz_v1()
>>> fluxes.qdgz
qdgz(-3.0)
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
flu.qdgz = 0.
for k in range(con.nhru):
if con.lnk[k] == FLUSS:
flu.qdgz += con.fhru[k]*(flu.nkor[k]-flu.evi[k])
elif con.lnk[k] not in (WASSER, SEE):
flu.qdgz += con.fhru[k]*flu.qdb[k] | [
"def",
"calc_qdgz_v1",
"(",
"self",
")",
":",
"con",
"=",
"self",
".",
"parameters",
".",
"control",
".",
"fastaccess",
"flu",
"=",
"self",
".",
"sequences",
".",
"fluxes",
".",
"fastaccess",
"flu",
".",
"qdgz",
"=",
"0.",
"for",
"k",
"in",
"range",
"(",
"con",
".",
"nhru",
")",
":",
"if",
"con",
".",
"lnk",
"[",
"k",
"]",
"==",
"FLUSS",
":",
"flu",
".",
"qdgz",
"+=",
"con",
".",
"fhru",
"[",
"k",
"]",
"*",
"(",
"flu",
".",
"nkor",
"[",
"k",
"]",
"-",
"flu",
".",
"evi",
"[",
"k",
"]",
")",
"elif",
"con",
".",
"lnk",
"[",
"k",
"]",
"not",
"in",
"(",
"WASSER",
",",
"SEE",
")",
":",
"flu",
".",
"qdgz",
"+=",
"con",
".",
"fhru",
"[",
"k",
"]",
"*",
"flu",
".",
"qdb",
"[",
"k",
"]"
] | 30.125 | 20.642857 |
def _factory(importname, base_class_type, path=None, *args, **kargs):
''' Load a module of a given base class type
Parameter
--------
importname: string
Name of the module, etc. converter
base_class_type: class type
E.g converter
path: Absoulte path of the module
Neede for extensions. If not given module is in online_monitor
package
*args, **kargs:
Arguments to pass to the object init
Return
------
Object of given base class type
'''
def is_base_class(item):
return isclass(item) and item.__module__ == importname
if path:
# Needed to find the module in forked processes; if you know a better
# way tell me!
sys.path.append(path)
# Absolute full path of python module
absolute_path = os.path.join(path, importname) + '.py'
module = imp.load_source(importname, absolute_path)
else:
module = import_module(importname)
# Get the defined base class in the loaded module to be name indendend
clsmembers = getmembers(module, is_base_class)
if not len(clsmembers):
raise ValueError('Found no matching class in %s.' % importname)
else:
cls = clsmembers[0][1]
return cls(*args, **kargs) | [
"def",
"_factory",
"(",
"importname",
",",
"base_class_type",
",",
"path",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
":",
"def",
"is_base_class",
"(",
"item",
")",
":",
"return",
"isclass",
"(",
"item",
")",
"and",
"item",
".",
"__module__",
"==",
"importname",
"if",
"path",
":",
"# Needed to find the module in forked processes; if you know a better",
"# way tell me!",
"sys",
".",
"path",
".",
"append",
"(",
"path",
")",
"# Absolute full path of python module",
"absolute_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"importname",
")",
"+",
"'.py'",
"module",
"=",
"imp",
".",
"load_source",
"(",
"importname",
",",
"absolute_path",
")",
"else",
":",
"module",
"=",
"import_module",
"(",
"importname",
")",
"# Get the defined base class in the loaded module to be name indendend",
"clsmembers",
"=",
"getmembers",
"(",
"module",
",",
"is_base_class",
")",
"if",
"not",
"len",
"(",
"clsmembers",
")",
":",
"raise",
"ValueError",
"(",
"'Found no matching class in %s.'",
"%",
"importname",
")",
"else",
":",
"cls",
"=",
"clsmembers",
"[",
"0",
"]",
"[",
"1",
"]",
"return",
"cls",
"(",
"*",
"args",
",",
"*",
"*",
"kargs",
")"
] | 32.475 | 20.925 |
def deserialize(self, value, **kwargs):
"""Deserialize instance from JSON value
If a deserializer is registered, that is used. Otherwise, if the
instance_class is a HasProperties subclass, an instance can be
deserialized from a dictionary.
"""
kwargs.update({'trusted': kwargs.get('trusted', False)})
if self.deserializer is not None:
return self.deserializer(value, **kwargs)
if value is None:
return None
if isinstance(value, string_types):
return value
if issubclass(self.instance_class, base.HasProperties):
return self.instance_class.deserialize(value, **kwargs)
return self.from_json(value, **kwargs) | [
"def",
"deserialize",
"(",
"self",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"{",
"'trusted'",
":",
"kwargs",
".",
"get",
"(",
"'trusted'",
",",
"False",
")",
"}",
")",
"if",
"self",
".",
"deserializer",
"is",
"not",
"None",
":",
"return",
"self",
".",
"deserializer",
"(",
"value",
",",
"*",
"*",
"kwargs",
")",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
":",
"return",
"value",
"if",
"issubclass",
"(",
"self",
".",
"instance_class",
",",
"base",
".",
"HasProperties",
")",
":",
"return",
"self",
".",
"instance_class",
".",
"deserialize",
"(",
"value",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"from_json",
"(",
"value",
",",
"*",
"*",
"kwargs",
")"
] | 42.764706 | 14.647059 |
def set_secure_boot_mode(self, secure_boot_enable):
"""Enable/Disable secure boot on the server.
Resetting the server post updating this settings is needed
from the caller side to make this into effect.
:param secure_boot_enable: True, if secure boot needs to be
enabled for next boot, else False.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
if self._is_boot_mode_uefi():
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
try:
sushy_system.secure_boot.enable_secure_boot(secure_boot_enable)
except exception.InvalidInputError as e:
msg = (self._('Invalid input. Error %(error)s')
% {'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
except sushy.exceptions.SushyError as e:
msg = (self._('The Redfish controller failed to set secure '
'boot settings on the server. Error: %(error)s')
% {'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
else:
msg = (self._('System is not in UEFI boot mode. "SecureBoot" '
'related resources cannot be changed.'))
raise exception.IloCommandNotSupportedInBiosError(msg) | [
"def",
"set_secure_boot_mode",
"(",
"self",
",",
"secure_boot_enable",
")",
":",
"if",
"self",
".",
"_is_boot_mode_uefi",
"(",
")",
":",
"sushy_system",
"=",
"self",
".",
"_get_sushy_system",
"(",
"PROLIANT_SYSTEM_ID",
")",
"try",
":",
"sushy_system",
".",
"secure_boot",
".",
"enable_secure_boot",
"(",
"secure_boot_enable",
")",
"except",
"exception",
".",
"InvalidInputError",
"as",
"e",
":",
"msg",
"=",
"(",
"self",
".",
"_",
"(",
"'Invalid input. Error %(error)s'",
")",
"%",
"{",
"'error'",
":",
"str",
"(",
"e",
")",
"}",
")",
"LOG",
".",
"debug",
"(",
"msg",
")",
"raise",
"exception",
".",
"IloError",
"(",
"msg",
")",
"except",
"sushy",
".",
"exceptions",
".",
"SushyError",
"as",
"e",
":",
"msg",
"=",
"(",
"self",
".",
"_",
"(",
"'The Redfish controller failed to set secure '",
"'boot settings on the server. Error: %(error)s'",
")",
"%",
"{",
"'error'",
":",
"str",
"(",
"e",
")",
"}",
")",
"LOG",
".",
"debug",
"(",
"msg",
")",
"raise",
"exception",
".",
"IloError",
"(",
"msg",
")",
"else",
":",
"msg",
"=",
"(",
"self",
".",
"_",
"(",
"'System is not in UEFI boot mode. \"SecureBoot\" '",
"'related resources cannot be changed.'",
")",
")",
"raise",
"exception",
".",
"IloCommandNotSupportedInBiosError",
"(",
"msg",
")"
] | 49.433333 | 18.2 |
def plot_rolling_returns(returns,
factor_returns=None,
live_start_date=None,
logy=False,
cone_std=None,
legend_loc='best',
volatility_match=False,
cone_function=timeseries.forecast_cone_bootstrap,
ax=None, **kwargs):
"""
Plots cumulative rolling returns versus some benchmarks'.
Backtest returns are in green, and out-of-sample (live trading)
returns are in red.
Additionally, a non-parametric cone plot may be added to the
out-of-sample returns region.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
live_start_date : datetime, optional
The date when the strategy began live trading, after
its backtest period. This date should be normalized.
logy : bool, optional
Whether to log-scale the y-axis.
cone_std : float, or tuple, optional
If float, The standard deviation to use for the cone plots.
If tuple, Tuple of standard deviation values to use for the cone plots
- See timeseries.forecast_cone_bounds for more details.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
volatility_match : bool, optional
Whether to normalize the volatility of the returns to those of the
benchmark returns. This helps compare strategies with different
volatilities. Requires passing of benchmark_rets.
cone_function : function, optional
Function to use when generating forecast probability cone.
The function signiture must follow the form:
def cone(in_sample_returns (pd.Series),
days_to_project_forward (int),
cone_std= (float, or tuple),
starting_value= (int, or float))
See timeseries.forecast_cone_bootstrap for an example.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
ax.set_xlabel('')
ax.set_ylabel('Cumulative returns')
ax.set_yscale('log' if logy else 'linear')
if volatility_match and factor_returns is None:
raise ValueError('volatility_match requires passing of '
'factor_returns.')
elif volatility_match and factor_returns is not None:
bmark_vol = factor_returns.loc[returns.index].std()
returns = (returns / returns.std()) * bmark_vol
cum_rets = ep.cum_returns(returns, 1.0)
y_axis_formatter = FuncFormatter(utils.two_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
if factor_returns is not None:
cum_factor_returns = ep.cum_returns(
factor_returns[cum_rets.index], 1.0)
cum_factor_returns.plot(lw=2, color='gray',
label=factor_returns.name, alpha=0.60,
ax=ax, **kwargs)
if live_start_date is not None:
live_start_date = ep.utils.get_utc_timestamp(live_start_date)
is_cum_returns = cum_rets.loc[cum_rets.index < live_start_date]
oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date]
else:
is_cum_returns = cum_rets
oos_cum_returns = pd.Series([])
is_cum_returns.plot(lw=3, color='forestgreen', alpha=0.6,
label='Backtest', ax=ax, **kwargs)
if len(oos_cum_returns) > 0:
oos_cum_returns.plot(lw=4, color='red', alpha=0.6,
label='Live', ax=ax, **kwargs)
if cone_std is not None:
if isinstance(cone_std, (float, int)):
cone_std = [cone_std]
is_returns = returns.loc[returns.index < live_start_date]
cone_bounds = cone_function(
is_returns,
len(oos_cum_returns),
cone_std=cone_std,
starting_value=is_cum_returns[-1])
cone_bounds = cone_bounds.set_index(oos_cum_returns.index)
for std in cone_std:
ax.fill_between(cone_bounds.index,
cone_bounds[float(std)],
cone_bounds[float(-std)],
color='steelblue', alpha=0.5)
if legend_loc is not None:
ax.legend(loc=legend_loc, frameon=True, framealpha=0.5)
ax.axhline(1.0, linestyle='--', color='black', lw=2)
return ax | [
"def",
"plot_rolling_returns",
"(",
"returns",
",",
"factor_returns",
"=",
"None",
",",
"live_start_date",
"=",
"None",
",",
"logy",
"=",
"False",
",",
"cone_std",
"=",
"None",
",",
"legend_loc",
"=",
"'best'",
",",
"volatility_match",
"=",
"False",
",",
"cone_function",
"=",
"timeseries",
".",
"forecast_cone_bootstrap",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"ax",
".",
"set_xlabel",
"(",
"''",
")",
"ax",
".",
"set_ylabel",
"(",
"'Cumulative returns'",
")",
"ax",
".",
"set_yscale",
"(",
"'log'",
"if",
"logy",
"else",
"'linear'",
")",
"if",
"volatility_match",
"and",
"factor_returns",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'volatility_match requires passing of '",
"'factor_returns.'",
")",
"elif",
"volatility_match",
"and",
"factor_returns",
"is",
"not",
"None",
":",
"bmark_vol",
"=",
"factor_returns",
".",
"loc",
"[",
"returns",
".",
"index",
"]",
".",
"std",
"(",
")",
"returns",
"=",
"(",
"returns",
"/",
"returns",
".",
"std",
"(",
")",
")",
"*",
"bmark_vol",
"cum_rets",
"=",
"ep",
".",
"cum_returns",
"(",
"returns",
",",
"1.0",
")",
"y_axis_formatter",
"=",
"FuncFormatter",
"(",
"utils",
".",
"two_dec_places",
")",
"ax",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"FuncFormatter",
"(",
"y_axis_formatter",
")",
")",
"if",
"factor_returns",
"is",
"not",
"None",
":",
"cum_factor_returns",
"=",
"ep",
".",
"cum_returns",
"(",
"factor_returns",
"[",
"cum_rets",
".",
"index",
"]",
",",
"1.0",
")",
"cum_factor_returns",
".",
"plot",
"(",
"lw",
"=",
"2",
",",
"color",
"=",
"'gray'",
",",
"label",
"=",
"factor_returns",
".",
"name",
",",
"alpha",
"=",
"0.60",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")",
"if",
"live_start_date",
"is",
"not",
"None",
":",
"live_start_date",
"=",
"ep",
".",
"utils",
".",
"get_utc_timestamp",
"(",
"live_start_date",
")",
"is_cum_returns",
"=",
"cum_rets",
".",
"loc",
"[",
"cum_rets",
".",
"index",
"<",
"live_start_date",
"]",
"oos_cum_returns",
"=",
"cum_rets",
".",
"loc",
"[",
"cum_rets",
".",
"index",
">=",
"live_start_date",
"]",
"else",
":",
"is_cum_returns",
"=",
"cum_rets",
"oos_cum_returns",
"=",
"pd",
".",
"Series",
"(",
"[",
"]",
")",
"is_cum_returns",
".",
"plot",
"(",
"lw",
"=",
"3",
",",
"color",
"=",
"'forestgreen'",
",",
"alpha",
"=",
"0.6",
",",
"label",
"=",
"'Backtest'",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"oos_cum_returns",
")",
">",
"0",
":",
"oos_cum_returns",
".",
"plot",
"(",
"lw",
"=",
"4",
",",
"color",
"=",
"'red'",
",",
"alpha",
"=",
"0.6",
",",
"label",
"=",
"'Live'",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")",
"if",
"cone_std",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"cone_std",
",",
"(",
"float",
",",
"int",
")",
")",
":",
"cone_std",
"=",
"[",
"cone_std",
"]",
"is_returns",
"=",
"returns",
".",
"loc",
"[",
"returns",
".",
"index",
"<",
"live_start_date",
"]",
"cone_bounds",
"=",
"cone_function",
"(",
"is_returns",
",",
"len",
"(",
"oos_cum_returns",
")",
",",
"cone_std",
"=",
"cone_std",
",",
"starting_value",
"=",
"is_cum_returns",
"[",
"-",
"1",
"]",
")",
"cone_bounds",
"=",
"cone_bounds",
".",
"set_index",
"(",
"oos_cum_returns",
".",
"index",
")",
"for",
"std",
"in",
"cone_std",
":",
"ax",
".",
"fill_between",
"(",
"cone_bounds",
".",
"index",
",",
"cone_bounds",
"[",
"float",
"(",
"std",
")",
"]",
",",
"cone_bounds",
"[",
"float",
"(",
"-",
"std",
")",
"]",
",",
"color",
"=",
"'steelblue'",
",",
"alpha",
"=",
"0.5",
")",
"if",
"legend_loc",
"is",
"not",
"None",
":",
"ax",
".",
"legend",
"(",
"loc",
"=",
"legend_loc",
",",
"frameon",
"=",
"True",
",",
"framealpha",
"=",
"0.5",
")",
"ax",
".",
"axhline",
"(",
"1.0",
",",
"linestyle",
"=",
"'--'",
",",
"color",
"=",
"'black'",
",",
"lw",
"=",
"2",
")",
"return",
"ax"
] | 39.016 | 17.992 |
def p_id_expr(p):
""" bexpr : ID
| ARRAY_ID
"""
entry = SYMBOL_TABLE.access_id(p[1], p.lineno(1), default_class=CLASS.var)
if entry is None:
p[0] = None
return
entry.accessed = True
if entry.type_ == TYPE.auto:
entry.type_ = _TYPE(gl.DEFAULT_TYPE)
api.errmsg.warning_implicit_type(p.lineno(1), p[1], entry.type_)
p[0] = entry
"""
if entry.class_ == CLASS.var:
if entry.type_ == TYPE.auto:
entry.type_ = SYMBOL_TABLE.basic_types[gl.DEFAULT_TYPE]
#api.errmsg.warning_implicit_type(p.lineno(1), p[1], entry.type_)
"""
if entry.class_ == CLASS.array:
if not LET_ASSIGNMENT:
syntax_error(p.lineno(1), "Variable '%s' is an array and cannot be used in this context" % p[1])
p[0] = None
elif entry.kind == KIND.function: # Function call with 0 args
p[0] = make_call(p[1], p.lineno(1), make_arg_list(None))
elif entry.kind == KIND.sub: # Forbidden for subs
api.errmsg.syntax_error_is_a_sub_not_a_func(p.lineno(1), p[1])
p[0] = None | [
"def",
"p_id_expr",
"(",
"p",
")",
":",
"entry",
"=",
"SYMBOL_TABLE",
".",
"access_id",
"(",
"p",
"[",
"1",
"]",
",",
"p",
".",
"lineno",
"(",
"1",
")",
",",
"default_class",
"=",
"CLASS",
".",
"var",
")",
"if",
"entry",
"is",
"None",
":",
"p",
"[",
"0",
"]",
"=",
"None",
"return",
"entry",
".",
"accessed",
"=",
"True",
"if",
"entry",
".",
"type_",
"==",
"TYPE",
".",
"auto",
":",
"entry",
".",
"type_",
"=",
"_TYPE",
"(",
"gl",
".",
"DEFAULT_TYPE",
")",
"api",
".",
"errmsg",
".",
"warning_implicit_type",
"(",
"p",
".",
"lineno",
"(",
"1",
")",
",",
"p",
"[",
"1",
"]",
",",
"entry",
".",
"type_",
")",
"p",
"[",
"0",
"]",
"=",
"entry",
"\"\"\"\n if entry.class_ == CLASS.var:\n if entry.type_ == TYPE.auto:\n entry.type_ = SYMBOL_TABLE.basic_types[gl.DEFAULT_TYPE]\n #api.errmsg.warning_implicit_type(p.lineno(1), p[1], entry.type_)\n \"\"\"",
"if",
"entry",
".",
"class_",
"==",
"CLASS",
".",
"array",
":",
"if",
"not",
"LET_ASSIGNMENT",
":",
"syntax_error",
"(",
"p",
".",
"lineno",
"(",
"1",
")",
",",
"\"Variable '%s' is an array and cannot be used in this context\"",
"%",
"p",
"[",
"1",
"]",
")",
"p",
"[",
"0",
"]",
"=",
"None",
"elif",
"entry",
".",
"kind",
"==",
"KIND",
".",
"function",
":",
"# Function call with 0 args",
"p",
"[",
"0",
"]",
"=",
"make_call",
"(",
"p",
"[",
"1",
"]",
",",
"p",
".",
"lineno",
"(",
"1",
")",
",",
"make_arg_list",
"(",
"None",
")",
")",
"elif",
"entry",
".",
"kind",
"==",
"KIND",
".",
"sub",
":",
"# Forbidden for subs",
"api",
".",
"errmsg",
".",
"syntax_error_is_a_sub_not_a_func",
"(",
"p",
".",
"lineno",
"(",
"1",
")",
",",
"p",
"[",
"1",
"]",
")",
"p",
"[",
"0",
"]",
"=",
"None"
] | 34.903226 | 20.548387 |
def contracts_data_path(version: Optional[str] = None):
"""Returns the deployment data directory for a version."""
if version is None:
return _BASE.joinpath('data')
return _BASE.joinpath(f'data_{version}') | [
"def",
"contracts_data_path",
"(",
"version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
":",
"if",
"version",
"is",
"None",
":",
"return",
"_BASE",
".",
"joinpath",
"(",
"'data'",
")",
"return",
"_BASE",
".",
"joinpath",
"(",
"f'data_{version}'",
")"
] | 44.2 | 7.8 |
def follow_double_underscores(obj, field_name=None, excel_dialect=True, eval_python=False, index_error_value=None):
'''Like getattr(obj, field_name) only follows model relationships through "__" or "." as link separators
>>> from django.contrib.auth.models import Permission
>>> import math
>>> p = Permission.objects.all()[0]
>>> follow_double_underscores(p, 'content_type__name') == p.content_type.name
True
>>> follow_double_underscores(p, 'math.sqrt(len(obj.content_type.name))', eval_python=True) == math.sqrt(len(p.content_type.name))
True
'''
if not obj:
return obj
if isinstance(field_name, list):
split_fields = field_name
else:
split_fields = re_model_instance_dot.split(field_name)
if False and eval_python:
try:
return eval(field_name, {'datetime': datetime, 'math': math, 'collections': collections}, {'obj': obj})
except IndexError:
return index_error_value
except:
pass
if len(split_fields) <= 1:
if hasattr(obj, split_fields[0]):
value = getattr(obj, split_fields[0])
elif hasattr(obj, split_fields[0] + '_id'):
value = getattr(obj, split_fields[0] + '_id')
elif hasattr(obj, split_fields[0] + '_set'):
value = getattr(obj, split_fields[0] + '_set')
elif split_fields[0] in obj.__dict__:
value = obj.__dict__.get(split_fields[0])
elif eval_python:
value = eval('obj.' + split_fields[0])
else:
return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:], eval_python=eval_python, index_error_value=index_error_value)
if value and excel_dialect and isinstance(value, datetime.datetime):
value = value.strftime('%Y-%m-%d %H:%M:%S')
return value
return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:], eval_python=eval_python, index_error_value=index_error_value) | [
"def",
"follow_double_underscores",
"(",
"obj",
",",
"field_name",
"=",
"None",
",",
"excel_dialect",
"=",
"True",
",",
"eval_python",
"=",
"False",
",",
"index_error_value",
"=",
"None",
")",
":",
"if",
"not",
"obj",
":",
"return",
"obj",
"if",
"isinstance",
"(",
"field_name",
",",
"list",
")",
":",
"split_fields",
"=",
"field_name",
"else",
":",
"split_fields",
"=",
"re_model_instance_dot",
".",
"split",
"(",
"field_name",
")",
"if",
"False",
"and",
"eval_python",
":",
"try",
":",
"return",
"eval",
"(",
"field_name",
",",
"{",
"'datetime'",
":",
"datetime",
",",
"'math'",
":",
"math",
",",
"'collections'",
":",
"collections",
"}",
",",
"{",
"'obj'",
":",
"obj",
"}",
")",
"except",
"IndexError",
":",
"return",
"index_error_value",
"except",
":",
"pass",
"if",
"len",
"(",
"split_fields",
")",
"<=",
"1",
":",
"if",
"hasattr",
"(",
"obj",
",",
"split_fields",
"[",
"0",
"]",
")",
":",
"value",
"=",
"getattr",
"(",
"obj",
",",
"split_fields",
"[",
"0",
"]",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"split_fields",
"[",
"0",
"]",
"+",
"'_id'",
")",
":",
"value",
"=",
"getattr",
"(",
"obj",
",",
"split_fields",
"[",
"0",
"]",
"+",
"'_id'",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"split_fields",
"[",
"0",
"]",
"+",
"'_set'",
")",
":",
"value",
"=",
"getattr",
"(",
"obj",
",",
"split_fields",
"[",
"0",
"]",
"+",
"'_set'",
")",
"elif",
"split_fields",
"[",
"0",
"]",
"in",
"obj",
".",
"__dict__",
":",
"value",
"=",
"obj",
".",
"__dict__",
".",
"get",
"(",
"split_fields",
"[",
"0",
"]",
")",
"elif",
"eval_python",
":",
"value",
"=",
"eval",
"(",
"'obj.'",
"+",
"split_fields",
"[",
"0",
"]",
")",
"else",
":",
"return",
"follow_double_underscores",
"(",
"getattr",
"(",
"obj",
",",
"split_fields",
"[",
"0",
"]",
")",
",",
"field_name",
"=",
"split_fields",
"[",
"1",
":",
"]",
",",
"eval_python",
"=",
"eval_python",
",",
"index_error_value",
"=",
"index_error_value",
")",
"if",
"value",
"and",
"excel_dialect",
"and",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
":",
"value",
"=",
"value",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
"return",
"value",
"return",
"follow_double_underscores",
"(",
"getattr",
"(",
"obj",
",",
"split_fields",
"[",
"0",
"]",
")",
",",
"field_name",
"=",
"split_fields",
"[",
"1",
":",
"]",
",",
"eval_python",
"=",
"eval_python",
",",
"index_error_value",
"=",
"index_error_value",
")"
] | 47.333333 | 29.52381 |
def sanitize(self):
'''
Check if the current settings conform to the LISP specifications and
fix them where possible.
'''
super(EncapsulatedControlMessage, self).sanitize()
# S: This is the Security bit. When set to 1 the following
# authentication information will be appended to the end of the Map-
# Reply. The detailed format of the Authentication Data Content is
# for further study.
if not isinstance(self.security, bool):
raise ValueError('Security flag must be a boolean')
if self.security:
raise NotImplementedError('Handling security data is not ' +
'implemented yet')
# "D" is the "DDT-originated" flag and is set by a DDT client to
# indicate that the receiver can and should return Map-Referral
# messages as appropriate.
if not isinstance(self.ddt_originated, bool):
raise ValueError('DDT originated flag must be a boolean')
# The 6th bit in the ECM LISP header is allocated as the "R"
# bit. The R bit indicates that the encapsulated Map-Register is
# to be processed by an RTR.
if not isinstance(self.for_rtr, bool):
raise ValueError('For-RTR flag must be a boolean')
# The 7th bit in the ECM header is allocated as the "N" bit. The
# N bit indicates that this Map-Register is being relayed by an
# RTR. When an RTR relays the ECM-ed Map-Register to a Map-Server,
# the N bit must be set to 1.
if not isinstance(self.relayed_by_rtr, bool):
raise ValueError('Relayed-by-RTR flag must be a boolean') | [
"def",
"sanitize",
"(",
"self",
")",
":",
"super",
"(",
"EncapsulatedControlMessage",
",",
"self",
")",
".",
"sanitize",
"(",
")",
"# S: This is the Security bit. When set to 1 the following",
"# authentication information will be appended to the end of the Map-",
"# Reply. The detailed format of the Authentication Data Content is",
"# for further study.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"security",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"'Security flag must be a boolean'",
")",
"if",
"self",
".",
"security",
":",
"raise",
"NotImplementedError",
"(",
"'Handling security data is not '",
"+",
"'implemented yet'",
")",
"# \"D\" is the \"DDT-originated\" flag and is set by a DDT client to",
"# indicate that the receiver can and should return Map-Referral",
"# messages as appropriate.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"ddt_originated",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"'DDT originated flag must be a boolean'",
")",
"# The 6th bit in the ECM LISP header is allocated as the \"R\"",
"# bit. The R bit indicates that the encapsulated Map-Register is",
"# to be processed by an RTR.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"for_rtr",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"'For-RTR flag must be a boolean'",
")",
"# The 7th bit in the ECM header is allocated as the \"N\" bit. The",
"# N bit indicates that this Map-Register is being relayed by an",
"# RTR. When an RTR relays the ECM-ed Map-Register to a Map-Server,",
"# the N bit must be set to 1.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"relayed_by_rtr",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"'Relayed-by-RTR flag must be a boolean'",
")"
] | 46.611111 | 24.777778 |
def setconfig(lookup, key, value=None):
'''setconfig will update a lookup to give priority based on the following:
1. If both values are None, we set the value to None
2. If the currently set (the config.json) is set but not runtime, use config
3. If the runtime is set but not config.json, we use runtime
4. If both are set, we use runtime
'''
lookup[key] = value or lookup.get(key)
return lookup | [
"def",
"setconfig",
"(",
"lookup",
",",
"key",
",",
"value",
"=",
"None",
")",
":",
"lookup",
"[",
"key",
"]",
"=",
"value",
"or",
"lookup",
".",
"get",
"(",
"key",
")",
"return",
"lookup"
] | 39.454545 | 24.181818 |
def _uncythonized_mb_model(self, beta, mini_batch):
""" Creates the structure of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
mini_batch : int
Size of each mini batch of data
Returns
----------
theta : np.array
Contains the predicted values for the time series
Y : np.array
Contains the length-adjusted time series (accounting for lags)
scores : np.array
Contains the scores for the time series
"""
rand_int = np.random.randint(low=0, high=self.data.shape[0]-mini_batch-self.max_lag+1)
sample = np.arange(start=rand_int, stop=rand_int+mini_batch)
Y = self.model_Y[sample]
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
theta = np.zeros(Y.shape[0])
theta_t = np.zeros(Y.shape[0])
model_scale, model_shape, model_skewness = self._get_scale_and_shape(parm)
# Loop over time series
theta, self.model_scores = gas_llt_recursion(parm, theta, theta_t, self.model_scores, Y, Y.shape[0],
self.family.score_function, self.link, model_scale, model_shape, model_skewness, self.max_lag)
return theta, theta_t, Y, self.model_scores | [
"def",
"_uncythonized_mb_model",
"(",
"self",
",",
"beta",
",",
"mini_batch",
")",
":",
"rand_int",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
"-",
"mini_batch",
"-",
"self",
".",
"max_lag",
"+",
"1",
")",
"sample",
"=",
"np",
".",
"arange",
"(",
"start",
"=",
"rand_int",
",",
"stop",
"=",
"rand_int",
"+",
"mini_batch",
")",
"Y",
"=",
"self",
".",
"model_Y",
"[",
"sample",
"]",
"parm",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"k",
"]",
".",
"prior",
".",
"transform",
"(",
"beta",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"beta",
".",
"shape",
"[",
"0",
"]",
")",
"]",
")",
"theta",
"=",
"np",
".",
"zeros",
"(",
"Y",
".",
"shape",
"[",
"0",
"]",
")",
"theta_t",
"=",
"np",
".",
"zeros",
"(",
"Y",
".",
"shape",
"[",
"0",
"]",
")",
"model_scale",
",",
"model_shape",
",",
"model_skewness",
"=",
"self",
".",
"_get_scale_and_shape",
"(",
"parm",
")",
"# Loop over time series",
"theta",
",",
"self",
".",
"model_scores",
"=",
"gas_llt_recursion",
"(",
"parm",
",",
"theta",
",",
"theta_t",
",",
"self",
".",
"model_scores",
",",
"Y",
",",
"Y",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"family",
".",
"score_function",
",",
"self",
".",
"link",
",",
"model_scale",
",",
"model_shape",
",",
"model_skewness",
",",
"self",
".",
"max_lag",
")",
"return",
"theta",
",",
"theta_t",
",",
"Y",
",",
"self",
".",
"model_scores"
] | 35.631579 | 27.710526 |
def on_plot_select(self,event):
"""
Select data point if cursor is in range of a data point
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
index = None
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
if index==None: print("Couldn't find point %.1f,%.1f"%(xpick_data,ypick_data))
self.change_selected(index) | [
"def",
"on_plot_select",
"(",
"self",
",",
"event",
")",
":",
"if",
"not",
"self",
".",
"xdata",
"or",
"not",
"self",
".",
"ydata",
":",
"return",
"pos",
"=",
"event",
".",
"GetPosition",
"(",
")",
"width",
",",
"height",
"=",
"self",
".",
"canvas",
".",
"get_width_height",
"(",
")",
"pos",
"[",
"1",
"]",
"=",
"height",
"-",
"pos",
"[",
"1",
"]",
"xpick_data",
",",
"ypick_data",
"=",
"pos",
"xdata_org",
"=",
"self",
".",
"xdata",
"ydata_org",
"=",
"self",
".",
"ydata",
"data_corrected",
"=",
"self",
".",
"map",
".",
"transData",
".",
"transform",
"(",
"vstack",
"(",
"[",
"xdata_org",
",",
"ydata_org",
"]",
")",
".",
"T",
")",
"xdata",
",",
"ydata",
"=",
"data_corrected",
".",
"T",
"xdata",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"xdata",
")",
")",
"ydata",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"ydata",
")",
")",
"e",
"=",
"4e0",
"index",
"=",
"None",
"for",
"i",
",",
"(",
"x",
",",
"y",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"xdata",
",",
"ydata",
")",
")",
":",
"if",
"0",
"<",
"sqrt",
"(",
"(",
"x",
"-",
"xpick_data",
")",
"**",
"2.",
"+",
"(",
"y",
"-",
"ypick_data",
")",
"**",
"2.",
")",
"<",
"e",
":",
"index",
"=",
"i",
"break",
"if",
"index",
"==",
"None",
":",
"print",
"(",
"\"Couldn't find point %.1f,%.1f\"",
"%",
"(",
"xpick_data",
",",
"ypick_data",
")",
")",
"self",
".",
"change_selected",
"(",
"index",
")"
] | 36.846154 | 16.076923 |
def get_queryset(self, queryset=None):
"""
Returns a queryset for this request.
Arguments:
queryset: Optional root-level queryset.
"""
serializer = self.get_serializer()
return getattr(self, 'queryset', serializer.Meta.model.objects.all()) | [
"def",
"get_queryset",
"(",
"self",
",",
"queryset",
"=",
"None",
")",
":",
"serializer",
"=",
"self",
".",
"get_serializer",
"(",
")",
"return",
"getattr",
"(",
"self",
",",
"'queryset'",
",",
"serializer",
".",
"Meta",
".",
"model",
".",
"objects",
".",
"all",
"(",
")",
")"
] | 32.222222 | 12.888889 |
def bar(x, y, **kwargs):
"""Draws a bar chart in the current context figure.
Parameters
----------
x: numpy.ndarray, 1d
The x-coordinates of the data points.
y: numpy.ndarray, 1d
The y-coordinates of the data pints.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
kwargs['x'] = x
kwargs['y'] = y
return _draw_mark(Bars, **kwargs) | [
"def",
"bar",
"(",
"x",
",",
"y",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'x'",
"]",
"=",
"x",
"kwargs",
"[",
"'y'",
"]",
"=",
"y",
"return",
"_draw_mark",
"(",
"Bars",
",",
"*",
"*",
"kwargs",
")"
] | 37.136364 | 19.590909 |
def _integrate_plugins():
"""Integrate plugins to the context"""
import sys
from airflow.plugins_manager import sensors_modules
for sensors_module in sensors_modules:
sys.modules[sensors_module.__name__] = sensors_module
globals()[sensors_module._name] = sensors_module | [
"def",
"_integrate_plugins",
"(",
")",
":",
"import",
"sys",
"from",
"airflow",
".",
"plugins_manager",
"import",
"sensors_modules",
"for",
"sensors_module",
"in",
"sensors_modules",
":",
"sys",
".",
"modules",
"[",
"sensors_module",
".",
"__name__",
"]",
"=",
"sensors_module",
"globals",
"(",
")",
"[",
"sensors_module",
".",
"_name",
"]",
"=",
"sensors_module"
] | 42.142857 | 13.571429 |
def _makeflags(self):
"""Set variable MAKEFLAGS with the numbers of
processors
"""
if self.meta.makeflags in ["on", "ON"]:
cpus = multiprocessing.cpu_count()
os.environ["MAKEFLAGS"] = "-j{0}".format(cpus) | [
"def",
"_makeflags",
"(",
"self",
")",
":",
"if",
"self",
".",
"meta",
".",
"makeflags",
"in",
"[",
"\"on\"",
",",
"\"ON\"",
"]",
":",
"cpus",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"os",
".",
"environ",
"[",
"\"MAKEFLAGS\"",
"]",
"=",
"\"-j{0}\"",
".",
"format",
"(",
"cpus",
")"
] | 36.285714 | 10.285714 |
def get_infix_items(tokens, callback=infix_error):
"""Perform infix token processing.
Takes a callback that (takes infix tokens and returns a string) to handle inner infix calls.
"""
internal_assert(len(tokens) >= 3, "invalid infix tokens", tokens)
(arg1, func, arg2), tokens = tokens[:3], tokens[3:]
args = list(arg1) + list(arg2)
while tokens:
args = [callback([args, func, []])]
(func, newarg), tokens = tokens[:2], tokens[2:]
args += list(newarg)
return func, args | [
"def",
"get_infix_items",
"(",
"tokens",
",",
"callback",
"=",
"infix_error",
")",
":",
"internal_assert",
"(",
"len",
"(",
"tokens",
")",
">=",
"3",
",",
"\"invalid infix tokens\"",
",",
"tokens",
")",
"(",
"arg1",
",",
"func",
",",
"arg2",
")",
",",
"tokens",
"=",
"tokens",
"[",
":",
"3",
"]",
",",
"tokens",
"[",
"3",
":",
"]",
"args",
"=",
"list",
"(",
"arg1",
")",
"+",
"list",
"(",
"arg2",
")",
"while",
"tokens",
":",
"args",
"=",
"[",
"callback",
"(",
"[",
"args",
",",
"func",
",",
"[",
"]",
"]",
")",
"]",
"(",
"func",
",",
"newarg",
")",
",",
"tokens",
"=",
"tokens",
"[",
":",
"2",
"]",
",",
"tokens",
"[",
"2",
":",
"]",
"args",
"+=",
"list",
"(",
"newarg",
")",
"return",
"func",
",",
"args"
] | 39.461538 | 17.538462 |
def iter_nearest_neighbours(umis, substr_idx):
'''
Added by Matt 06/05/17
use substring dict to get (approximately) all the nearest neighbours to
each in a set of umis.
'''
for i, u in enumerate(umis, 1):
neighbours = set()
for idx, substr_map in substr_idx.items():
u_sub = u[slice(*idx)]
neighbours = neighbours.union(substr_map[u_sub])
neighbours.difference_update(umis[:i])
for nbr in neighbours:
yield u, nbr | [
"def",
"iter_nearest_neighbours",
"(",
"umis",
",",
"substr_idx",
")",
":",
"for",
"i",
",",
"u",
"in",
"enumerate",
"(",
"umis",
",",
"1",
")",
":",
"neighbours",
"=",
"set",
"(",
")",
"for",
"idx",
",",
"substr_map",
"in",
"substr_idx",
".",
"items",
"(",
")",
":",
"u_sub",
"=",
"u",
"[",
"slice",
"(",
"*",
"idx",
")",
"]",
"neighbours",
"=",
"neighbours",
".",
"union",
"(",
"substr_map",
"[",
"u_sub",
"]",
")",
"neighbours",
".",
"difference_update",
"(",
"umis",
"[",
":",
"i",
"]",
")",
"for",
"nbr",
"in",
"neighbours",
":",
"yield",
"u",
",",
"nbr"
] | 35.142857 | 15.857143 |
def make_energy_bounds_hdu(self, extname="EBOUNDS"):
""" Builds and returns a FITs HDU with the energy bin boundries
extname : The HDU extension name
"""
if self._ebins is None:
return None
cols = [fits.Column("CHANNEL", "I", array=np.arange(1, len(self._ebins + 1))),
fits.Column("E_MIN", "1E", unit='keV',
array=1000 * self._ebins[0:-1]),
fits.Column("E_MAX", "1E", unit='keV', array=1000 * self._ebins[1:])]
hdu = fits.BinTableHDU.from_columns(
cols, self.make_header(), name=extname)
return hdu | [
"def",
"make_energy_bounds_hdu",
"(",
"self",
",",
"extname",
"=",
"\"EBOUNDS\"",
")",
":",
"if",
"self",
".",
"_ebins",
"is",
"None",
":",
"return",
"None",
"cols",
"=",
"[",
"fits",
".",
"Column",
"(",
"\"CHANNEL\"",
",",
"\"I\"",
",",
"array",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"len",
"(",
"self",
".",
"_ebins",
"+",
"1",
")",
")",
")",
",",
"fits",
".",
"Column",
"(",
"\"E_MIN\"",
",",
"\"1E\"",
",",
"unit",
"=",
"'keV'",
",",
"array",
"=",
"1000",
"*",
"self",
".",
"_ebins",
"[",
"0",
":",
"-",
"1",
"]",
")",
",",
"fits",
".",
"Column",
"(",
"\"E_MAX\"",
",",
"\"1E\"",
",",
"unit",
"=",
"'keV'",
",",
"array",
"=",
"1000",
"*",
"self",
".",
"_ebins",
"[",
"1",
":",
"]",
")",
"]",
"hdu",
"=",
"fits",
".",
"BinTableHDU",
".",
"from_columns",
"(",
"cols",
",",
"self",
".",
"make_header",
"(",
")",
",",
"name",
"=",
"extname",
")",
"return",
"hdu"
] | 45.714286 | 18.142857 |
def get_html_source_with_base_href(driver, page_source):
''' Combines the domain base href with the html source.
This is needed for the page html to render correctly. '''
last_page = get_last_page(driver)
if '://' in last_page:
base_href_html = get_base_href_html(last_page)
return '%s\n%s' % (base_href_html, page_source)
return '' | [
"def",
"get_html_source_with_base_href",
"(",
"driver",
",",
"page_source",
")",
":",
"last_page",
"=",
"get_last_page",
"(",
"driver",
")",
"if",
"'://'",
"in",
"last_page",
":",
"base_href_html",
"=",
"get_base_href_html",
"(",
"last_page",
")",
"return",
"'%s\\n%s'",
"%",
"(",
"base_href_html",
",",
"page_source",
")",
"return",
"''"
] | 45.625 | 16.625 |
def set(self, attr_dict):
"""Sets attributes of this user object.
:type attr_dict: dict
:param attr_dict: Parameters to set, with attribute keys.
:rtype: :class:`.Base`
:return: The current object.
"""
for key in attr_dict:
if key == self._id_attribute:
setattr(self, self._id_attribute, attr_dict[key])
else:
setattr(self, u"_" + key, attr_dict[key])
return self | [
"def",
"set",
"(",
"self",
",",
"attr_dict",
")",
":",
"for",
"key",
"in",
"attr_dict",
":",
"if",
"key",
"==",
"self",
".",
"_id_attribute",
":",
"setattr",
"(",
"self",
",",
"self",
".",
"_id_attribute",
",",
"attr_dict",
"[",
"key",
"]",
")",
"else",
":",
"setattr",
"(",
"self",
",",
"u\"_\"",
"+",
"key",
",",
"attr_dict",
"[",
"key",
"]",
")",
"return",
"self"
] | 25.6875 | 18.3125 |
def _create_gitlab_runner_prometheus_instance(self, instance, init_config):
"""
Set up the gitlab_runner instance so it can be used in OpenMetricsBaseCheck
"""
# Mapping from Prometheus metrics names to Datadog ones
# For now it's a 1:1 mapping
allowed_metrics = init_config.get('allowed_metrics')
if allowed_metrics is None:
raise CheckException("At least one metric must be whitelisted in `allowed_metrics`.")
gitlab_runner_instance = deepcopy(instance)
# gitlab_runner uses 'prometheus_endpoint' and not 'prometheus_url', so we have to rename the key
gitlab_runner_instance['prometheus_url'] = instance.get('prometheus_endpoint', None)
gitlab_runner_instance.update(
{
'namespace': 'gitlab_runner',
'metrics': allowed_metrics,
# Defaults that were set when gitlab_runner was based on PrometheusCheck
'send_monotonic_counter': instance.get('send_monotonic_counter', False),
'health_service_check': instance.get('health_service_check', False),
}
)
return gitlab_runner_instance | [
"def",
"_create_gitlab_runner_prometheus_instance",
"(",
"self",
",",
"instance",
",",
"init_config",
")",
":",
"# Mapping from Prometheus metrics names to Datadog ones",
"# For now it's a 1:1 mapping",
"allowed_metrics",
"=",
"init_config",
".",
"get",
"(",
"'allowed_metrics'",
")",
"if",
"allowed_metrics",
"is",
"None",
":",
"raise",
"CheckException",
"(",
"\"At least one metric must be whitelisted in `allowed_metrics`.\"",
")",
"gitlab_runner_instance",
"=",
"deepcopy",
"(",
"instance",
")",
"# gitlab_runner uses 'prometheus_endpoint' and not 'prometheus_url', so we have to rename the key",
"gitlab_runner_instance",
"[",
"'prometheus_url'",
"]",
"=",
"instance",
".",
"get",
"(",
"'prometheus_endpoint'",
",",
"None",
")",
"gitlab_runner_instance",
".",
"update",
"(",
"{",
"'namespace'",
":",
"'gitlab_runner'",
",",
"'metrics'",
":",
"allowed_metrics",
",",
"# Defaults that were set when gitlab_runner was based on PrometheusCheck",
"'send_monotonic_counter'",
":",
"instance",
".",
"get",
"(",
"'send_monotonic_counter'",
",",
"False",
")",
",",
"'health_service_check'",
":",
"instance",
".",
"get",
"(",
"'health_service_check'",
",",
"False",
")",
",",
"}",
")",
"return",
"gitlab_runner_instance"
] | 45.269231 | 27.423077 |
def next_request(self):
""" Provides a request to be scheduled.
:return: Request object or None
"""
method_frame, header_frame, url = self.server.basic_get(queue=self.rabbitmq_key)
# TODO(royce): Remove print
print url
if url:
return self.make_requests_from_url(url) | [
"def",
"next_request",
"(",
"self",
")",
":",
"method_frame",
",",
"header_frame",
",",
"url",
"=",
"self",
".",
"server",
".",
"basic_get",
"(",
"queue",
"=",
"self",
".",
"rabbitmq_key",
")",
"# TODO(royce): Remove print",
"print",
"url",
"if",
"url",
":",
"return",
"self",
".",
"make_requests_from_url",
"(",
"url",
")"
] | 27.166667 | 20.833333 |
def logpdf_link(self, inv_link_f, y, Y_metadata=None):
"""
Log Likelihood function given inverse link of f.
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = y_{i}\\log\\lambda(f_{i}) + (1-y_{i})\\log (1-f_{i})
:param inv_link_f: latent variables inverse link of f.
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata must contain 'trials'
:returns: log likelihood evaluated at points inverse link of f.
:rtype: float
"""
N = Y_metadata['trials']
np.testing.assert_array_equal(N.shape, y.shape)
nchoosey = special.gammaln(N+1) - special.gammaln(y+1) - special.gammaln(N-y+1)
Ny = N-y
t1 = np.zeros(y.shape)
t2 = np.zeros(y.shape)
t1[y>0] = y[y>0]*np.log(inv_link_f[y>0])
t2[Ny>0] = Ny[Ny>0]*np.log(1.-inv_link_f[Ny>0])
return nchoosey + t1 + t2 | [
"def",
"logpdf_link",
"(",
"self",
",",
"inv_link_f",
",",
"y",
",",
"Y_metadata",
"=",
"None",
")",
":",
"N",
"=",
"Y_metadata",
"[",
"'trials'",
"]",
"np",
".",
"testing",
".",
"assert_array_equal",
"(",
"N",
".",
"shape",
",",
"y",
".",
"shape",
")",
"nchoosey",
"=",
"special",
".",
"gammaln",
"(",
"N",
"+",
"1",
")",
"-",
"special",
".",
"gammaln",
"(",
"y",
"+",
"1",
")",
"-",
"special",
".",
"gammaln",
"(",
"N",
"-",
"y",
"+",
"1",
")",
"Ny",
"=",
"N",
"-",
"y",
"t1",
"=",
"np",
".",
"zeros",
"(",
"y",
".",
"shape",
")",
"t2",
"=",
"np",
".",
"zeros",
"(",
"y",
".",
"shape",
")",
"t1",
"[",
"y",
">",
"0",
"]",
"=",
"y",
"[",
"y",
">",
"0",
"]",
"*",
"np",
".",
"log",
"(",
"inv_link_f",
"[",
"y",
">",
"0",
"]",
")",
"t2",
"[",
"Ny",
">",
"0",
"]",
"=",
"Ny",
"[",
"Ny",
">",
"0",
"]",
"*",
"np",
".",
"log",
"(",
"1.",
"-",
"inv_link_f",
"[",
"Ny",
">",
"0",
"]",
")",
"return",
"nchoosey",
"+",
"t1",
"+",
"t2"
] | 34.925926 | 20.925926 |
def missing(self, *args, **kwds):
"""Return whether an output is considered missing or not."""
from functools import reduce
indexer = kwds['indexer']
freq = kwds['freq'] or generic.default_freq(**indexer)
miss = (checks.missing_any(generic.select_time(da, **indexer), freq) for da in args)
return reduce(np.logical_or, miss) | [
"def",
"missing",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"from",
"functools",
"import",
"reduce",
"indexer",
"=",
"kwds",
"[",
"'indexer'",
"]",
"freq",
"=",
"kwds",
"[",
"'freq'",
"]",
"or",
"generic",
".",
"default_freq",
"(",
"*",
"*",
"indexer",
")",
"miss",
"=",
"(",
"checks",
".",
"missing_any",
"(",
"generic",
".",
"select_time",
"(",
"da",
",",
"*",
"*",
"indexer",
")",
",",
"freq",
")",
"for",
"da",
"in",
"args",
")",
"return",
"reduce",
"(",
"np",
".",
"logical_or",
",",
"miss",
")"
] | 40.666667 | 19.333333 |
def get_absolute_path(some_path):
"""
This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will return unmodified; if the input is
relative, it will be rendered as relative to the current working directory.
"""
if os.path.isabs(some_path):
return some_path
else:
return evaluate_relative_path(os.getcwd(), some_path) | [
"def",
"get_absolute_path",
"(",
"some_path",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"some_path",
")",
":",
"return",
"some_path",
"else",
":",
"return",
"evaluate_relative_path",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"some_path",
")"
] | 40.8 | 19.8 |
def is_installed(self, bug: Bug) -> bool:
"""
Determines whether the Docker image for a given bug has been installed
on the server.
"""
r = self.__api.get('bugs/{}/installed'.format(bug.name))
if r.status_code == 200:
answer = r.json()
assert isinstance(answer, bool)
return answer
# TODO bug not registered on server
if r.status_code == 404:
raise KeyError("no bug found with given name: {}".format(bug.name))
self.__api.handle_erroneous_response(r) | [
"def",
"is_installed",
"(",
"self",
",",
"bug",
":",
"Bug",
")",
"->",
"bool",
":",
"r",
"=",
"self",
".",
"__api",
".",
"get",
"(",
"'bugs/{}/installed'",
".",
"format",
"(",
"bug",
".",
"name",
")",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"answer",
"=",
"r",
".",
"json",
"(",
")",
"assert",
"isinstance",
"(",
"answer",
",",
"bool",
")",
"return",
"answer",
"# TODO bug not registered on server",
"if",
"r",
".",
"status_code",
"==",
"404",
":",
"raise",
"KeyError",
"(",
"\"no bug found with given name: {}\"",
".",
"format",
"(",
"bug",
".",
"name",
")",
")",
"self",
".",
"__api",
".",
"handle_erroneous_response",
"(",
"r",
")"
] | 32.764706 | 17.352941 |
def binsearch(reader, key, compare_func=cmp, block_size=8192):
"""
Perform a binary search for a specified key to within a 'block_size'
(default 8192) granularity, and return first full line found.
"""
min_ = binsearch_offset(reader, key, compare_func, block_size)
reader.seek(min_)
if min_ > 0:
reader.readline() # skip partial line
def gen_iter(line):
while line:
yield line.rstrip()
line = reader.readline()
return gen_iter(reader.readline()) | [
"def",
"binsearch",
"(",
"reader",
",",
"key",
",",
"compare_func",
"=",
"cmp",
",",
"block_size",
"=",
"8192",
")",
":",
"min_",
"=",
"binsearch_offset",
"(",
"reader",
",",
"key",
",",
"compare_func",
",",
"block_size",
")",
"reader",
".",
"seek",
"(",
"min_",
")",
"if",
"min_",
">",
"0",
":",
"reader",
".",
"readline",
"(",
")",
"# skip partial line",
"def",
"gen_iter",
"(",
"line",
")",
":",
"while",
"line",
":",
"yield",
"line",
".",
"rstrip",
"(",
")",
"line",
"=",
"reader",
".",
"readline",
"(",
")",
"return",
"gen_iter",
"(",
"reader",
".",
"readline",
"(",
")",
")"
] | 26.789474 | 21.421053 |
def send(self, sender, to, subject, plain=None, html=None, cc=None, bcc=None,
replyto=None, attach=None):
"""
Send the message.
If we have PLAIN and HTML versions, send a multipart alternative
MIME message, else send whichever we do have.
If we have neither, raise NoContentError
Arguments:
- `sender`: str
- `to`: list
- `subject`: str
- `plain`: str
- `html`: str
- `cc`: str or [str]
- `bcc`: str or [str]
- `replyto`: str
- `attach`: str or [str]
Return: None
Exceptions: NoContentError
"""
self.sanity_check(sender, to, subject, plain=plain, html=html)
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('mixed')
msg['Subject'] = u(subject)
msg['From'] = u(sender)
msg['To'] = self.tolist(to)
if cc:
msg['Cc'] = self.tolist(cc)
recipients = _stringlist(to, cc, bcc)
if replyto:
msg.add_header('reply-to', replyto)
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
if plain:
msg.attach(MIMEText(u(plain), 'plain'))
if html:
msg.attach(MIMEText(u(html), 'html'))
# Deal with attachments.
if attach:
for p in _stringlist(attach):
msg.attach(Attachment(p).as_msg())
self.deliver(msg, recipients) | [
"def",
"send",
"(",
"self",
",",
"sender",
",",
"to",
",",
"subject",
",",
"plain",
"=",
"None",
",",
"html",
"=",
"None",
",",
"cc",
"=",
"None",
",",
"bcc",
"=",
"None",
",",
"replyto",
"=",
"None",
",",
"attach",
"=",
"None",
")",
":",
"self",
".",
"sanity_check",
"(",
"sender",
",",
"to",
",",
"subject",
",",
"plain",
"=",
"plain",
",",
"html",
"=",
"html",
")",
"# Create message container - the correct MIME type is multipart/alternative.",
"msg",
"=",
"MIMEMultipart",
"(",
"'mixed'",
")",
"msg",
"[",
"'Subject'",
"]",
"=",
"u",
"(",
"subject",
")",
"msg",
"[",
"'From'",
"]",
"=",
"u",
"(",
"sender",
")",
"msg",
"[",
"'To'",
"]",
"=",
"self",
".",
"tolist",
"(",
"to",
")",
"if",
"cc",
":",
"msg",
"[",
"'Cc'",
"]",
"=",
"self",
".",
"tolist",
"(",
"cc",
")",
"recipients",
"=",
"_stringlist",
"(",
"to",
",",
"cc",
",",
"bcc",
")",
"if",
"replyto",
":",
"msg",
".",
"add_header",
"(",
"'reply-to'",
",",
"replyto",
")",
"# Attach parts into message container.",
"# According to RFC 2046, the last part of a multipart message, in this case",
"# the HTML message, is best and preferred.",
"if",
"plain",
":",
"msg",
".",
"attach",
"(",
"MIMEText",
"(",
"u",
"(",
"plain",
")",
",",
"'plain'",
")",
")",
"if",
"html",
":",
"msg",
".",
"attach",
"(",
"MIMEText",
"(",
"u",
"(",
"html",
")",
",",
"'html'",
")",
")",
"# Deal with attachments.",
"if",
"attach",
":",
"for",
"p",
"in",
"_stringlist",
"(",
"attach",
")",
":",
"msg",
".",
"attach",
"(",
"Attachment",
"(",
"p",
")",
".",
"as_msg",
"(",
")",
")",
"self",
".",
"deliver",
"(",
"msg",
",",
"recipients",
")"
] | 30.576923 | 18.692308 |
def join_list(values, delimiter=', ', transform=None):
"""
Concatenates the upper-cased values using the given delimiter if
the given values variable is a list. Otherwise it is just returned.
:param values: List of strings or string .
:param delimiter: The delimiter used to join the values.
:return: The concatenation or identity.
"""
# type: (Union[List[str], str], str)->str
if transform is None:
transform = _identity
if values is not None and not isinstance(values, (str, bytes)):
values = delimiter.join(transform(x) for x in values)
return values | [
"def",
"join_list",
"(",
"values",
",",
"delimiter",
"=",
"', '",
",",
"transform",
"=",
"None",
")",
":",
"# type: (Union[List[str], str], str)->str",
"if",
"transform",
"is",
"None",
":",
"transform",
"=",
"_identity",
"if",
"values",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"values",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"values",
"=",
"delimiter",
".",
"join",
"(",
"transform",
"(",
"x",
")",
"for",
"x",
"in",
"values",
")",
"return",
"values"
] | 40 | 16.266667 |
def jsonrpc_map(self):
""" Map of json-rpc available calls.
:return str:
"""
result = "<h1>JSON-RPC map</h1><pre>{0}</pre>".format("\n\n".join([
"{0}: {1}".format(fname, f.__doc__)
for fname, f in self.dispatcher.items()
]))
return Response(result) | [
"def",
"jsonrpc_map",
"(",
"self",
")",
":",
"result",
"=",
"\"<h1>JSON-RPC map</h1><pre>{0}</pre>\"",
".",
"format",
"(",
"\"\\n\\n\"",
".",
"join",
"(",
"[",
"\"{0}: {1}\"",
".",
"format",
"(",
"fname",
",",
"f",
".",
"__doc__",
")",
"for",
"fname",
",",
"f",
"in",
"self",
".",
"dispatcher",
".",
"items",
"(",
")",
"]",
")",
")",
"return",
"Response",
"(",
"result",
")"
] | 28.363636 | 19 |
def list_rds(region, filter_by_kwargs):
"""List all RDS thingys."""
conn = boto.rds.connect_to_region(region)
instances = conn.get_all_dbinstances()
return lookup(instances, filter_by=filter_by_kwargs) | [
"def",
"list_rds",
"(",
"region",
",",
"filter_by_kwargs",
")",
":",
"conn",
"=",
"boto",
".",
"rds",
".",
"connect_to_region",
"(",
"region",
")",
"instances",
"=",
"conn",
".",
"get_all_dbinstances",
"(",
")",
"return",
"lookup",
"(",
"instances",
",",
"filter_by",
"=",
"filter_by_kwargs",
")"
] | 42.6 | 4.8 |
def verify_and_fill_address_paths_from_bip32key(address_paths, master_key, network):
'''
Take address paths and verifies their accuracy client-side.
Also fills in all the available metadata (WIF, public key, etc)
'''
assert network, network
wallet_obj = Wallet.deserialize(master_key, network=network)
address_paths_cleaned = []
for address_path in address_paths:
path = address_path['path']
input_address = address_path['address']
child_wallet = wallet_obj.get_child_for_path(path)
if child_wallet.to_address() != input_address:
err_msg = 'Client Side Verification Fail for %s on %s:\n%s != %s' % (
path,
master_key,
child_wallet.to_address(),
input_address,
)
raise Exception(err_msg)
pubkeyhex = child_wallet.get_public_key_hex(compressed=True)
server_pubkeyhex = address_path.get('public')
if server_pubkeyhex and server_pubkeyhex != pubkeyhex:
err_msg = 'Client Side Verification Fail for %s on %s:\n%s != %s' % (
path,
master_key,
pubkeyhex,
server_pubkeyhex,
)
raise Exception(err_msg)
address_path_cleaned = {
'pub_address': input_address,
'path': path,
'pubkeyhex': pubkeyhex,
}
if child_wallet.private_key:
privkeyhex = child_wallet.get_private_key_hex()
address_path_cleaned['wif'] = child_wallet.export_to_wif()
address_path_cleaned['privkeyhex'] = privkeyhex
address_paths_cleaned.append(address_path_cleaned)
return address_paths_cleaned | [
"def",
"verify_and_fill_address_paths_from_bip32key",
"(",
"address_paths",
",",
"master_key",
",",
"network",
")",
":",
"assert",
"network",
",",
"network",
"wallet_obj",
"=",
"Wallet",
".",
"deserialize",
"(",
"master_key",
",",
"network",
"=",
"network",
")",
"address_paths_cleaned",
"=",
"[",
"]",
"for",
"address_path",
"in",
"address_paths",
":",
"path",
"=",
"address_path",
"[",
"'path'",
"]",
"input_address",
"=",
"address_path",
"[",
"'address'",
"]",
"child_wallet",
"=",
"wallet_obj",
".",
"get_child_for_path",
"(",
"path",
")",
"if",
"child_wallet",
".",
"to_address",
"(",
")",
"!=",
"input_address",
":",
"err_msg",
"=",
"'Client Side Verification Fail for %s on %s:\\n%s != %s'",
"%",
"(",
"path",
",",
"master_key",
",",
"child_wallet",
".",
"to_address",
"(",
")",
",",
"input_address",
",",
")",
"raise",
"Exception",
"(",
"err_msg",
")",
"pubkeyhex",
"=",
"child_wallet",
".",
"get_public_key_hex",
"(",
"compressed",
"=",
"True",
")",
"server_pubkeyhex",
"=",
"address_path",
".",
"get",
"(",
"'public'",
")",
"if",
"server_pubkeyhex",
"and",
"server_pubkeyhex",
"!=",
"pubkeyhex",
":",
"err_msg",
"=",
"'Client Side Verification Fail for %s on %s:\\n%s != %s'",
"%",
"(",
"path",
",",
"master_key",
",",
"pubkeyhex",
",",
"server_pubkeyhex",
",",
")",
"raise",
"Exception",
"(",
"err_msg",
")",
"address_path_cleaned",
"=",
"{",
"'pub_address'",
":",
"input_address",
",",
"'path'",
":",
"path",
",",
"'pubkeyhex'",
":",
"pubkeyhex",
",",
"}",
"if",
"child_wallet",
".",
"private_key",
":",
"privkeyhex",
"=",
"child_wallet",
".",
"get_private_key_hex",
"(",
")",
"address_path_cleaned",
"[",
"'wif'",
"]",
"=",
"child_wallet",
".",
"export_to_wif",
"(",
")",
"address_path_cleaned",
"[",
"'privkeyhex'",
"]",
"=",
"privkeyhex",
"address_paths_cleaned",
".",
"append",
"(",
"address_path_cleaned",
")",
"return",
"address_paths_cleaned"
] | 33.826923 | 21.365385 |
def reset(self):
"""Re-initialize the sampler."""
# live points
self.live_u = self.rstate.rand(self.nlive, self.npdim)
if self.use_pool_ptform:
# Use the pool to compute the prior transform.
self.live_v = np.array(list(self.M(self.prior_transform,
np.array(self.live_u))))
else:
# Compute the prior transform using the default `map` function.
self.live_v = np.array(list(map(self.prior_transform,
np.array(self.live_u))))
if self.use_pool_logl:
# Use the pool to compute the log-likelihoods.
self.live_logl = np.array(list(self.M(self.loglikelihood,
np.array(self.live_v))))
else:
# Compute the log-likelihoods using the default `map` function.
self.live_logl = np.array(list(map(self.loglikelihood,
np.array(self.live_v))))
self.live_bound = np.zeros(self.nlive, dtype='int')
self.live_it = np.zeros(self.nlive, dtype='int')
# parallelism
self.queue = []
self.nqueue = 0
self.unused = 0
self.used = 0
# sampling
self.it = 1
self.since_update = 0
self.ncall = self.nlive
self.bound = [UnitCube(self.npdim)]
self.nbound = 1
self.added_live = False
# results
self.saved_id = []
self.saved_u = []
self.saved_v = []
self.saved_logl = []
self.saved_logvol = []
self.saved_logwt = []
self.saved_logz = []
self.saved_logzvar = []
self.saved_h = []
self.saved_nc = []
self.saved_boundidx = []
self.saved_it = []
self.saved_bounditer = []
self.saved_scale = [] | [
"def",
"reset",
"(",
"self",
")",
":",
"# live points",
"self",
".",
"live_u",
"=",
"self",
".",
"rstate",
".",
"rand",
"(",
"self",
".",
"nlive",
",",
"self",
".",
"npdim",
")",
"if",
"self",
".",
"use_pool_ptform",
":",
"# Use the pool to compute the prior transform.",
"self",
".",
"live_v",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"M",
"(",
"self",
".",
"prior_transform",
",",
"np",
".",
"array",
"(",
"self",
".",
"live_u",
")",
")",
")",
")",
"else",
":",
"# Compute the prior transform using the default `map` function.",
"self",
".",
"live_v",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"self",
".",
"prior_transform",
",",
"np",
".",
"array",
"(",
"self",
".",
"live_u",
")",
")",
")",
")",
"if",
"self",
".",
"use_pool_logl",
":",
"# Use the pool to compute the log-likelihoods.",
"self",
".",
"live_logl",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"M",
"(",
"self",
".",
"loglikelihood",
",",
"np",
".",
"array",
"(",
"self",
".",
"live_v",
")",
")",
")",
")",
"else",
":",
"# Compute the log-likelihoods using the default `map` function.",
"self",
".",
"live_logl",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"self",
".",
"loglikelihood",
",",
"np",
".",
"array",
"(",
"self",
".",
"live_v",
")",
")",
")",
")",
"self",
".",
"live_bound",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"nlive",
",",
"dtype",
"=",
"'int'",
")",
"self",
".",
"live_it",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"nlive",
",",
"dtype",
"=",
"'int'",
")",
"# parallelism",
"self",
".",
"queue",
"=",
"[",
"]",
"self",
".",
"nqueue",
"=",
"0",
"self",
".",
"unused",
"=",
"0",
"self",
".",
"used",
"=",
"0",
"# sampling",
"self",
".",
"it",
"=",
"1",
"self",
".",
"since_update",
"=",
"0",
"self",
".",
"ncall",
"=",
"self",
".",
"nlive",
"self",
".",
"bound",
"=",
"[",
"UnitCube",
"(",
"self",
".",
"npdim",
")",
"]",
"self",
".",
"nbound",
"=",
"1",
"self",
".",
"added_live",
"=",
"False",
"# results",
"self",
".",
"saved_id",
"=",
"[",
"]",
"self",
".",
"saved_u",
"=",
"[",
"]",
"self",
".",
"saved_v",
"=",
"[",
"]",
"self",
".",
"saved_logl",
"=",
"[",
"]",
"self",
".",
"saved_logvol",
"=",
"[",
"]",
"self",
".",
"saved_logwt",
"=",
"[",
"]",
"self",
".",
"saved_logz",
"=",
"[",
"]",
"self",
".",
"saved_logzvar",
"=",
"[",
"]",
"self",
".",
"saved_h",
"=",
"[",
"]",
"self",
".",
"saved_nc",
"=",
"[",
"]",
"self",
".",
"saved_boundidx",
"=",
"[",
"]",
"self",
".",
"saved_it",
"=",
"[",
"]",
"self",
".",
"saved_bounditer",
"=",
"[",
"]",
"self",
".",
"saved_scale",
"=",
"[",
"]"
] | 35.245283 | 19.509434 |
def report(self, item_id, report_format="json"):
"""Retrieves the specified report for the analyzed item, referenced by item_id.
For available report formats, see online Joe Sandbox documentation.
:type item_id: str
:param item_id: File ID number
:type report_format: str
:param report_format: Return format
:rtype: dict
:return: Dictionary representing the JSON parsed data or raw, for other
formats / JSON parsing failure.
"""
if report_format == "json":
report_format = "jsonfixed"
try:
return json.loads(self.jbx.download(item_id, report_format)[1].decode('utf-8'))
except (jbxapi.JoeException, ValueError, IndexError) as e:
raise sandboxapi.SandboxError("error in report fetch: {e}".format(e=e)) | [
"def",
"report",
"(",
"self",
",",
"item_id",
",",
"report_format",
"=",
"\"json\"",
")",
":",
"if",
"report_format",
"==",
"\"json\"",
":",
"report_format",
"=",
"\"jsonfixed\"",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"self",
".",
"jbx",
".",
"download",
"(",
"item_id",
",",
"report_format",
")",
"[",
"1",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"except",
"(",
"jbxapi",
".",
"JoeException",
",",
"ValueError",
",",
"IndexError",
")",
"as",
"e",
":",
"raise",
"sandboxapi",
".",
"SandboxError",
"(",
"\"error in report fetch: {e}\"",
".",
"format",
"(",
"e",
"=",
"e",
")",
")"
] | 40.380952 | 21.142857 |
def _dir_out(self):
"""Create string of the data directory to save individual .nc files."""
return os.path.join(self.proj.direc_out, self.proj.name,
self.model.name, self.run.name, self.name) | [
"def",
"_dir_out",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"proj",
".",
"direc_out",
",",
"self",
".",
"proj",
".",
"name",
",",
"self",
".",
"model",
".",
"name",
",",
"self",
".",
"run",
".",
"name",
",",
"self",
".",
"name",
")"
] | 58 | 18.75 |
def write_base (self, url_data):
"""Write url_data.base_ref."""
self.write(self.part("base") + self.spaces("base"))
self.writeln(url_data.base_ref, color=self.colorbase) | [
"def",
"write_base",
"(",
"self",
",",
"url_data",
")",
":",
"self",
".",
"write",
"(",
"self",
".",
"part",
"(",
"\"base\"",
")",
"+",
"self",
".",
"spaces",
"(",
"\"base\"",
")",
")",
"self",
".",
"writeln",
"(",
"url_data",
".",
"base_ref",
",",
"color",
"=",
"self",
".",
"colorbase",
")"
] | 47.5 | 12 |
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
"""
Check that ``pfeed.frequency`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found.
"""
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df) | [
"def",
"check_frequencies",
"(",
"pfeed",
",",
"*",
",",
"as_df",
"=",
"False",
",",
"include_warnings",
"=",
"False",
")",
":",
"table",
"=",
"'frequencies'",
"problems",
"=",
"[",
"]",
"# Preliminary checks",
"if",
"pfeed",
".",
"frequencies",
"is",
"None",
":",
"problems",
".",
"append",
"(",
"[",
"'error'",
",",
"'Missing table'",
",",
"table",
",",
"[",
"]",
"]",
")",
"else",
":",
"f",
"=",
"pfeed",
".",
"frequencies",
".",
"copy",
"(",
")",
"problems",
"=",
"check_for_required_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"if",
"problems",
":",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")",
"if",
"include_warnings",
":",
"problems",
"=",
"check_for_invalid_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"# Check route_short_name and route_long_name",
"for",
"column",
"in",
"[",
"'route_short_name'",
",",
"'route_long_name'",
"]",
":",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"column",
",",
"gt",
".",
"valid_str",
",",
"column_required",
"=",
"False",
")",
"cond",
"=",
"~",
"(",
"f",
"[",
"'route_short_name'",
"]",
".",
"notnull",
"(",
")",
"|",
"f",
"[",
"'route_long_name'",
"]",
".",
"notnull",
"(",
")",
")",
"problems",
"=",
"gt",
".",
"check_table",
"(",
"problems",
",",
"table",
",",
"f",
",",
"cond",
",",
"'route_short_name and route_long_name both empty'",
")",
"# Check route_type",
"v",
"=",
"lambda",
"x",
":",
"x",
"in",
"range",
"(",
"8",
")",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'route_type'",
",",
"v",
")",
"# Check service window ID",
"problems",
"=",
"gt",
".",
"check_column_linked_id",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'service_window_id'",
",",
"pfeed",
".",
"service_windows",
")",
"# Check direction",
"v",
"=",
"lambda",
"x",
":",
"x",
"in",
"range",
"(",
"3",
")",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'direction'",
",",
"v",
")",
"# Check frequency",
"v",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"int",
")",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'frequency'",
",",
"v",
")",
"# Check speed",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'speed'",
",",
"valid_speed",
",",
"column_required",
"=",
"False",
")",
"# Check shape ID",
"problems",
"=",
"gt",
".",
"check_column_linked_id",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'shape_id'",
",",
"pfeed",
".",
"shapes",
")",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")"
] | 33.767857 | 22.160714 |
def comparison(self, t):
"""
<PropertyIsEqualTo>
<PropertyName>NAME</PropertyName>
<Literal>Sydney</Literal>
</PropertyIsEqualTo>
"""
assert(len(t) == 3)
d = {"PropertyIsEqualTo": [
t[0], t[1], t[2]
]}
#parts = [str(p.value) for p in t]
#v = " ".join(parts)
#v = "( {} )".format(v)
#t[0].value = v
#return t[0]
return d | [
"def",
"comparison",
"(",
"self",
",",
"t",
")",
":",
"assert",
"(",
"len",
"(",
"t",
")",
"==",
"3",
")",
"d",
"=",
"{",
"\"PropertyIsEqualTo\"",
":",
"[",
"t",
"[",
"0",
"]",
",",
"t",
"[",
"1",
"]",
",",
"t",
"[",
"2",
"]",
"]",
"}",
"#parts = [str(p.value) for p in t]",
"#v = \" \".join(parts)",
"#v = \"( {} )\".format(v)",
"#t[0].value = v",
"#return t[0]",
"return",
"d"
] | 21.15 | 16.15 |
def get_temp_directory():
"""Return an absolute path to an existing temporary directory"""
# Supports all platforms supported by tempfile
directory = os.path.join(gettempdir(), "ttkthemes")
if not os.path.exists(directory):
os.makedirs(directory)
return directory | [
"def",
"get_temp_directory",
"(",
")",
":",
"# Supports all platforms supported by tempfile",
"directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"gettempdir",
"(",
")",
",",
"\"ttkthemes\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"os",
".",
"makedirs",
"(",
"directory",
")",
"return",
"directory"
] | 40.714286 | 10.428571 |
def notify(title,
message,
jid,
password,
recipient,
hostname=None,
port=5222,
path_to_certs=None,
mtype=None,
retcode=None):
"""
Optional parameters
* ``hostname`` (if not from jid)
* ``port``
* ``path_to_certs``
* ``mtype`` ('chat' required for Google Hangouts)
To verify the SSL certificates offered by a server:
path_to_certs = "path/to/ca/cert"
Without dnspython library installed, you will need
to specify the server hostname if it doesn't match the jid.
For example, to use Google Talk you would need to use:
hostname = 'talk.google.com'
Specify port if other than 5222.
NOTE: Ignored without specified hostname
"""
xmpp_bot = NtfySendMsgBot(jid, password, recipient, title, message, mtype)
# NOTE: Below plugins weren't needed for Google Hangouts
# but may be useful (from original sleekxmpp example)
# xmpp_bot.register_plugin('xep_0030') # Service Discovery
# xmpp_bot.register_plugin('xep_0199') # XMPP Ping
if path_to_certs and os.path.isdir(path_to_certs):
xmpp_bot.ca_certs = path_to_certs
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp_bot.connect(*([(hostname, int(port)) if hostname else []])):
xmpp_bot.process(block=True)
else:
logging.getLogger(__name__).error('Unable to connect', exc_info=True) | [
"def",
"notify",
"(",
"title",
",",
"message",
",",
"jid",
",",
"password",
",",
"recipient",
",",
"hostname",
"=",
"None",
",",
"port",
"=",
"5222",
",",
"path_to_certs",
"=",
"None",
",",
"mtype",
"=",
"None",
",",
"retcode",
"=",
"None",
")",
":",
"xmpp_bot",
"=",
"NtfySendMsgBot",
"(",
"jid",
",",
"password",
",",
"recipient",
",",
"title",
",",
"message",
",",
"mtype",
")",
"# NOTE: Below plugins weren't needed for Google Hangouts",
"# but may be useful (from original sleekxmpp example)",
"# xmpp_bot.register_plugin('xep_0030') # Service Discovery",
"# xmpp_bot.register_plugin('xep_0199') # XMPP Ping",
"if",
"path_to_certs",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"path_to_certs",
")",
":",
"xmpp_bot",
".",
"ca_certs",
"=",
"path_to_certs",
"# Connect to the XMPP server and start processing XMPP stanzas.",
"if",
"xmpp_bot",
".",
"connect",
"(",
"*",
"(",
"[",
"(",
"hostname",
",",
"int",
"(",
"port",
")",
")",
"if",
"hostname",
"else",
"[",
"]",
"]",
")",
")",
":",
"xmpp_bot",
".",
"process",
"(",
"block",
"=",
"True",
")",
"else",
":",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"error",
"(",
"'Unable to connect'",
",",
"exc_info",
"=",
"True",
")"
] | 32 | 20.444444 |
def set_attributes(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True} | [
"def",
"set_attributes",
"(",
"name",
",",
"attributes",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"attributes",
"=",
"_preprocess_attributes",
"(",
"attributes",
")",
"try",
":",
"url",
"=",
"conn",
".",
"get_queue_url",
"(",
"QueueName",
"=",
"name",
")",
"[",
"'QueueUrl'",
"]",
"conn",
".",
"set_queue_attributes",
"(",
"QueueUrl",
"=",
"url",
",",
"Attributes",
"=",
"attributes",
")",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'error'",
":",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"}",
"return",
"{",
"'result'",
":",
"True",
"}"
] | 25.851852 | 28.074074 |
def format_log(ret):
'''
Format the state into a log message
'''
msg = ''
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if 'changes' in ret:
# Yep, looks like a valid state return
chg = ret['changes']
if not chg:
if ret['comment']:
msg = ret['comment']
else:
msg = 'No changes made for {0[name]}'.format(ret)
elif isinstance(chg, dict):
if 'diff' in chg:
if isinstance(chg['diff'], six.string_types):
msg = 'File changed:\n{0}'.format(chg['diff'])
if all([isinstance(x, dict) for x in six.itervalues(chg)]):
if all([('old' in x and 'new' in x)
for x in six.itervalues(chg)]):
msg = 'Made the following changes:\n'
for pkg in chg:
old = chg[pkg]['old']
if not old and old not in (False, None):
old = 'absent'
new = chg[pkg]['new']
if not new and new not in (False, None):
new = 'absent'
# This must be able to handle unicode as some package names contain
# non-ascii characters like "Français" or "Español". See Issue #33605.
msg += '\'{0}\' changed from \'{1}\' to \'{2}\'\n'.format(pkg, old, new)
if not msg:
msg = six.text_type(ret['changes'])
if ret['result'] is True or ret['result'] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(six.text_type(ret)) | [
"def",
"format_log",
"(",
"ret",
")",
":",
"msg",
"=",
"''",
"if",
"isinstance",
"(",
"ret",
",",
"dict",
")",
":",
"# Looks like the ret may be a valid state return",
"if",
"'changes'",
"in",
"ret",
":",
"# Yep, looks like a valid state return",
"chg",
"=",
"ret",
"[",
"'changes'",
"]",
"if",
"not",
"chg",
":",
"if",
"ret",
"[",
"'comment'",
"]",
":",
"msg",
"=",
"ret",
"[",
"'comment'",
"]",
"else",
":",
"msg",
"=",
"'No changes made for {0[name]}'",
".",
"format",
"(",
"ret",
")",
"elif",
"isinstance",
"(",
"chg",
",",
"dict",
")",
":",
"if",
"'diff'",
"in",
"chg",
":",
"if",
"isinstance",
"(",
"chg",
"[",
"'diff'",
"]",
",",
"six",
".",
"string_types",
")",
":",
"msg",
"=",
"'File changed:\\n{0}'",
".",
"format",
"(",
"chg",
"[",
"'diff'",
"]",
")",
"if",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"dict",
")",
"for",
"x",
"in",
"six",
".",
"itervalues",
"(",
"chg",
")",
"]",
")",
":",
"if",
"all",
"(",
"[",
"(",
"'old'",
"in",
"x",
"and",
"'new'",
"in",
"x",
")",
"for",
"x",
"in",
"six",
".",
"itervalues",
"(",
"chg",
")",
"]",
")",
":",
"msg",
"=",
"'Made the following changes:\\n'",
"for",
"pkg",
"in",
"chg",
":",
"old",
"=",
"chg",
"[",
"pkg",
"]",
"[",
"'old'",
"]",
"if",
"not",
"old",
"and",
"old",
"not",
"in",
"(",
"False",
",",
"None",
")",
":",
"old",
"=",
"'absent'",
"new",
"=",
"chg",
"[",
"pkg",
"]",
"[",
"'new'",
"]",
"if",
"not",
"new",
"and",
"new",
"not",
"in",
"(",
"False",
",",
"None",
")",
":",
"new",
"=",
"'absent'",
"# This must be able to handle unicode as some package names contain",
"# non-ascii characters like \"Français\" or \"Español\". See Issue #33605.",
"msg",
"+=",
"'\\'{0}\\' changed from \\'{1}\\' to \\'{2}\\'\\n'",
".",
"format",
"(",
"pkg",
",",
"old",
",",
"new",
")",
"if",
"not",
"msg",
":",
"msg",
"=",
"six",
".",
"text_type",
"(",
"ret",
"[",
"'changes'",
"]",
")",
"if",
"ret",
"[",
"'result'",
"]",
"is",
"True",
"or",
"ret",
"[",
"'result'",
"]",
"is",
"None",
":",
"log",
".",
"info",
"(",
"msg",
")",
"else",
":",
"log",
".",
"error",
"(",
"msg",
")",
"else",
":",
"# catch unhandled data",
"log",
".",
"info",
"(",
"six",
".",
"text_type",
"(",
"ret",
")",
")"
] | 44.5 | 18.928571 |
def extern_get_type_for(self, context_handle, val):
"""Return a representation of the object's type."""
c = self._ffi.from_handle(context_handle)
obj = self._ffi.from_handle(val[0])
type_id = c.to_id(type(obj))
return TypeId(type_id) | [
"def",
"extern_get_type_for",
"(",
"self",
",",
"context_handle",
",",
"val",
")",
":",
"c",
"=",
"self",
".",
"_ffi",
".",
"from_handle",
"(",
"context_handle",
")",
"obj",
"=",
"self",
".",
"_ffi",
".",
"from_handle",
"(",
"val",
"[",
"0",
"]",
")",
"type_id",
"=",
"c",
".",
"to_id",
"(",
"type",
"(",
"obj",
")",
")",
"return",
"TypeId",
"(",
"type_id",
")"
] | 41.333333 | 6.5 |
def callback_liveIn_button_press(red_clicks, blue_clicks, green_clicks,
rc_timestamp, bc_timestamp, gc_timestamp, **kwargs): # pylint: disable=unused-argument
'Input app button pressed, so do something interesting'
if not rc_timestamp:
rc_timestamp = 0
if not bc_timestamp:
bc_timestamp = 0
if not gc_timestamp:
gc_timestamp = 0
if (rc_timestamp + bc_timestamp + gc_timestamp) < 1:
change_col = None
timestamp = 0
else:
if rc_timestamp > bc_timestamp:
change_col = "red"
timestamp = rc_timestamp
else:
change_col = "blue"
timestamp = bc_timestamp
if gc_timestamp > timestamp:
timestamp = gc_timestamp
change_col = "green"
value = {'red_clicks':red_clicks,
'blue_clicks':blue_clicks,
'green_clicks':green_clicks,
'click_colour':change_col,
'click_timestamp':timestamp,
'user':str(kwargs.get('user', 'UNKNOWN'))}
send_to_pipe_channel(channel_name="live_button_counter",
label="named_counts",
value=value)
return "Number of local clicks so far is %s red and %s blue; last change is %s at %s" % (red_clicks,
blue_clicks,
change_col,
datetime.fromtimestamp(0.001*timestamp)) | [
"def",
"callback_liveIn_button_press",
"(",
"red_clicks",
",",
"blue_clicks",
",",
"green_clicks",
",",
"rc_timestamp",
",",
"bc_timestamp",
",",
"gc_timestamp",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"if",
"not",
"rc_timestamp",
":",
"rc_timestamp",
"=",
"0",
"if",
"not",
"bc_timestamp",
":",
"bc_timestamp",
"=",
"0",
"if",
"not",
"gc_timestamp",
":",
"gc_timestamp",
"=",
"0",
"if",
"(",
"rc_timestamp",
"+",
"bc_timestamp",
"+",
"gc_timestamp",
")",
"<",
"1",
":",
"change_col",
"=",
"None",
"timestamp",
"=",
"0",
"else",
":",
"if",
"rc_timestamp",
">",
"bc_timestamp",
":",
"change_col",
"=",
"\"red\"",
"timestamp",
"=",
"rc_timestamp",
"else",
":",
"change_col",
"=",
"\"blue\"",
"timestamp",
"=",
"bc_timestamp",
"if",
"gc_timestamp",
">",
"timestamp",
":",
"timestamp",
"=",
"gc_timestamp",
"change_col",
"=",
"\"green\"",
"value",
"=",
"{",
"'red_clicks'",
":",
"red_clicks",
",",
"'blue_clicks'",
":",
"blue_clicks",
",",
"'green_clicks'",
":",
"green_clicks",
",",
"'click_colour'",
":",
"change_col",
",",
"'click_timestamp'",
":",
"timestamp",
",",
"'user'",
":",
"str",
"(",
"kwargs",
".",
"get",
"(",
"'user'",
",",
"'UNKNOWN'",
")",
")",
"}",
"send_to_pipe_channel",
"(",
"channel_name",
"=",
"\"live_button_counter\"",
",",
"label",
"=",
"\"named_counts\"",
",",
"value",
"=",
"value",
")",
"return",
"\"Number of local clicks so far is %s red and %s blue; last change is %s at %s\"",
"%",
"(",
"red_clicks",
",",
"blue_clicks",
",",
"change_col",
",",
"datetime",
".",
"fromtimestamp",
"(",
"0.001",
"*",
"timestamp",
")",
")"
] | 41.75 | 23.35 |
def generate_key_value_sequences(entry, default_value):
"""Parse a key value config entry (used in duplicate foreach)
If we have a key that look like [X-Y] we will expand it into Y-X+1 keys
:param str entry: The config line to be parsed.
:param str default_value: The default value to be used when none is available.
:return: a generator yielding dicts with 'KEY' & 'VALUE' & 'VALUE1' keys,
with eventual others 'VALUEx' (x 1 -> N) keys.
>>> rsp = list(generate_key_value_sequences("var$(/var)$,root $(/)$"))
>>> import pprint
>>> pprint.pprint(rsp)
[{'KEY': 'var', 'VALUE': '/var', 'VALUE1': '/var'},
{'KEY': 'root', 'VALUE': '/', 'VALUE1': '/'}]
"""
no_one_yielded = True
for value in entry.split(','):
value = value.strip()
if not value:
continue
full_match = KEY_VALUES_REGEX.match(value)
if full_match is None:
raise KeyValueSyntaxError("%r is an invalid key(-values) pattern" % value)
key = full_match.group(1)
tmp = {'KEY': key}
values = full_match.group(2)
if values: # there is, at least, one value provided
for idx, value_match in enumerate(VALUE_REGEX.finditer(values), 1):
tmp['VALUE%s' % idx] = value_match.group(1)
else: # no value provided for this key, use the default provided:
tmp['VALUE1'] = default_value
tmp['VALUE'] = tmp['VALUE1'] # alias from VALUE -> VALUE1
for subkey in expand_ranges(key):
current = tmp.copy()
current['KEY'] = subkey
yield current
no_one_yielded = False
if no_one_yielded:
raise KeyValueSyntaxError('At least one key must be present') | [
"def",
"generate_key_value_sequences",
"(",
"entry",
",",
"default_value",
")",
":",
"no_one_yielded",
"=",
"True",
"for",
"value",
"in",
"entry",
".",
"split",
"(",
"','",
")",
":",
"value",
"=",
"value",
".",
"strip",
"(",
")",
"if",
"not",
"value",
":",
"continue",
"full_match",
"=",
"KEY_VALUES_REGEX",
".",
"match",
"(",
"value",
")",
"if",
"full_match",
"is",
"None",
":",
"raise",
"KeyValueSyntaxError",
"(",
"\"%r is an invalid key(-values) pattern\"",
"%",
"value",
")",
"key",
"=",
"full_match",
".",
"group",
"(",
"1",
")",
"tmp",
"=",
"{",
"'KEY'",
":",
"key",
"}",
"values",
"=",
"full_match",
".",
"group",
"(",
"2",
")",
"if",
"values",
":",
"# there is, at least, one value provided",
"for",
"idx",
",",
"value_match",
"in",
"enumerate",
"(",
"VALUE_REGEX",
".",
"finditer",
"(",
"values",
")",
",",
"1",
")",
":",
"tmp",
"[",
"'VALUE%s'",
"%",
"idx",
"]",
"=",
"value_match",
".",
"group",
"(",
"1",
")",
"else",
":",
"# no value provided for this key, use the default provided:",
"tmp",
"[",
"'VALUE1'",
"]",
"=",
"default_value",
"tmp",
"[",
"'VALUE'",
"]",
"=",
"tmp",
"[",
"'VALUE1'",
"]",
"# alias from VALUE -> VALUE1",
"for",
"subkey",
"in",
"expand_ranges",
"(",
"key",
")",
":",
"current",
"=",
"tmp",
".",
"copy",
"(",
")",
"current",
"[",
"'KEY'",
"]",
"=",
"subkey",
"yield",
"current",
"no_one_yielded",
"=",
"False",
"if",
"no_one_yielded",
":",
"raise",
"KeyValueSyntaxError",
"(",
"'At least one key must be present'",
")"
] | 43.1 | 18.85 |
def _get_filtered_stmts(self, _, node, _stmts, mystmt):
"""method used in _filter_stmts to get statements and trigger break"""
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False | [
"def",
"_get_filtered_stmts",
"(",
"self",
",",
"_",
",",
"node",
",",
"_stmts",
",",
"mystmt",
")",
":",
"if",
"self",
".",
"statement",
"(",
")",
"is",
"mystmt",
":",
"# original node's statement is the assignment, only keep",
"# current node (gen exp, list comp)",
"return",
"[",
"node",
"]",
",",
"True",
"return",
"_stmts",
",",
"False"
] | 49.285714 | 10.428571 |
def _SanitizeField(self, field):
"""Sanitizes a field for output.
This method removes the field delimiter from the field string.
Args:
field (str): field value.
Returns:
str: formatted field value.
"""
if self._FIELD_DELIMITER and isinstance(field, py2to3.STRING_TYPES):
return field.replace(self._FIELD_DELIMITER, ' ')
return field | [
"def",
"_SanitizeField",
"(",
"self",
",",
"field",
")",
":",
"if",
"self",
".",
"_FIELD_DELIMITER",
"and",
"isinstance",
"(",
"field",
",",
"py2to3",
".",
"STRING_TYPES",
")",
":",
"return",
"field",
".",
"replace",
"(",
"self",
".",
"_FIELD_DELIMITER",
",",
"' '",
")",
"return",
"field"
] | 26.428571 | 21.214286 |
def randomize_es(es_queryset):
"""Randomize an elasticsearch queryset."""
return es_queryset.query(
query.FunctionScore(
functions=[function.RandomScore()]
)
).sort("-_score") | [
"def",
"randomize_es",
"(",
"es_queryset",
")",
":",
"return",
"es_queryset",
".",
"query",
"(",
"query",
".",
"FunctionScore",
"(",
"functions",
"=",
"[",
"function",
".",
"RandomScore",
"(",
")",
"]",
")",
")",
".",
"sort",
"(",
"\"-_score\"",
")"
] | 29.857143 | 12.714286 |
def create_experiment(args):
'''start a new experiment'''
config_file_name = ''.join(random.sample(string.ascii_letters + string.digits, 8))
nni_config = Config(config_file_name)
config_path = os.path.abspath(args.config)
if not os.path.exists(config_path):
print_error('Please set correct config path!')
exit(1)
experiment_config = get_yml_content(config_path)
validate_all_content(experiment_config, config_path)
nni_config.set_config('experimentConfig', experiment_config)
launch_experiment(args, experiment_config, 'new', config_file_name)
nni_config.set_config('restServerPort', args.port) | [
"def",
"create_experiment",
"(",
"args",
")",
":",
"config_file_name",
"=",
"''",
".",
"join",
"(",
"random",
".",
"sample",
"(",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
",",
"8",
")",
")",
"nni_config",
"=",
"Config",
"(",
"config_file_name",
")",
"config_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"config",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config_path",
")",
":",
"print_error",
"(",
"'Please set correct config path!'",
")",
"exit",
"(",
"1",
")",
"experiment_config",
"=",
"get_yml_content",
"(",
"config_path",
")",
"validate_all_content",
"(",
"experiment_config",
",",
"config_path",
")",
"nni_config",
".",
"set_config",
"(",
"'experimentConfig'",
",",
"experiment_config",
")",
"launch_experiment",
"(",
"args",
",",
"experiment_config",
",",
"'new'",
",",
"config_file_name",
")",
"nni_config",
".",
"set_config",
"(",
"'restServerPort'",
",",
"args",
".",
"port",
")"
] | 45.571429 | 17.857143 |
def GetVectorAsNumpy(self, flags, off):
"""
GetVectorAsNumpy returns the vector that starts at `Vector(off)`
as a numpy array with the type specified by `flags`. The array is
a `view` into Bytes, so modifying the returned array will
modify Bytes in place.
"""
offset = self.Vector(off)
length = self.VectorLen(off) # TODO: length accounts for bytewidth, right?
numpy_dtype = N.to_numpy_type(flags)
return encode.GetVectorAsNumpy(numpy_dtype, self.Bytes, length, offset) | [
"def",
"GetVectorAsNumpy",
"(",
"self",
",",
"flags",
",",
"off",
")",
":",
"offset",
"=",
"self",
".",
"Vector",
"(",
"off",
")",
"length",
"=",
"self",
".",
"VectorLen",
"(",
"off",
")",
"# TODO: length accounts for bytewidth, right?",
"numpy_dtype",
"=",
"N",
".",
"to_numpy_type",
"(",
"flags",
")",
"return",
"encode",
".",
"GetVectorAsNumpy",
"(",
"numpy_dtype",
",",
"self",
".",
"Bytes",
",",
"length",
",",
"offset",
")"
] | 49 | 17.545455 |
def showMessageOverlay(self, pchText, pchCaption, pchButton0Text, pchButton1Text, pchButton2Text, pchButton3Text):
"""Show the message overlay. This will block and return you a result."""
fn = self.function_table.showMessageOverlay
result = fn(pchText, pchCaption, pchButton0Text, pchButton1Text, pchButton2Text, pchButton3Text)
return result | [
"def",
"showMessageOverlay",
"(",
"self",
",",
"pchText",
",",
"pchCaption",
",",
"pchButton0Text",
",",
"pchButton1Text",
",",
"pchButton2Text",
",",
"pchButton3Text",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"showMessageOverlay",
"result",
"=",
"fn",
"(",
"pchText",
",",
"pchCaption",
",",
"pchButton0Text",
",",
"pchButton1Text",
",",
"pchButton2Text",
",",
"pchButton3Text",
")",
"return",
"result"
] | 61.666667 | 34.666667 |
def get_dates_in_period(start=None, top=None, step=1, step_dict={}):
"""Return a list of dates from the `start` to `top`."""
delta = relativedelta(**step_dict) if step_dict else timedelta(days=step)
start = start or datetime.today()
top = top or start + delta
dates = []
current = start
while current <= top:
dates.append(current)
current += delta
return dates | [
"def",
"get_dates_in_period",
"(",
"start",
"=",
"None",
",",
"top",
"=",
"None",
",",
"step",
"=",
"1",
",",
"step_dict",
"=",
"{",
"}",
")",
":",
"delta",
"=",
"relativedelta",
"(",
"*",
"*",
"step_dict",
")",
"if",
"step_dict",
"else",
"timedelta",
"(",
"days",
"=",
"step",
")",
"start",
"=",
"start",
"or",
"datetime",
".",
"today",
"(",
")",
"top",
"=",
"top",
"or",
"start",
"+",
"delta",
"dates",
"=",
"[",
"]",
"current",
"=",
"start",
"while",
"current",
"<=",
"top",
":",
"dates",
".",
"append",
"(",
"current",
")",
"current",
"+=",
"delta",
"return",
"dates"
] | 30.615385 | 20.846154 |