text (string, lengths 89–104k) | code_tokens (sequence) | avg_line_len (float64, 7.91–980) | score (float64, 0–630)
---|---|---|---|
def trailing_stop_loss_replace(self, accountID, orderID, **kwargs):
"""
Shortcut to replace a pending Trailing Stop Loss Order in an Account
Args:
accountID : The ID of the Account
orderID : The ID of the Trailing Stop Loss Order to replace
kwargs : The arguments to create a TrailingStopLossOrderRequest
Returns:
v20.response.Response containing the results from submitting
the request
"""
return self.replace(
accountID,
orderID,
order=TrailingStopLossOrderRequest(**kwargs)
) | [
"def",
"trailing_stop_loss_replace",
"(",
"self",
",",
"accountID",
",",
"orderID",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"replace",
"(",
"accountID",
",",
"orderID",
",",
"order",
"=",
"TrailingStopLossOrderRequest",
"(",
"*",
"*",
"kwargs",
")",
")"
] | 33.777778 | 22.444444 |
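For context, a usage sketch of the shortcut above. The client object, the account and order IDs, and the `distance` value are illustrative assumptions, not part of the sample; the keyword arguments are simply forwarded to `TrailingStopLossOrderRequest`.

```python
# Hypothetical call against a configured v20-style API context; all IDs
# and values below are made up for illustration.
response = api.order.trailing_stop_loss_replace(
    "101-004-1234567-001",   # accountID
    "42",                    # orderID of the pending trailing stop loss order
    tradeID="41",
    distance="0.0050",       # trail the stop 50 pips behind the market price
)
print(response.status)
```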
def set_site():
"""
This method is part of the prepare_data helper.
Sets the site. Default implementation uses localhost.
For production settings, refine this helper.
"""
from django.contrib.sites.models import Site
from django.conf import settings
# Initially set localhost as default domain
#
site = Site.objects.get(id=settings.SITE_ID)
site.domain = 'http://localhost:8000'
site.name = 'http://localhost:8000'
site.save() | [
"def",
"set_site",
"(",
")",
":",
"from",
"django",
".",
"contrib",
".",
"sites",
".",
"models",
"import",
"Site",
"from",
"django",
".",
"conf",
"import",
"settings",
"# Initially set localhost as default domain",
"#",
"site",
"=",
"Site",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"settings",
".",
"SITE_ID",
")",
"site",
".",
"domain",
"=",
"'http://localhost:8000'",
"site",
".",
"name",
"=",
"'http://localhost:8000'",
"site",
".",
"save",
"(",
")"
] | 31.666667 | 11.8 |
def send_ready(self):
"""
Returns true if data can be written to this channel without blocking.
This means the channel is either closed (so any write attempt would
return immediately) or there is at least one byte of space in the
outbound buffer. If there is at least one byte of space in the
outbound buffer, a `send` call will succeed immediately and return
the number of bytes actually written.
:return:
``True`` if a `send` call on this channel would immediately succeed
or fail
"""
self.lock.acquire()
try:
if self.closed or self.eof_sent:
return True
return self.out_window_size > 0
finally:
self.lock.release() | [
"def",
"send_ready",
"(",
"self",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"if",
"self",
".",
"closed",
"or",
"self",
".",
"eof_sent",
":",
"return",
"True",
"return",
"self",
".",
"out_window_size",
">",
"0",
"finally",
":",
"self",
".",
"lock",
".",
"release",
"(",
")"
] | 38.55 | 20.55 |
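A sketch of the polling pattern this check supports; `chan` is assumed to be an open channel object exposing `send_ready` and `send` as above.

```python
import time

def drain(chan, payload: bytes) -> None:
    # Illustrative non-blocking write loop: send_ready() only promises that
    # the *next* send() will not block, so we re-check before every write.
    while payload:
        if chan.send_ready():
            sent = chan.send(payload)  # number of bytes actually written
            payload = payload[sent:]
        else:
            time.sleep(0.05)           # outbound window full; back off briefly
```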
def delete_additional_charge(self, recurring_billing_id):
"""
Remove an extra charge from an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
The response from the delete request.
"""
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._delete(self.url + fmt, headers=self.get_headers()) | [
"def",
"delete_additional_charge",
"(",
"self",
",",
"recurring_billing_id",
")",
":",
"fmt",
"=",
"'recurringBillItems/{}'",
".",
"format",
"(",
"recurring_billing_id",
")",
"return",
"self",
".",
"client",
".",
"_delete",
"(",
"self",
".",
"url",
"+",
"fmt",
",",
"headers",
"=",
"self",
".",
"get_headers",
"(",
")",
")"
] | 30.75 | 24.083333 |
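A minimal usage sketch. `recurring_api` stands in for whatever client object exposes this method, and the identifier is made up.

```python
# Hypothetical: removes the extra charge from the recurring invoice and
# returns the HTTP response from the underlying _delete call.
response = recurring_api.delete_additional_charge("rbi_12345")
```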
def sign(self, data, nonce = None):
"""
Sign data using the Montgomery private key stored by this XEdDSA instance.
:param data: A bytes-like object containing the data to sign.
:param nonce: A bytes-like object with length 64 or None. If the nonce
parameter is None, a new nonce is generated and used.
:returns: A bytes-like object encoding the signature with length SIGNATURE_SIZE.
:raises MissingKeyException: If the Montgomery private key is not available.
"""
cls = self.__class__
if not self.__mont_priv:
raise MissingKeyException(
"Cannot sign using this XEdDSA instance, Montgomery private key missing."
)
if not isinstance(data, bytes):
raise TypeError("The data parameter must be a bytes-like object.")
if nonce == None:
nonce = os.urandom(64)
if not isinstance(nonce, bytes):
raise TypeError("Wrong type passed for the nonce parameter.")
if len(nonce) != 64:
raise ValueError("Invalid value passed for the nonce parameter.")
ed_priv, ed_pub = cls._mont_priv_to_ed_pair(bytearray(self.__mont_priv))
return bytes(cls._sign(
bytearray(data),
bytearray(nonce),
ed_priv,
ed_pub
)) | [
"def",
"sign",
"(",
"self",
",",
"data",
",",
"nonce",
"=",
"None",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"if",
"not",
"self",
".",
"__mont_priv",
":",
"raise",
"MissingKeyException",
"(",
"\"Cannot sign using this XEdDSA instance, Montgomery private key missing.\"",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"\"The data parameter must be a bytes-like object.\"",
")",
"if",
"nonce",
"==",
"None",
":",
"nonce",
"=",
"os",
".",
"urandom",
"(",
"64",
")",
"if",
"not",
"isinstance",
"(",
"nonce",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"\"Wrong type passed for the nonce parameter.\"",
")",
"if",
"len",
"(",
"nonce",
")",
"!=",
"64",
":",
"raise",
"ValueError",
"(",
"\"Invalid value passed for the nonce parameter.\"",
")",
"ed_priv",
",",
"ed_pub",
"=",
"cls",
".",
"_mont_priv_to_ed_pair",
"(",
"bytearray",
"(",
"self",
".",
"__mont_priv",
")",
")",
"return",
"bytes",
"(",
"cls",
".",
"_sign",
"(",
"bytearray",
"(",
"data",
")",
",",
"bytearray",
"(",
"nonce",
")",
",",
"ed_priv",
",",
"ed_pub",
")",
")"
] | 33.225 | 26.275 |
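A short sketch of the calling convention, assuming a concrete subclass (here called `XEdDSA25519`) constructed from a Montgomery private key; the subclass name, constructor signature, and key bytes are all assumptions.

```python
import os

# Hypothetical setup: XEdDSA25519 is assumed to be a concrete subclass of
# the class above, and the 32-byte Montgomery private key is a placeholder.
mont_priv_bytes = os.urandom(32)
xeddsa = XEdDSA25519(mont_priv=mont_priv_bytes)

message = b"attack at dawn"

# An explicit 64-byte nonce; omitting it lets sign() draw one from
# os.urandom(64), as the body above shows.
signature = xeddsa.sign(message, os.urandom(64))
```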
def keyPressEvent( self, event ):
"""
Handles the Ctrl+C/Ctrl+V events for copy & paste.
:param event | <QKeyEvent>
"""
if ( event.key() == Qt.Key_C and \
event.modifiers() == Qt.ControlModifier ):
self.copy()
event.accept()
return
elif ( event.key() == Qt.Key_V and \
event.modifiers() == Qt.ControlModifier ):
self.paste()
event.accept()
return
elif ( event.key() == Qt.Key_Delete ):
indexes = map(self.row, self.selectedItems())
for index in reversed(sorted(indexes)):
self.takeItem(index)
event.accept()
return
elif event.key() == Qt.Key_Backspace:
if self.count() > 1:
self.takeItem(self.count() - 2)
self.setFocus()
super(XMultiTagEdit, self).keyPressEvent(event) | [
"def",
"keyPressEvent",
"(",
"self",
",",
"event",
")",
":",
"if",
"(",
"event",
".",
"key",
"(",
")",
"==",
"Qt",
".",
"Key_C",
"and",
"event",
".",
"modifiers",
"(",
")",
"==",
"Qt",
".",
"ControlModifier",
")",
":",
"self",
".",
"copy",
"(",
")",
"event",
".",
"accept",
"(",
")",
"return",
"elif",
"(",
"event",
".",
"key",
"(",
")",
"==",
"Qt",
".",
"Key_V",
"and",
"event",
".",
"modifiers",
"(",
")",
"==",
"Qt",
".",
"ControlModifier",
")",
":",
"self",
".",
"paste",
"(",
")",
"event",
".",
"accept",
"(",
")",
"return",
"elif",
"(",
"event",
".",
"key",
"(",
")",
"==",
"Qt",
".",
"Key_Delete",
")",
":",
"indexes",
"=",
"map",
"(",
"self",
".",
"row",
",",
"self",
".",
"selectedItems",
"(",
")",
")",
"for",
"index",
"in",
"reversed",
"(",
"sorted",
"(",
"indexes",
")",
")",
":",
"self",
".",
"takeItem",
"(",
"index",
")",
"event",
".",
"accept",
"(",
")",
"return",
"elif",
"event",
".",
"key",
"(",
")",
"==",
"Qt",
".",
"Key_Backspace",
":",
"if",
"self",
".",
"count",
"(",
")",
">",
"1",
":",
"self",
".",
"takeItem",
"(",
"self",
".",
"count",
"(",
")",
"-",
"2",
")",
"self",
".",
"setFocus",
"(",
")",
"super",
"(",
"XMultiTagEdit",
",",
"self",
")",
".",
"keyPressEvent",
"(",
"event",
")"
] | 30.875 | 14.625 |
def gravitational_force(position_a, mass_a, position_b, mass_b):
"""Returns the gravitational force between the two bodies a and b."""
distance = distance_between(position_a, position_b)
# Calculate the direction and magnitude of the force.
angle = math.atan2(position_a[1] - position_b[1], position_a[0] - position_b[0])
magnitude = G * mass_a * mass_b / (distance**2)
# Find the x and y components of the force.
# Determine sign based on which one is the larger body.
sign = -1 if mass_b > mass_a else 1
x_force = sign * magnitude * math.cos(angle)
y_force = sign * magnitude * math.sin(angle)
return x_force, y_force | [
"def",
"gravitational_force",
"(",
"position_a",
",",
"mass_a",
",",
"position_b",
",",
"mass_b",
")",
":",
"distance",
"=",
"distance_between",
"(",
"position_a",
",",
"position_b",
")",
"# Calculate the direction and magnitude of the force.",
"angle",
"=",
"math",
".",
"atan2",
"(",
"position_a",
"[",
"1",
"]",
"-",
"position_b",
"[",
"1",
"]",
",",
"position_a",
"[",
"0",
"]",
"-",
"position_b",
"[",
"0",
"]",
")",
"magnitude",
"=",
"G",
"*",
"mass_a",
"*",
"mass_b",
"/",
"(",
"distance",
"**",
"2",
")",
"# Find the x and y components of the force.",
"# Determine sign based on which one is the larger body.",
"sign",
"=",
"-",
"1",
"if",
"mass_b",
">",
"mass_a",
"else",
"1",
"x_force",
"=",
"sign",
"*",
"magnitude",
"*",
"math",
".",
"cos",
"(",
"angle",
")",
"y_force",
"=",
"sign",
"*",
"magnitude",
"*",
"math",
".",
"sin",
"(",
"angle",
")",
"return",
"x_force",
",",
"y_force"
] | 46.571429 | 17.642857 |
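A numeric check of the function above. `G` and `distance_between` live elsewhere in the source module, so stand-ins are supplied here; the masses model the Earth and a 1 kg body one Earth radius away.

```python
import math

G = 6.674e-11  # gravitational constant, m^3 kg^-1 s^-2 (stand-in definition)

def distance_between(a, b):
    # Euclidean distance; stands in for the module's own helper.
    return math.hypot(a[0] - b[0], a[1] - b[1])

# Earth-mass body at the origin, 1 kg test mass on the x-axis.
fx, fy = gravitational_force((0.0, 0.0), 5.972e24, (6.371e6, 0.0), 1.0)
print(fx, fy)  # fx ≈ -9.82, fy ≈ 0: one Earth surface gravity in magnitude
```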
def determine_override_options(selected_options: tuple, override_opts: DictLike, set_of_possible_options: tuple = ()) -> Dict[str, Any]:
""" Recursively extract the dict described in override_options().
In particular, this searches for selected options in the override_opts dict. It stores only
the override options that are selected.
Args:
selected_options: The options selected for this analysis, in the order defined and used
with ``override_options()`` and in the configuration file.
override_opts: dict-like object returned by ruamel.yaml which contains the options that
should be used to override the configuration options.
set_of_possible_options (tuple of enums): Possible options for the override value categories.
"""
override_dict: Dict[str, Any] = {}
for option in override_opts:
# We need to cast the option to a string to effectively compare to the selected option,
# since only some of the options will already be strings
if str(option) in list(map(lambda opt: str(opt), selected_options)):
override_dict.update(determine_override_options(selected_options, override_opts[option], set_of_possible_options))
else:
logger.debug(f"override_opts: {override_opts}")
# Look for whether the key is one of the possible but unselected options.
# If so, we haven't selected it for this analysis, and therefore they should be ignored.
# NOTE: We compare both the names and value because sometimes the name is not sufficient,
# such as in the case of the energy (because a number is not allowed to be a field name.)
found_as_possible_option = False
for possible_options in set_of_possible_options:
# Same type of comparison as above, but for all possible options instead of the selected
# options.
if str(option) in list(map(lambda opt: str(opt), possible_options)):
found_as_possible_option = True
# Below is more or less equivalent to the above (although .str() hides the details of
# whether we should compare to the name or the value in the enum and only compares against
# the designated value).
#for possible_opt in possible_options:
#if possible_opt.name == option or possible_opt.value == option:
# found_as_possible_option = True
if not found_as_possible_option:
# Store the override value, since it doesn't correspond with a selected option or a possible
# option and therefore must be an option that we want to override.
logger.debug(f"Storing override option \"{option}\", with value \"{override_opts[option]}\"")
override_dict[option] = override_opts[option]
else:
logger.debug(f"Found option \"{option}\" as possible option, so skipping!")
return override_dict | [
"def",
"determine_override_options",
"(",
"selected_options",
":",
"tuple",
",",
"override_opts",
":",
"DictLike",
",",
"set_of_possible_options",
":",
"tuple",
"=",
"(",
")",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"override_dict",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"}",
"for",
"option",
"in",
"override_opts",
":",
"# We need to cast the option to a string to effectively compare to the selected option,",
"# since only some of the options will already be strings",
"if",
"str",
"(",
"option",
")",
"in",
"list",
"(",
"map",
"(",
"lambda",
"opt",
":",
"str",
"(",
"opt",
")",
",",
"selected_options",
")",
")",
":",
"override_dict",
".",
"update",
"(",
"determine_override_options",
"(",
"selected_options",
",",
"override_opts",
"[",
"option",
"]",
",",
"set_of_possible_options",
")",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"override_opts: {override_opts}\"",
")",
"# Look for whether the key is one of the possible but unselected options.",
"# If so, we haven't selected it for this analysis, and therefore they should be ignored.",
"# NOTE: We compare both the names and value because sometimes the name is not sufficient,",
"# such as in the case of the energy (because a number is not allowed to be a field name.)",
"found_as_possible_option",
"=",
"False",
"for",
"possible_options",
"in",
"set_of_possible_options",
":",
"# Same type of comparison as above, but for all possible options instead of the selected",
"# options.",
"if",
"str",
"(",
"option",
")",
"in",
"list",
"(",
"map",
"(",
"lambda",
"opt",
":",
"str",
"(",
"opt",
")",
",",
"possible_options",
")",
")",
":",
"found_as_possible_option",
"=",
"True",
"# Below is more or less equivalent to the above (although .str() hides the details or",
"# whether we should compare to the name or the value in the enum and only compares against",
"# the designated value).",
"#for possible_opt in possible_options:",
"#if possible_opt.name == option or possible_opt.value == option:",
"# found_as_possible_option = True",
"if",
"not",
"found_as_possible_option",
":",
"# Store the override value, since it doesn't correspond with a selected option or a possible",
"# option and therefore must be an option that we want to override.",
"logger",
".",
"debug",
"(",
"f\"Storing override option \\\"{option}\\\", with value \\\"{override_opts[option]}\\\"\"",
")",
"override_dict",
"[",
"option",
"]",
"=",
"override_opts",
"[",
"option",
"]",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"Found option \\\"{option}\\\" as possible option, so skipping!\"",
")",
"return",
"override_dict"
] | 64.319149 | 36.361702 |
def activate_left(self, token):
"""Make a copy of the received token and call `_activate_left`."""
watchers.MATCHER.debug(
"Node <%s> activated left with token %r", self, token)
return self._activate_left(token.copy()) | [
"def",
"activate_left",
"(",
"self",
",",
"token",
")",
":",
"watchers",
".",
"MATCHER",
".",
"debug",
"(",
"\"Node <%s> activated left with token %r\"",
",",
"self",
",",
"token",
")",
"return",
"self",
".",
"_activate_left",
"(",
"token",
".",
"copy",
"(",
")",
")"
] | 50 | 10.4 |
def extract_bad_ami(e):
"""Handle various client side errors when describing images"""
msg = e.response['Error']['Message']
error = e.response['Error']['Code']
e_ami_ids = None
if error == 'InvalidAMIID.NotFound':
e_ami_ids = [
e_ami_id.strip() for e_ami_id
in msg[msg.find("'[") + 2:msg.rfind("]'")].split(',')]
log.warning("Image not found %s" % e_ami_ids)
elif error == 'InvalidAMIID.Malformed':
e_ami_ids = [msg[msg.find('"') + 1:msg.rfind('"')]]
log.warning("Image id malformed %s" % e_ami_ids)
return e_ami_ids | [
"def",
"extract_bad_ami",
"(",
"e",
")",
":",
"msg",
"=",
"e",
".",
"response",
"[",
"'Error'",
"]",
"[",
"'Message'",
"]",
"error",
"=",
"e",
".",
"response",
"[",
"'Error'",
"]",
"[",
"'Code'",
"]",
"e_ami_ids",
"=",
"None",
"if",
"error",
"==",
"'InvalidAMIID.NotFound'",
":",
"e_ami_ids",
"=",
"[",
"e_ami_id",
".",
"strip",
"(",
")",
"for",
"e_ami_id",
"in",
"msg",
"[",
"msg",
".",
"find",
"(",
"\"'[\"",
")",
"+",
"2",
":",
"msg",
".",
"rfind",
"(",
"\"]'\"",
")",
"]",
".",
"split",
"(",
"','",
")",
"]",
"log",
".",
"warning",
"(",
"\"Image not found %s\"",
"%",
"e_ami_ids",
")",
"elif",
"error",
"==",
"'InvalidAMIID.Malformed'",
":",
"e_ami_ids",
"=",
"[",
"msg",
"[",
"msg",
".",
"find",
"(",
"'\"'",
")",
"+",
"1",
":",
"msg",
".",
"rfind",
"(",
"'\"'",
")",
"]",
"]",
"log",
".",
"warning",
"(",
"\"Image id malformed %s\"",
"%",
"e_ami_ids",
")",
"return",
"e_ami_ids"
] | 45.642857 | 12.642857 |
def _reduce_dynamic_table(self, new_entry_size=0):
# type: (int) -> None
"""_reduce_dynamic_table evicts entries from the dynamic table until it
fits in less than the current size limit. The optional parameter,
new_entry_size, allows the resize to happen so that a new entry of this
size fits in.
@param int new_entry_size: if called before adding a new entry, the size of the new entry in bytes (following # noqa: E501
the RFC7541 definition of the size of an entry)
@raise AssertionError
"""
assert(new_entry_size >= 0)
cur_sz = len(self)
dyn_tbl_sz = len(self._dynamic_table)
while dyn_tbl_sz > 0 and cur_sz + new_entry_size > self._dynamic_table_max_size: # noqa: E501
last_elmt_sz = len(self._dynamic_table[-1])
self._dynamic_table.pop()
dyn_tbl_sz -= 1
cur_sz -= last_elmt_sz | [
"def",
"_reduce_dynamic_table",
"(",
"self",
",",
"new_entry_size",
"=",
"0",
")",
":",
"# type: (int) -> None",
"assert",
"(",
"new_entry_size",
">=",
"0",
")",
"cur_sz",
"=",
"len",
"(",
"self",
")",
"dyn_tbl_sz",
"=",
"len",
"(",
"self",
".",
"_dynamic_table",
")",
"while",
"dyn_tbl_sz",
">",
"0",
"and",
"cur_sz",
"+",
"new_entry_size",
">",
"self",
".",
"_dynamic_table_max_size",
":",
"# noqa: E501",
"last_elmt_sz",
"=",
"len",
"(",
"self",
".",
"_dynamic_table",
"[",
"-",
"1",
"]",
")",
"self",
".",
"_dynamic_table",
".",
"pop",
"(",
")",
"dyn_tbl_sz",
"-=",
"1",
"cur_sz",
"-=",
"last_elmt_sz"
] | 51 | 19.555556 |
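The size accounting the eviction loop relies on comes from RFC 7541 §4.1: each entry costs the octet lengths of its name and value plus a fixed 32-octet overhead. A small illustration:

```python
def hpack_entry_size(name: bytes, value: bytes) -> int:
    # RFC 7541 section 4.1: size = len(name) + len(value) + 32 octets.
    return len(name) + len(value) + 32

# With a 4096-octet table limit, _reduce_dynamic_table(new_entry_size=55)
# would pop the oldest entries until current_size + 55 <= 4096.
print(hpack_entry_size(b"custom-key", b"custom-header"))  # 10 + 13 + 32 = 55
```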
def interprocess_locked(path):
"""Acquires & releases a interprocess lock around call into
decorated function."""
lock = InterProcessLock(path)
def decorator(f):
@six.wraps(f)
def wrapper(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return wrapper
return decorator | [
"def",
"interprocess_locked",
"(",
"path",
")",
":",
"lock",
"=",
"InterProcessLock",
"(",
"path",
")",
"def",
"decorator",
"(",
"f",
")",
":",
"@",
"six",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"lock",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"decorator"
] | 21.1875 | 19.6875 |
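A usage sketch; the lock-file path and function body are illustrative.

```python
# Hypothetical: only one process at a time may run the decorated body,
# coordinated through the lock file on disk.
@interprocess_locked('/tmp/myapp-cache.lock')
def update_cache():
    rebuild_index()  # placeholder for the protected critical section
```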
def parse(obj, required_properties=None, additional_properties=None,
ignore_optional_property_errors=None):
"""Try to parse the given ``obj`` as a validator instance.
:param obj: The object to be parsed. If it is a...:
- :py:class:`Validator` instance, return it.
- :py:class:`Validator` subclass, instantiate it without arguments and
return it.
- :py:attr:`~Validator.name` of a known :py:class:`Validator` subclass,
instantiate the subclass without arguments and return it.
- otherwise find the first registered :py:class:`Validator` factory that
can create it. The search order is the reverse of the factory registration
order. The caller is responsible for ensuring there are no ambiguous
values that can be parsed by more than one factory.
:param required_properties: Specifies for this parse call whether parsed
:py:class:`~valideer.validators.Object` properties are required or
optional by default. It can be:
- ``True`` for required.
- ``False`` for optional.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.REQUIRED_PROPERTIES` attribute.
:param additional_properties: Specifies for this parse call the schema of
all :py:class:`~valideer.validators.Object` properties that are not
explicitly defined as optional or required. It can also be:
- ``True`` to allow any value for additional properties.
- ``False`` to disallow any additional properties.
- :py:attr:`~valideer.validators.Object.REMOVE` to remove any additional
properties from the adapted object.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.ADDITIONAL_PROPERTIES` attribute.
:param ignore_optional_property_errors: Determines if invalid optional
properties are ignored:
- ``True`` to ignore invalid optional properties.
- ``False`` to raise ValidationError for invalid optional properties.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.IGNORE_OPTIONAL_PROPERTY_ERRORS`
attribute.
:raises SchemaError: If no appropriate validator could be found.
.. warning:: Passing ``required_properties`` and/or ``additional_properties``
with value other than ``None`` may be non-intuitive for schemas that
involve nested validators. Take for example the following schema::
v = V.parse({
"x": "integer",
"child": V.Nullable({
"y": "integer"
})
}, required_properties=True)
Here the top-level properties 'x' and 'child' are required but the nested
'y' property is not. This is because by the time :py:meth:`parse` is called,
:py:class:`~valideer.validators.Nullable` has already parsed its argument
with the default value of ``required_properties``. Several other builtin
validators work similarly to :py:class:`~valideer.validators.Nullable`,
accepting one or more schemas to parse. In order to parse an arbitrarily
complex nested validator with the same value for ``required_properties``
and/or ``additional_properties``, use the :py:func:`parsing` context
manager instead::
with V.parsing(required_properties=True):
v = V.parse({
"x": "integer",
"child": V.Nullable({
"y": "integer"
})
})
"""
if not (required_properties is
additional_properties is
ignore_optional_property_errors is None):
with parsing(required_properties=required_properties,
additional_properties=additional_properties,
ignore_optional_property_errors=ignore_optional_property_errors):
return parse(obj)
validator = None
if isinstance(obj, Validator):
validator = obj
elif inspect.isclass(obj) and issubclass(obj, Validator):
validator = obj()
else:
try:
validator = _NAMED_VALIDATORS[obj]
except (KeyError, TypeError):
for factory in _VALIDATOR_FACTORIES:
validator = factory(obj)
if validator is not None:
break
else:
if inspect.isclass(validator) and issubclass(validator, Validator):
_NAMED_VALIDATORS[obj] = validator = validator()
if not isinstance(validator, Validator):
raise SchemaError("%r cannot be parsed as a Validator" % obj)
return validator | [
"def",
"parse",
"(",
"obj",
",",
"required_properties",
"=",
"None",
",",
"additional_properties",
"=",
"None",
",",
"ignore_optional_property_errors",
"=",
"None",
")",
":",
"if",
"not",
"(",
"required_properties",
"is",
"additional_properties",
"is",
"ignore_optional_property_errors",
"is",
"None",
")",
":",
"with",
"parsing",
"(",
"required_properties",
"=",
"required_properties",
",",
"additional_properties",
"=",
"additional_properties",
",",
"ignore_optional_property_errors",
"=",
"ignore_optional_property_errors",
")",
":",
"return",
"parse",
"(",
"obj",
")",
"validator",
"=",
"None",
"if",
"isinstance",
"(",
"obj",
",",
"Validator",
")",
":",
"validator",
"=",
"obj",
"elif",
"inspect",
".",
"isclass",
"(",
"obj",
")",
"and",
"issubclass",
"(",
"obj",
",",
"Validator",
")",
":",
"validator",
"=",
"obj",
"(",
")",
"else",
":",
"try",
":",
"validator",
"=",
"_NAMED_VALIDATORS",
"[",
"obj",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"for",
"factory",
"in",
"_VALIDATOR_FACTORIES",
":",
"validator",
"=",
"factory",
"(",
"obj",
")",
"if",
"validator",
"is",
"not",
"None",
":",
"break",
"else",
":",
"if",
"inspect",
".",
"isclass",
"(",
"validator",
")",
"and",
"issubclass",
"(",
"validator",
",",
"Validator",
")",
":",
"_NAMED_VALIDATORS",
"[",
"obj",
"]",
"=",
"validator",
"=",
"validator",
"(",
")",
"if",
"not",
"isinstance",
"(",
"validator",
",",
"Validator",
")",
":",
"raise",
"SchemaError",
"(",
"\"%r cannot be parsed as a Validator\"",
"%",
"obj",
")",
"return",
"validator"
] | 43.811321 | 24.216981 |
def compute_attention_component(antecedent,
total_depth,
filter_width=1,
padding="VALID",
name="c",
vars_3d_num_heads=0,
layer_collection=None):
"""Computes attention compoenent (query, key or value).
Args:
antecedent: a Tensor with shape [batch, length, channels]
total_depth: an integer
filter_width: An integer specifying how wide you want the attention
component to be.
padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
name: a string specifying scope name.
vars_3d_num_heads: an optional integer (if we want to use 3d variables)
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
c : [batch, length, depth] tensor
"""
if layer_collection is not None:
if filter_width != 1 or vars_3d_num_heads != 0:
raise ValueError(
"KFAC implementation only supports filter_width=1 (actual: {}) and "
"vars_3d_num_heads=0 (actual: {}).".format(
filter_width, vars_3d_num_heads))
if vars_3d_num_heads > 0:
assert filter_width == 1
input_depth = antecedent.get_shape().as_list()[-1]
depth_per_head = total_depth // vars_3d_num_heads
initializer_stddev = input_depth ** -0.5
if "q" in name:
initializer_stddev *= depth_per_head ** -0.5
var = tf.get_variable(
name, [input_depth,
vars_3d_num_heads,
total_depth // vars_3d_num_heads],
initializer=tf.random_normal_initializer(stddev=initializer_stddev))
var = tf.cast(var, antecedent.dtype)
var = tf.reshape(var, [input_depth, total_depth])
return tf.tensordot(antecedent, var, axes=1)
if filter_width == 1:
return common_layers.dense(
antecedent, total_depth, use_bias=False, name=name,
layer_collection=layer_collection)
else:
return common_layers.conv1d(
antecedent, total_depth, filter_width, padding=padding, name=name) | [
"def",
"compute_attention_component",
"(",
"antecedent",
",",
"total_depth",
",",
"filter_width",
"=",
"1",
",",
"padding",
"=",
"\"VALID\"",
",",
"name",
"=",
"\"c\"",
",",
"vars_3d_num_heads",
"=",
"0",
",",
"layer_collection",
"=",
"None",
")",
":",
"if",
"layer_collection",
"is",
"not",
"None",
":",
"if",
"filter_width",
"!=",
"1",
"or",
"vars_3d_num_heads",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"KFAC implementation only supports filter_width=1 (actual: {}) and \"",
"\"vars_3d_num_heads=0 (actual: {}).\"",
".",
"format",
"(",
"filter_width",
",",
"vars_3d_num_heads",
")",
")",
"if",
"vars_3d_num_heads",
">",
"0",
":",
"assert",
"filter_width",
"==",
"1",
"input_depth",
"=",
"antecedent",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"-",
"1",
"]",
"depth_per_head",
"=",
"total_depth",
"//",
"vars_3d_num_heads",
"initializer_stddev",
"=",
"input_depth",
"**",
"-",
"0.5",
"if",
"\"q\"",
"in",
"name",
":",
"initializer_stddev",
"*=",
"depth_per_head",
"**",
"-",
"0.5",
"var",
"=",
"tf",
".",
"get_variable",
"(",
"name",
",",
"[",
"input_depth",
",",
"vars_3d_num_heads",
",",
"total_depth",
"//",
"vars_3d_num_heads",
"]",
",",
"initializer",
"=",
"tf",
".",
"random_normal_initializer",
"(",
"stddev",
"=",
"initializer_stddev",
")",
")",
"var",
"=",
"tf",
".",
"cast",
"(",
"var",
",",
"antecedent",
".",
"dtype",
")",
"var",
"=",
"tf",
".",
"reshape",
"(",
"var",
",",
"[",
"input_depth",
",",
"total_depth",
"]",
")",
"return",
"tf",
".",
"tensordot",
"(",
"antecedent",
",",
"var",
",",
"axes",
"=",
"1",
")",
"if",
"filter_width",
"==",
"1",
":",
"return",
"common_layers",
".",
"dense",
"(",
"antecedent",
",",
"total_depth",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"name",
",",
"layer_collection",
"=",
"layer_collection",
")",
"else",
":",
"return",
"common_layers",
".",
"conv1d",
"(",
"antecedent",
",",
"total_depth",
",",
"filter_width",
",",
"padding",
"=",
"padding",
",",
"name",
"=",
"name",
")"
] | 41.392157 | 15.431373 |
def t_escaped_TAB_CHAR(self, t):
r'\x74' # 't'
t.lexer.pop_state()
t.value = unichr(0x0009)
return t | [
"def",
"t_escaped_TAB_CHAR",
"(",
"self",
",",
"t",
")",
":",
"# 't'",
"t",
".",
"lexer",
".",
"pop_state",
"(",
")",
"t",
".",
"value",
"=",
"unichr",
"(",
"0x0009",
")",
"return",
"t"
] | 25.8 | 14.2 |
def save(self, path: str):
"""
Save lexicon in Numpy array format. Lexicon will be specific to Sockeye model.
:param path: Path to Numpy array output file.
"""
with open(path, 'wb') as out:
np.save(out, self.lex)
logger.info("Saved top-k lexicon to \"%s\"", path) | [
"def",
"save",
"(",
"self",
",",
"path",
":",
"str",
")",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"out",
":",
"np",
".",
"save",
"(",
"out",
",",
"self",
".",
"lex",
")",
"logger",
".",
"info",
"(",
"\"Saved top-k lexicon to \\\"%s\\\"\"",
",",
"path",
")"
] | 35.222222 | 15.666667 |
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment | [
"def",
"rename",
"(",
"self",
",",
"oldkey",
",",
"newkey",
")",
":",
"if",
"oldkey",
"in",
"self",
".",
"scalars",
":",
"the_list",
"=",
"self",
".",
"scalars",
"elif",
"oldkey",
"in",
"self",
".",
"sections",
":",
"the_list",
"=",
"self",
".",
"sections",
"else",
":",
"raise",
"KeyError",
"(",
"'Key \"%s\" not found.'",
"%",
"oldkey",
")",
"pos",
"=",
"the_list",
".",
"index",
"(",
"oldkey",
")",
"#",
"val",
"=",
"self",
"[",
"oldkey",
"]",
"dict",
".",
"__delitem__",
"(",
"self",
",",
"oldkey",
")",
"dict",
".",
"__setitem__",
"(",
"self",
",",
"newkey",
",",
"val",
")",
"the_list",
".",
"remove",
"(",
"oldkey",
")",
"the_list",
".",
"insert",
"(",
"pos",
",",
"newkey",
")",
"comm",
"=",
"self",
".",
"comments",
"[",
"oldkey",
"]",
"inline_comment",
"=",
"self",
".",
"inline_comments",
"[",
"oldkey",
"]",
"del",
"self",
".",
"comments",
"[",
"oldkey",
"]",
"del",
"self",
".",
"inline_comments",
"[",
"oldkey",
"]",
"self",
".",
"comments",
"[",
"newkey",
"]",
"=",
"comm",
"self",
".",
"inline_comments",
"[",
"newkey",
"]",
"=",
"inline_comment"
] | 34.428571 | 12.285714 |
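A short sketch with a ConfigObj-style mapping parsed from a list of INI lines; both the comment and the key's position survive the rename. The construction here is an assumption about the surrounding ConfigObj API.

```python
cfg = ConfigObj(['# the server block', 'host = example.com'])
cfg.rename('host', 'hostname')
print(cfg['hostname'])           # 'example.com'
print(cfg.comments['hostname'])  # ['# the server block']
```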
def get_ethernet_networks(self):
"""
Gets a list of the Ethernet networks associated with an uplink set.
Returns:
list: Associated Ethernet networks.
"""
network_uris = self.data.get('networkUris')
networks = []
if network_uris:
for uri in network_uris:
networks.append(self._ethernet_networks.get_by_uri(uri))
return networks | [
"def",
"get_ethernet_networks",
"(",
"self",
")",
":",
"network_uris",
"=",
"self",
".",
"data",
".",
"get",
"(",
"'networkUris'",
")",
"networks",
"=",
"[",
"]",
"if",
"network_uris",
":",
"for",
"uri",
"in",
"network_uris",
":",
"networks",
".",
"append",
"(",
"self",
".",
"_ethernet_networks",
".",
"get_by_uri",
"(",
"uri",
")",
")",
"return",
"networks"
] | 31.4375 | 19.4375 |
def save_json(obj, filename, **kwargs):
"""
Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`.
"""
with open(filename, 'w', encoding='utf-8') as f:
json.dump(obj, f, **kwargs) | [
"def",
"save_json",
"(",
"obj",
",",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"obj",
",",
"f",
",",
"*",
"*",
"kwargs",
")"
] | 28.166667 | 14.166667 |
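A quick usage example; the extra keyword arguments are passed straight through to `json.dump`.

```python
# indent and sort_keys are forwarded through **kwargs to json.dump;
# the helper itself relies on a module-level `import json`.
save_json({"name": "ada", "id": 1}, "user.json", indent=2, sort_keys=True)
```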
def get_samples(self, init_points_count, criterion='center'):
"""
Generates required amount of sample points
:param init_points_count: Number of samples to generate
:param criterion: For details of the effect of this parameter, please refer to pyDOE.lhs documentation
Default: 'center'
:returns: Generated samples
"""
samples = np.empty((init_points_count, self.space.dimensionality))
# Use random design to fill non-continuous variables
random_design = RandomDesign(self.space)
random_design.fill_noncontinous_variables(samples)
if self.space.has_continuous():
bounds = self.space.get_continuous_bounds()
lower_bound = np.asarray(bounds)[:,0].reshape(1, len(bounds))
upper_bound = np.asarray(bounds)[:,1].reshape(1, len(bounds))
diff = upper_bound - lower_bound
from pyDOE import lhs
X_design_aux = lhs(len(self.space.get_continuous_bounds()), init_points_count, criterion=criterion)
I = np.ones((X_design_aux.shape[0], 1))
X_design = np.dot(I, lower_bound) + X_design_aux * np.dot(I, diff)
samples[:, self.space.get_continuous_dims()] = X_design
return samples | [
"def",
"get_samples",
"(",
"self",
",",
"init_points_count",
",",
"criterion",
"=",
"'center'",
")",
":",
"samples",
"=",
"np",
".",
"empty",
"(",
"(",
"init_points_count",
",",
"self",
".",
"space",
".",
"dimensionality",
")",
")",
"# Use random design to fill non-continuous variables",
"random_design",
"=",
"RandomDesign",
"(",
"self",
".",
"space",
")",
"random_design",
".",
"fill_noncontinous_variables",
"(",
"samples",
")",
"if",
"self",
".",
"space",
".",
"has_continuous",
"(",
")",
":",
"bounds",
"=",
"self",
".",
"space",
".",
"get_continuous_bounds",
"(",
")",
"lower_bound",
"=",
"np",
".",
"asarray",
"(",
"bounds",
")",
"[",
":",
",",
"0",
"]",
".",
"reshape",
"(",
"1",
",",
"len",
"(",
"bounds",
")",
")",
"upper_bound",
"=",
"np",
".",
"asarray",
"(",
"bounds",
")",
"[",
":",
",",
"1",
"]",
".",
"reshape",
"(",
"1",
",",
"len",
"(",
"bounds",
")",
")",
"diff",
"=",
"upper_bound",
"-",
"lower_bound",
"from",
"pyDOE",
"import",
"lhs",
"X_design_aux",
"=",
"lhs",
"(",
"len",
"(",
"self",
".",
"space",
".",
"get_continuous_bounds",
"(",
")",
")",
",",
"init_points_count",
",",
"criterion",
"=",
"criterion",
")",
"I",
"=",
"np",
".",
"ones",
"(",
"(",
"X_design_aux",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"X_design",
"=",
"np",
".",
"dot",
"(",
"I",
",",
"lower_bound",
")",
"+",
"X_design_aux",
"*",
"np",
".",
"dot",
"(",
"I",
",",
"diff",
")",
"samples",
"[",
":",
",",
"self",
".",
"space",
".",
"get_continuous_dims",
"(",
")",
"]",
"=",
"X_design",
"return",
"samples"
] | 44.068966 | 24.206897 |
def mkzip(source_dir, output_filename):
'''Usage:
p = r'D:\auto\env\ttest\ins\build\lib\rock4\softtest\support'
mkzip(os.path.join(p, "appiumroot"),os.path.join(p, "appiumroot.zip"))
unzip(os.path.join(p, "appiumroot.zip"),os.path.join(p, "appiumroot2"))
'''
zipf = zipfile.ZipFile(output_filename, 'w', zipfile.ZIP_DEFLATED)
pre_len = len(os.path.dirname(source_dir))
for parent, dirnames, filenames in os.walk(source_dir):
for filename in filenames:
pathfile = os.path.join(parent, filename)
arcname = pathfile[pre_len:].strip(os.path.sep)  # relative path
zipf.write(pathfile, arcname)
zipf.close() | [
"def",
"mkzip",
"(",
"source_dir",
",",
"output_filename",
")",
":",
"zipf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"output_filename",
",",
"'w'",
",",
"zipfile",
".",
"zlib",
".",
"DEFLATED",
")",
"pre_len",
"=",
"len",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"source_dir",
")",
")",
"for",
"parent",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"source_dir",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"pathfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"parent",
",",
"filename",
")",
"arcname",
"=",
"pathfile",
"[",
"pre_len",
":",
"]",
".",
"strip",
"(",
"os",
".",
"path",
".",
"sep",
")",
"#相对路径\r",
"zipf",
".",
"write",
"(",
"pathfile",
",",
"arcname",
")",
"zipf",
".",
"close",
"(",
")"
] | 52.642857 | 22.785714 |
def wrap_traceback(traceback):
"""
For internal use only (until further notice)
"""
if email().format == 'html':
try:
from pygments import highlight
from pygments.lexers import PythonTracebackLexer
from pygments.formatters import HtmlFormatter
with_pygments = True
except ImportError:
with_pygments = False
if with_pygments:
formatter = HtmlFormatter(noclasses=True)
wrapped = highlight(traceback, PythonTracebackLexer(), formatter)
else:
wrapped = '<pre>%s</pre>' % traceback
else:
wrapped = traceback
return wrapped | [
"def",
"wrap_traceback",
"(",
"traceback",
")",
":",
"if",
"email",
"(",
")",
".",
"format",
"==",
"'html'",
":",
"try",
":",
"from",
"pygments",
"import",
"highlight",
"from",
"pygments",
".",
"lexers",
"import",
"PythonTracebackLexer",
"from",
"pygments",
".",
"formatters",
"import",
"HtmlFormatter",
"with_pygments",
"=",
"True",
"except",
"ImportError",
":",
"with_pygments",
"=",
"False",
"if",
"with_pygments",
":",
"formatter",
"=",
"HtmlFormatter",
"(",
"noclasses",
"=",
"True",
")",
"wrapped",
"=",
"highlight",
"(",
"traceback",
",",
"PythonTracebackLexer",
"(",
")",
",",
"formatter",
")",
"else",
":",
"wrapped",
"=",
"'<pre>%s</pre>'",
"%",
"traceback",
"else",
":",
"wrapped",
"=",
"traceback",
"return",
"wrapped"
] | 29.909091 | 16.727273 |
def __get_connection_SNS():
""" Ensure connection to SNS """
region = get_global_option('region')
try:
if (get_global_option('aws_access_key_id') and
get_global_option('aws_secret_access_key')):
logger.debug(
'Authenticating to SNS using '
'credentials in configuration file')
connection = sns.connect_to_region(
region,
aws_access_key_id=get_global_option(
'aws_access_key_id'),
aws_secret_access_key=get_global_option(
'aws_secret_access_key'))
else:
logger.debug(
'Authenticating using boto\'s authentication handler')
connection = sns.connect_to_region(region)
except Exception as err:
logger.error('Failed connecting to SNS: {0}'.format(err))
logger.error(
'Please report an issue at: '
'https://github.com/sebdah/dynamic-dynamodb/issues')
raise
logger.debug('Connected to SNS in {0}'.format(region))
return connection | [
"def",
"__get_connection_SNS",
"(",
")",
":",
"region",
"=",
"get_global_option",
"(",
"'region'",
")",
"try",
":",
"if",
"(",
"get_global_option",
"(",
"'aws_access_key_id'",
")",
"and",
"get_global_option",
"(",
"'aws_secret_access_key'",
")",
")",
":",
"logger",
".",
"debug",
"(",
"'Authenticating to SNS using '",
"'credentials in configuration file'",
")",
"connection",
"=",
"sns",
".",
"connect_to_region",
"(",
"region",
",",
"aws_access_key_id",
"=",
"get_global_option",
"(",
"'aws_access_key_id'",
")",
",",
"aws_secret_access_key",
"=",
"get_global_option",
"(",
"'aws_secret_access_key'",
")",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Authenticating using boto\\'s authentication handler'",
")",
"connection",
"=",
"sns",
".",
"connect_to_region",
"(",
"region",
")",
"except",
"Exception",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"'Failed connecting to SNS: {0}'",
".",
"format",
"(",
"err",
")",
")",
"logger",
".",
"error",
"(",
"'Please report an issue at: '",
"'https://github.com/sebdah/dynamic-dynamodb/issues'",
")",
"raise",
"logger",
".",
"debug",
"(",
"'Connected to SNS in {0}'",
".",
"format",
"(",
"region",
")",
")",
"return",
"connection"
] | 36.166667 | 17.366667 |
def get_engine_from_session(dbsession: Session) -> Engine:
"""
Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`.
"""
engine = dbsession.bind
assert isinstance(engine, Engine)
return engine | [
"def",
"get_engine_from_session",
"(",
"dbsession",
":",
"Session",
")",
"->",
"Engine",
":",
"engine",
"=",
"dbsession",
".",
"bind",
"assert",
"isinstance",
"(",
"engine",
",",
"Engine",
")",
"return",
"engine"
] | 32.571429 | 13.142857 |
def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
# Allow pragma once instead of header guards
for i in raw_lines:
if Search(r'^\s*#pragma\s+once', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar) | [
"def",
"CheckForHeaderGuard",
"(",
"filename",
",",
"clean_lines",
",",
"error",
")",
":",
"# Don't check for header guards if there are error suppression",
"# comments somewhere in this file.",
"#",
"# Because this is silencing a warning for a nonexistent line, we",
"# only support the very specific NOLINT(build/header_guard) syntax,",
"# and not the general NOLINT or NOLINT(*) syntax.",
"raw_lines",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"for",
"i",
"in",
"raw_lines",
":",
"if",
"Search",
"(",
"r'//\\s*NOLINT\\(build/header_guard\\)'",
",",
"i",
")",
":",
"return",
"# Allow pragma once instead of header guards",
"for",
"i",
"in",
"raw_lines",
":",
"if",
"Search",
"(",
"r'^\\s*#pragma\\s+once'",
",",
"i",
")",
":",
"return",
"cppvar",
"=",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
"ifndef",
"=",
"''",
"ifndef_linenum",
"=",
"0",
"define",
"=",
"''",
"endif",
"=",
"''",
"endif_linenum",
"=",
"0",
"for",
"linenum",
",",
"line",
"in",
"enumerate",
"(",
"raw_lines",
")",
":",
"linesplit",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"linesplit",
")",
">=",
"2",
":",
"# find the first occurrence of #ifndef and #define, save arg",
"if",
"not",
"ifndef",
"and",
"linesplit",
"[",
"0",
"]",
"==",
"'#ifndef'",
":",
"# set ifndef to the header guard presented on the #ifndef line.",
"ifndef",
"=",
"linesplit",
"[",
"1",
"]",
"ifndef_linenum",
"=",
"linenum",
"if",
"not",
"define",
"and",
"linesplit",
"[",
"0",
"]",
"==",
"'#define'",
":",
"define",
"=",
"linesplit",
"[",
"1",
"]",
"# find the last occurrence of #endif, save entire line",
"if",
"line",
".",
"startswith",
"(",
"'#endif'",
")",
":",
"endif",
"=",
"line",
"endif_linenum",
"=",
"linenum",
"if",
"not",
"ifndef",
"or",
"not",
"define",
"or",
"ifndef",
"!=",
"define",
":",
"error",
"(",
"filename",
",",
"0",
",",
"'build/header_guard'",
",",
"5",
",",
"'No #ifndef header guard found, suggested CPP variable is: %s'",
"%",
"cppvar",
")",
"return",
"# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__",
"# for backward compatibility.",
"if",
"ifndef",
"!=",
"cppvar",
":",
"error_level",
"=",
"0",
"if",
"ifndef",
"!=",
"cppvar",
"+",
"'_'",
":",
"error_level",
"=",
"5",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_lines",
"[",
"ifndef_linenum",
"]",
",",
"ifndef_linenum",
",",
"error",
")",
"error",
"(",
"filename",
",",
"ifndef_linenum",
",",
"'build/header_guard'",
",",
"error_level",
",",
"'#ifndef header guard has wrong style, please use: %s'",
"%",
"cppvar",
")",
"# Check for \"//\" comments on endif line.",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_lines",
"[",
"endif_linenum",
"]",
",",
"endif_linenum",
",",
"error",
")",
"match",
"=",
"Match",
"(",
"r'#endif\\s*//\\s*'",
"+",
"cppvar",
"+",
"r'(_)?\\b'",
",",
"endif",
")",
"if",
"match",
":",
"if",
"match",
".",
"group",
"(",
"1",
")",
"==",
"'_'",
":",
"# Issue low severity warning for deprecated double trailing underscore",
"error",
"(",
"filename",
",",
"endif_linenum",
",",
"'build/header_guard'",
",",
"0",
",",
"'#endif line should be \"#endif // %s\"'",
"%",
"cppvar",
")",
"return",
"# Didn't find the corresponding \"//\" comment. If this file does not",
"# contain any \"//\" comments at all, it could be that the compiler",
"# only wants \"/**/\" comments, look for those instead.",
"no_single_line_comments",
"=",
"True",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"len",
"(",
"raw_lines",
")",
"-",
"1",
")",
":",
"line",
"=",
"raw_lines",
"[",
"i",
"]",
"if",
"Match",
"(",
"r'^(?:(?:\\'(?:\\.|[^\\'])*\\')|(?:\"(?:\\.|[^\"])*\")|[^\\'\"])*//'",
",",
"line",
")",
":",
"no_single_line_comments",
"=",
"False",
"break",
"if",
"no_single_line_comments",
":",
"match",
"=",
"Match",
"(",
"r'#endif\\s*/\\*\\s*'",
"+",
"cppvar",
"+",
"r'(_)?\\s*\\*/'",
",",
"endif",
")",
"if",
"match",
":",
"if",
"match",
".",
"group",
"(",
"1",
")",
"==",
"'_'",
":",
"# Low severity warning for double trailing underscore",
"error",
"(",
"filename",
",",
"endif_linenum",
",",
"'build/header_guard'",
",",
"0",
",",
"'#endif line should be \"#endif /* %s */\"'",
"%",
"cppvar",
")",
"return",
"# Didn't find anything",
"error",
"(",
"filename",
",",
"endif_linenum",
",",
"'build/header_guard'",
",",
"5",
",",
"'#endif line should be \"#endif // %s\"'",
"%",
"cppvar",
")"
] | 35.950495 | 21.306931 |
def revoke_permission(user, permission_name):
"""
Revoke a specified permission from a user.
Permissions are only revoked if they are in the scope of any of the user's
roles. If the permission is out of scope, a RolePermissionScopeException
is raised.
"""
roles = get_user_roles(user)
for role in roles:
if permission_name in role.permission_names_list():
permission = get_permission(permission_name)
user.user_permissions.remove(permission)
return
raise RolePermissionScopeException(
"This permission isn't in the scope of "
"any of this user's roles.") | [
"def",
"revoke_permission",
"(",
"user",
",",
"permission_name",
")",
":",
"roles",
"=",
"get_user_roles",
"(",
"user",
")",
"for",
"role",
"in",
"roles",
":",
"if",
"permission_name",
"in",
"role",
".",
"permission_names_list",
"(",
")",
":",
"permission",
"=",
"get_permission",
"(",
"permission_name",
")",
"user",
".",
"user_permissions",
".",
"remove",
"(",
"permission",
")",
"return",
"raise",
"RolePermissionScopeException",
"(",
"\"This permission isn't in the scope of \"",
"\"any of this user's roles.\"",
")"
] | 33.263158 | 17.684211 |
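A sketch of the revoke flow, assuming 'create_report' was declared in one of the user's registered roles:

```python
# Illustrative: succeeds only if 'create_report' is in scope for one of the
# user's roles; otherwise RolePermissionScopeException is raised.
revoke_permission(user, 'create_report')
```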
def build_diagonals(self):
"""
Builds the diagonals for the coefficient array
"""
##########################################################
# INCORPORATE BOUNDARY CONDITIONS INTO COEFFICIENT ARRAY #
##########################################################
# Roll to keep the proper coefficients at the proper places in the
# arrays: Python will naturally just do vertical shifts instead of
# diagonal shifts, so this takes into account the horizontal component
# to ensure that boundary values are at the right place.
self.l2 = np.roll(self.l2, -2)
self.l1 = np.roll(self.l1, -1)
self.r1 = np.roll(self.r1, 1)
self.r2 = np.roll(self.r2, 2)
# Then assemble these rows: this is where the periodic boundary condition
# can matter.
if self.coeff_matrix is not None:
pass
elif self.BC_E == 'Periodic' and self.BC_W == 'Periodic':
# In this case, the boundary-condition-related stacking has already
# happened inside b.c.-handling function. This is because periodic
# boundary conditions require extra diagonals to exist on the edges of
# the solution array
pass
else:
self.diags = np.vstack((self.l2,self.l1,self.c0,self.r1,self.r2))
self.offsets = np.array([-2,-1,0,1,2])
# Everybody now (including periodic b.c. cases)
self.coeff_matrix = spdiags(self.diags, self.offsets, self.nx, self.nx, format='csr') | [
"def",
"build_diagonals",
"(",
"self",
")",
":",
"##########################################################\r",
"# INCORPORATE BOUNDARY CONDITIONS INTO COEFFICIENT ARRAY #\r",
"##########################################################\r",
"# Roll to keep the proper coefficients at the proper places in the\r",
"# arrays: Python will naturally just do vertical shifts instead of \r",
"# diagonal shifts, so this takes into account the horizontal compoent \r",
"# to ensure that boundary values are at the right place.\r",
"self",
".",
"l2",
"=",
"np",
".",
"roll",
"(",
"self",
".",
"l2",
",",
"-",
"2",
")",
"self",
".",
"l1",
"=",
"np",
".",
"roll",
"(",
"self",
".",
"l1",
",",
"-",
"1",
")",
"self",
".",
"r1",
"=",
"np",
".",
"roll",
"(",
"self",
".",
"r1",
",",
"1",
")",
"self",
".",
"r2",
"=",
"np",
".",
"roll",
"(",
"self",
".",
"r2",
",",
"2",
")",
"# Then assemble these rows: this is where the periodic boundary condition \r",
"# can matter.\r",
"if",
"self",
".",
"coeff_matrix",
"is",
"not",
"None",
":",
"pass",
"elif",
"self",
".",
"BC_E",
"==",
"'Periodic'",
"and",
"self",
".",
"BC_W",
"==",
"'Periodic'",
":",
"# In this case, the boundary-condition-related stacking has already \r",
"# happened inside b.c.-handling function. This is because periodic\r",
"# boundary conditions require extra diagonals to exist on the edges of \r",
"# the solution array\r",
"pass",
"else",
":",
"self",
".",
"diags",
"=",
"np",
".",
"vstack",
"(",
"(",
"self",
".",
"l2",
",",
"self",
".",
"l1",
",",
"self",
".",
"c0",
",",
"self",
".",
"r1",
",",
"self",
".",
"r2",
")",
")",
"self",
".",
"offsets",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"2",
",",
"-",
"1",
",",
"0",
",",
"1",
",",
"2",
"]",
")",
"# Everybody now (including periodic b.c. cases)\r",
"self",
".",
"coeff_matrix",
"=",
"spdiags",
"(",
"self",
".",
"diags",
",",
"self",
".",
"offsets",
",",
"self",
".",
"nx",
",",
"self",
".",
"nx",
",",
"format",
"=",
"'csr'",
")"
] | 42.470588 | 22.941176 |
def _update_enabled(self, name, enabled_value):
'''
Update whether an individual beacon is enabled
'''
if isinstance(self.opts['beacons'][name], dict):
# Backwards compatibility
self.opts['beacons'][name]['enabled'] = enabled_value
else:
enabled_index = self._get_index(self.opts['beacons'][name], 'enabled')
if enabled_index >= 0:
self.opts['beacons'][name][enabled_index]['enabled'] = enabled_value
else:
self.opts['beacons'][name].append({'enabled': enabled_value}) | [
"def",
"_update_enabled",
"(",
"self",
",",
"name",
",",
"enabled_value",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"opts",
"[",
"'beacons'",
"]",
"[",
"name",
"]",
",",
"dict",
")",
":",
"# Backwards compatibility",
"self",
".",
"opts",
"[",
"'beacons'",
"]",
"[",
"name",
"]",
"[",
"'enabled'",
"]",
"=",
"enabled_value",
"else",
":",
"enabled_index",
"=",
"self",
".",
"_get_index",
"(",
"self",
".",
"opts",
"[",
"'beacons'",
"]",
"[",
"name",
"]",
",",
"'enabled'",
")",
"if",
"enabled_index",
">=",
"0",
":",
"self",
".",
"opts",
"[",
"'beacons'",
"]",
"[",
"name",
"]",
"[",
"enabled_index",
"]",
"[",
"'enabled'",
"]",
"=",
"enabled_value",
"else",
":",
"self",
".",
"opts",
"[",
"'beacons'",
"]",
"[",
"name",
"]",
".",
"append",
"(",
"{",
"'enabled'",
":",
"enabled_value",
"}",
")"
] | 42 | 24.428571 |
def _needs_dependencies(p_function):
"""
A decorator that triggers the population of the dependency tree in a
TodoList (and other administration). The decorator should be applied to
methods of TodoList that require dependency information.
"""
def build_dependency_information(p_todolist):
for todo in p_todolist._todos:
p_todolist._register_todo(todo)
def inner(self, *args, **kwargs):
if not self._initialized:
self._initialized = True
from topydo.lib.Graph import DirectedGraph
self._depgraph = DirectedGraph()
build_dependency_information(self)
return p_function(self, *args, **kwargs)
return inner | [
"def",
"_needs_dependencies",
"(",
"p_function",
")",
":",
"def",
"build_dependency_information",
"(",
"p_todolist",
")",
":",
"for",
"todo",
"in",
"p_todolist",
".",
"_todos",
":",
"p_todolist",
".",
"_register_todo",
"(",
"todo",
")",
"def",
"inner",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"_initialized",
":",
"self",
".",
"_initialized",
"=",
"True",
"from",
"topydo",
".",
"lib",
".",
"Graph",
"import",
"DirectedGraph",
"self",
".",
"_depgraph",
"=",
"DirectedGraph",
"(",
")",
"build_dependency_information",
"(",
"self",
")",
"return",
"p_function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"inner"
] | 31.863636 | 17.045455 |
def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record):
""" method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method"""
source_collection_name = context.process_context[process_name].source
start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
uow, transfer_to_final = self.insert_and_publish_uow(job_record, start_id, end_id)
self.update_job(job_record, uow, job.STATE_FINAL_RUN)
if transfer_to_final:
self._process_state_final_run(job_record) | [
"def",
"_compute_and_transfer_to_final_run",
"(",
"self",
",",
"process_name",
",",
"start_timeperiod",
",",
"end_timeperiod",
",",
"job_record",
")",
":",
"source_collection_name",
"=",
"context",
".",
"process_context",
"[",
"process_name",
"]",
".",
"source",
"start_id",
"=",
"self",
".",
"ds",
".",
"highest_primary_key",
"(",
"source_collection_name",
",",
"start_timeperiod",
",",
"end_timeperiod",
")",
"end_id",
"=",
"self",
".",
"ds",
".",
"lowest_primary_key",
"(",
"source_collection_name",
",",
"start_timeperiod",
",",
"end_timeperiod",
")",
"uow",
",",
"transfer_to_final",
"=",
"self",
".",
"insert_and_publish_uow",
"(",
"job_record",
",",
"start_id",
",",
"end_id",
")",
"self",
".",
"update_job",
"(",
"job_record",
",",
"uow",
",",
"job",
".",
"STATE_FINAL_RUN",
")",
"if",
"transfer_to_final",
":",
"self",
".",
"_process_state_final_run",
"(",
"job_record",
")"
] | 73.454545 | 32.909091 |
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self) | [
"def",
"release",
"(",
"self",
")",
":",
"if",
"self",
".",
"__owner",
"!=",
"_get_ident",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"\"cannot release un-acquired lock\"",
")",
"self",
".",
"__count",
"=",
"count",
"=",
"self",
".",
"__count",
"-",
"1",
"if",
"not",
"count",
":",
"self",
".",
"__owner",
"=",
"None",
"self",
".",
"__block",
".",
"release",
"(",
")",
"if",
"__debug__",
":",
"self",
".",
"_note",
"(",
"\"%s.release(): final release\"",
",",
"self",
")",
"else",
":",
"if",
"__debug__",
":",
"self",
".",
"_note",
"(",
"\"%s.release(): non-final release\"",
",",
"self",
")"
] | 39.62963 | 22.407407 |
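A minimal sketch of the recursion-level contract above, using the standard-library threading.RLock (which follows the same acquire/release semantics):

import threading

lock = threading.RLock()
lock.acquire()   # count -> 1, owner becomes the current thread
lock.acquire()   # count -> 2: re-entrant acquire by the same owner
lock.release()   # count -> 1, still owned: a non-final release
lock.release()   # count -> 0, owner cleared: the final release
# a further lock.release() would raise RuntimeError("cannot release un-acquired lock")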
def get_info(certificate_id, returncertificate=False, returntype=None):
'''
Retrieves information about the requested SSL certificate. Returns a
dictionary of information about the SSL certificate with two keys:
- **ssl** - Contains the metadata information
- **certificate** - Contains the details for the certificate such as the
CSR, Approver, and certificate data
certificate_id
Unique ID of the SSL certificate
returncertificate : False
Set to ``True`` to ask for the certificate in response
returntype
Optional type for the returned certificate. Can be either "Individual"
(for X.509 format) or "PKCS7"
.. note::
Required if ``returncertificate`` is ``True``
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_ssl.get_info my-cert-id
'''
opts = salt.utils.namecheap.get_opts('namecheap.ssl.getinfo')
opts['certificateID'] = certificate_id
if returncertificate:
opts['returncertificate'] = "true"
if returntype is None:
log.error('returntype must be specified when returncertificate is set to True')
raise Exception('returntype must be specified when returncertificate is set to True')
if returntype not in ["Individual", "PKCS7"]:
log.error('returntype must be specified as Individual or PKCS7, not %s', returntype)
raise Exception('returntype must be specified as Individual or PKCS7, not ' + returntype)
opts['returntype'] = returntype
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return {}
sslinforesult = response_xml.getElementsByTagName('SSLGetInfoResult')[0]
return salt.utils.namecheap.xml_to_dict(sslinforesult) | [
"def",
"get_info",
"(",
"certificate_id",
",",
"returncertificate",
"=",
"False",
",",
"returntype",
"=",
"None",
")",
":",
"opts",
"=",
"salt",
".",
"utils",
".",
"namecheap",
".",
"get_opts",
"(",
"'namecheap.ssl.getinfo'",
")",
"opts",
"[",
"'certificateID'",
"]",
"=",
"certificate_id",
"if",
"returncertificate",
":",
"opts",
"[",
"'returncertificate'",
"]",
"=",
"\"true\"",
"if",
"returntype",
"is",
"None",
":",
"log",
".",
"error",
"(",
"'returntype must be specified when returncertificate is set to True'",
")",
"raise",
"Exception",
"(",
"'returntype must be specified when returncertificate is set to True'",
")",
"if",
"returntype",
"not",
"in",
"[",
"\"Individual\"",
",",
"\"PKCS7\"",
"]",
":",
"log",
".",
"error",
"(",
"'returntype must be specified as Individual or PKCS7, not %s'",
",",
"returntype",
")",
"raise",
"Exception",
"(",
"'returntype must be specified as Individual or PKCS7, not '",
"+",
"returntype",
")",
"opts",
"[",
"'returntype'",
"]",
"=",
"returntype",
"response_xml",
"=",
"salt",
".",
"utils",
".",
"namecheap",
".",
"get_request",
"(",
"opts",
")",
"if",
"response_xml",
"is",
"None",
":",
"return",
"{",
"}",
"sslinforesult",
"=",
"response_xml",
".",
"getElementsByTagName",
"(",
"'SSLGetInfoResult'",
")",
"[",
"0",
"]",
"return",
"salt",
".",
"utils",
".",
"namecheap",
".",
"xml_to_dict",
"(",
"sslinforesult",
")"
] | 35.918367 | 27.428571 |
def update_policy(self,defaultHeaders):
""" rewrite update policy so that additional pins are added and not overwritten """
if self.inputs is not None:
for k,v in defaultHeaders.items():
if k not in self.inputs:
self.inputs[k] = v
if k == 'pins':
self.inputs[k] = self.inputs[k] + defaultHeaders[k]
return self.inputs
else:
return self.inputs | [
"def",
"update_policy",
"(",
"self",
",",
"defaultHeaders",
")",
":",
"if",
"self",
".",
"inputs",
"is",
"not",
"None",
":",
"for",
"k",
",",
"v",
"in",
"defaultHeaders",
".",
"items",
"(",
")",
":",
"if",
"k",
"not",
"in",
"self",
".",
"inputs",
":",
"self",
".",
"inputs",
"[",
"k",
"]",
"=",
"v",
"if",
"k",
"==",
"'pins'",
":",
"self",
".",
"inputs",
"[",
"k",
"]",
"=",
"self",
".",
"inputs",
"[",
"k",
"]",
"+",
"defaultHeaders",
"[",
"k",
"]",
"return",
"self",
".",
"inputs",
"else",
":",
"return",
"self",
".",
"inputs"
] | 33.181818 | 13.818182 |
def write_data(worksheet, data):
"""Writes data into worksheet.
Args:
worksheet: worksheet to write into
data: data to be written
"""
if not data:
return
if isinstance(data, list):
rows = data
else:
rows = [data]
if isinstance(rows[0], dict):
keys = get_keys(rows)
worksheet.append([utilities.convert_snake_to_title_case(key) for key in keys])
for row in rows:
values = [get_value_from_row(row, key) for key in keys]
worksheet.append(values)
elif isinstance(rows[0], list):
for row in rows:
values = [utilities.normalize_cell_value(value) for value in row]
worksheet.append(values)
else:
for row in rows:
worksheet.append([utilities.normalize_cell_value(row)]) | [
"def",
"write_data",
"(",
"worksheet",
",",
"data",
")",
":",
"if",
"not",
"data",
":",
"return",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"rows",
"=",
"data",
"else",
":",
"rows",
"=",
"[",
"data",
"]",
"if",
"isinstance",
"(",
"rows",
"[",
"0",
"]",
",",
"dict",
")",
":",
"keys",
"=",
"get_keys",
"(",
"rows",
")",
"worksheet",
".",
"append",
"(",
"[",
"utilities",
".",
"convert_snake_to_title_case",
"(",
"key",
")",
"for",
"key",
"in",
"keys",
"]",
")",
"for",
"row",
"in",
"rows",
":",
"values",
"=",
"[",
"get_value_from_row",
"(",
"row",
",",
"key",
")",
"for",
"key",
"in",
"keys",
"]",
"worksheet",
".",
"append",
"(",
"values",
")",
"elif",
"isinstance",
"(",
"rows",
"[",
"0",
"]",
",",
"list",
")",
":",
"for",
"row",
"in",
"rows",
":",
"values",
"=",
"[",
"utilities",
".",
"normalize_cell_value",
"(",
"value",
")",
"for",
"value",
"in",
"row",
"]",
"worksheet",
".",
"append",
"(",
"values",
")",
"else",
":",
"for",
"row",
"in",
"rows",
":",
"worksheet",
".",
"append",
"(",
"[",
"utilities",
".",
"normalize_cell_value",
"(",
"row",
")",
"]",
")"
] | 29 | 19.535714 |
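A usage sketch for the helper above, assuming openpyxl is installed and the module's own helpers (get_keys, get_value_from_row, utilities) are importable; the function itself only relies on worksheet.append():

from openpyxl import Workbook

wb = Workbook()
ws = wb.active
# dict rows: a title-cased header row is written first, then one row per dict
write_data(ws, [{'first_name': 'Ada', 'score': 10},
                {'first_name': 'Grace', 'score': 12}])
wb.save('report.xlsx')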
def _jws_signature(signdata, privkey, algorithm):
"""
Produce a base64-encoded JWS signature based on the signdata
specified, the privkey instance, and the algorithm passed.
"""
signature = algorithm.sign(privkey, signdata)
return base64url_encode(signature) | [
"def",
"_jws_signature",
"(",
"signdata",
",",
"privkey",
",",
"algorithm",
")",
":",
"signature",
"=",
"algorithm",
".",
"sign",
"(",
"privkey",
",",
"signdata",
")",
"return",
"base64url_encode",
"(",
"signature",
")"
] | 39.428571 | 9.428571 |
def cp_single_file(self, pool, source, target, delete_source):
'''Copy a single file or a directory by adding a task to the queue.'''
if source[-1] == PATH_SEP:
if self.opt.recursive:
basepath = S3URL(source).path
for f in (f for f in self.s3walk(source) if not f['is_dir']):
pool.copy(f['name'], os.path.join(target, os.path.relpath(S3URL(f['name']).path, basepath)), delete_source=delete_source)
else:
message('omitting directory "%s".' % source)
else:
pool.copy(source, target, delete_source=delete_source) | [
"def",
"cp_single_file",
"(",
"self",
",",
"pool",
",",
"source",
",",
"target",
",",
"delete_source",
")",
":",
"if",
"source",
"[",
"-",
"1",
"]",
"==",
"PATH_SEP",
":",
"if",
"self",
".",
"opt",
".",
"recursive",
":",
"basepath",
"=",
"S3URL",
"(",
"source",
")",
".",
"path",
"for",
"f",
"in",
"(",
"f",
"for",
"f",
"in",
"self",
".",
"s3walk",
"(",
"source",
")",
"if",
"not",
"f",
"[",
"'is_dir'",
"]",
")",
":",
"pool",
".",
"copy",
"(",
"f",
"[",
"'name'",
"]",
",",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"os",
".",
"path",
".",
"relpath",
"(",
"S3URL",
"(",
"f",
"[",
"'name'",
"]",
")",
".",
"path",
",",
"basepath",
")",
")",
",",
"delete_source",
"=",
"delete_source",
")",
"else",
":",
"message",
"(",
"'omitting directory \"%s\".'",
"%",
"source",
")",
"else",
":",
"pool",
".",
"copy",
"(",
"source",
",",
"target",
",",
"delete_source",
"=",
"delete_source",
")"
] | 50.909091 | 26.363636 |
def command_mount(self, system_id, *system_ids):
"""Mounts the specified sftp system, unless it's already mounted.
Usage: sftpman mount {id}..
"""
system_ids = (system_id,) + system_ids
has_failed = False
for system_id in system_ids:
try:
system = SystemModel.create_by_id(system_id, self.environment)
controller = SystemControllerModel(system, self.environment)
controller.mount()
except SftpConfigException as e:
sys.stderr.write('Cannot mount %s: %s\n\n' % (system_id, str(e)))
has_failed = True
except SftpMountException as e:
sys.stderr.write('Cannot mount %s!\n\n' % system_id)
sys.stderr.write('Mount command: \n%s\n\n' % e.mount_cmd)
sys.stderr.write('Command output: \n%s\n\n' % e.mount_cmd_output)
has_failed = True
if has_failed:
sys.exit(1) | [
"def",
"command_mount",
"(",
"self",
",",
"system_id",
",",
"*",
"system_ids",
")",
":",
"system_ids",
"=",
"(",
"system_id",
",",
")",
"+",
"system_ids",
"has_failed",
"=",
"False",
"for",
"system_id",
"in",
"system_ids",
":",
"try",
":",
"system",
"=",
"SystemModel",
".",
"create_by_id",
"(",
"system_id",
",",
"self",
".",
"environment",
")",
"controller",
"=",
"SystemControllerModel",
"(",
"system",
",",
"self",
".",
"environment",
")",
"controller",
".",
"mount",
"(",
")",
"except",
"SftpConfigException",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Cannot mount %s: %s\\n\\n'",
"%",
"(",
"system_id",
",",
"str",
"(",
"e",
")",
")",
")",
"has_failed",
"=",
"True",
"except",
"SftpMountException",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Cannot mount %s!\\n\\n'",
"%",
"system_id",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Mount command: \\n%s\\n\\n'",
"%",
"e",
".",
"mount_cmd",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Command output: \\n%s\\n\\n'",
"%",
"e",
".",
"mount_cmd_output",
")",
"has_failed",
"=",
"True",
"if",
"has_failed",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] | 46.666667 | 16.190476 |
def _send_content(self, content, content_type, code=200):
"""Send content to client."""
assert isinstance(content, bytes)
self.send_response(code)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content) | [
"def",
"_send_content",
"(",
"self",
",",
"content",
",",
"content_type",
",",
"code",
"=",
"200",
")",
":",
"assert",
"isinstance",
"(",
"content",
",",
"bytes",
")",
"self",
".",
"send_response",
"(",
"code",
")",
"self",
".",
"send_header",
"(",
"'Content-Type'",
",",
"content_type",
")",
"self",
".",
"send_header",
"(",
"'Content-Length'",
",",
"str",
"(",
"len",
"(",
"content",
")",
")",
")",
"self",
".",
"end_headers",
"(",
")",
"self",
".",
"wfile",
".",
"write",
"(",
"content",
")"
] | 39.125 | 11 |
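A sketch of how such a helper is typically called from inside an http.server.BaseHTTPRequestHandler subclass; the handler and payload below are assumptions, not from the source:

from http.server import BaseHTTPRequestHandler, HTTPServer

class Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        # delegate to the helper above (bound manually for this sketch)
        _send_content(self, b'{"status": "ok"}', 'application/json')

HTTPServer(('127.0.0.1', 8000), Handler).serve_forever()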
def _input_file_as_lines(cls, session: AppSession):
'''Read lines from input file and return them.'''
if session.args.input_file == sys.stdin:
input_file = session.args.input_file
else:
reader = codecs.getreader(session.args.local_encoding or 'utf-8')
input_file = reader(session.args.input_file)
return input_file | [
"def",
"_input_file_as_lines",
"(",
"cls",
",",
"session",
":",
"AppSession",
")",
":",
"if",
"session",
".",
"args",
".",
"input_file",
"==",
"sys",
".",
"stdin",
":",
"input_file",
"=",
"session",
".",
"args",
".",
"input_file",
"else",
":",
"reader",
"=",
"codecs",
".",
"getreader",
"(",
"session",
".",
"args",
".",
"local_encoding",
"or",
"'utf-8'",
")",
"input_file",
"=",
"reader",
"(",
"session",
".",
"args",
".",
"input_file",
")",
"return",
"input_file"
] | 41.666667 | 19.888889 |
def append_with(self, obj, **properties):
'''Add an item to the list, indexing it in the metadata dictionary under the given properties'''
for prop, val in properties.items():
val = self.serialize(val)
self._meta.setdefault(prop, {}).setdefault(val, []).append(obj)
self.append(obj) | [
"def",
"append_with",
"(",
"self",
",",
"obj",
",",
"*",
"*",
"properties",
")",
":",
"for",
"prop",
",",
"val",
"in",
"properties",
".",
"items",
"(",
")",
":",
"val",
"=",
"self",
".",
"serialize",
"(",
"val",
")",
"self",
".",
"_meta",
".",
"setdefault",
"(",
"prop",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"val",
",",
"[",
"]",
")",
".",
"append",
"(",
"obj",
")",
"self",
".",
"append",
"(",
"obj",
")"
] | 49.833333 | 16.166667 |
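A hypothetical usage sketch; the names are assumptions: append_with is taken to live on a list subclass (here MetaList) whose serialize() is the identity for strings:

ml = MetaList()
ml.append_with('circle.svg', color='red', kind='shape')
ml.append_with('square.svg', color='red', kind='shape')
# items remain positionally accessible:     ml[0] -> 'circle.svg'
# and are also indexed by their metadata:   ml._meta['color']['red'] -> ['circle.svg', 'square.svg']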
def sort_stats(self, *args):
"""
Sort the tracked objects according to the supplied criteria. The
argument is a string identifying the basis of a sort (example: 'size'
or 'classname'). When more than one key is provided, then additional
keys are used as secondary criteria when there is equality in all keys
selected before them. For example, ``sort_stats('name', 'size')`` will
sort all the entries according to their class name, and resolve all
ties (identical class names) by sorting by size. The criteria are
fields in the tracked object instances. Results are stored in the
``self.sorted`` list which is used by ``Stats.print_stats()`` and other
methods. The fields available for sorting are:
'classname'
the name with which the class was registered
'name'
the classname
'birth'
creation timestamp
'death'
destruction timestamp
'size'
the maximum measured size of the object
'tsize'
the measured size during the largest snapshot
'repr'
string representation of the object
Note that sorts on size are in descending order (placing most memory
consuming items first), whereas name, repr, and creation time searches
are in ascending order (alphabetical).
The function returns self to allow calling functions on the result::
stats.sort_stats('size').reverse_order().print_stats()
"""
criteria = ('classname', 'tsize', 'birth', 'death',
'name', 'repr', 'size')
if not set(criteria).issuperset(set(args)):
raise ValueError("Invalid sort criteria")
if not args:
args = criteria
def args_to_tuple(obj):
keys = []
for attr in args:
attribute = getattr(obj, attr)
if attr in ('tsize', 'size'):
attribute = -attribute
keys.append(attribute)
return tuple(keys)
self._init_sort()
self.sorted.sort(key=args_to_tuple)
return self | [
"def",
"sort_stats",
"(",
"self",
",",
"*",
"args",
")",
":",
"criteria",
"=",
"(",
"'classname'",
",",
"'tsize'",
",",
"'birth'",
",",
"'death'",
",",
"'name'",
",",
"'repr'",
",",
"'size'",
")",
"if",
"not",
"set",
"(",
"criteria",
")",
".",
"issuperset",
"(",
"set",
"(",
"args",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid sort criteria\"",
")",
"if",
"not",
"args",
":",
"args",
"=",
"criteria",
"def",
"args_to_tuple",
"(",
"obj",
")",
":",
"keys",
"=",
"[",
"]",
"for",
"attr",
"in",
"args",
":",
"attribute",
"=",
"getattr",
"(",
"obj",
",",
"attr",
")",
"if",
"attr",
"in",
"(",
"'tsize'",
",",
"'size'",
")",
":",
"attribute",
"=",
"-",
"attribute",
"keys",
".",
"append",
"(",
"attribute",
")",
"return",
"tuple",
"(",
"keys",
")",
"self",
".",
"_init_sort",
"(",
")",
"self",
".",
"sorted",
".",
"sort",
"(",
"key",
"=",
"args_to_tuple",
")",
"return",
"self"
] | 37.474576 | 22.118644 |
def GMailer(recipients, username, password, subject='Log message from lggr.py'):
""" Sends messages as emails to the given list
of recipients, from a GMail account. """
import smtplib
srvr = smtplib.SMTP('smtp.gmail.com', 587)
srvr.ehlo()
srvr.starttls()
srvr.ehlo()
srvr.login(username, password)
if not (isinstance(recipients, list) or isinstance(recipients, tuple)):
recipients = [recipients]
gmail_sender = '{0}@gmail.com'.format(username)
msg = 'To: {0}\nFrom: '+gmail_sender+'\nSubject: '+subject+'\n'
msg = msg + '\n{1}\n\n'
try:
while True:
logstr = (yield)
for rcp in recipients:
message = msg.format(rcp, logstr)
srvr.sendmail(gmail_sender, rcp, message)
except GeneratorExit:
srvr.quit() | [
"def",
"GMailer",
"(",
"recipients",
",",
"username",
",",
"password",
",",
"subject",
"=",
"'Log message from lggr.py'",
")",
":",
"import",
"smtplib",
"srvr",
"=",
"smtplib",
".",
"SMTP",
"(",
"'smtp.gmail.com'",
",",
"587",
")",
"srvr",
".",
"ehlo",
"(",
")",
"srvr",
".",
"starttls",
"(",
")",
"srvr",
".",
"ehlo",
"(",
")",
"srvr",
".",
"login",
"(",
"username",
",",
"password",
")",
"if",
"not",
"(",
"isinstance",
"(",
"recipients",
",",
"list",
")",
"or",
"isinstance",
"(",
"recipients",
",",
"tuple",
")",
")",
":",
"recipients",
"=",
"[",
"recipients",
"]",
"gmail_sender",
"=",
"'{0}@gmail.com'",
".",
"format",
"(",
"username",
")",
"msg",
"=",
"'To: {0}\\nFrom: '",
"+",
"gmail_sender",
"+",
"'\\nSubject: '",
"+",
"subject",
"+",
"'\\n'",
"msg",
"=",
"msg",
"+",
"'\\n{1}\\n\\n'",
"try",
":",
"while",
"True",
":",
"logstr",
"=",
"(",
"yield",
")",
"for",
"rcp",
"in",
"recipients",
":",
"message",
"=",
"msg",
".",
"format",
"(",
"rcp",
",",
"logstr",
")",
"srvr",
".",
"sendmail",
"(",
"gmail_sender",
",",
"rcp",
",",
"message",
")",
"except",
"GeneratorExit",
":",
"srvr",
".",
"quit",
"(",
")"
] | 31.423077 | 20.423077 |
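Usage sketch (credentials are placeholders): because GMailer is a generator-based coroutine, its body does not run at construction time; it must be primed before log strings can be pushed in with send():

mailer = GMailer(['ops@example.com'], 'myuser', 'app-password')
next(mailer)                      # prime: connects to smtp.gmail.com, logs in, runs to the first yield
mailer.send('disk usage at 95%')  # mails the string to every recipient
mailer.close()                    # raises GeneratorExit inside the coroutine -> srvr.quit()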
def display_completions_like_readline(event):
"""
Key binding handler for readline-style tab completion.
This is meant to be as similar as possible to the way how readline displays
completions.
Generate the completions immediately (blocking) and display them above the
prompt in columns.
Usage::
# Call this handler when 'Tab' has been pressed.
registry.add_binding(Keys.ControlI)(display_completions_like_readline)
"""
# Request completions.
b = event.current_buffer
if b.completer is None:
return
complete_event = CompleteEvent(completion_requested=True)
completions = list(b.completer.get_completions(b.document, complete_event))
# Calculate the common suffix.
common_suffix = get_common_complete_suffix(b.document, completions)
# One completion: insert it.
if len(completions) == 1:
b.delete_before_cursor(-completions[0].start_position)
b.insert_text(completions[0].text)
# Multiple completions with common part.
elif common_suffix:
b.insert_text(common_suffix)
# Otherwise: display all completions.
elif completions:
_display_completions_like_readline(event.cli, completions) | [
"def",
"display_completions_like_readline",
"(",
"event",
")",
":",
"# Request completions.",
"b",
"=",
"event",
".",
"current_buffer",
"if",
"b",
".",
"completer",
"is",
"None",
":",
"return",
"complete_event",
"=",
"CompleteEvent",
"(",
"completion_requested",
"=",
"True",
")",
"completions",
"=",
"list",
"(",
"b",
".",
"completer",
".",
"get_completions",
"(",
"b",
".",
"document",
",",
"complete_event",
")",
")",
"# Calculate the common suffix.",
"common_suffix",
"=",
"get_common_complete_suffix",
"(",
"b",
".",
"document",
",",
"completions",
")",
"# One completion: insert it.",
"if",
"len",
"(",
"completions",
")",
"==",
"1",
":",
"b",
".",
"delete_before_cursor",
"(",
"-",
"completions",
"[",
"0",
"]",
".",
"start_position",
")",
"b",
".",
"insert_text",
"(",
"completions",
"[",
"0",
"]",
".",
"text",
")",
"# Multiple completions with common part.",
"elif",
"common_suffix",
":",
"b",
".",
"insert_text",
"(",
"common_suffix",
")",
"# Otherwise: display all completions.",
"elif",
"completions",
":",
"_display_completions_like_readline",
"(",
"event",
".",
"cli",
",",
"completions",
")"
] | 35.088235 | 20.617647 |
def robots(request):
"""Return a simple "don't index me" robots.txt file."""
resp = request.response
resp.status = '200 OK'
resp.content_type = 'text/plain'
resp.body = """
User-Agent: *
Disallow: /
"""
return resp | [
"def",
"robots",
"(",
"request",
")",
":",
"resp",
"=",
"request",
".",
"response",
"resp",
".",
"status",
"=",
"'200 OK'",
"resp",
".",
"content_type",
"=",
"'text/plain'",
"resp",
".",
"body",
"=",
"\"\"\"\nUser-Agent: *\nDisallow: /\n\"\"\"",
"return",
"resp"
] | 20.818182 | 15.636364 |
def readcfg(filepath, section):
"""
Reads the configuration file. If section is not available, calls
create_oedb_config_file to add the new section to an existing config.ini.
Parameters
----------
filepath : str
Absolute path of config file including the filename itself
section : str
Section in config file which contains connection details
Returns
-------
cfg : configparser.ConfigParser
Used for configuration file parser language.
"""
cfg = cp.ConfigParser()
cfg.read(filepath)
if not cfg.has_section(section):
print('The section "{sec}" is not in the config file {file}.'
.format(sec=section,
file=filepath))
cfg = create_oedb_config_file(filepath, section)
return cfg | [
"def",
"readcfg",
"(",
"filepath",
",",
"section",
")",
":",
"cfg",
"=",
"cp",
".",
"ConfigParser",
"(",
")",
"cfg",
".",
"read",
"(",
"filepath",
")",
"if",
"not",
"cfg",
".",
"has_section",
"(",
"section",
")",
":",
"print",
"(",
"'The section \"{sec}\" is not in the config file {file}.'",
".",
"format",
"(",
"sec",
"=",
"section",
",",
"file",
"=",
"filepath",
")",
")",
"cfg",
"=",
"create_oedb_config_file",
"(",
"filepath",
",",
"section",
")",
"return",
"cfg"
] | 29.592593 | 20.962963 |
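Usage sketch, assuming a config.ini that already contains an [oedb] section:

cfg = readcfg('/home/user/.config/config.ini', 'oedb')
host = cfg.get('oedb', 'host')     # standard configparser access from here on
port = cfg.getint('oedb', 'port')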
def add_property_to_response(self, code='200', prop_name='data', **kwargs):
"""Add a property (http://json-schema.org/latest/json-schema-validation.html#anchor64) # noqa: E501
to the schema of the response identified by the code"""
self['responses'] \
.setdefault(str(code), self._new_operation()) \
.setdefault('schema', {'type': 'object'}) \
.setdefault('properties', {}) \
.setdefault(prop_name, {}) \
.update(**kwargs) | [
"def",
"add_property_to_response",
"(",
"self",
",",
"code",
"=",
"'200'",
",",
"prop_name",
"=",
"'data'",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
"[",
"'responses'",
"]",
".",
"setdefault",
"(",
"str",
"(",
"code",
")",
",",
"self",
".",
"_new_operation",
"(",
")",
")",
".",
"setdefault",
"(",
"'schema'",
",",
"{",
"'type'",
":",
"'object'",
"}",
")",
".",
"setdefault",
"(",
"'properties'",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"prop_name",
",",
"{",
"}",
")",
".",
"update",
"(",
"*",
"*",
"kwargs",
")"
] | 55.444444 | 10.666667 |
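Effect sketch for the chained setdefault calls above; the surrounding operation object op and the Swagger-style layout are assumptions:

op.add_property_to_response(code=200, prop_name='data',
                            type='array', items={'type': 'string'})
# op['responses']['200'] now holds (at least):
# {'schema': {'type': 'object',
#             'properties': {'data': {'type': 'array',
#                                     'items': {'type': 'string'}}}}}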
def change_db_user_password(username, password):
"""Change a db user's password."""
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True) | [
"def",
"change_db_user_password",
"(",
"username",
",",
"password",
")",
":",
"sql",
"=",
"\"ALTER USER %s WITH PASSWORD '%s'\"",
"%",
"(",
"username",
",",
"password",
")",
"excute_query",
"(",
"sql",
",",
"use_sudo",
"=",
"True",
")"
] | 37.8 | 15.8 |
def set_text(self, text):
"""Sets the text attribute of the payload
:param text: (str) Text of the message
:return: None
"""
log = logging.getLogger(self.cls_logger + '.set_text')
if not isinstance(text, basestring):
msg = 'text arg must be a string'
log.error(msg)
raise ValueError(msg)
self.payload['text'] = text
log.debug('Set message text to: {t}'.format(t=text)) | [
"def",
"set_text",
"(",
"self",
",",
"text",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"self",
".",
"cls_logger",
"+",
"'.set_text'",
")",
"if",
"not",
"isinstance",
"(",
"text",
",",
"basestring",
")",
":",
"msg",
"=",
"'text arg must be a string'",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"self",
".",
"payload",
"[",
"'text'",
"]",
"=",
"text",
"log",
".",
"debug",
"(",
"'Set message text to: {t}'",
".",
"format",
"(",
"t",
"=",
"text",
")",
")"
] | 35.153846 | 12.076923 |
def render_it(self, kind, num, with_tag=False, glyph=''):
'''
Render recent posts of the given kind; used when no user is logged in.
'''
all_cats = MPost.query_recent(num, kind=kind)
kwd = {
'with_tag': with_tag,
'router': router_post[kind],
'glyph': glyph
}
return self.render_string('modules/info/list_equation.html',
recs=all_cats,
kwd=kwd) | [
"def",
"render_it",
"(",
"self",
",",
"kind",
",",
"num",
",",
"with_tag",
"=",
"False",
",",
"glyph",
"=",
"''",
")",
":",
"all_cats",
"=",
"MPost",
".",
"query_recent",
"(",
"num",
",",
"kind",
"=",
"kind",
")",
"kwd",
"=",
"{",
"'with_tag'",
":",
"with_tag",
",",
"'router'",
":",
"router_post",
"[",
"kind",
"]",
",",
"'glyph'",
":",
"glyph",
"}",
"return",
"self",
".",
"render_string",
"(",
"'modules/info/list_equation.html'",
",",
"recs",
"=",
"all_cats",
",",
"kwd",
"=",
"kwd",
")"
] | 34.307692 | 16.153846 |
def parse_response(response):
"""
Parse the response and return a dictionary if the content type
is application/json.
:param response: HTTPRequest
:return: dictionary for a JSON content type, otherwise the response body
"""
if response.headers.get('Content-Type', JSON_TYPE).startswith(JSON_TYPE):
return ResponseObject(json.loads(response.body))
else:
return response.body | [
"def",
"parse_response",
"(",
"response",
")",
":",
"if",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"JSON_TYPE",
")",
".",
"startswith",
"(",
"JSON_TYPE",
")",
":",
"return",
"ResponseObject",
"(",
"json",
".",
"loads",
"(",
"response",
".",
"body",
")",
")",
"else",
":",
"return",
"response",
".",
"body"
] | 36.363636 | 16.545455 |
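Usage sketch, assuming a Tornado-style HTTPResponse (with headers and body attributes) obtained from some HTTP client call:

result = parse_response(response)
if isinstance(result, ResponseObject):
    print('JSON payload:', result)   # parsed body wrapped in a ResponseObject
else:
    print('raw body:', result)       # non-JSON content type: body returned unchanged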
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first object of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case insensitive.
| If USER_IFIND_MODE is 'nocase_collation' this method maps to find_first_object().
| If USER_IFIND_MODE is 'ifind' this method performs a case insensitive find.
"""
# Call regular find() if USER_IFIND_MODE is nocase_collation
if self.user_manager.USER_IFIND_MODE=='nocase_collation':
return self.find_first_object(ObjectClass, **kwargs)
# Convert ...(email=value) to ...(email__iexact=value)
iexact_kwargs = {}
for key, value in kwargs.items():
iexact_kwargs[key+'__iexact'] = value
# Retrieve first object -- case insensitive
return ObjectClass.objects(**iexact_kwargs).first() | [
"def",
"ifind_first_object",
"(",
"self",
",",
"ObjectClass",
",",
"*",
"*",
"kwargs",
")",
":",
"# Call regular find() if USER_IFIND_MODE is nocase_collation",
"if",
"self",
".",
"user_manager",
".",
"USER_IFIND_MODE",
"==",
"'nocase_collation'",
":",
"return",
"self",
".",
"find_first_object",
"(",
"ObjectClass",
",",
"*",
"*",
"kwargs",
")",
"# Convert ...(email=value) to ...(email__iexact=value)",
"iexact_kwargs",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"iexact_kwargs",
"[",
"key",
"+",
"'__iexact'",
"]",
"=",
"value",
"# Retrieve first object -- case insensitive",
"return",
"ObjectClass",
".",
"objects",
"(",
"*",
"*",
"iexact_kwargs",
")",
".",
"first",
"(",
")"
] | 50.647059 | 22.117647 |
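A sketch of the 'ifind' path, assuming the adapter instance is adapter and User is a MongoEngine document class with an email field:

user = adapter.ifind_first_object(User, email='John.Doe@Example.com')
# with USER_IFIND_MODE == 'ifind', the kwargs are rewritten so this runs:
# User.objects(email__iexact='John.Doe@Example.com').first()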
def visit_Call(self, node):
"""
Transform call site to have normal function call.
Examples
--------
For methods:
>> a = [1, 2, 3]
>> a.append(1)
Becomes
>> __list__.append(a, 1)
For functions:
>> __builtin__.dict.fromkeys([1, 2, 3])
Becomes
>> __builtin__.__dict__.fromkeys([1, 2, 3])
"""
node = self.generic_visit(node)
# Only attributes function can be Pythonic and should be normalized
if isinstance(node.func, ast.Attribute):
if node.func.attr in methods:
# Get object targeted by methods
obj = lhs = node.func.value
# Get the most left identifier to check if it is not an
# imported module
while isinstance(obj, ast.Attribute):
obj = obj.value
is_not_module = (not isinstance(obj, ast.Name) or
obj.id not in self.imports)
if is_not_module:
self.update = True
# As it was a methods call, push targeted object as first
# arguments and add correct module prefix
node.args.insert(0, lhs)
mod = methods[node.func.attr][0]
# Submodules import full module
self.to_import.add(mangle(mod[0]))
node.func = reduce(
lambda v, o: ast.Attribute(v, o, ast.Load()),
mod[1:] + (node.func.attr,),
ast.Name(mangle(mod[0]), ast.Load(), None)
)
# else methods have been called using function syntax
if node.func.attr in methods or node.func.attr in functions:
# Now, methods and function have both function syntax
def rec(path, cur_module):
"""
Recursively rename path content looking in matching module.
Prefers __module__ to module if it exists.
This recursion is done as modules are visited top->bottom
while attributes have to be visited bottom->top.
"""
err = "Function path is chained attributes and name"
assert isinstance(path, (ast.Name, ast.Attribute)), err
if isinstance(path, ast.Attribute):
new_node, cur_module = rec(path.value, cur_module)
new_id, mname = self.renamer(path.attr, cur_module)
return (ast.Attribute(new_node, new_id, ast.Load()),
cur_module[mname])
else:
new_id, mname = self.renamer(path.id, cur_module)
if mname not in cur_module:
raise PythranSyntaxError(
"Unbound identifier '{}'".format(mname), node)
return (ast.Name(new_id, ast.Load(), None),
cur_module[mname])
# Rename module path to avoid naming issue.
node.func.value, _ = rec(node.func.value, MODULES)
self.update = True
return node | [
"def",
"visit_Call",
"(",
"self",
",",
"node",
")",
":",
"node",
"=",
"self",
".",
"generic_visit",
"(",
"node",
")",
"# Only attributes function can be Pythonic and should be normalized",
"if",
"isinstance",
"(",
"node",
".",
"func",
",",
"ast",
".",
"Attribute",
")",
":",
"if",
"node",
".",
"func",
".",
"attr",
"in",
"methods",
":",
"# Get object targeted by methods",
"obj",
"=",
"lhs",
"=",
"node",
".",
"func",
".",
"value",
"# Get the most left identifier to check if it is not an",
"# imported module",
"while",
"isinstance",
"(",
"obj",
",",
"ast",
".",
"Attribute",
")",
":",
"obj",
"=",
"obj",
".",
"value",
"is_not_module",
"=",
"(",
"not",
"isinstance",
"(",
"obj",
",",
"ast",
".",
"Name",
")",
"or",
"obj",
".",
"id",
"not",
"in",
"self",
".",
"imports",
")",
"if",
"is_not_module",
":",
"self",
".",
"update",
"=",
"True",
"# As it was a methods call, push targeted object as first",
"# arguments and add correct module prefix",
"node",
".",
"args",
".",
"insert",
"(",
"0",
",",
"lhs",
")",
"mod",
"=",
"methods",
"[",
"node",
".",
"func",
".",
"attr",
"]",
"[",
"0",
"]",
"# Submodules import full module",
"self",
".",
"to_import",
".",
"add",
"(",
"mangle",
"(",
"mod",
"[",
"0",
"]",
")",
")",
"node",
".",
"func",
"=",
"reduce",
"(",
"lambda",
"v",
",",
"o",
":",
"ast",
".",
"Attribute",
"(",
"v",
",",
"o",
",",
"ast",
".",
"Load",
"(",
")",
")",
",",
"mod",
"[",
"1",
":",
"]",
"+",
"(",
"node",
".",
"func",
".",
"attr",
",",
")",
",",
"ast",
".",
"Name",
"(",
"mangle",
"(",
"mod",
"[",
"0",
"]",
")",
",",
"ast",
".",
"Load",
"(",
")",
",",
"None",
")",
")",
"# else methods have been called using function syntax",
"if",
"node",
".",
"func",
".",
"attr",
"in",
"methods",
"or",
"node",
".",
"func",
".",
"attr",
"in",
"functions",
":",
"# Now, methods and function have both function syntax",
"def",
"rec",
"(",
"path",
",",
"cur_module",
")",
":",
"\"\"\"\n Recursively rename path content looking in matching module.\n\n Prefers __module__ to module if it exists.\n This recursion is done as modules are visited top->bottom\n while attributes have to be visited bottom->top.\n \"\"\"",
"err",
"=",
"\"Function path is chained attributes and name\"",
"assert",
"isinstance",
"(",
"path",
",",
"(",
"ast",
".",
"Name",
",",
"ast",
".",
"Attribute",
")",
")",
",",
"err",
"if",
"isinstance",
"(",
"path",
",",
"ast",
".",
"Attribute",
")",
":",
"new_node",
",",
"cur_module",
"=",
"rec",
"(",
"path",
".",
"value",
",",
"cur_module",
")",
"new_id",
",",
"mname",
"=",
"self",
".",
"renamer",
"(",
"path",
".",
"attr",
",",
"cur_module",
")",
"return",
"(",
"ast",
".",
"Attribute",
"(",
"new_node",
",",
"new_id",
",",
"ast",
".",
"Load",
"(",
")",
")",
",",
"cur_module",
"[",
"mname",
"]",
")",
"else",
":",
"new_id",
",",
"mname",
"=",
"self",
".",
"renamer",
"(",
"path",
".",
"id",
",",
"cur_module",
")",
"if",
"mname",
"not",
"in",
"cur_module",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Unbound identifier '{}'\"",
".",
"format",
"(",
"mname",
")",
",",
"node",
")",
"return",
"(",
"ast",
".",
"Name",
"(",
"new_id",
",",
"ast",
".",
"Load",
"(",
")",
",",
"None",
")",
",",
"cur_module",
"[",
"mname",
"]",
")",
"# Rename module path to avoid naming issue.",
"node",
".",
"func",
".",
"value",
",",
"_",
"=",
"rec",
"(",
"node",
".",
"func",
".",
"value",
",",
"MODULES",
")",
"self",
".",
"update",
"=",
"True",
"return",
"node"
] | 40.666667 | 21.333333 |
def disable_svc_check(self, service):
"""Disable checks for a service
Format of the line that triggers function call::
DISABLE_SVC_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.active_checks_enabled:
service.disable_active_checks(self.daemon.checks)
service.modified_attributes |= \
DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.send_an_element(service.get_update_status_brok()) | [
"def",
"disable_svc_check",
"(",
"self",
",",
"service",
")",
":",
"if",
"service",
".",
"active_checks_enabled",
":",
"service",
".",
"disable_active_checks",
"(",
"self",
".",
"daemon",
".",
"checks",
")",
"service",
".",
"modified_attributes",
"|=",
"DICT_MODATTR",
"[",
"\"MODATTR_ACTIVE_CHECKS_ENABLED\"",
"]",
".",
"value",
"self",
".",
"send_an_element",
"(",
"service",
".",
"get_update_status_brok",
"(",
")",
")"
] | 39.666667 | 15.4 |
def is_email(data):
"""
Check if given data string is an email.
Usage::
>>> is_email("john.doe@domain.com")
True
>>> is_email("john.doe:domain.com")
False
:param data: Data to check.
:type data: unicode
:return: Is email.
:rtype: bool
"""
if re.match(r"[\w.%+-]+@[\w.]+\.[a-zA-Z]{2,4}", data):
LOGGER.debug("> {0}' is matched as email.".format(data))
return True
else:
LOGGER.debug("> {0}' is not matched as email.".format(data))
return False | [
"def",
"is_email",
"(",
"data",
")",
":",
"if",
"re",
".",
"match",
"(",
"r\"[\\w.%+-]+@[\\w.]+\\.[a-zA-Z]{2,4}\"",
",",
"data",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"> {0}' is matched as email.\"",
".",
"format",
"(",
"data",
")",
")",
"return",
"True",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"\"> {0}' is not matched as email.\"",
".",
"format",
"(",
"data",
")",
")",
"return",
"False"
] | 22.956522 | 21.043478 |
def _xy_locs(mask):
"""Mask should be a set of bools from comparison with a feature layer."""
y, x = mask.nonzero()
return list(zip(x, y)) | [
"def",
"_xy_locs",
"(",
"mask",
")",
":",
"y",
",",
"x",
"=",
"mask",
".",
"nonzero",
"(",
")",
"return",
"list",
"(",
"zip",
"(",
"x",
",",
"y",
")",
")"
] | 35.25 | 13.5 |
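A minimal sketch with a small boolean mask; note the result is in (x, y) order, not (row, column):

import numpy as np

layer = np.array([[0, 1],
                  [1, 0]])
print(_xy_locs(layer == 1))   # [(1, 0), (0, 1)]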
def to_boolean(value, ctx):
"""
Tries conversion of any value to a boolean
"""
if isinstance(value, bool):
return value
elif isinstance(value, int):
return value != 0
elif isinstance(value, Decimal):
return value != Decimal(0)
elif isinstance(value, str):
value = value.lower()
if value == 'true':
return True
elif value == 'false':
return False
elif isinstance(value, datetime.date) or isinstance(value, datetime.time):
return True
raise EvaluationError("Can't convert '%s' to a boolean" % str(value)) | [
"def",
"to_boolean",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"value",
"!=",
"0",
"elif",
"isinstance",
"(",
"value",
",",
"Decimal",
")",
":",
"return",
"value",
"!=",
"Decimal",
"(",
"0",
")",
"elif",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"value",
".",
"lower",
"(",
")",
"if",
"value",
"==",
"'true'",
":",
"return",
"True",
"elif",
"value",
"==",
"'false'",
":",
"return",
"False",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"date",
")",
"or",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"time",
")",
":",
"return",
"True",
"raise",
"EvaluationError",
"(",
"\"Can't convert '%s' to a boolean\"",
"%",
"str",
"(",
"value",
")",
")"
] | 30 | 14.4 |
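Behaviour sketch (the ctx argument is not consulted by the branches shown, so None is passed):

from decimal import Decimal
import datetime

to_boolean(0, None)                       # False: zero int
to_boolean(Decimal('2.5'), None)          # True: non-zero Decimal
to_boolean('TRUE', None)                  # True: string match is case-insensitive
to_boolean(datetime.date.today(), None)   # True: dates/times are always truthy
to_boolean('maybe', None)                 # raises EvaluationError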
def trimSegments(self, minPermanence=None, minNumSyns=None):
"""
This method deletes all synapses whose permanence is less than
minPermanence and deletes any segments that have less than
minNumSyns synapses remaining.
:param minPermanence: (float) Any syn whose permanence is 0 or <
``minPermanence`` will be deleted. If None is passed in, then
``self.connectedPerm`` is used.
:param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses
remaining in it will be deleted. If None is passed in, then
``self.activationThreshold`` is used.
:returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved``
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all cells
totalSegsRemoved, totalSynsRemoved = 0, 0
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
(segsRemoved, synsRemoved) = self._trimSegmentsInCell(
colIdx=c, cellIdx=i, segList=self.cells[c][i],
minPermanence=minPermanence, minNumSyns=minNumSyns)
totalSegsRemoved += segsRemoved
totalSynsRemoved += synsRemoved
# Print all cells if verbosity says to
if self.verbosity >= 5:
print "Cells, all segments:"
self.printCells(predictedOnly=False)
return totalSegsRemoved, totalSynsRemoved | [
"def",
"trimSegments",
"(",
"self",
",",
"minPermanence",
"=",
"None",
",",
"minNumSyns",
"=",
"None",
")",
":",
"# Fill in defaults",
"if",
"minPermanence",
"is",
"None",
":",
"minPermanence",
"=",
"self",
".",
"connectedPerm",
"if",
"minNumSyns",
"is",
"None",
":",
"minNumSyns",
"=",
"self",
".",
"activationThreshold",
"# Loop through all cells",
"totalSegsRemoved",
",",
"totalSynsRemoved",
"=",
"0",
",",
"0",
"for",
"c",
",",
"i",
"in",
"itertools",
".",
"product",
"(",
"xrange",
"(",
"self",
".",
"numberOfCols",
")",
",",
"xrange",
"(",
"self",
".",
"cellsPerColumn",
")",
")",
":",
"(",
"segsRemoved",
",",
"synsRemoved",
")",
"=",
"self",
".",
"_trimSegmentsInCell",
"(",
"colIdx",
"=",
"c",
",",
"cellIdx",
"=",
"i",
",",
"segList",
"=",
"self",
".",
"cells",
"[",
"c",
"]",
"[",
"i",
"]",
",",
"minPermanence",
"=",
"minPermanence",
",",
"minNumSyns",
"=",
"minNumSyns",
")",
"totalSegsRemoved",
"+=",
"segsRemoved",
"totalSynsRemoved",
"+=",
"synsRemoved",
"# Print all cells if verbosity says to",
"if",
"self",
".",
"verbosity",
">=",
"5",
":",
"print",
"\"Cells, all segments:\"",
"self",
".",
"printCells",
"(",
"predictedOnly",
"=",
"False",
")",
"return",
"totalSegsRemoved",
",",
"totalSynsRemoved"
] | 39.918919 | 17.162162 |
def _add_task(self, task):
'''Add an already existing task to the task group.'''
if hasattr(task, '_task_group'):
raise RuntimeError('task is already part of a group')
if self._closed:
raise RuntimeError('task group is closed')
task._task_group = self
if task.done():
self._done.append(task)
else:
self._pending.add(task)
task.add_done_callback(self._on_done) | [
"def",
"_add_task",
"(",
"self",
",",
"task",
")",
":",
"if",
"hasattr",
"(",
"task",
",",
"'_task_group'",
")",
":",
"raise",
"RuntimeError",
"(",
"'task is already part of a group'",
")",
"if",
"self",
".",
"_closed",
":",
"raise",
"RuntimeError",
"(",
"'task group is closed'",
")",
"task",
".",
"_task_group",
"=",
"self",
"if",
"task",
".",
"done",
"(",
")",
":",
"self",
".",
"_done",
".",
"append",
"(",
"task",
")",
"else",
":",
"self",
".",
"_pending",
".",
"add",
"(",
"task",
")",
"task",
".",
"add_done_callback",
"(",
"self",
".",
"_on_done",
")"
] | 38 | 13.5 |
def upload(ctx, release, rebuild):
""" Uploads distribuition files to pypi or pypitest. """
dist_path = Path(DIST_PATH)
if rebuild is False:
if not dist_path.exists() or not list(dist_path.glob('*')):
print("No distribution files found. Please run 'build' command first")
return
else:
ctx.invoke(build, force=True)
if release:
args = ['twine', 'upload', 'dist/*']
else:
repository = 'https://test.pypi.org/legacy/'
args = ['twine', 'upload', '--repository-url', repository, 'dist/*']
env = os.environ.copy()
p = subprocess.Popen(args, env=env)
p.wait() | [
"def",
"upload",
"(",
"ctx",
",",
"release",
",",
"rebuild",
")",
":",
"dist_path",
"=",
"Path",
"(",
"DIST_PATH",
")",
"if",
"rebuild",
"is",
"False",
":",
"if",
"not",
"dist_path",
".",
"exists",
"(",
")",
"or",
"not",
"list",
"(",
"dist_path",
".",
"glob",
"(",
"'*'",
")",
")",
":",
"print",
"(",
"\"No distribution files found. Please run 'build' command first\"",
")",
"return",
"else",
":",
"ctx",
".",
"invoke",
"(",
"build",
",",
"force",
"=",
"True",
")",
"if",
"release",
":",
"args",
"=",
"[",
"'twine'",
",",
"'upload'",
",",
"'dist/*'",
"]",
"else",
":",
"repository",
"=",
"'https://test.pypi.org/legacy/'",
"args",
"=",
"[",
"'twine'",
",",
"'upload'",
",",
"'--repository-url'",
",",
"repository",
",",
"'dist/*'",
"]",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"env",
"=",
"env",
")",
"p",
".",
"wait",
"(",
")"
] | 31.8 | 21.3 |
def emit(self):
"""Get a mapping from a transcript
:return: One random Transcript sequence
:rtype: sequence
"""
i = self.options.rand.get_weighted_random_index(self._weights)
return self._transcriptome.transcripts[i] | [
"def",
"emit",
"(",
"self",
")",
":",
"i",
"=",
"self",
".",
"options",
".",
"rand",
".",
"get_weighted_random_index",
"(",
"self",
".",
"_weights",
")",
"return",
"self",
".",
"_transcriptome",
".",
"transcripts",
"[",
"i",
"]"
] | 29.25 | 14.875 |
def stayOpen(self):
"""optional dialog restore"""
if not self._wantToClose:
self.show()
self.setGeometry(self._geometry) | [
"def",
"stayOpen",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_wantToClose",
":",
"self",
".",
"show",
"(",
")",
"self",
".",
"setGeometry",
"(",
"self",
".",
"_geometry",
")"
] | 32 | 9.2 |
def to_dict_list(df, use_ordered_dict=True):
"""Transform each row to dict, and put them into a list.
**中文文档**
将 ``pandas.DataFrame`` 转换成一个字典的列表。列表的长度与行数相同, 其中
每一个字典相当于表中的一行, 相当于一个 ``pandas.Series`` 对象。
"""
if use_ordered_dict:
dict = OrderedDict
columns = df.columns
data = list()
for tp in itertuple(df):
data.append(dict(zip(columns, tp)))
return data | [
"def",
"to_dict_list",
"(",
"df",
",",
"use_ordered_dict",
"=",
"True",
")",
":",
"if",
"use_ordered_dict",
":",
"dict",
"=",
"OrderedDict",
"columns",
"=",
"df",
".",
"columns",
"data",
"=",
"list",
"(",
")",
"for",
"tp",
"in",
"itertuple",
"(",
"df",
")",
":",
"data",
".",
"append",
"(",
"dict",
"(",
"zip",
"(",
"columns",
",",
"tp",
")",
")",
")",
"return",
"data"
] | 24.875 | 17.4375 |
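Usage sketch (requires pandas and the module-level itertuple helper used above):

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
rows = to_dict_list(df)
# expected: [OrderedDict([('a', 1), ('b', 'x')]),
#            OrderedDict([('a', 2), ('b', 'y')])]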
def intervalleftjoin(left, right, lstart='start', lstop='stop', rstart='start',
rstop='stop', lkey=None, rkey=None, include_stop=False,
missing=None, lprefix=None, rprefix=None):
"""
Like :func:`petl.transform.intervals.intervaljoin` but rows from the left
table without a match in the right table are also included. E.g.::
>>> import petl as etl
>>> left = [['begin', 'end', 'quux'],
... [1, 2, 'a'],
... [2, 4, 'b'],
... [2, 5, 'c'],
... [9, 14, 'd'],
... [1, 1, 'e'],
... [10, 10, 'f']]
>>> right = [['start', 'stop', 'value'],
... [1, 4, 'foo'],
... [3, 7, 'bar'],
... [4, 9, 'baz']]
>>> table1 = etl.intervalleftjoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop')
>>> table1.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 9 | 14 | 'd' | None | None | None |
+-------+-----+------+-------+------+-------+
| 1 | 1 | 'e' | None | None | None |
+-------+-----+------+-------+------+-------+
| 10 | 10 | 'f' | None | None | None |
+-------+-----+------+-------+------+-------+
Note start coordinates are included and stop coordinates are excluded
from the interval. Use the `include_stop` keyword argument to include the
upper bound of the interval when finding overlaps.
"""
assert (lkey is None) == (rkey is None), \
'facet key field must be provided for both or neither table'
return IntervalLeftJoinView(left, right, lstart=lstart, lstop=lstop,
rstart=rstart, rstop=rstop, lkey=lkey,
rkey=rkey, include_stop=include_stop,
missing=missing, lprefix=lprefix,
rprefix=rprefix) | [
"def",
"intervalleftjoin",
"(",
"left",
",",
"right",
",",
"lstart",
"=",
"'start'",
",",
"lstop",
"=",
"'stop'",
",",
"rstart",
"=",
"'start'",
",",
"rstop",
"=",
"'stop'",
",",
"lkey",
"=",
"None",
",",
"rkey",
"=",
"None",
",",
"include_stop",
"=",
"False",
",",
"missing",
"=",
"None",
",",
"lprefix",
"=",
"None",
",",
"rprefix",
"=",
"None",
")",
":",
"assert",
"(",
"lkey",
"is",
"None",
")",
"==",
"(",
"rkey",
"is",
"None",
")",
",",
"'facet key field must be provided for both or neither table'",
"return",
"IntervalLeftJoinView",
"(",
"left",
",",
"right",
",",
"lstart",
"=",
"lstart",
",",
"lstop",
"=",
"lstop",
",",
"rstart",
"=",
"rstart",
",",
"rstop",
"=",
"rstop",
",",
"lkey",
"=",
"lkey",
",",
"rkey",
"=",
"rkey",
",",
"include_stop",
"=",
"include_stop",
",",
"missing",
"=",
"missing",
",",
"lprefix",
"=",
"lprefix",
",",
"rprefix",
"=",
"rprefix",
")"
] | 48.017241 | 17.362069 |
def _open_connection(self):
"""Open a connection to the easyfire unit."""
if (self._mode == PROP_MODE_SERIAL):
self._serial = serial.Serial(self._serial_device, self._serial_speed)
elif (self._mode == PROP_MODE_TCP):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((self._ip, self._port))
elif (self._mode == PROP_MODE_FILE):
self._file = open(self._file_path, "r") | [
"def",
"_open_connection",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"_mode",
"==",
"PROP_MODE_SERIAL",
")",
":",
"self",
".",
"_serial",
"=",
"serial",
".",
"Serial",
"(",
"self",
".",
"_serial_device",
",",
"self",
".",
"_serial_speed",
")",
"elif",
"(",
"self",
".",
"_mode",
"==",
"PROP_MODE_TCP",
")",
":",
"self",
".",
"_socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"self",
".",
"_socket",
".",
"connect",
"(",
"(",
"self",
".",
"_ip",
",",
"self",
".",
"_port",
")",
")",
"elif",
"(",
"self",
".",
"_mode",
"==",
"PROP_MODE_FILE",
")",
":",
"self",
".",
"_file",
"=",
"open",
"(",
"self",
".",
"_file_path",
",",
"\"r\"",
")"
] | 52.777778 | 14.222222 |
def get_build_work_items_refs_from_commits(self, commit_ids, project, build_id, top=None):
"""GetBuildWorkItemsRefsFromCommits.
Gets the work items associated with a build, filtered to specific commits.
:param [str] commit_ids: A comma-delimited list of commit IDs.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param int top: The maximum number of work items to return, or the number of commits to consider if no commit IDs are specified.
:rtype: [ResourceRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
content = self._serialize.body(commit_ids, '[str]')
response = self._send(http_method='POST',
location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee',
version='5.0',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('[ResourceRef]', self._unwrap_collection(response)) | [
"def",
"get_build_work_items_refs_from_commits",
"(",
"self",
",",
"commit_ids",
",",
"project",
",",
"build_id",
",",
"top",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"build_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'buildId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'build_id'",
",",
"build_id",
",",
"'int'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"top",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'$top'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'top'",
",",
"top",
",",
"'int'",
")",
"content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"commit_ids",
",",
"'[str]'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'POST'",
",",
"location_id",
"=",
"'5a21f5d2-5642-47e4-a0bd-1356e6731bee'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
",",
"content",
"=",
"content",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[ResourceRef]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] | 57.64 | 24 |
def register_drop(self, task, event_details=None):
""" :meth:`.WSimpleTrackerStorage.register_drop` method implementation
"""
if self.record_drop() is True:
record_type = WTrackerEvents.drop
record = WSimpleTrackerStorage.Record(record_type, task, event_details=event_details)
self.__store_record(record) | [
"def",
"register_drop",
"(",
"self",
",",
"task",
",",
"event_details",
"=",
"None",
")",
":",
"if",
"self",
".",
"record_drop",
"(",
")",
"is",
"True",
":",
"record_type",
"=",
"WTrackerEvents",
".",
"drop",
"record",
"=",
"WSimpleTrackerStorage",
".",
"Record",
"(",
"record_type",
",",
"task",
",",
"event_details",
"=",
"event_details",
")",
"self",
".",
"__store_record",
"(",
"record",
")"
] | 44.714286 | 11.428571 |
def export(self, fmt, filename=None, **kargs):
""" export *MDF* to other formats. The *MDF* file name is used is
available, else the *filename* argument must be provided.
The *pandas* export option was removed. You should use the method
*to_dataframe* instead.
Parameters
----------
fmt : string
can be one of the following:
* `csv` : CSV export that uses the ";" delimiter. This option
will generate a new csv file for each data group
(<MDFNAME>_DataGroup_<cntr>.csv)
* `hdf5` : HDF5 file output; each *MDF* data group is mapped to
a *HDF5* group with the name 'DataGroup_<cntr>'
(where <cntr> is the index)
* `excel` : Excel file output (very slow). This option will
generate a new excel file for each data group
(<MDFNAME>_DataGroup_<cntr>.xlsx)
* `mat` : Matlab .mat version 4, 5 or 7.3 export. If
*single_time_base==False* the channels will be renamed in the mat
file to 'D<cntr>_<channel name>'. The channel group
master will be renamed to 'DM<cntr>_<channel name>'
( *<cntr>* is the data group index starting from 0)
* `parquet` : export to Apache parquet format
filename : string | pathlib.Path
export file name
**kwargs
* `single_time_base`: resample all channels to common time base,
default *False*
* `raster`: float time raster for resampling. Valid if
*single_time_base* is *True*
* `time_from_zero`: adjust time channel to start from 0
* `use_display_names`: use display name instead of standard channel
name, if available.
* `empty_channels`: behaviour for channels without samples; the
options are *skip* or *zeros*; default is *skip*
* `format`: only valid for *mat* export; can be '4', '5' or '7.3',
default is '5'
* `oned_as`: only valid for *mat* export; can be 'row' or 'column'
* `keep_arrays` : keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
* reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for the minimum dtype that can represent the values found
in integer columns; default *False*
* compression : str
compression to be used
* for ``parquet`` : "GZIP" or "SNAPPY"
* for ``hdf5`` : "gzip", "lzf" or "szip"
* for ``mat`` : bool
"""
from time import perf_counter as pc
header_items = ("date", "time", "author", "department", "project", "subject")
if fmt != "pandas" and filename is None and self.name is None:
message = (
"Must specify filename for export"
"if MDF was created without a file name"
)
logger.warning(message)
return
single_time_base = kargs.get("single_time_base", False)
raster = kargs.get("raster", 0)
time_from_zero = kargs.get("time_from_zero", True)
use_display_names = kargs.get("use_display_names", True)
empty_channels = kargs.get("empty_channels", "skip")
format = kargs.get("format", "5")
oned_as = kargs.get("oned_as", "row")
reduce_memory_usage = kargs.get("reduce_memory_usage", False)
compression = kargs.get("compression", "")
if compression == 'SNAPPY':
try:
import snappy
except ImportError:
logger.warning(
"snappy compressor is not installed; compression will be set to GZIP"
)
compression = "GZIP"
name = Path(filename) if filename else self.name
if fmt == "parquet":
try:
from fastparquet import write as write_parquet
except ImportError:
logger.warning(
"fastparquet not found; export to parquet is unavailable"
)
return
elif fmt == "hdf5":
try:
from h5py import File as HDF5
except ImportError:
logger.warning("h5py not found; export to HDF5 is unavailable")
return
elif fmt == "excel":
try:
import xlsxwriter
except ImportError:
logger.warning("xlsxwriter not found; export to Excel unavailable")
return
elif fmt == "mat":
if format == "7.3":
try:
from hdf5storage import savemat
except ImportError:
logger.warning(
"hdf5storage not found; export to mat v7.3 is unavailable"
)
return
else:
try:
from scipy.io import savemat
except ImportError:
logger.warning("scipy not found; export to mat is unavailable")
return
if single_time_base or fmt == "parquet":
df = self.to_dataframe(
raster=raster,
time_from_zero=time_from_zero,
use_display_names=use_display_names,
empty_channels=empty_channels,
reduce_memory_usage=reduce_memory_usage,
)
units = OrderedDict()
comments = OrderedDict()
used_names = UniqueDB()
for i, grp in enumerate(self.groups):
if self._terminate:
return
included_channels = self._included_channels(i)
for j in included_channels:
ch = grp.channels[j]
if use_display_names:
channel_name = ch.display_name or ch.name
else:
channel_name = ch.name
channel_name = used_names.get_unique_name(channel_name)
if hasattr(ch, 'unit'):
unit = ch.unit
if ch.conversion:
unit = unit or ch.conversion.unit
comment = ch.comment
units[channel_name] = unit
comments[channel_name] = comment
if fmt == "hdf5":
name = name.with_suffix(".hdf")
if single_time_base:
with HDF5(str(name), "w") as hdf:
# header information
group = hdf.create_group(str(name))
if self.version in MDF2_VERSIONS + MDF3_VERSIONS:
for item in header_items:
group.attrs[item] = self.header[item]
# save each data group in a HDF5 group called
# "DataGroup_<cntr>" with the index starting from 1
# each HDF5 group will have a string attribute "master"
# that will hold the name of the master channel
for channel in df:
samples = df[channel]
unit = units[channel]
comment = comments[channel]
if samples.dtype.kind == 'O':
continue
if compression:
dataset = group.create_dataset(
channel,
data=samples,
compression=compression,
)
else:
dataset = group.create_dataset(channel, data=samples)
unit = unit.replace("\0", "")
if unit:
dataset.attrs["unit"] = unit
comment = comment.replace("\0", "")
if comment:
dataset.attrs["comment"] = comment
else:
with HDF5(str(name), "w") as hdf:
# header information
group = hdf.create_group(str(name))
if self.version in MDF2_VERSIONS + MDF3_VERSIONS:
for item in header_items:
group.attrs[item] = self.header[item]
# save each data group in a HDF5 group called
# "DataGroup_<cntr>" with the index starting from 1
# each HDF5 group will have a string attribute "master"
# that will hold the name of the master channel
for i, grp in enumerate(self.groups):
names = UniqueDB()
if self._terminate:
return
group_name = r"/" + f"DataGroup_{i+1}"
group = hdf.create_group(group_name)
master_index = self.masters_db.get(i, -1)
if master_index:
group.attrs["master"] = grp.channels[master_index].name
channels = self.select(
[(ch.name, i, None) for ch in grp.channels]
)
for j, sig in enumerate(channels):
if use_display_names:
name = sig.display_name or sig.name
else:
name = sig.name
name = names.get_unique_name(name)
if reduce_memory_usage:
sig.samples = downcast(sig.samples)
if compression:
dataset = group.create_dataset(
name,
data=sig.samples,
compression=compression,
)
else:
dataset = group.create_dataset(name, data=sig.samples, dtype=sig.samples.dtype)
unit = sig.unit
if unit:
dataset.attrs["unit"] = unit
comment = sig.comment.replace("\0", "")
if comment:
dataset.attrs["comment"] = comment
elif fmt == "excel":
if single_time_base:
name = name.with_suffix(".xlsx")
message = f'Writing excel export to file "{name}"'
logger.info(message)
workbook = xlsxwriter.Workbook(str(name))
sheet = workbook.add_worksheet("Channels")
for col, (channel_name, channel_unit) in enumerate(units.items()):
if self._terminate:
return
samples = df[channel_name]
sig_description = f"{channel_name} [{channel_unit}]"
sheet.write(0, col, sig_description)
try:
sheet.write_column(1, col, samples.astype(str))
except:
vals = [str(e) for e in sig.samples]
sheet.write_column(1, col, vals)
workbook.close()
else:
while name.suffix == ".xlsx":
name = name.stem
count = len(self.groups)
for i, grp in enumerate(self.groups):
if self._terminate:
return
message = f"Exporting group {i+1} of {count}"
logger.info(message)
data = self._load_data(grp)
data = b"".join(d[0] for d in data)
data = (data, 0, -1)
master_index = self.masters_db.get(i, None)
if master_index is not None:
master = self.get(group=i, index=master_index, data=data)
if raster and len(master):
raster_ = np.arange(
master[0], master[-1], raster, dtype=np.float64
)
master = master.interp(raster_)
else:
raster_ = None
else:
master = None
raster_ = None
                    if time_from_zero and master is not None and len(master):
                        master.samples -= master.samples[0]
group_name = f"DataGroup_{i+1}"
wb_name = Path(f"{name.stem}_{group_name}.xlsx")
workbook = xlsxwriter.Workbook(str(wb_name))
sheet = workbook.add_worksheet(group_name)
if master is not None:
sig_description = f"{master.name} [{master.unit}]"
sheet.write(0, 0, sig_description)
sheet.write_column(1, 0, master.samples.astype(str))
offset = 1
else:
offset = 0
for col, _ in enumerate(grp.channels):
if self._terminate:
return
if col == master_index:
offset -= 1
continue
sig = self.get(group=i, index=col, data=data)
if raster_ is not None:
sig = sig.interp(raster_, self._integer_interpolation)
sig_description = f"{sig.name} [{sig.unit}]"
sheet.write(0, col + offset, sig_description)
try:
sheet.write_column(1, col + offset, sig.samples.astype(str))
except:
vals = [str(e) for e in sig.samples]
sheet.write_column(1, col + offset, vals)
workbook.close()
del self._master_channel_cache[(i, 0, -1)]
elif fmt == "csv":
if single_time_base:
name = name.with_suffix(".csv")
message = f'Writing csv export to file "{name}"'
logger.info(message)
with open(name, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
names_row = [
f"{channel_name} [{channel_unit}]"
for (channel_name, channel_unit) in units.items()
]
writer.writerow(names_row)
vals = [df[name] for name in df]
if self._terminate:
return
for row in zip(*vals):
writer.writerow(row)
else:
name = name.with_suffix(".csv")
count = len(self.groups)
for i, grp in enumerate(self.groups):
if self._terminate:
return
message = f"Exporting group {i+1} of {count}"
logger.info(message)
data = self._load_data(grp)
data = b"".join(d[0] for d in data)
data = (data, 0, -1)
group_csv_name = name.parent / f"{name.stem}_DataGroup_{i+1}.csv"
with open(group_csv_name, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
master_index = self.masters_db.get(i, None)
if master_index is not None:
master = self.get(group=i, index=master_index, data=data)
if raster and len(master):
raster_ = np.arange(
master[0], master[-1], raster, dtype=np.float64
)
master = master.interp(raster_)
else:
raster_ = None
else:
master = None
raster_ = None
if time_from_zero:
if master is None:
pass
elif len(master):
master.samples -= master.samples[0]
ch_nr = len(grp.channels)
if master is None:
channels = [
self.get(group=i, index=j, data=data)
for j in range(ch_nr)
]
else:
if raster_ is not None:
channels = [
self.get(group=i, index=j, data=data).interp(
raster_, self._integer_interpolation
)
for j in range(ch_nr)
if j != master_index
]
else:
channels = [
self.get(group=i, index=j, data=data)
for j in range(ch_nr)
if j != master_index
]
if raster_ is not None:
cycles = len(raster_)
else:
cycles = grp.channel_group["cycles_nr"]
if empty_channels == "zeros":
for channel in channels:
if not len(channel):
channel.samples = np.zeros(
cycles, dtype=channel.samples.dtype
)
if master is not None:
names_row = [master.name]
vals = [master.samples]
else:
names_row = []
vals = []
names_row += [f"{ch.name} [{ch.unit}]" for ch in channels]
writer.writerow(names_row)
vals += [ch.samples for ch in channels]
for idx, row in enumerate(zip(*vals)):
writer.writerow(row)
del self._master_channel_cache[(i, 0, -1)]
elif fmt == "mat":
name = name.with_suffix(".mat")
if not single_time_base:
mdict = {}
master_name_template = "DGM{}_{}"
channel_name_template = "DG{}_{}"
used_names = UniqueDB()
for i, grp in enumerate(self.groups):
if self._terminate:
return
included_channels = self._included_channels(i)
master_index = self.masters_db.get(i, -1)
if master_index >= 0:
included_channels.add(master_index)
channels = self.select(
[(None, i, idx) for idx in included_channels]
)
for j, sig in zip(included_channels, channels):
if j == master_index:
channel_name = master_name_template.format(i, sig.name)
else:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = channel_name_template.format(i, channel_name)
channel_name = matlab_compatible(channel_name)
channel_name = used_names.get_unique_name(channel_name)
if sig.samples.dtype.names:
sig.samples.dtype.names = [
matlab_compatible(name)
for name in sig.samples.dtype.names
]
mdict[channel_name] = sig.samples
else:
used_names = UniqueDB()
mdict = {}
for name in df.columns:
channel_name = matlab_compatible(name)
channel_name = used_names.get_unique_name(channel_name)
mdict[channel_name] = df[name].values
mdict['time'] = df.index.values
if format == "7.3":
savemat(
str(name),
mdict,
long_field_names=True,
format="7.3",
delete_unused_variables=False,
oned_as=oned_as,
)
else:
savemat(
str(name),
mdict,
long_field_names=True,
oned_as=oned_as,
do_compression=bool(compression),
)
elif fmt == "parquet":
name = name.with_suffix(".parquet")
if compression:
write_parquet(name, df, compression=compression)
else:
write_parquet(name, df)
else:
message = (
                'Unsupported export type "{}". '
'Please select "csv", "excel", "hdf5", "mat" or "pandas"'
)
            message = message.format(fmt)
logger.warning(message) | [
"def",
"export",
"(",
"self",
",",
"fmt",
",",
"filename",
"=",
"None",
",",
"*",
"*",
"kargs",
")",
":",
"from",
"time",
"import",
"perf_counter",
"as",
"pc",
"header_items",
"=",
"(",
"\"date\"",
",",
"\"time\"",
",",
"\"author\"",
",",
"\"department\"",
",",
"\"project\"",
",",
"\"subject\"",
")",
"if",
"fmt",
"!=",
"\"pandas\"",
"and",
"filename",
"is",
"None",
"and",
"self",
".",
"name",
"is",
"None",
":",
"message",
"=",
"(",
"\"Must specify filename for export\"",
"\"if MDF was created without a file name\"",
")",
"logger",
".",
"warning",
"(",
"message",
")",
"return",
"single_time_base",
"=",
"kargs",
".",
"get",
"(",
"\"single_time_base\"",
",",
"False",
")",
"raster",
"=",
"kargs",
".",
"get",
"(",
"\"raster\"",
",",
"0",
")",
"time_from_zero",
"=",
"kargs",
".",
"get",
"(",
"\"time_from_zero\"",
",",
"True",
")",
"use_display_names",
"=",
"kargs",
".",
"get",
"(",
"\"use_display_names\"",
",",
"True",
")",
"empty_channels",
"=",
"kargs",
".",
"get",
"(",
"\"empty_channels\"",
",",
"\"skip\"",
")",
"format",
"=",
"kargs",
".",
"get",
"(",
"\"format\"",
",",
"\"5\"",
")",
"oned_as",
"=",
"kargs",
".",
"get",
"(",
"\"oned_as\"",
",",
"\"row\"",
")",
"reduce_memory_usage",
"=",
"kargs",
".",
"get",
"(",
"\"reduce_memory_usage\"",
",",
"False",
")",
"compression",
"=",
"kargs",
".",
"get",
"(",
"\"compression\"",
",",
"\"\"",
")",
"if",
"compression",
"==",
"'SNAPPY'",
":",
"try",
":",
"import",
"snappy",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"\"snappy compressor is not installed; compression will be set to GZIP\"",
")",
"compression",
"=",
"\"GZIP\"",
"name",
"=",
"Path",
"(",
"filename",
")",
"if",
"filename",
"else",
"self",
".",
"name",
"if",
"fmt",
"==",
"\"parquet\"",
":",
"try",
":",
"from",
"fastparquet",
"import",
"write",
"as",
"write_parquet",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"\"fastparquet not found; export to parquet is unavailable\"",
")",
"return",
"elif",
"fmt",
"==",
"\"hdf5\"",
":",
"try",
":",
"from",
"h5py",
"import",
"File",
"as",
"HDF5",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"\"h5py not found; export to HDF5 is unavailable\"",
")",
"return",
"elif",
"fmt",
"==",
"\"excel\"",
":",
"try",
":",
"import",
"xlsxwriter",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"\"xlsxwriter not found; export to Excel unavailable\"",
")",
"return",
"elif",
"fmt",
"==",
"\"mat\"",
":",
"if",
"format",
"==",
"\"7.3\"",
":",
"try",
":",
"from",
"hdf5storage",
"import",
"savemat",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"\"hdf5storage not found; export to mat v7.3 is unavailable\"",
")",
"return",
"else",
":",
"try",
":",
"from",
"scipy",
".",
"io",
"import",
"savemat",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"\"scipy not found; export to mat is unavailable\"",
")",
"return",
"if",
"single_time_base",
"or",
"fmt",
"==",
"\"parquet\"",
":",
"df",
"=",
"self",
".",
"to_dataframe",
"(",
"raster",
"=",
"raster",
",",
"time_from_zero",
"=",
"time_from_zero",
",",
"use_display_names",
"=",
"use_display_names",
",",
"empty_channels",
"=",
"empty_channels",
",",
"reduce_memory_usage",
"=",
"reduce_memory_usage",
",",
")",
"units",
"=",
"OrderedDict",
"(",
")",
"comments",
"=",
"OrderedDict",
"(",
")",
"used_names",
"=",
"UniqueDB",
"(",
")",
"for",
"i",
",",
"grp",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"if",
"self",
".",
"_terminate",
":",
"return",
"included_channels",
"=",
"self",
".",
"_included_channels",
"(",
"i",
")",
"for",
"j",
"in",
"included_channels",
":",
"ch",
"=",
"grp",
".",
"channels",
"[",
"j",
"]",
"if",
"use_display_names",
":",
"channel_name",
"=",
"ch",
".",
"display_name",
"or",
"ch",
".",
"name",
"else",
":",
"channel_name",
"=",
"ch",
".",
"name",
"channel_name",
"=",
"used_names",
".",
"get_unique_name",
"(",
"channel_name",
")",
"if",
"hasattr",
"(",
"ch",
",",
"'unit'",
")",
":",
"unit",
"=",
"ch",
".",
"unit",
"if",
"ch",
".",
"conversion",
":",
"unit",
"=",
"unit",
"or",
"ch",
".",
"conversion",
".",
"unit",
"comment",
"=",
"ch",
".",
"comment",
"units",
"[",
"channel_name",
"]",
"=",
"unit",
"comments",
"[",
"channel_name",
"]",
"=",
"comment",
"if",
"fmt",
"==",
"\"hdf5\"",
":",
"name",
"=",
"name",
".",
"with_suffix",
"(",
"\".hdf\"",
")",
"if",
"single_time_base",
":",
"with",
"HDF5",
"(",
"str",
"(",
"name",
")",
",",
"\"w\"",
")",
"as",
"hdf",
":",
"# header information",
"group",
"=",
"hdf",
".",
"create_group",
"(",
"str",
"(",
"name",
")",
")",
"if",
"self",
".",
"version",
"in",
"MDF2_VERSIONS",
"+",
"MDF3_VERSIONS",
":",
"for",
"item",
"in",
"header_items",
":",
"group",
".",
"attrs",
"[",
"item",
"]",
"=",
"self",
".",
"header",
"[",
"item",
"]",
"# save each data group in a HDF5 group called",
"# \"DataGroup_<cntr>\" with the index starting from 1",
"# each HDF5 group will have a string attribute \"master\"",
"# that will hold the name of the master channel",
"for",
"channel",
"in",
"df",
":",
"samples",
"=",
"df",
"[",
"channel",
"]",
"unit",
"=",
"units",
"[",
"channel",
"]",
"comment",
"=",
"comments",
"[",
"channel",
"]",
"if",
"samples",
".",
"dtype",
".",
"kind",
"==",
"'O'",
":",
"continue",
"if",
"compression",
":",
"dataset",
"=",
"group",
".",
"create_dataset",
"(",
"channel",
",",
"data",
"=",
"samples",
",",
"compression",
"=",
"compression",
",",
")",
"else",
":",
"dataset",
"=",
"group",
".",
"create_dataset",
"(",
"channel",
",",
"data",
"=",
"samples",
")",
"unit",
"=",
"unit",
".",
"replace",
"(",
"\"\\0\"",
",",
"\"\"",
")",
"if",
"unit",
":",
"dataset",
".",
"attrs",
"[",
"\"unit\"",
"]",
"=",
"unit",
"comment",
"=",
"comment",
".",
"replace",
"(",
"\"\\0\"",
",",
"\"\"",
")",
"if",
"comment",
":",
"dataset",
".",
"attrs",
"[",
"\"comment\"",
"]",
"=",
"comment",
"else",
":",
"with",
"HDF5",
"(",
"str",
"(",
"name",
")",
",",
"\"w\"",
")",
"as",
"hdf",
":",
"# header information",
"group",
"=",
"hdf",
".",
"create_group",
"(",
"str",
"(",
"name",
")",
")",
"if",
"self",
".",
"version",
"in",
"MDF2_VERSIONS",
"+",
"MDF3_VERSIONS",
":",
"for",
"item",
"in",
"header_items",
":",
"group",
".",
"attrs",
"[",
"item",
"]",
"=",
"self",
".",
"header",
"[",
"item",
"]",
"# save each data group in a HDF5 group called",
"# \"DataGroup_<cntr>\" with the index starting from 1",
"# each HDF5 group will have a string attribute \"master\"",
"# that will hold the name of the master channel",
"for",
"i",
",",
"grp",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"names",
"=",
"UniqueDB",
"(",
")",
"if",
"self",
".",
"_terminate",
":",
"return",
"group_name",
"=",
"r\"/\"",
"+",
"f\"DataGroup_{i+1}\"",
"group",
"=",
"hdf",
".",
"create_group",
"(",
"group_name",
")",
"master_index",
"=",
"self",
".",
"masters_db",
".",
"get",
"(",
"i",
",",
"-",
"1",
")",
"if",
"master_index",
":",
"group",
".",
"attrs",
"[",
"\"master\"",
"]",
"=",
"grp",
".",
"channels",
"[",
"master_index",
"]",
".",
"name",
"channels",
"=",
"self",
".",
"select",
"(",
"[",
"(",
"ch",
".",
"name",
",",
"i",
",",
"None",
")",
"for",
"ch",
"in",
"grp",
".",
"channels",
"]",
")",
"for",
"j",
",",
"sig",
"in",
"enumerate",
"(",
"channels",
")",
":",
"if",
"use_display_names",
":",
"name",
"=",
"sig",
".",
"display_name",
"or",
"sig",
".",
"name",
"else",
":",
"name",
"=",
"sig",
".",
"name",
"name",
"=",
"names",
".",
"get_unique_name",
"(",
"name",
")",
"if",
"reduce_memory_usage",
":",
"sig",
".",
"samples",
"=",
"downcast",
"(",
"sig",
".",
"samples",
")",
"if",
"compression",
":",
"dataset",
"=",
"group",
".",
"create_dataset",
"(",
"name",
",",
"data",
"=",
"sig",
".",
"samples",
",",
"compression",
"=",
"compression",
",",
")",
"else",
":",
"dataset",
"=",
"group",
".",
"create_dataset",
"(",
"name",
",",
"data",
"=",
"sig",
".",
"samples",
",",
"dtype",
"=",
"sig",
".",
"samples",
".",
"dtype",
")",
"unit",
"=",
"sig",
".",
"unit",
"if",
"unit",
":",
"dataset",
".",
"attrs",
"[",
"\"unit\"",
"]",
"=",
"unit",
"comment",
"=",
"sig",
".",
"comment",
".",
"replace",
"(",
"\"\\0\"",
",",
"\"\"",
")",
"if",
"comment",
":",
"dataset",
".",
"attrs",
"[",
"\"comment\"",
"]",
"=",
"comment",
"elif",
"fmt",
"==",
"\"excel\"",
":",
"if",
"single_time_base",
":",
"name",
"=",
"name",
".",
"with_suffix",
"(",
"\".xlsx\"",
")",
"message",
"=",
"f'Writing excel export to file \"{name}\"'",
"logger",
".",
"info",
"(",
"message",
")",
"workbook",
"=",
"xlsxwriter",
".",
"Workbook",
"(",
"str",
"(",
"name",
")",
")",
"sheet",
"=",
"workbook",
".",
"add_worksheet",
"(",
"\"Channels\"",
")",
"for",
"col",
",",
"(",
"channel_name",
",",
"channel_unit",
")",
"in",
"enumerate",
"(",
"units",
".",
"items",
"(",
")",
")",
":",
"if",
"self",
".",
"_terminate",
":",
"return",
"samples",
"=",
"df",
"[",
"channel_name",
"]",
"sig_description",
"=",
"f\"{channel_name} [{channel_unit}]\"",
"sheet",
".",
"write",
"(",
"0",
",",
"col",
",",
"sig_description",
")",
"try",
":",
"sheet",
".",
"write_column",
"(",
"1",
",",
"col",
",",
"samples",
".",
"astype",
"(",
"str",
")",
")",
"except",
":",
"vals",
"=",
"[",
"str",
"(",
"e",
")",
"for",
"e",
"in",
"sig",
".",
"samples",
"]",
"sheet",
".",
"write_column",
"(",
"1",
",",
"col",
",",
"vals",
")",
"workbook",
".",
"close",
"(",
")",
"else",
":",
"while",
"name",
".",
"suffix",
"==",
"\".xlsx\"",
":",
"name",
"=",
"name",
".",
"stem",
"count",
"=",
"len",
"(",
"self",
".",
"groups",
")",
"for",
"i",
",",
"grp",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"if",
"self",
".",
"_terminate",
":",
"return",
"message",
"=",
"f\"Exporting group {i+1} of {count}\"",
"logger",
".",
"info",
"(",
"message",
")",
"data",
"=",
"self",
".",
"_load_data",
"(",
"grp",
")",
"data",
"=",
"b\"\"",
".",
"join",
"(",
"d",
"[",
"0",
"]",
"for",
"d",
"in",
"data",
")",
"data",
"=",
"(",
"data",
",",
"0",
",",
"-",
"1",
")",
"master_index",
"=",
"self",
".",
"masters_db",
".",
"get",
"(",
"i",
",",
"None",
")",
"if",
"master_index",
"is",
"not",
"None",
":",
"master",
"=",
"self",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"master_index",
",",
"data",
"=",
"data",
")",
"if",
"raster",
"and",
"len",
"(",
"master",
")",
":",
"raster_",
"=",
"np",
".",
"arange",
"(",
"master",
"[",
"0",
"]",
",",
"master",
"[",
"-",
"1",
"]",
",",
"raster",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"master",
"=",
"master",
".",
"interp",
"(",
"raster_",
")",
"else",
":",
"raster_",
"=",
"None",
"else",
":",
"master",
"=",
"None",
"raster_",
"=",
"None",
"if",
"time_from_zero",
":",
"master",
".",
"samples",
"-=",
"master",
".",
"samples",
"[",
"0",
"]",
"group_name",
"=",
"f\"DataGroup_{i+1}\"",
"wb_name",
"=",
"Path",
"(",
"f\"{name.stem}_{group_name}.xlsx\"",
")",
"workbook",
"=",
"xlsxwriter",
".",
"Workbook",
"(",
"str",
"(",
"wb_name",
")",
")",
"sheet",
"=",
"workbook",
".",
"add_worksheet",
"(",
"group_name",
")",
"if",
"master",
"is",
"not",
"None",
":",
"sig_description",
"=",
"f\"{master.name} [{master.unit}]\"",
"sheet",
".",
"write",
"(",
"0",
",",
"0",
",",
"sig_description",
")",
"sheet",
".",
"write_column",
"(",
"1",
",",
"0",
",",
"master",
".",
"samples",
".",
"astype",
"(",
"str",
")",
")",
"offset",
"=",
"1",
"else",
":",
"offset",
"=",
"0",
"for",
"col",
",",
"_",
"in",
"enumerate",
"(",
"grp",
".",
"channels",
")",
":",
"if",
"self",
".",
"_terminate",
":",
"return",
"if",
"col",
"==",
"master_index",
":",
"offset",
"-=",
"1",
"continue",
"sig",
"=",
"self",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"col",
",",
"data",
"=",
"data",
")",
"if",
"raster_",
"is",
"not",
"None",
":",
"sig",
"=",
"sig",
".",
"interp",
"(",
"raster_",
",",
"self",
".",
"_integer_interpolation",
")",
"sig_description",
"=",
"f\"{sig.name} [{sig.unit}]\"",
"sheet",
".",
"write",
"(",
"0",
",",
"col",
"+",
"offset",
",",
"sig_description",
")",
"try",
":",
"sheet",
".",
"write_column",
"(",
"1",
",",
"col",
"+",
"offset",
",",
"sig",
".",
"samples",
".",
"astype",
"(",
"str",
")",
")",
"except",
":",
"vals",
"=",
"[",
"str",
"(",
"e",
")",
"for",
"e",
"in",
"sig",
".",
"samples",
"]",
"sheet",
".",
"write_column",
"(",
"1",
",",
"col",
"+",
"offset",
",",
"vals",
")",
"workbook",
".",
"close",
"(",
")",
"del",
"self",
".",
"_master_channel_cache",
"[",
"(",
"i",
",",
"0",
",",
"-",
"1",
")",
"]",
"elif",
"fmt",
"==",
"\"csv\"",
":",
"if",
"single_time_base",
":",
"name",
"=",
"name",
".",
"with_suffix",
"(",
"\".csv\"",
")",
"message",
"=",
"f'Writing csv export to file \"{name}\"'",
"logger",
".",
"info",
"(",
"message",
")",
"with",
"open",
"(",
"name",
",",
"\"w\"",
",",
"newline",
"=",
"\"\"",
")",
"as",
"csvfile",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"csvfile",
")",
"names_row",
"=",
"[",
"f\"{channel_name} [{channel_unit}]\"",
"for",
"(",
"channel_name",
",",
"channel_unit",
")",
"in",
"units",
".",
"items",
"(",
")",
"]",
"writer",
".",
"writerow",
"(",
"names_row",
")",
"vals",
"=",
"[",
"df",
"[",
"name",
"]",
"for",
"name",
"in",
"df",
"]",
"if",
"self",
".",
"_terminate",
":",
"return",
"for",
"row",
"in",
"zip",
"(",
"*",
"vals",
")",
":",
"writer",
".",
"writerow",
"(",
"row",
")",
"else",
":",
"name",
"=",
"name",
".",
"with_suffix",
"(",
"\".csv\"",
")",
"count",
"=",
"len",
"(",
"self",
".",
"groups",
")",
"for",
"i",
",",
"grp",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"if",
"self",
".",
"_terminate",
":",
"return",
"message",
"=",
"f\"Exporting group {i+1} of {count}\"",
"logger",
".",
"info",
"(",
"message",
")",
"data",
"=",
"self",
".",
"_load_data",
"(",
"grp",
")",
"data",
"=",
"b\"\"",
".",
"join",
"(",
"d",
"[",
"0",
"]",
"for",
"d",
"in",
"data",
")",
"data",
"=",
"(",
"data",
",",
"0",
",",
"-",
"1",
")",
"group_csv_name",
"=",
"name",
".",
"parent",
"/",
"f\"{name.stem}_DataGroup_{i+1}.csv\"",
"with",
"open",
"(",
"group_csv_name",
",",
"\"w\"",
",",
"newline",
"=",
"\"\"",
")",
"as",
"csvfile",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"csvfile",
")",
"master_index",
"=",
"self",
".",
"masters_db",
".",
"get",
"(",
"i",
",",
"None",
")",
"if",
"master_index",
"is",
"not",
"None",
":",
"master",
"=",
"self",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"master_index",
",",
"data",
"=",
"data",
")",
"if",
"raster",
"and",
"len",
"(",
"master",
")",
":",
"raster_",
"=",
"np",
".",
"arange",
"(",
"master",
"[",
"0",
"]",
",",
"master",
"[",
"-",
"1",
"]",
",",
"raster",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"master",
"=",
"master",
".",
"interp",
"(",
"raster_",
")",
"else",
":",
"raster_",
"=",
"None",
"else",
":",
"master",
"=",
"None",
"raster_",
"=",
"None",
"if",
"time_from_zero",
":",
"if",
"master",
"is",
"None",
":",
"pass",
"elif",
"len",
"(",
"master",
")",
":",
"master",
".",
"samples",
"-=",
"master",
".",
"samples",
"[",
"0",
"]",
"ch_nr",
"=",
"len",
"(",
"grp",
".",
"channels",
")",
"if",
"master",
"is",
"None",
":",
"channels",
"=",
"[",
"self",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"j",
",",
"data",
"=",
"data",
")",
"for",
"j",
"in",
"range",
"(",
"ch_nr",
")",
"]",
"else",
":",
"if",
"raster_",
"is",
"not",
"None",
":",
"channels",
"=",
"[",
"self",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"j",
",",
"data",
"=",
"data",
")",
".",
"interp",
"(",
"raster_",
",",
"self",
".",
"_integer_interpolation",
")",
"for",
"j",
"in",
"range",
"(",
"ch_nr",
")",
"if",
"j",
"!=",
"master_index",
"]",
"else",
":",
"channels",
"=",
"[",
"self",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"j",
",",
"data",
"=",
"data",
")",
"for",
"j",
"in",
"range",
"(",
"ch_nr",
")",
"if",
"j",
"!=",
"master_index",
"]",
"if",
"raster_",
"is",
"not",
"None",
":",
"cycles",
"=",
"len",
"(",
"raster_",
")",
"else",
":",
"cycles",
"=",
"grp",
".",
"channel_group",
"[",
"\"cycles_nr\"",
"]",
"if",
"empty_channels",
"==",
"\"zeros\"",
":",
"for",
"channel",
"in",
"channels",
":",
"if",
"not",
"len",
"(",
"channel",
")",
":",
"channel",
".",
"samples",
"=",
"np",
".",
"zeros",
"(",
"cycles",
",",
"dtype",
"=",
"channel",
".",
"samples",
".",
"dtype",
")",
"if",
"master",
"is",
"not",
"None",
":",
"names_row",
"=",
"[",
"master",
".",
"name",
"]",
"vals",
"=",
"[",
"master",
".",
"samples",
"]",
"else",
":",
"names_row",
"=",
"[",
"]",
"vals",
"=",
"[",
"]",
"names_row",
"+=",
"[",
"f\"{ch.name} [{ch.unit}]\"",
"for",
"ch",
"in",
"channels",
"]",
"writer",
".",
"writerow",
"(",
"names_row",
")",
"vals",
"+=",
"[",
"ch",
".",
"samples",
"for",
"ch",
"in",
"channels",
"]",
"for",
"idx",
",",
"row",
"in",
"enumerate",
"(",
"zip",
"(",
"*",
"vals",
")",
")",
":",
"writer",
".",
"writerow",
"(",
"row",
")",
"del",
"self",
".",
"_master_channel_cache",
"[",
"(",
"i",
",",
"0",
",",
"-",
"1",
")",
"]",
"elif",
"fmt",
"==",
"\"mat\"",
":",
"name",
"=",
"name",
".",
"with_suffix",
"(",
"\".mat\"",
")",
"if",
"not",
"single_time_base",
":",
"mdict",
"=",
"{",
"}",
"master_name_template",
"=",
"\"DGM{}_{}\"",
"channel_name_template",
"=",
"\"DG{}_{}\"",
"used_names",
"=",
"UniqueDB",
"(",
")",
"for",
"i",
",",
"grp",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"if",
"self",
".",
"_terminate",
":",
"return",
"included_channels",
"=",
"self",
".",
"_included_channels",
"(",
"i",
")",
"master_index",
"=",
"self",
".",
"masters_db",
".",
"get",
"(",
"i",
",",
"-",
"1",
")",
"if",
"master_index",
">=",
"0",
":",
"included_channels",
".",
"add",
"(",
"master_index",
")",
"channels",
"=",
"self",
".",
"select",
"(",
"[",
"(",
"None",
",",
"i",
",",
"idx",
")",
"for",
"idx",
"in",
"included_channels",
"]",
")",
"for",
"j",
",",
"sig",
"in",
"zip",
"(",
"included_channels",
",",
"channels",
")",
":",
"if",
"j",
"==",
"master_index",
":",
"channel_name",
"=",
"master_name_template",
".",
"format",
"(",
"i",
",",
"sig",
".",
"name",
")",
"else",
":",
"if",
"use_display_names",
":",
"channel_name",
"=",
"sig",
".",
"display_name",
"or",
"sig",
".",
"name",
"else",
":",
"channel_name",
"=",
"sig",
".",
"name",
"channel_name",
"=",
"channel_name_template",
".",
"format",
"(",
"i",
",",
"channel_name",
")",
"channel_name",
"=",
"matlab_compatible",
"(",
"channel_name",
")",
"channel_name",
"=",
"used_names",
".",
"get_unique_name",
"(",
"channel_name",
")",
"if",
"sig",
".",
"samples",
".",
"dtype",
".",
"names",
":",
"sig",
".",
"samples",
".",
"dtype",
".",
"names",
"=",
"[",
"matlab_compatible",
"(",
"name",
")",
"for",
"name",
"in",
"sig",
".",
"samples",
".",
"dtype",
".",
"names",
"]",
"mdict",
"[",
"channel_name",
"]",
"=",
"sig",
".",
"samples",
"else",
":",
"used_names",
"=",
"UniqueDB",
"(",
")",
"mdict",
"=",
"{",
"}",
"for",
"name",
"in",
"df",
".",
"columns",
":",
"channel_name",
"=",
"matlab_compatible",
"(",
"name",
")",
"channel_name",
"=",
"used_names",
".",
"get_unique_name",
"(",
"channel_name",
")",
"mdict",
"[",
"channel_name",
"]",
"=",
"df",
"[",
"name",
"]",
".",
"values",
"mdict",
"[",
"'time'",
"]",
"=",
"df",
".",
"index",
".",
"values",
"if",
"format",
"==",
"\"7.3\"",
":",
"savemat",
"(",
"str",
"(",
"name",
")",
",",
"mdict",
",",
"long_field_names",
"=",
"True",
",",
"format",
"=",
"\"7.3\"",
",",
"delete_unused_variables",
"=",
"False",
",",
"oned_as",
"=",
"oned_as",
",",
")",
"else",
":",
"savemat",
"(",
"str",
"(",
"name",
")",
",",
"mdict",
",",
"long_field_names",
"=",
"True",
",",
"oned_as",
"=",
"oned_as",
",",
"do_compression",
"=",
"bool",
"(",
"compression",
")",
",",
")",
"elif",
"fmt",
"==",
"\"parquet\"",
":",
"name",
"=",
"name",
".",
"with_suffix",
"(",
"\".parquet\"",
")",
"if",
"compression",
":",
"write_parquet",
"(",
"name",
",",
"df",
",",
"compression",
"=",
"compression",
")",
"else",
":",
"write_parquet",
"(",
"name",
",",
"df",
")",
"else",
":",
"message",
"=",
"(",
"'Unsopported export type \"{}\". '",
"'Please select \"csv\", \"excel\", \"hdf5\", \"mat\" or \"pandas\"'",
")",
"message",
".",
"format",
"(",
"fmt",
")",
"logger",
".",
"warning",
"(",
"message",
")"
] | 38.512238 | 20.232517 |
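A minimal usage sketch for the export routine above, assuming it is the MDF.export method from asammdf; the input file name is hypothetical:

```python
from asammdf import MDF  # assumption: export() above belongs to asammdf's MDF class

mdf = MDF("measurement.mf4")  # hypothetical input file
# one file with all channels on a shared, zero-based time base
mdf.export(fmt="csv", filename="out.csv", single_time_base=True, time_from_zero=True)
# HDF5 with per-dataset compression (passed through to h5py)
mdf.export(fmt="hdf5", filename="out.hdf", compression="gzip")
```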
def _baseattrs(self):
"""A dict of members expressed in literals"""
result = super()._baseattrs
result["static_spaces"] = self.static_spaces._baseattrs
result["dynamic_spaces"] = self.dynamic_spaces._baseattrs
result["cells"] = self.cells._baseattrs
result["refs"] = self.refs._baseattrs
if self.has_params():
result["params"] = ", ".join(self.parameters)
else:
result["params"] = ""
return result | [
"def",
"_baseattrs",
"(",
"self",
")",
":",
"result",
"=",
"super",
"(",
")",
".",
"_baseattrs",
"result",
"[",
"\"static_spaces\"",
"]",
"=",
"self",
".",
"static_spaces",
".",
"_baseattrs",
"result",
"[",
"\"dynamic_spaces\"",
"]",
"=",
"self",
".",
"dynamic_spaces",
".",
"_baseattrs",
"result",
"[",
"\"cells\"",
"]",
"=",
"self",
".",
"cells",
".",
"_baseattrs",
"result",
"[",
"\"refs\"",
"]",
"=",
"self",
".",
"refs",
".",
"_baseattrs",
"if",
"self",
".",
"has_params",
"(",
")",
":",
"result",
"[",
"\"params\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"self",
".",
"parameters",
")",
"else",
":",
"result",
"[",
"\"params\"",
"]",
"=",
"\"\"",
"return",
"result"
] | 32.133333 | 19 |
def lsr_top1(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for top-1 data (see :ref:`data-top1`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_top1` for an idea on how this works). If it is set to
`None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float
Regularization parameter.
initial_params : array_like
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for winner, losers in data:
val = 1 / (weights.take(losers).sum() + weights[winner])
for loser in losers:
chain[loser, winner] += val
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain)) | [
"def",
"lsr_top1",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
")",
":",
"weights",
",",
"chain",
"=",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"initial_params",
")",
"for",
"winner",
",",
"losers",
"in",
"data",
":",
"val",
"=",
"1",
"/",
"(",
"weights",
".",
"take",
"(",
"losers",
")",
".",
"sum",
"(",
")",
"+",
"weights",
"[",
"winner",
"]",
")",
"for",
"loser",
"in",
"losers",
":",
"chain",
"[",
"loser",
",",
"winner",
"]",
"+=",
"val",
"chain",
"-=",
"np",
".",
"diag",
"(",
"chain",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"return",
"log_transform",
"(",
"statdist",
"(",
"chain",
")",
")"
] | 35.973684 | 20.368421 |
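A toy run of the estimator above, assuming it is exposed as choix.lsr_top1 (the docstring's reference to choix.ilsr_top1 suggests the choix package):

```python
import choix  # assumption: lsr_top1 above is choix.lsr_top1

n_items = 4
# top-1 data: each observation is (winner, losers)
data = [(0, [1, 2]), (1, [3]), (0, [3]), (2, [1, 3])]
params = choix.lsr_top1(n_items, data, alpha=0.01)  # alpha > 0 regularizes
print(params)  # ndarray of 4 log-strengths; higher means stronger item
```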
def get_metrics(self, timestamp):
"""Get a Metric for each registered view.
Convert each registered view's associated `ViewData` into a `Metric` to
be exported.
:type timestamp: :class: `datetime.datetime`
:param timestamp: The timestamp to use for metric conversions, usually
the current time.
:rtype: Iterator[:class: `opencensus.metrics.export.metric.Metric`]
"""
for vdl in self._measure_to_view_data_list_map.values():
for vd in vdl:
metric = metric_utils.view_data_to_metric(vd, timestamp)
if metric is not None:
yield metric | [
"def",
"get_metrics",
"(",
"self",
",",
"timestamp",
")",
":",
"for",
"vdl",
"in",
"self",
".",
"_measure_to_view_data_list_map",
".",
"values",
"(",
")",
":",
"for",
"vd",
"in",
"vdl",
":",
"metric",
"=",
"metric_utils",
".",
"view_data_to_metric",
"(",
"vd",
",",
"timestamp",
")",
"if",
"metric",
"is",
"not",
"None",
":",
"yield",
"metric"
] | 38.470588 | 21.529412 |
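A hedged sketch of consuming the generator above; `measure_map` stands in for an instance of the defining class (opencensus-style) with views registered and measurements recorded:

```python
from datetime import datetime

# hypothetical: measure_map is an instance of the class defining get_metrics()
for metric in measure_map.get_metrics(datetime.utcnow()):
    # each Metric carries a descriptor plus the converted time series
    print(metric.descriptor.name, len(metric.time_series))
```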
def video(request, video_id):
"""
Displays a video in an embed player
"""
# Check video availability
# Available states are: processing
api = Api()
api.authenticate()
availability = api.check_upload_status(video_id)
if availability is not True:
# Video is not available
video = Video.objects.filter(video_id=video_id).get()
state = availability["upload_state"]
# Add additional states here. I'm not sure what states are available
if state == "failed" or state == "rejected":
return render_to_response(
"django_youtube/video_failed.html",
{"video": video, "video_id": video_id, "message":
_("Invalid video."), "availability": availability},
context_instance=RequestContext(request)
)
else:
return render_to_response(
"django_youtube/video_unavailable.html",
{"video": video, "video_id": video_id,
"message": _("This video is currently being processed"), "availability": availability},
context_instance=RequestContext(request)
)
video_params = _video_params(request, video_id)
return render_to_response(
"django_youtube/video.html",
video_params,
context_instance=RequestContext(request)
) | [
"def",
"video",
"(",
"request",
",",
"video_id",
")",
":",
"# Check video availability",
"# Available states are: processing",
"api",
"=",
"Api",
"(",
")",
"api",
".",
"authenticate",
"(",
")",
"availability",
"=",
"api",
".",
"check_upload_status",
"(",
"video_id",
")",
"if",
"availability",
"is",
"not",
"True",
":",
"# Video is not available",
"video",
"=",
"Video",
".",
"objects",
".",
"filter",
"(",
"video_id",
"=",
"video_id",
")",
".",
"get",
"(",
")",
"state",
"=",
"availability",
"[",
"\"upload_state\"",
"]",
"# Add additional states here. I'm not sure what states are available",
"if",
"state",
"==",
"\"failed\"",
"or",
"state",
"==",
"\"rejected\"",
":",
"return",
"render_to_response",
"(",
"\"django_youtube/video_failed.html\"",
",",
"{",
"\"video\"",
":",
"video",
",",
"\"video_id\"",
":",
"video_id",
",",
"\"message\"",
":",
"_",
"(",
"\"Invalid video.\"",
")",
",",
"\"availability\"",
":",
"availability",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")",
"else",
":",
"return",
"render_to_response",
"(",
"\"django_youtube/video_unavailable.html\"",
",",
"{",
"\"video\"",
":",
"video",
",",
"\"video_id\"",
":",
"video_id",
",",
"\"message\"",
":",
"_",
"(",
"\"This video is currently being processed\"",
")",
",",
"\"availability\"",
":",
"availability",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")",
"video_params",
"=",
"_video_params",
"(",
"request",
",",
"video_id",
")",
"return",
"render_to_response",
"(",
"\"django_youtube/video.html\"",
",",
"video_params",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] | 33.875 | 19.325 |
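A hedged wiring sketch for the view above in a Django URLconf of that era (the pattern and name are hypothetical):

```python
# urls.py -- hypothetical project wiring for the view above
from django.conf.urls import url
from django_youtube import views

urlpatterns = [
    url(r"^video/(?P<video_id>[\w-]+)/$", views.video, name="youtube_video"),
]
```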
def check_url(url):
"""
Function that verifies that the string passed is a valid url.
Original regex author Diego Perini (http://www.iport.it)
regex ported to Python by adamrofer (https://github.com/adamrofer)
Used under MIT license.
:param url:
:return: Nothing
"""
URL_REGEX = re.compile(
u"^"
u"(?:(?:https?|ftp)://)"
u"(?:\S+(?::\S*)?@)?"
u"(?:"
u"(?!(?:10|127)(?:\.\d{1,3}){3})"
u"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})"
u"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})"
u"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])"
u"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}"
u"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))"
u"|"
u"(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)"
u"(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*"
u"(?:\.(?:[a-z\u00a1-\uffff]{2,}))"
u")"
u"(?::\d{2,5})?"
u"(?:/\S*)?"
u"$"
, re.UNICODE)
if not re.match(URL_REGEX, url):
raise ValueError('String passed is not a valid url')
return | [
"def",
"check_url",
"(",
"url",
")",
":",
"URL_REGEX",
"=",
"re",
".",
"compile",
"(",
"u\"^\"",
"u\"(?:(?:https?|ftp)://)\"",
"u\"(?:\\S+(?::\\S*)?@)?\"",
"u\"(?:\"",
"u\"(?!(?:10|127)(?:\\.\\d{1,3}){3})\"",
"u\"(?!(?:169\\.254|192\\.168)(?:\\.\\d{1,3}){2})\"",
"u\"(?!172\\.(?:1[6-9]|2\\d|3[0-1])(?:\\.\\d{1,3}){2})\"",
"u\"(?:[1-9]\\d?|1\\d\\d|2[01]\\d|22[0-3])\"",
"u\"(?:\\.(?:1?\\d{1,2}|2[0-4]\\d|25[0-5])){2}\"",
"u\"(?:\\.(?:[1-9]\\d?|1\\d\\d|2[0-4]\\d|25[0-4]))\"",
"u\"|\"",
"u\"(?:(?:[a-z\\u00a1-\\uffff0-9]-?)*[a-z\\u00a1-\\uffff0-9]+)\"",
"u\"(?:\\.(?:[a-z\\u00a1-\\uffff0-9]-?)*[a-z\\u00a1-\\uffff0-9]+)*\"",
"u\"(?:\\.(?:[a-z\\u00a1-\\uffff]{2,}))\"",
"u\")\"",
"u\"(?::\\d{2,5})?\"",
"u\"(?:/\\S*)?\"",
"u\"$\"",
",",
"re",
".",
"UNICODE",
")",
"if",
"not",
"re",
".",
"match",
"(",
"URL_REGEX",
",",
"url",
")",
":",
"raise",
"ValueError",
"(",
"'String passed is not a valid url'",
")",
"return"
] | 29.647059 | 18.764706 |
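Quick sanity checks against the pattern above; the function returns None for a valid URL and raises ValueError otherwise:

```python
check_url("https://example.com/path?q=1")  # passes silently
check_url("ftp://files.example.org/pub/")  # ftp scheme is accepted
try:
    check_url("localhost:8000")            # no scheme, so rejected
except ValueError as exc:
    print(exc)  # "String passed is not a valid url"
```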
def rest_del(self, url, params=None, session=None, verify=True, cert=None):
"""
Perform a DELETE request to url with requests.session
"""
res = session.delete(url, params=params, verify=verify, cert=cert)
return res.text, res.status_code | [
"def",
"rest_del",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
",",
"session",
"=",
"None",
",",
"verify",
"=",
"True",
",",
"cert",
"=",
"None",
")",
":",
"res",
"=",
"session",
".",
"delete",
"(",
"url",
",",
"params",
"=",
"params",
",",
"verify",
"=",
"verify",
",",
"cert",
"=",
"cert",
")",
"return",
"res",
".",
"text",
",",
"res",
".",
"status_code"
] | 45.333333 | 15 |
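A hedged sketch of calling rest_del; `client` is a hypothetical instance of the defining class and the endpoint is made up:

```python
import requests

session = requests.Session()
text, status = client.rest_del(
    "https://api.example.com/items/42",  # hypothetical endpoint
    params={"force": "true"},
    session=session,
)
print(status, text)
```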
def _to_corrected_pandas_type(dt):
"""
When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong.
    This method gets the corrected data type for Pandas if that type may be inferred incorrectly.
"""
import numpy as np
if type(dt) == ByteType:
return np.int8
elif type(dt) == ShortType:
return np.int16
elif type(dt) == IntegerType:
return np.int32
elif type(dt) == FloatType:
return np.float32
else:
return None | [
"def",
"_to_corrected_pandas_type",
"(",
"dt",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"type",
"(",
"dt",
")",
"==",
"ByteType",
":",
"return",
"np",
".",
"int8",
"elif",
"type",
"(",
"dt",
")",
"==",
"ShortType",
":",
"return",
"np",
".",
"int16",
"elif",
"type",
"(",
"dt",
")",
"==",
"IntegerType",
":",
"return",
"np",
".",
"int32",
"elif",
"type",
"(",
"dt",
")",
"==",
"FloatType",
":",
"return",
"np",
".",
"float32",
"else",
":",
"return",
"None"
] | 31.625 | 18.25 |
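The mapping above can be exercised directly, assuming pyspark is installed:

```python
import numpy as np
from pyspark.sql.types import ByteType, IntegerType, DoubleType

assert _to_corrected_pandas_type(ByteType()) is np.int8
assert _to_corrected_pandas_type(IntegerType()) is np.int32
assert _to_corrected_pandas_type(DoubleType()) is None  # float64 needs no correction
```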
def free_params(self, value):
"""Set the free parameters. Note that this bypasses enforce_bounds.
"""
value = scipy.asarray(value, dtype=float)
self.K_up_to_date = False
self.k.free_params = value[:self.k.num_free_params]
self.noise_k.free_params = value[self.k.num_free_params:self.k.num_free_params + self.noise_k.num_free_params]
if self.mu is not None:
self.mu.free_params = value[self.k.num_free_params + self.noise_k.num_free_params:] | [
"def",
"free_params",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"scipy",
".",
"asarray",
"(",
"value",
",",
"dtype",
"=",
"float",
")",
"self",
".",
"K_up_to_date",
"=",
"False",
"self",
".",
"k",
".",
"free_params",
"=",
"value",
"[",
":",
"self",
".",
"k",
".",
"num_free_params",
"]",
"self",
".",
"noise_k",
".",
"free_params",
"=",
"value",
"[",
"self",
".",
"k",
".",
"num_free_params",
":",
"self",
".",
"k",
".",
"num_free_params",
"+",
"self",
".",
"noise_k",
".",
"num_free_params",
"]",
"if",
"self",
".",
"mu",
"is",
"not",
"None",
":",
"self",
".",
"mu",
".",
"free_params",
"=",
"value",
"[",
"self",
".",
"k",
".",
"num_free_params",
"+",
"self",
".",
"noise_k",
".",
"num_free_params",
":",
"]"
] | 55.555556 | 20.888889 |
def decompose(
cls,
heights,
t = None,
t0 = None,
interval = None,
constituents = constituent.noaa,
initial = None,
n_period = 2,
callback = None,
full_output = False
):
"""
Return an instance of Tide which has been fitted to a series of tidal observations.
Arguments:
It is not necessary to provide t0 or interval if t is provided.
heights -- ndarray of tidal observation heights
t -- ndarray of tidal observation times
t0 -- datetime representing the time at which heights[0] was recorded
interval -- hourly interval between readings
constituents -- list of constituents to use in the fit (default: constituent.noaa)
initial -- optional Tide instance to use as first guess for least squares solver
n_period -- only include constituents which complete at least this many periods (default: 2)
callback -- optional function to be called at each iteration of the solver
full_output -- whether to return the output of scipy's leastsq solver (default: False)
"""
if t is not None:
if isinstance(t[0], datetime):
hours = Tide._hours(t[0], t)
t0 = t[0]
elif t0 is not None:
hours = t
else:
raise ValueError("t can be an array of datetimes, or an array "
"of hours since t0 in which case t0 must be "
"specified.")
elif None not in [t0, interval]:
hours = np.arange(len(heights)) * interval
else:
raise ValueError("Must provide t(datetimes), or t(hours) and "
"t0(datetime), or interval(hours) and t0(datetime) "
"so that each height can be identified with an "
"instant in time.")
#Remove duplicate constituents (those which travel at exactly the same
#speed, irrespective of phase)
constituents = list(OrderedDict.fromkeys(constituents))
#No need for least squares to find the mean water level constituent z0,
#work relative to mean
constituents = [c for c in constituents if not c == constituent._Z0]
z0 = np.mean(heights)
heights = heights - z0
#Only analyse frequencies which complete at least n_period cycles over
#the data period.
constituents = [
c for c in constituents
if 360.0 * n_period < hours[-1] * c.speed(astro(t0))
]
n = len(constituents)
sort = np.argsort(hours)
hours = hours[sort]
heights = heights[sort]
#We partition our time/height data into intervals over which we consider
#the values of u and f to assume a constant value (that is, their true
#value at the midpoint of the interval). Constituent
#speeds change much more slowly than the node factors, so we will
#consider these constant and equal to their speed at t0, regardless of
#the length of the time series.
partition = 240.0
t = Tide._partition(hours, partition)
times = Tide._times(t0, [(i + 0.5)*partition for i in range(len(t))])
speed, u, f, V0 = Tide._prepare(constituents, t0, times, radians = True)
#Residual to be minimised by variation of parameters (amplitudes, phases)
def residual(hp):
H, p = hp[:n, np.newaxis], hp[n:, np.newaxis]
s = np.concatenate([
Tide._tidal_series(t_i, H, p, speed, u_i, f_i, V0)
for t_i, u_i, f_i in izip(t, u, f)
])
res = heights - s
if callback:
callback(res)
return res
#Analytic Jacobian of the residual - this makes solving significantly
#faster than just using gradient approximation, especially with many
#measurements / constituents.
def D_residual(hp):
H, p = hp[:n, np.newaxis], hp[n:, np.newaxis]
ds_dH = np.concatenate([
f_i*np.cos(speed*t_i+u_i+V0-p)
for t_i, u_i, f_i in izip(t, u, f)],
axis = 1)
ds_dp = np.concatenate([
H*f_i*np.sin(speed*t_i+u_i+V0-p)
for t_i, u_i, f_i in izip(t, u, f)],
axis = 1)
return np.append(-ds_dH, -ds_dp, axis=0)
#Initial guess for solver, haven't done any analysis on this since the
#solver seems to converge well regardless of the initial guess We do
#however scale the initial amplitude guess with some measure of the
#variation
amplitudes = np.ones(n) * (np.sqrt(np.dot(heights, heights)) / len(heights))
phases = np.ones(n)
if initial:
for (c0, amplitude, phase) in initial.model:
for i, c in enumerate(constituents):
if c0 == c:
amplitudes[i] = amplitude
phases[i] = d2r*phase
initial = np.append(amplitudes, phases)
lsq = leastsq(residual, initial, Dfun=D_residual, col_deriv=True, ftol=1e-7)
model = np.zeros(1+n, dtype=cls.dtype)
model[0] = (constituent._Z0, z0, 0)
model[1:]['constituent'] = constituents[:]
model[1:]['amplitude'] = lsq[0][:n]
model[1:]['phase'] = lsq[0][n:]
if full_output:
return cls(model = model, radians = True), lsq
return cls(model = model, radians = True) | [
"def",
"decompose",
"(",
"cls",
",",
"heights",
",",
"t",
"=",
"None",
",",
"t0",
"=",
"None",
",",
"interval",
"=",
"None",
",",
"constituents",
"=",
"constituent",
".",
"noaa",
",",
"initial",
"=",
"None",
",",
"n_period",
"=",
"2",
",",
"callback",
"=",
"None",
",",
"full_output",
"=",
"False",
")",
":",
"if",
"t",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"t",
"[",
"0",
"]",
",",
"datetime",
")",
":",
"hours",
"=",
"Tide",
".",
"_hours",
"(",
"t",
"[",
"0",
"]",
",",
"t",
")",
"t0",
"=",
"t",
"[",
"0",
"]",
"elif",
"t0",
"is",
"not",
"None",
":",
"hours",
"=",
"t",
"else",
":",
"raise",
"ValueError",
"(",
"\"t can be an array of datetimes, or an array \"",
"\"of hours since t0 in which case t0 must be \"",
"\"specified.\"",
")",
"elif",
"None",
"not",
"in",
"[",
"t0",
",",
"interval",
"]",
":",
"hours",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"heights",
")",
")",
"*",
"interval",
"else",
":",
"raise",
"ValueError",
"(",
"\"Must provide t(datetimes), or t(hours) and \"",
"\"t0(datetime), or interval(hours) and t0(datetime) \"",
"\"so that each height can be identified with an \"",
"\"instant in time.\"",
")",
"#Remove duplicate constituents (those which travel at exactly the same",
"#speed, irrespective of phase)",
"constituents",
"=",
"list",
"(",
"OrderedDict",
".",
"fromkeys",
"(",
"constituents",
")",
")",
"#No need for least squares to find the mean water level constituent z0,",
"#work relative to mean",
"constituents",
"=",
"[",
"c",
"for",
"c",
"in",
"constituents",
"if",
"not",
"c",
"==",
"constituent",
".",
"_Z0",
"]",
"z0",
"=",
"np",
".",
"mean",
"(",
"heights",
")",
"heights",
"=",
"heights",
"-",
"z0",
"#Only analyse frequencies which complete at least n_period cycles over",
"#the data period.",
"constituents",
"=",
"[",
"c",
"for",
"c",
"in",
"constituents",
"if",
"360.0",
"*",
"n_period",
"<",
"hours",
"[",
"-",
"1",
"]",
"*",
"c",
".",
"speed",
"(",
"astro",
"(",
"t0",
")",
")",
"]",
"n",
"=",
"len",
"(",
"constituents",
")",
"sort",
"=",
"np",
".",
"argsort",
"(",
"hours",
")",
"hours",
"=",
"hours",
"[",
"sort",
"]",
"heights",
"=",
"heights",
"[",
"sort",
"]",
"#We partition our time/height data into intervals over which we consider",
"#the values of u and f to assume a constant value (that is, their true",
"#value at the midpoint of the interval). Constituent",
"#speeds change much more slowly than the node factors, so we will",
"#consider these constant and equal to their speed at t0, regardless of",
"#the length of the time series.",
"partition",
"=",
"240.0",
"t",
"=",
"Tide",
".",
"_partition",
"(",
"hours",
",",
"partition",
")",
"times",
"=",
"Tide",
".",
"_times",
"(",
"t0",
",",
"[",
"(",
"i",
"+",
"0.5",
")",
"*",
"partition",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"t",
")",
")",
"]",
")",
"speed",
",",
"u",
",",
"f",
",",
"V0",
"=",
"Tide",
".",
"_prepare",
"(",
"constituents",
",",
"t0",
",",
"times",
",",
"radians",
"=",
"True",
")",
"#Residual to be minimised by variation of parameters (amplitudes, phases)",
"def",
"residual",
"(",
"hp",
")",
":",
"H",
",",
"p",
"=",
"hp",
"[",
":",
"n",
",",
"np",
".",
"newaxis",
"]",
",",
"hp",
"[",
"n",
":",
",",
"np",
".",
"newaxis",
"]",
"s",
"=",
"np",
".",
"concatenate",
"(",
"[",
"Tide",
".",
"_tidal_series",
"(",
"t_i",
",",
"H",
",",
"p",
",",
"speed",
",",
"u_i",
",",
"f_i",
",",
"V0",
")",
"for",
"t_i",
",",
"u_i",
",",
"f_i",
"in",
"izip",
"(",
"t",
",",
"u",
",",
"f",
")",
"]",
")",
"res",
"=",
"heights",
"-",
"s",
"if",
"callback",
":",
"callback",
"(",
"res",
")",
"return",
"res",
"#Analytic Jacobian of the residual - this makes solving significantly",
"#faster than just using gradient approximation, especially with many",
"#measurements / constituents.",
"def",
"D_residual",
"(",
"hp",
")",
":",
"H",
",",
"p",
"=",
"hp",
"[",
":",
"n",
",",
"np",
".",
"newaxis",
"]",
",",
"hp",
"[",
"n",
":",
",",
"np",
".",
"newaxis",
"]",
"ds_dH",
"=",
"np",
".",
"concatenate",
"(",
"[",
"f_i",
"*",
"np",
".",
"cos",
"(",
"speed",
"*",
"t_i",
"+",
"u_i",
"+",
"V0",
"-",
"p",
")",
"for",
"t_i",
",",
"u_i",
",",
"f_i",
"in",
"izip",
"(",
"t",
",",
"u",
",",
"f",
")",
"]",
",",
"axis",
"=",
"1",
")",
"ds_dp",
"=",
"np",
".",
"concatenate",
"(",
"[",
"H",
"*",
"f_i",
"*",
"np",
".",
"sin",
"(",
"speed",
"*",
"t_i",
"+",
"u_i",
"+",
"V0",
"-",
"p",
")",
"for",
"t_i",
",",
"u_i",
",",
"f_i",
"in",
"izip",
"(",
"t",
",",
"u",
",",
"f",
")",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"np",
".",
"append",
"(",
"-",
"ds_dH",
",",
"-",
"ds_dp",
",",
"axis",
"=",
"0",
")",
"#Initial guess for solver, haven't done any analysis on this since the",
"#solver seems to converge well regardless of the initial guess We do",
"#however scale the initial amplitude guess with some measure of the",
"#variation",
"amplitudes",
"=",
"np",
".",
"ones",
"(",
"n",
")",
"*",
"(",
"np",
".",
"sqrt",
"(",
"np",
".",
"dot",
"(",
"heights",
",",
"heights",
")",
")",
"/",
"len",
"(",
"heights",
")",
")",
"phases",
"=",
"np",
".",
"ones",
"(",
"n",
")",
"if",
"initial",
":",
"for",
"(",
"c0",
",",
"amplitude",
",",
"phase",
")",
"in",
"initial",
".",
"model",
":",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"constituents",
")",
":",
"if",
"c0",
"==",
"c",
":",
"amplitudes",
"[",
"i",
"]",
"=",
"amplitude",
"phases",
"[",
"i",
"]",
"=",
"d2r",
"*",
"phase",
"initial",
"=",
"np",
".",
"append",
"(",
"amplitudes",
",",
"phases",
")",
"lsq",
"=",
"leastsq",
"(",
"residual",
",",
"initial",
",",
"Dfun",
"=",
"D_residual",
",",
"col_deriv",
"=",
"True",
",",
"ftol",
"=",
"1e-7",
")",
"model",
"=",
"np",
".",
"zeros",
"(",
"1",
"+",
"n",
",",
"dtype",
"=",
"cls",
".",
"dtype",
")",
"model",
"[",
"0",
"]",
"=",
"(",
"constituent",
".",
"_Z0",
",",
"z0",
",",
"0",
")",
"model",
"[",
"1",
":",
"]",
"[",
"'constituent'",
"]",
"=",
"constituents",
"[",
":",
"]",
"model",
"[",
"1",
":",
"]",
"[",
"'amplitude'",
"]",
"=",
"lsq",
"[",
"0",
"]",
"[",
":",
"n",
"]",
"model",
"[",
"1",
":",
"]",
"[",
"'phase'",
"]",
"=",
"lsq",
"[",
"0",
"]",
"[",
"n",
":",
"]",
"if",
"full_output",
":",
"return",
"cls",
"(",
"model",
"=",
"model",
",",
"radians",
"=",
"True",
")",
",",
"lsq",
"return",
"cls",
"(",
"model",
"=",
"model",
",",
"radians",
"=",
"True",
")"
] | 34.360294 | 22.345588 |
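A sketch of fitting a synthetic record with the classmethod above, assuming Tide is the pytides-style class it belongs to:

```python
from datetime import datetime, timedelta
import numpy as np
# assumption: Tide is the pytides-style class defining decompose()

t0 = datetime(2020, 1, 1)
hours = np.arange(24 * 60, dtype=float)            # 60 days of hourly readings
heights = 1.2 * np.cos(2 * np.pi * hours / 12.42)  # toy M2-like signal
times = [t0 + timedelta(hours=h) for h in hours]
tide = Tide.decompose(heights, t=times)
# model is a structured array with constituent/amplitude/phase fields
for row in tide.model[:5]:
    print(row['constituent'].name, row['amplitude'], row['phase'])
```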
def method_codes_to_geomagia(magic_method_codes,geomagia_table):
"""
Looks at the MagIC method code list and returns the correct GEOMAGIA code number depending
    on the method code list and the GEOMAGIA table specified. Returns '0', GEOMAGIA's "Not specified" value, if no match.
    When multiple codes are matched they are separated with ':'
"""
codes=magic_method_codes
geomagia=geomagia_table.lower()
geomagia_code='0'
if geomagia=='alteration_monit_corr':
        if "DA-ALT-V" in codes or "LP-PI-ALT-PTRM" in codes or "LP-PI-ALT-PMRM" in codes:
geomagia_code='1'
elif "LP-PI-ALT-SUSC" in codes:
geomagia_code='2'
        elif "DA-ALT-RS" in codes or "LP-PI-ALT-AFARM" in codes:
geomagia_code='3'
elif "LP-PI-ALT-WALTON" in codes:
geomagia_code='4'
elif "LP-PI-ALT-TANGUY" in codes:
geomagia_code='5'
elif "DA-ALT" in codes:
geomagia_code='6' #at end to fill generic if others don't exist
elif "LP-PI-ALT-FABIAN" in codes:
geomagia_code='7'
if geomagia=='md_checks':
if ("LT-PTRM-MD" in codes) or ("LT-PMRM-MD" in codes):
geomagia_code='1:'
if ("LP-PI-BT-LT" in codes) or ("LT-LT-Z" in codes):
if "0" in geomagia_code:
geomagia_code="23:"
else:
geomagia_code+='2:'
geomagia_code=geomagia_code[:-1]
if geomagia=='anisotropy_correction':
if "DA-AC-AMS" in codes:
geomagia_code='1'
elif "DA-AC-AARM" in codes:
geomagia_code='2'
elif "DA-AC-ATRM" in codes:
geomagia_code='3'
elif "LT-NRM-PAR" in codes:
geomagia_code='4'
elif "DA-AC-AIRM" in codes:
geomagia_code='6'
elif "DA-AC" in codes: #at end to fill generic if others don't exist
geomagia_code='5'
if geomagia=='cooling_rate':
if "DA-CR" in codes: #all current CR codes but CR-EG are a 1 but may change in the future
geomagia_code='1'
if "DA-CR-EG" in codes:
geomagia_code='2'
if geomagia=='dm_methods':
if "LP-DIR-AF" in codes:
geomagia_code='1'
elif "LT-AF-D" in codes:
geomagia_code='1'
elif "LT-AF-G" in codes:
geomagia_code='1'
elif "LT-AF-Z" in codes:
geomagia_code='1'
elif "LP-DIR-T" in codes:
geomagia_code='2'
elif "LT-AF-Z" in codes:
geomagia_code='2'
elif "LP-DIR-M" in codes:
geomagia_code='5'
elif "LT-M-Z" in codes:
geomagia_code='5'
if geomagia=='dm_analysis':
if "DE-BFL" in codes:
geomagia_code='1'
elif "DE-BLANKET" in codes:
geomagia_code='2'
elif "DE-FM" in codes:
geomagia_code='3'
elif "DE-NRM" in codes:
geomagia_code='6'
if geomagia=='specimen_type_id':
if "SC-TYPE-CYC" in codes:
geomagia_code='1'
elif "SC-TYPE-CUBE" in codes:
geomagia_code='2'
elif "SC-TYPE-MINI" in codes:
geomagia_code='3'
elif "SC-TYPE-SC" in codes:
geomagia_code='4'
elif "SC-TYPE-UC" in codes:
geomagia_code='5'
elif "SC-TYPE-LARGE" in codes:
geomagia_code='6'
return geomagia_code | [
"def",
"method_codes_to_geomagia",
"(",
"magic_method_codes",
",",
"geomagia_table",
")",
":",
"codes",
"=",
"magic_method_codes",
"geomagia",
"=",
"geomagia_table",
".",
"lower",
"(",
")",
"geomagia_code",
"=",
"'0'",
"if",
"geomagia",
"==",
"'alteration_monit_corr'",
":",
"if",
"\"DA-ALT-V\"",
"or",
"\"LP-PI-ALT-PTRM\"",
"or",
"\"LP-PI-ALT-PMRM\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'1'",
"elif",
"\"LP-PI-ALT-SUSC\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'2'",
"elif",
"\"DA-ALT-RS\"",
"or",
"\"LP-PI-ALT-AFARM\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'3'",
"elif",
"\"LP-PI-ALT-WALTON\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'4'",
"elif",
"\"LP-PI-ALT-TANGUY\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'5'",
"elif",
"\"DA-ALT\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'6'",
"#at end to fill generic if others don't exist",
"elif",
"\"LP-PI-ALT-FABIAN\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'7'",
"if",
"geomagia",
"==",
"'md_checks'",
":",
"if",
"(",
"\"LT-PTRM-MD\"",
"in",
"codes",
")",
"or",
"(",
"\"LT-PMRM-MD\"",
"in",
"codes",
")",
":",
"geomagia_code",
"=",
"'1:'",
"if",
"(",
"\"LP-PI-BT-LT\"",
"in",
"codes",
")",
"or",
"(",
"\"LT-LT-Z\"",
"in",
"codes",
")",
":",
"if",
"\"0\"",
"in",
"geomagia_code",
":",
"geomagia_code",
"=",
"\"23:\"",
"else",
":",
"geomagia_code",
"+=",
"'2:'",
"geomagia_code",
"=",
"geomagia_code",
"[",
":",
"-",
"1",
"]",
"if",
"geomagia",
"==",
"'anisotropy_correction'",
":",
"if",
"\"DA-AC-AMS\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'1'",
"elif",
"\"DA-AC-AARM\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'2'",
"elif",
"\"DA-AC-ATRM\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'3'",
"elif",
"\"LT-NRM-PAR\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'4'",
"elif",
"\"DA-AC-AIRM\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'6'",
"elif",
"\"DA-AC\"",
"in",
"codes",
":",
"#at end to fill generic if others don't exist",
"geomagia_code",
"=",
"'5'",
"if",
"geomagia",
"==",
"'cooling_rate'",
":",
"if",
"\"DA-CR\"",
"in",
"codes",
":",
"#all current CR codes but CR-EG are a 1 but may change in the future ",
"geomagia_code",
"=",
"'1'",
"if",
"\"DA-CR-EG\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'2'",
"if",
"geomagia",
"==",
"'dm_methods'",
":",
"if",
"\"LP-DIR-AF\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'1'",
"elif",
"\"LT-AF-D\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'1'",
"elif",
"\"LT-AF-G\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'1'",
"elif",
"\"LT-AF-Z\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'1'",
"elif",
"\"LP-DIR-T\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'2'",
"elif",
"\"LT-AF-Z\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'2'",
"elif",
"\"LP-DIR-M\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'5'",
"elif",
"\"LT-M-Z\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'5'",
"if",
"geomagia",
"==",
"'dm_analysis'",
":",
"if",
"\"DE-BFL\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'1'",
"elif",
"\"DE-BLANKET\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'2'",
"elif",
"\"DE-FM\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'3'",
"elif",
"\"DE-NRM\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'6'",
"if",
"geomagia",
"==",
"'specimen_type_id'",
":",
"if",
"\"SC-TYPE-CYC\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'1'",
"elif",
"\"SC-TYPE-CUBE\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'2'",
"elif",
"\"SC-TYPE-MINI\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'3'",
"elif",
"\"SC-TYPE-SC\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'4'",
"elif",
"\"SC-TYPE-UC\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'5'",
"elif",
"\"SC-TYPE-LARGE\"",
"in",
"codes",
":",
"geomagia_code",
"=",
"'6'",
"return",
"geomagia_code"
] | 33.326733 | 14.079208 |
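A usage sketch with a colon-delimited MagIC method-code string (the function uses substring matching, so the delimiter is irrelevant):

```python
codes = "LP-PI-ALT-SUSC:DA-AC-AARM:LT-AF-Z"
print(method_codes_to_geomagia(codes, "anisotropy_correction"))  # '2' (DA-AC-AARM)
print(method_codes_to_geomagia(codes, "dm_methods"))             # '1' (LT-AF-Z)
print(method_codes_to_geomagia(codes, "cooling_rate"))           # '0' (not specified)
```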
def hget(self, key):
"""Read data from Redis for the provided key.
Args:
key (string): The key to read in Redis.
Returns:
(any): The response data from Redis.
"""
data = self.r.hget(self.hash, key)
if data is not None and not isinstance(data, str):
data = str(self.r.hget(self.hash, key), 'utf-8')
return data | [
"def",
"hget",
"(",
"self",
",",
"key",
")",
":",
"data",
"=",
"self",
".",
"r",
".",
"hget",
"(",
"self",
".",
"hash",
",",
"key",
")",
"if",
"data",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"data",
"=",
"str",
"(",
"self",
".",
"r",
".",
"hget",
"(",
"self",
".",
"hash",
",",
"key",
")",
",",
"'utf-8'",
")",
"return",
"data"
] | 30.076923 | 17.769231 |
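What hget does, reproduced with a plain redis client; the assumption is that the class stores a redis.Redis instance in self.r and a hash name in self.hash:

```python
import redis

r = redis.Redis()
r.hset("tc:data", "key1", "value1")
raw = r.hget("tc:data", "key1")  # bytes under Python 3
value = raw.decode("utf-8") if raw is not None else None
print(value)  # 'value1', the same result hget() yields via str(..., 'utf-8')
```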
def _footer_start_thread(self, text, time):
"""Display given text in the footer. Clears after <time> seconds
"""
footerwid = urwid.AttrMap(urwid.Text(text), 'footer')
self.top.footer = footerwid
load_thread = Thread(target=self._loading_thread, args=(time,))
load_thread.daemon = True
load_thread.start() | [
"def",
"_footer_start_thread",
"(",
"self",
",",
"text",
",",
"time",
")",
":",
"footerwid",
"=",
"urwid",
".",
"AttrMap",
"(",
"urwid",
".",
"Text",
"(",
"text",
")",
",",
"'footer'",
")",
"self",
".",
"top",
".",
"footer",
"=",
"footerwid",
"load_thread",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"_loading_thread",
",",
"args",
"=",
"(",
"time",
",",
")",
")",
"load_thread",
".",
"daemon",
"=",
"True",
"load_thread",
".",
"start",
"(",
")"
] | 39.222222 | 13.333333 |
def gaussian(cls, mu=0, sigma=1):
'''
:mu: mean
:sigma: standard deviation
:return: Point subclass
Returns a point whose coordinates are picked from a Gaussian
distribution with mean 'mu' and standard deviation 'sigma'.
See random.gauss for further explanation of those parameters.
'''
return cls(random.gauss(mu, sigma),
random.gauss(mu, sigma),
random.gauss(mu, sigma)) | [
"def",
"gaussian",
"(",
"cls",
",",
"mu",
"=",
"0",
",",
"sigma",
"=",
"1",
")",
":",
"return",
"cls",
"(",
"random",
".",
"gauss",
"(",
"mu",
",",
"sigma",
")",
",",
"random",
".",
"gauss",
"(",
"mu",
",",
"sigma",
")",
",",
"random",
".",
"gauss",
"(",
"mu",
",",
"sigma",
")",
")"
] | 36.538462 | 17.769231 |
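A sketch assuming Point is the 3-D point class defining the classmethod above:

```python
import random  # used internally by gaussian()

origin_cluster = [Point.gaussian() for _ in range(100)]                # ~N(0, 1) per axis
tight_cluster = [Point.gaussian(mu=5, sigma=0.1) for _ in range(100)]  # near (5, 5, 5)
```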
def getexptimeimg(self,chip):
"""
Notes
=====
Return an array representing the exposure time per pixel for the detector.
This method will be overloaded for IR detectors which have their own
EXP arrays, namely, WFC3/IR and NICMOS images.
:units:
None
Returns
=======
exptimeimg : numpy array
The method will return an array of the same shape as the image.
"""
sci_chip = self._image[self.scienceExt,chip]
if sci_chip._wtscl_par == 'expsq':
wtscl = sci_chip._exptime*sci_chip._exptime
else:
wtscl = sci_chip._exptime
return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*wtscl | [
"def",
"getexptimeimg",
"(",
"self",
",",
"chip",
")",
":",
"sci_chip",
"=",
"self",
".",
"_image",
"[",
"self",
".",
"scienceExt",
",",
"chip",
"]",
"if",
"sci_chip",
".",
"_wtscl_par",
"==",
"'expsq'",
":",
"wtscl",
"=",
"sci_chip",
".",
"_exptime",
"*",
"sci_chip",
".",
"_exptime",
"else",
":",
"wtscl",
"=",
"sci_chip",
".",
"_exptime",
"return",
"np",
".",
"ones",
"(",
"sci_chip",
".",
"image_shape",
",",
"dtype",
"=",
"sci_chip",
".",
"image_dtype",
")",
"*",
"wtscl"
] | 30.458333 | 23.208333 |
def handle(self, *args, **options):
"""Run do_index_command on each specified index and log the output."""
for index in options.pop("indexes"):
data = {}
try:
data = self.do_index_command(index, **options)
except TransportError as ex:
logger.warning("ElasticSearch threw an error: %s", ex)
data = {"index": index, "status": ex.status_code, "reason": ex.error}
finally:
logger.info(data) | [
"def",
"handle",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"for",
"index",
"in",
"options",
".",
"pop",
"(",
"\"indexes\"",
")",
":",
"data",
"=",
"{",
"}",
"try",
":",
"data",
"=",
"self",
".",
"do_index_command",
"(",
"index",
",",
"*",
"*",
"options",
")",
"except",
"TransportError",
"as",
"ex",
":",
"logger",
".",
"warning",
"(",
"\"ElasticSearch threw an error: %s\"",
",",
"ex",
")",
"data",
"=",
"{",
"\"index\"",
":",
"index",
",",
"\"status\"",
":",
"ex",
".",
"status_code",
",",
"\"reason\"",
":",
"ex",
".",
"error",
"}",
"finally",
":",
"logger",
".",
"info",
"(",
"data",
")"
] | 45.818182 | 16 |
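
The row above wraps each index command in try/except/finally so the outcome is logged whether or not the transport raises. A self-contained sketch of the same pattern; TransportError here is a stand-in, not the elasticsearch class:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("indexer")

class TransportError(Exception):
    # stand-in carrying the same attributes the row reads
    def __init__(self, status_code, error):
        super(TransportError, self).__init__(error)
        self.status_code, self.error = status_code, error

def handle_one(index, fail=False):
    data = {}
    try:
        if fail:
            raise TransportError(503, "backend unavailable")
        data = {"index": index, "status": "ok"}
    except TransportError as ex:
        logger.warning("ElasticSearch threw an error: %s", ex)
        data = {"index": index, "status": ex.status_code, "reason": ex.error}
    finally:
        logger.info(data)   # runs on success and failure alike

handle_one("posts")
handle_one("posts", fail=True)
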
def do_POST(self):
"""
This method will be called for each POST request to one of the
listener ports.
It parses the CIM-XML export message and delivers the contained
CIM indication to the stored listener object.
"""
# Accept header check described in DSP0200
accept = self.headers.get('Accept', 'text/xml')
if accept not in ('text/xml', 'application/xml', '*/*'):
self.send_http_error(
406, 'header-mismatch',
_format("Invalid Accept header value: {0} (need text/xml, "
"application/xml or */*)", accept))
return
# Accept-Charset header check described in DSP0200
accept_charset = self.headers.get('Accept-Charset', 'UTF-8')
tq_list = re.findall(TOKEN_QUALITY_FINDALL_PATTERN, accept_charset)
found = False
if tq_list is not None:
for token, quality in tq_list:
if token.lower() in ('utf-8', '*'):
found = True
break
if not found:
self.send_http_error(
406, 'header-mismatch',
_format("Invalid Accept-Charset header value: {0} "
"(need UTF-8 or *)", accept_charset))
return
# Accept-Encoding header check described in DSP0200
accept_encoding = self.headers.get('Accept-Encoding', 'Identity')
tq_list = re.findall(TOKEN_QUALITY_FINDALL_PATTERN, accept_encoding)
identity_acceptable = False
identity_found = False
if tq_list is not None:
for token, quality in tq_list:
quality = 1 if quality == '' else float(quality)
if token.lower() == 'identity':
identity_found = True
if quality > 0:
identity_acceptable = True
break
if not identity_found:
for token, quality in tq_list:
quality = 1 if quality == '' else float(quality)
if token == '*' and quality > 0:
identity_acceptable = True
break
if not identity_acceptable:
self.send_http_error(
406, 'header-mismatch',
_format("Invalid Accept-Encoding header value: {0} "
"(need Identity to be acceptable)", accept_encoding))
return
# Accept-Language header check described in DSP0200.
# Ignored, because this WBEM listener does not support multiple
# languages, and hence any language is allowed to be returned.
# Accept-Range header check described in DSP0200
accept_range = self.headers.get('Accept-Range', None)
if accept_range is not None:
self.send_http_error(
406, 'header-mismatch',
_format("Accept-Range header is not permitted {0}",
accept_range))
return
# Content-Type header check described in DSP0200
content_type = self.headers.get('Content-Type', None)
if content_type is None:
self.send_http_error(
406, 'header-mismatch',
"Content-Type header is required")
return
tc_list = re.findall(TOKEN_CHARSET_FINDALL_PATTERN, content_type)
found = False
if tc_list is not None:
for token, charset in tc_list:
if token.lower() in ('text/xml', 'application/xml') and \
(charset == '' or charset.lower() == 'utf-8'):
found = True
break
if not found:
self.send_http_error(
406, 'header-mismatch',
_format("Invalid Content-Type header value: {0} "
"(need text/xml or application/xml with "
"charset=utf-8 or empty)",
content_type))
return
# Content-Encoding header check described in DSP0200
content_encoding = self.headers.get('Content-Encoding', 'identity')
if content_encoding.lower() != 'identity':
self.send_http_error(
406, 'header-mismatch',
_format("Invalid Content-Encoding header value: {0}"
"(listener supports only identity)",
content_encoding))
return
# Content-Language header check described in DSP0200.
# Ignored, because this WBEM listener does not support multiple
# languages, and hence any language is allowed in the request.
# The following headers are ignored. They are not allowed to be used
# by servers, but listeners are not required to reject them:
# Content-Range, Expires, If-Range, Range.
# Start processing the request
content_len = int(self.headers.get('Content-Length', 0))
body = self.rfile.read(content_len)
try:
msgid, methodname, params = self.parse_export_request(body)
except (CIMXMLParseError, XMLParseError) as exc:
self.send_http_error(400, "request-not-well-formed", str(exc))
return
except VersionError as exc:
if str(exc).startswith("DTD"):
self.send_http_error(400, "unsupported-dtd-version",
str(exc))
elif str(exc).startswith("Protocol"):
self.send_http_error(400, "unsupported-protocol-version",
str(exc))
else:
self.send_http_error(400, "unsupported-version", str(exc))
return
if methodname == 'ExportIndication':
if len(params) != 1 or 'NewIndication' not in params:
self.send_error_response(
msgid, methodname, CIM_ERR_INVALID_PARAMETER,
_format("Expecting one parameter NewIndication, got {0!A}",
params.keys()))
return
indication_inst = params['NewIndication']
if not isinstance(indication_inst, CIMInstance):
self.send_error_response(
msgid, methodname, CIM_ERR_INVALID_PARAMETER,
_format("NewIndication parameter is not a CIM instance, "
"but {0!A}", indication_inst))
return
# server.listener created in WBEMListener.start function
self.server.listener.deliver_indication(indication_inst,
self.client_address[0])
self.send_success_response(msgid, methodname)
else:
self.send_error_response(
msgid, methodname, CIM_ERR_NOT_SUPPORTED,
_format("Unknown export method: {0!A}", methodname)) | [
"def",
"do_POST",
"(",
"self",
")",
":",
"# Accept header check described in DSP0200",
"accept",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"'Accept'",
",",
"'text/xml'",
")",
"if",
"accept",
"not",
"in",
"(",
"'text/xml'",
",",
"'application/xml'",
",",
"'*/*'",
")",
":",
"self",
".",
"send_http_error",
"(",
"406",
",",
"'header-mismatch'",
",",
"_format",
"(",
"\"Invalid Accept header value: {0} (need text/xml, \"",
"\"application/xml or */*)\"",
",",
"accept",
")",
")",
"return",
"# Accept-Charset header check described in DSP0200",
"accept_charset",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"'Accept-Charset'",
",",
"'UTF-8'",
")",
"tq_list",
"=",
"re",
".",
"findall",
"(",
"TOKEN_QUALITY_FINDALL_PATTERN",
",",
"accept_charset",
")",
"found",
"=",
"False",
"if",
"tq_list",
"is",
"not",
"None",
":",
"for",
"token",
",",
"quality",
"in",
"tq_list",
":",
"if",
"token",
".",
"lower",
"(",
")",
"in",
"(",
"'utf-8'",
",",
"'*'",
")",
":",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"self",
".",
"send_http_error",
"(",
"406",
",",
"'header-mismatch'",
",",
"_format",
"(",
"\"Invalid Accept-Charset header value: {0} \"",
"\"(need UTF-8 or *)\"",
",",
"accept_charset",
")",
")",
"return",
"# Accept-Encoding header check described in DSP0200",
"accept_encoding",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"'Accept-Encoding'",
",",
"'Identity'",
")",
"tq_list",
"=",
"re",
".",
"findall",
"(",
"TOKEN_QUALITY_FINDALL_PATTERN",
",",
"accept_encoding",
")",
"identity_acceptable",
"=",
"False",
"identity_found",
"=",
"False",
"if",
"tq_list",
"is",
"not",
"None",
":",
"for",
"token",
",",
"quality",
"in",
"tq_list",
":",
"quality",
"=",
"1",
"if",
"quality",
"==",
"''",
"else",
"float",
"(",
"quality",
")",
"if",
"token",
".",
"lower",
"(",
")",
"==",
"'identity'",
":",
"identity_found",
"=",
"True",
"if",
"quality",
">",
"0",
":",
"identity_acceptable",
"=",
"True",
"break",
"if",
"not",
"identity_found",
":",
"for",
"token",
",",
"quality",
"in",
"tq_list",
":",
"quality",
"=",
"1",
"if",
"quality",
"==",
"''",
"else",
"float",
"(",
"quality",
")",
"if",
"token",
"==",
"'*'",
"and",
"quality",
">",
"0",
":",
"identity_acceptable",
"=",
"True",
"break",
"if",
"not",
"identity_acceptable",
":",
"self",
".",
"send_http_error",
"(",
"406",
",",
"'header-mismatch'",
",",
"_format",
"(",
"\"Invalid Accept-Encoding header value: {0} \"",
"\"(need Identity to be acceptable)\"",
",",
"accept_encoding",
")",
")",
"return",
"# Accept-Language header check described in DSP0200.",
"# Ignored, because this WBEM listener does not support multiple",
"# languages, and hence any language is allowed to be returned.",
"# Accept-Range header check described in DSP0200",
"accept_range",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"'Accept-Range'",
",",
"None",
")",
"if",
"accept_range",
"is",
"not",
"None",
":",
"self",
".",
"send_http_error",
"(",
"406",
",",
"'header-mismatch'",
",",
"_format",
"(",
"\"Accept-Range header is not permitted {0}\"",
",",
"accept_range",
")",
")",
"return",
"# Content-Type header check described in DSP0200",
"content_type",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"None",
")",
"if",
"content_type",
"is",
"None",
":",
"self",
".",
"send_http_error",
"(",
"406",
",",
"'header-mismatch'",
",",
"\"Content-Type header is required\"",
")",
"return",
"tc_list",
"=",
"re",
".",
"findall",
"(",
"TOKEN_CHARSET_FINDALL_PATTERN",
",",
"content_type",
")",
"found",
"=",
"False",
"if",
"tc_list",
"is",
"not",
"None",
":",
"for",
"token",
",",
"charset",
"in",
"tc_list",
":",
"if",
"token",
".",
"lower",
"(",
")",
"in",
"(",
"'text/xml'",
",",
"'application/xml'",
")",
"and",
"(",
"charset",
"==",
"''",
"or",
"charset",
".",
"lower",
"(",
")",
"==",
"'utf-8'",
")",
":",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"self",
".",
"send_http_error",
"(",
"406",
",",
"'header-mismatch'",
",",
"_format",
"(",
"\"Invalid Content-Type header value: {0} \"",
"\"(need text/xml or application/xml with \"",
"\"charset=utf-8 or empty)\"",
",",
"content_type",
")",
")",
"return",
"# Content-Encoding header check described in DSP0200",
"content_encoding",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"'Content-Encoding'",
",",
"'identity'",
")",
"if",
"content_encoding",
".",
"lower",
"(",
")",
"!=",
"'identity'",
":",
"self",
".",
"send_http_error",
"(",
"406",
",",
"'header-mismatch'",
",",
"_format",
"(",
"\"Invalid Content-Encoding header value: {0}\"",
"\"(listener supports only identity)\"",
",",
"content_encoding",
")",
")",
"return",
"# Content-Language header check described in DSP0200.",
"# Ignored, because this WBEM listener does not support multiple",
"# languages, and hence any language is allowed in the request.",
"# The following headers are ignored. They are not allowed to be used",
"# by servers, but listeners are not required to reject them:",
"# Content-Range, Expires, If-Range, Range.",
"# Start processing the request",
"content_len",
"=",
"int",
"(",
"self",
".",
"headers",
".",
"get",
"(",
"'Content-Length'",
",",
"0",
")",
")",
"body",
"=",
"self",
".",
"rfile",
".",
"read",
"(",
"content_len",
")",
"try",
":",
"msgid",
",",
"methodname",
",",
"params",
"=",
"self",
".",
"parse_export_request",
"(",
"body",
")",
"except",
"(",
"CIMXMLParseError",
",",
"XMLParseError",
")",
"as",
"exc",
":",
"self",
".",
"send_http_error",
"(",
"400",
",",
"\"request-not-well-formed\"",
",",
"str",
"(",
"exc",
")",
")",
"return",
"except",
"VersionError",
"as",
"exc",
":",
"if",
"str",
"(",
"exc",
")",
".",
"startswith",
"(",
"\"DTD\"",
")",
":",
"self",
".",
"send_http_error",
"(",
"400",
",",
"\"unsupported-dtd-version\"",
",",
"str",
"(",
"exc",
")",
")",
"elif",
"str",
"(",
"exc",
")",
".",
"startswith",
"(",
"\"Protocol\"",
")",
":",
"self",
".",
"send_http_error",
"(",
"400",
",",
"\"unsupported-protocol-version\"",
",",
"str",
"(",
"exc",
")",
")",
"else",
":",
"self",
".",
"send_http_error",
"(",
"400",
",",
"\"unsupported-version\"",
",",
"str",
"(",
"exc",
")",
")",
"return",
"if",
"methodname",
"==",
"'ExportIndication'",
":",
"if",
"len",
"(",
"params",
")",
"!=",
"1",
"or",
"'NewIndication'",
"not",
"in",
"params",
":",
"self",
".",
"send_error_response",
"(",
"msgid",
",",
"methodname",
",",
"CIM_ERR_INVALID_PARAMETER",
",",
"_format",
"(",
"\"Expecting one parameter NewIndication, got {0!A}\"",
",",
"params",
".",
"keys",
"(",
")",
")",
")",
"return",
"indication_inst",
"=",
"params",
"[",
"'NewIndication'",
"]",
"if",
"not",
"isinstance",
"(",
"indication_inst",
",",
"CIMInstance",
")",
":",
"self",
".",
"send_error_response",
"(",
"msgid",
",",
"methodname",
",",
"CIM_ERR_INVALID_PARAMETER",
",",
"_format",
"(",
"\"NewIndication parameter is not a CIM instance, \"",
"\"but {0!A}\"",
",",
"indication_inst",
")",
")",
"return",
"# server.listener created in WBEMListener.start function",
"self",
".",
"server",
".",
"listener",
".",
"deliver_indication",
"(",
"indication_inst",
",",
"self",
".",
"client_address",
"[",
"0",
"]",
")",
"self",
".",
"send_success_response",
"(",
"msgid",
",",
"methodname",
")",
"else",
":",
"self",
".",
"send_error_response",
"(",
"msgid",
",",
"methodname",
",",
"CIM_ERR_NOT_SUPPORTED",
",",
"_format",
"(",
"\"Unknown export method: {0!A}\"",
",",
"methodname",
")",
")"
] | 42.228395 | 19.364198 |
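
The Accept-* checks in the row above iterate over (token, quality) pairs produced by a findall pattern. A standalone sketch of the Accept-Encoding identity check; the regex below is an assumption, not the module's actual TOKEN_QUALITY_FINDALL_PATTERN:

import re

# assumed shape: a token, optionally followed by ;q=<float>
TOKEN_QUALITY = re.compile(r'([^\s,;]+)(?:;\s*q=([0-9.]+))?')

def identity_acceptable(accept_encoding):
    pairs = TOKEN_QUALITY.findall(accept_encoding)
    identity_found = False
    acceptable = False
    for token, quality in pairs:
        q = 1.0 if quality == '' else float(quality)
        if token.lower() == 'identity':
            identity_found = True
            acceptable = q > 0
            break
    if not identity_found:
        # fall back to a wildcard entry, mirroring the row's second loop
        for token, quality in pairs:
            q = 1.0 if quality == '' else float(quality)
            if token == '*' and q > 0:
                acceptable = True
                break
    return acceptable

print(identity_acceptable('gzip;q=1.0, identity;q=0.5'))  # True
print(identity_acceptable('gzip;q=1.0, identity;q=0'))    # False
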
def generate_protocol(self,sweep=None):
"""
Create (x,y) points necessary to graph protocol for the current sweep.
"""
#TODO: make a line protocol that's plottable
if sweep is None:
sweep = self.currentSweep
if sweep is None:
sweep = 0
if not self.channel in self.header['dictEpochInfoPerDAC'].keys():
self.protoX=[0,self.sweepSize]
self.protoY=[self.holding,self.holding]
self.protoSeqX=self.protoX
self.protoSeqY=self.protoY
return
proto=self.header['dictEpochInfoPerDAC'][self.channel]
self.protoX=[] #plottable Xs
self.protoY=[] #plottable Ys
self.protoX.append(0)
self.protoY.append(self.holding)
for step in proto:
dX = proto[step]['lEpochInitDuration']
Y = proto[step]['fEpochInitLevel']+proto[step]['fEpochLevelInc']*sweep
self.protoX.append(self.protoX[-1])
self.protoY.append(Y) #go to new Y
self.protoX.append(self.protoX[-1]+dX) #take it to the new X
self.protoY.append(Y) #update the new Y #TODO: fix for ramps
if self.header['listDACInfo'][0]['nInterEpisodeLevel']: #nInterEpisodeLevel
finalVal=self.protoY[-1] #last holding
else:
finalVal=self.holding #regular holding
self.protoX.append(self.protoX[-1])
self.protoY.append(finalVal)
self.protoX.append(self.sweepSize)
self.protoY.append(finalVal)
for i in range(1,len(self.protoX)-1): #correct for weird ABF offset issue.
self.protoX[i]=self.protoX[i]+self.offsetX
self.protoSeqY=[self.protoY[0]]
self.protoSeqX=[self.protoX[0]]
for i in range(1,len(self.protoY)):
if not self.protoY[i]==self.protoY[i-1]:
self.protoSeqY.append(self.protoY[i])
self.protoSeqX.append(self.protoX[i])
if self.protoY[0]!=self.protoY[1]:
self.protoY.insert(1,self.protoY[0])
self.protoX.insert(1,self.protoX[1])
self.protoY.insert(1,self.protoY[0])
self.protoX.insert(1,self.protoX[0]+self.offsetX/2)
self.protoSeqY.append(finalVal)
self.protoSeqX.append(self.sweepSize)
self.protoX=np.array(self.protoX)
self.protoY=np.array(self.protoY) | [
"def",
"generate_protocol",
"(",
"self",
",",
"sweep",
"=",
"None",
")",
":",
"#TODO: make a line protocol that's plottable",
"if",
"sweep",
"is",
"None",
":",
"sweep",
"=",
"self",
".",
"currentSweep",
"if",
"sweep",
"is",
"None",
":",
"sweep",
"=",
"0",
"if",
"not",
"self",
".",
"channel",
"in",
"self",
".",
"header",
"[",
"'dictEpochInfoPerDAC'",
"]",
".",
"keys",
"(",
")",
":",
"self",
".",
"protoX",
"=",
"[",
"0",
",",
"self",
".",
"sweepSize",
"]",
"self",
".",
"protoY",
"=",
"[",
"self",
".",
"holding",
",",
"self",
".",
"holding",
"]",
"self",
".",
"protoSeqX",
"=",
"self",
".",
"protoX",
"self",
".",
"protoSeqY",
"=",
"self",
".",
"protoY",
"return",
"proto",
"=",
"self",
".",
"header",
"[",
"'dictEpochInfoPerDAC'",
"]",
"[",
"self",
".",
"channel",
"]",
"self",
".",
"protoX",
"=",
"[",
"]",
"#plottable Xs",
"self",
".",
"protoY",
"=",
"[",
"]",
"#plottable Ys",
"self",
".",
"protoX",
".",
"append",
"(",
"0",
")",
"self",
".",
"protoY",
".",
"append",
"(",
"self",
".",
"holding",
")",
"for",
"step",
"in",
"proto",
":",
"dX",
"=",
"proto",
"[",
"step",
"]",
"[",
"'lEpochInitDuration'",
"]",
"Y",
"=",
"proto",
"[",
"step",
"]",
"[",
"'fEpochInitLevel'",
"]",
"+",
"proto",
"[",
"step",
"]",
"[",
"'fEpochLevelInc'",
"]",
"*",
"sweep",
"self",
".",
"protoX",
".",
"append",
"(",
"self",
".",
"protoX",
"[",
"-",
"1",
"]",
")",
"self",
".",
"protoY",
".",
"append",
"(",
"Y",
")",
"#go to new Y",
"self",
".",
"protoX",
".",
"append",
"(",
"self",
".",
"protoX",
"[",
"-",
"1",
"]",
"+",
"dX",
")",
"#take it to the new X",
"self",
".",
"protoY",
".",
"append",
"(",
"Y",
")",
"#update the new Y #TODO: fix for ramps",
"if",
"self",
".",
"header",
"[",
"'listDACInfo'",
"]",
"[",
"0",
"]",
"[",
"'nInterEpisodeLevel'",
"]",
":",
"#nInterEpisodeLevel",
"finalVal",
"=",
"self",
".",
"protoY",
"[",
"-",
"1",
"]",
"#last holding",
"else",
":",
"finalVal",
"=",
"self",
".",
"holding",
"#regular holding",
"self",
".",
"protoX",
".",
"append",
"(",
"self",
".",
"protoX",
"[",
"-",
"1",
"]",
")",
"self",
".",
"protoY",
".",
"append",
"(",
"finalVal",
")",
"self",
".",
"protoX",
".",
"append",
"(",
"self",
".",
"sweepSize",
")",
"self",
".",
"protoY",
".",
"append",
"(",
"finalVal",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"protoX",
")",
"-",
"1",
")",
":",
"#correct for weird ABF offset issue.",
"self",
".",
"protoX",
"[",
"i",
"]",
"=",
"self",
".",
"protoX",
"[",
"i",
"]",
"+",
"self",
".",
"offsetX",
"self",
".",
"protoSeqY",
"=",
"[",
"self",
".",
"protoY",
"[",
"0",
"]",
"]",
"self",
".",
"protoSeqX",
"=",
"[",
"self",
".",
"protoX",
"[",
"0",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"protoY",
")",
")",
":",
"if",
"not",
"self",
".",
"protoY",
"[",
"i",
"]",
"==",
"self",
".",
"protoY",
"[",
"i",
"-",
"1",
"]",
":",
"self",
".",
"protoSeqY",
".",
"append",
"(",
"self",
".",
"protoY",
"[",
"i",
"]",
")",
"self",
".",
"protoSeqX",
".",
"append",
"(",
"self",
".",
"protoX",
"[",
"i",
"]",
")",
"if",
"self",
".",
"protoY",
"[",
"0",
"]",
"!=",
"self",
".",
"protoY",
"[",
"1",
"]",
":",
"self",
".",
"protoY",
".",
"insert",
"(",
"1",
",",
"self",
".",
"protoY",
"[",
"0",
"]",
")",
"self",
".",
"protoX",
".",
"insert",
"(",
"1",
",",
"self",
".",
"protoX",
"[",
"1",
"]",
")",
"self",
".",
"protoY",
".",
"insert",
"(",
"1",
",",
"self",
".",
"protoY",
"[",
"0",
"]",
")",
"self",
".",
"protoX",
".",
"insert",
"(",
"1",
",",
"self",
".",
"protoX",
"[",
"0",
"]",
"+",
"self",
".",
"offsetX",
"/",
"2",
")",
"self",
".",
"protoSeqY",
".",
"append",
"(",
"finalVal",
")",
"self",
".",
"protoSeqX",
".",
"append",
"(",
"self",
".",
"sweepSize",
")",
"self",
".",
"protoX",
"=",
"np",
".",
"array",
"(",
"self",
".",
"protoX",
")",
"self",
".",
"protoY",
"=",
"np",
".",
"array",
"(",
"self",
".",
"protoY",
")"
] | 41.785714 | 13.892857 |
def finish(self):
"""Finish performing the action."""
self.status = 'completed'
self.time_completed = timestamp()
self.thing.action_notify(self) | [
"def",
"finish",
"(",
"self",
")",
":",
"self",
".",
"status",
"=",
"'completed'",
"self",
".",
"time_completed",
"=",
"timestamp",
"(",
")",
"self",
".",
"thing",
".",
"action_notify",
"(",
"self",
")"
] | 34.4 | 6.6 |
def reset(self):
"""
Reset the state of the instance to when it was constructed
"""
self.operations = []
self._last_overflow = 'WRAP'
self.overflow(self._default_overflow or self._last_overflow) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"operations",
"=",
"[",
"]",
"self",
".",
"_last_overflow",
"=",
"'WRAP'",
"self",
".",
"overflow",
"(",
"self",
".",
"_default_overflow",
"or",
"self",
".",
"_last_overflow",
")"
] | 33.714286 | 13.428571 |
def main():
"""
NAME
uniform.py
DESCRIPTION
draws N directions from uniform distribution on a sphere
SYNTAX
uniform.py [-h][command line options]
-h prints help message and quits
-n N, specify N on the command line (default is 100)
-F file, specify output file name, default is standard output
"""
outf=""
N=100
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outf=sys.argv[ind+1]
if outf!="": out=open(outf,'w')
if '-n' in sys.argv:
ind=sys.argv.index('-n')
N=int(sys.argv[ind+1])
dirs=pmag.get_unf(N)
if outf=='':
for dir in dirs:
print('%7.1f %7.1f'%(dir[0],dir[1]))
else:
numpy.savetxt(outf,dirs,fmt='%7.1f %7.1f') | [
"def",
"main",
"(",
")",
":",
"outf",
"=",
"\"\"",
"N",
"=",
"100",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-F'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-F'",
")",
"outf",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"outf",
"!=",
"\"\"",
":",
"out",
"=",
"open",
"(",
"outf",
",",
"'w'",
")",
"if",
"'-n'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-n'",
")",
"N",
"=",
"int",
"(",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
")",
"dirs",
"=",
"pmag",
".",
"get_unf",
"(",
"N",
")",
"if",
"outf",
"==",
"''",
":",
"for",
"dir",
"in",
"dirs",
":",
"print",
"(",
"'%7.1f %7.1f'",
"%",
"(",
"dir",
"[",
"0",
"]",
",",
"dir",
"[",
"1",
"]",
")",
")",
"else",
":",
"numpy",
".",
"savetxt",
"(",
"outf",
",",
"dirs",
",",
"fmt",
"=",
"'%7.1f %7.1f'",
")"
] | 25.75 | 18.1875 |
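
The row above delegates the actual draw to pmag.get_unf. A self-contained numpy sketch of one standard construction of N uniform (declination, inclination) directions on a sphere:

import numpy as np

def uniform_directions(n):
    # declination uniform on [0, 360); drawing sin(inclination) uniformly
    # on [-1, 1] makes the directions equal-area on the sphere
    dec = np.random.uniform(0., 360., n)
    inc = np.degrees(np.arcsin(np.random.uniform(-1., 1., n)))
    return np.column_stack((dec, inc))

print(uniform_directions(3))  # three (dec, inc) rows, in degrees
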
def order_by(self, **kwargs):
"""
Orders the query by the key passed in +kwargs+. Only pass one key, as
it cannot sort by multiple columns at once. Raises QueryInvalid if this
method is called when there is already a custom order (i.e. this
        method was already called on this query). Analogous to "ORDER BY" in SQL.
"""
# Only get one thing from kwargs (we can only order by one thing...)
if self._order_with:
raise QueryInvalid("Cannot order by more than one column")
self._order_with = dict([kwargs.popitem()])
return self | [
"def",
"order_by",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Only get one thing from kwargs (we can only order by one thing...)",
"if",
"self",
".",
"_order_with",
":",
"raise",
"QueryInvalid",
"(",
"\"Cannot order by more than one column\"",
")",
"self",
".",
"_order_with",
"=",
"dict",
"(",
"[",
"kwargs",
".",
"popitem",
"(",
")",
"]",
")",
"return",
"self"
] | 50.083333 | 22.25 |
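
A minimal runnable sketch of the single-order constraint above; Query here is stripped down to just this method:

class QueryInvalid(Exception):
    pass

class Query(object):
    def __init__(self):
        self._order_with = None

    def order_by(self, **kwargs):
        if self._order_with:
            raise QueryInvalid("Cannot order by more than one column")
        self._order_with = dict([kwargs.popitem()])
        return self

q = Query().order_by(age='desc')
print(q._order_with)            # {'age': 'desc'}
try:
    q.order_by(name='asc')
except QueryInvalid as exc:
    print(exc)                  # Cannot order by more than one column
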
def setup(self, redis_conn=None, host='localhost', port=6379):
'''
Set up the redis connection
'''
if redis_conn is None:
if host is not None and port is not None:
self.redis_conn = redis.Redis(host=host, port=port)
else:
raise Exception("Please specify some form of connection "
"to Redis")
else:
self.redis_conn = redis_conn
self.redis_conn.info() | [
"def",
"setup",
"(",
"self",
",",
"redis_conn",
"=",
"None",
",",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"6379",
")",
":",
"if",
"redis_conn",
"is",
"None",
":",
"if",
"host",
"is",
"not",
"None",
"and",
"port",
"is",
"not",
"None",
":",
"self",
".",
"redis_conn",
"=",
"redis",
".",
"Redis",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Please specify some form of connection \"",
"\"to Redis\"",
")",
"else",
":",
"self",
".",
"redis_conn",
"=",
"redis_conn",
"self",
".",
"redis_conn",
".",
"info",
"(",
")"
] | 34.928571 | 19.642857 |
def configure(self, listener):
"""
        Configure a :class:`.Listener` to capture callback queries
"""
listener.capture([
lambda msg: flavor(msg) == 'callback_query',
{'message': self._chat_origin_included}
])
listener.capture([
lambda msg: flavor(msg) == 'callback_query',
{'inline_message_id': self._inline_origin_included}
]) | [
"def",
"configure",
"(",
"self",
",",
"listener",
")",
":",
"listener",
".",
"capture",
"(",
"[",
"lambda",
"msg",
":",
"flavor",
"(",
"msg",
")",
"==",
"'callback_query'",
",",
"{",
"'message'",
":",
"self",
".",
"_chat_origin_included",
"}",
"]",
")",
"listener",
".",
"capture",
"(",
"[",
"lambda",
"msg",
":",
"flavor",
"(",
"msg",
")",
"==",
"'callback_query'",
",",
"{",
"'inline_message_id'",
":",
"self",
".",
"_inline_origin_included",
"}",
"]",
")"
] | 31.846154 | 17.538462 |
def dumpindented(self, pn, indent=0):
"""
Dump all nodes of the current page with keys indented, showing how the `indent`
feature works
"""
page = self.readpage(pn)
print(" " * indent, page)
if page.isindex():
print(" " * indent, end="")
self.dumpindented(page.preceeding, indent + 1)
for p in range(len(page.index)):
print(" " * indent, end="")
self.dumpindented(page.getpage(p), indent + 1) | [
"def",
"dumpindented",
"(",
"self",
",",
"pn",
",",
"indent",
"=",
"0",
")",
":",
"page",
"=",
"self",
".",
"readpage",
"(",
"pn",
")",
"print",
"(",
"\" \"",
"*",
"indent",
",",
"page",
")",
"if",
"page",
".",
"isindex",
"(",
")",
":",
"print",
"(",
"\" \"",
"*",
"indent",
",",
"end",
"=",
"\"\"",
")",
"self",
".",
"dumpindented",
"(",
"page",
".",
"preceeding",
",",
"indent",
"+",
"1",
")",
"for",
"p",
"in",
"range",
"(",
"len",
"(",
"page",
".",
"index",
")",
")",
":",
"print",
"(",
"\" \"",
"*",
"indent",
",",
"end",
"=",
"\"\"",
")",
"self",
".",
"dumpindented",
"(",
"page",
".",
"getpage",
"(",
"p",
")",
",",
"indent",
"+",
"1",
")"
] | 39.923077 | 11.153846 |
def tridisolve(d, e, b, overwrite_b=True):
"""
Symmetric tridiagonal system solver,
from Golub and Van Loan, Matrix Computations pg 157
Parameters
----------
d : ndarray
main diagonal stored in d[:]
e : ndarray
superdiagonal stored in e[:-1]
b : ndarray
RHS vector
Returns
-------
x : ndarray
Solution to Ax = b (if overwrite_b is False). Otherwise solution is
stored in previous RHS vector b
"""
N = len(b)
# work vectors
dw = d.copy()
ew = e.copy()
if overwrite_b:
x = b
else:
x = b.copy()
for k in range(1, N):
# e^(k-1) = e(k-1) / d(k-1)
# d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)
t = ew[k - 1]
ew[k - 1] = t / dw[k - 1]
dw[k] = dw[k] - t * ew[k - 1]
for k in range(1, N):
x[k] = x[k] - ew[k - 1] * x[k - 1]
x[N - 1] = x[N - 1] / dw[N - 1]
for k in range(N - 2, -1, -1):
x[k] = x[k] / dw[k] - ew[k] * x[k + 1]
if not overwrite_b:
return x | [
"def",
"tridisolve",
"(",
"d",
",",
"e",
",",
"b",
",",
"overwrite_b",
"=",
"True",
")",
":",
"N",
"=",
"len",
"(",
"b",
")",
"# work vectors",
"dw",
"=",
"d",
".",
"copy",
"(",
")",
"ew",
"=",
"e",
".",
"copy",
"(",
")",
"if",
"overwrite_b",
":",
"x",
"=",
"b",
"else",
":",
"x",
"=",
"b",
".",
"copy",
"(",
")",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"N",
")",
":",
"# e^(k-1) = e(k-1) / d(k-1)",
"# d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)",
"t",
"=",
"ew",
"[",
"k",
"-",
"1",
"]",
"ew",
"[",
"k",
"-",
"1",
"]",
"=",
"t",
"/",
"dw",
"[",
"k",
"-",
"1",
"]",
"dw",
"[",
"k",
"]",
"=",
"dw",
"[",
"k",
"]",
"-",
"t",
"*",
"ew",
"[",
"k",
"-",
"1",
"]",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"N",
")",
":",
"x",
"[",
"k",
"]",
"=",
"x",
"[",
"k",
"]",
"-",
"ew",
"[",
"k",
"-",
"1",
"]",
"*",
"x",
"[",
"k",
"-",
"1",
"]",
"x",
"[",
"N",
"-",
"1",
"]",
"=",
"x",
"[",
"N",
"-",
"1",
"]",
"/",
"dw",
"[",
"N",
"-",
"1",
"]",
"for",
"k",
"in",
"range",
"(",
"N",
"-",
"2",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"x",
"[",
"k",
"]",
"=",
"x",
"[",
"k",
"]",
"/",
"dw",
"[",
"k",
"]",
"-",
"ew",
"[",
"k",
"]",
"*",
"x",
"[",
"k",
"+",
"1",
"]",
"if",
"not",
"overwrite_b",
":",
"return",
"x"
] | 22.266667 | 19.111111 |
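
A quick numerical check of the solver above against a dense solve; assumes numpy and that tridisolve from the row is in scope:

import numpy as np

n = 6
d = np.random.rand(n) + n            # diagonally dominant, so well conditioned
e = np.random.rand(n)                # superdiagonal lives in e[:-1]
b = np.random.rand(n)

A = np.diag(d) + np.diag(e[:-1], 1) + np.diag(e[:-1], -1)
x = tridisolve(d, e, b, overwrite_b=False)
print(np.allclose(A.dot(x), b))      # True
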
def virtual(cls, **options):
"""
Allows for defining virtual columns and collectors on models -- these
are objects that are defined in code and not directly in a data store.
:param cls:
:param options:
:return:
"""
def wrapped(func):
param_name = inflection.underscore(func.__name__)
options.setdefault('name', param_name)
if 'flags' in options:
if isinstance(options['flags'], set):
options['flags'].add('Virtual')
options['flags'].add('ReadOnly')
else:
options['flags'] |= (cls.Flags.Virtual | cls.Flags.ReadOnly)
else:
options['flags'] = {'Virtual', 'ReadOnly'}
def define_setter():
def setter_wrapped(setter_func):
func.__orb__.setFlags(func.__orb__.flags() & ~cls.Flags.ReadOnly)
func.__orb__.setter()(setter_func)
return setter_func
return setter_wrapped
def define_query_filter():
def shortcut_wrapped(shortcut_func):
func.__orb__.queryFilter(shortcut_func)
return shortcut_func
return shortcut_wrapped
func.__orb__ = cls(**options)
func.__orb__.getter()(func)
func.setter = define_setter
func.queryFilter = define_query_filter
return func
return wrapped | [
"def",
"virtual",
"(",
"cls",
",",
"*",
"*",
"options",
")",
":",
"def",
"wrapped",
"(",
"func",
")",
":",
"param_name",
"=",
"inflection",
".",
"underscore",
"(",
"func",
".",
"__name__",
")",
"options",
".",
"setdefault",
"(",
"'name'",
",",
"param_name",
")",
"if",
"'flags'",
"in",
"options",
":",
"if",
"isinstance",
"(",
"options",
"[",
"'flags'",
"]",
",",
"set",
")",
":",
"options",
"[",
"'flags'",
"]",
".",
"add",
"(",
"'Virtual'",
")",
"options",
"[",
"'flags'",
"]",
".",
"add",
"(",
"'ReadOnly'",
")",
"else",
":",
"options",
"[",
"'flags'",
"]",
"|=",
"(",
"cls",
".",
"Flags",
".",
"Virtual",
"|",
"cls",
".",
"Flags",
".",
"ReadOnly",
")",
"else",
":",
"options",
"[",
"'flags'",
"]",
"=",
"{",
"'Virtual'",
",",
"'ReadOnly'",
"}",
"def",
"define_setter",
"(",
")",
":",
"def",
"setter_wrapped",
"(",
"setter_func",
")",
":",
"func",
".",
"__orb__",
".",
"setFlags",
"(",
"func",
".",
"__orb__",
".",
"flags",
"(",
")",
"&",
"~",
"cls",
".",
"Flags",
".",
"ReadOnly",
")",
"func",
".",
"__orb__",
".",
"setter",
"(",
")",
"(",
"setter_func",
")",
"return",
"setter_func",
"return",
"setter_wrapped",
"def",
"define_query_filter",
"(",
")",
":",
"def",
"shortcut_wrapped",
"(",
"shortcut_func",
")",
":",
"func",
".",
"__orb__",
".",
"queryFilter",
"(",
"shortcut_func",
")",
"return",
"shortcut_func",
"return",
"shortcut_wrapped",
"func",
".",
"__orb__",
"=",
"cls",
"(",
"*",
"*",
"options",
")",
"func",
".",
"__orb__",
".",
"getter",
"(",
")",
"(",
"func",
")",
"func",
".",
"setter",
"=",
"define_setter",
"func",
".",
"queryFilter",
"=",
"define_query_filter",
"return",
"func",
"return",
"wrapped"
] | 33.219512 | 17.268293 |
def describe_addresses(self, *addresses):
"""
List the elastic IPs allocated in this account.
@param addresses: if specified, the addresses to get information about.
@return: a C{list} of (address, instance_id). If the elastic IP is not
associated currently, C{instance_id} will be C{None}.
"""
address_set = {}
for pos, address in enumerate(addresses):
address_set["PublicIp.%d" % (pos + 1)] = address
query = self.query_factory(
action="DescribeAddresses", creds=self.creds,
endpoint=self.endpoint, other_params=address_set)
d = query.submit()
return d.addCallback(self.parser.describe_addresses) | [
"def",
"describe_addresses",
"(",
"self",
",",
"*",
"addresses",
")",
":",
"address_set",
"=",
"{",
"}",
"for",
"pos",
",",
"address",
"in",
"enumerate",
"(",
"addresses",
")",
":",
"address_set",
"[",
"\"PublicIp.%d\"",
"%",
"(",
"pos",
"+",
"1",
")",
"]",
"=",
"address",
"query",
"=",
"self",
".",
"query_factory",
"(",
"action",
"=",
"\"DescribeAddresses\"",
",",
"creds",
"=",
"self",
".",
"creds",
",",
"endpoint",
"=",
"self",
".",
"endpoint",
",",
"other_params",
"=",
"address_set",
")",
"d",
"=",
"query",
".",
"submit",
"(",
")",
"return",
"d",
".",
"addCallback",
"(",
"self",
".",
"parser",
".",
"describe_addresses",
")"
] | 41.882353 | 18.823529 |
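
The row above encodes repeated EC2 query parameters as PublicIp.1, PublicIp.2, and so on. A standalone sketch of that numbering:

def address_params(addresses):
    # EC2 query APIs number repeated parameters starting from 1
    return {"PublicIp.%d" % (pos + 1): addr
            for pos, addr in enumerate(addresses)}

print(address_params(["203.0.113.7", "203.0.113.9"]))
# {'PublicIp.1': '203.0.113.7', 'PublicIp.2': '203.0.113.9'}
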
def make_trajectory(first, filename, restart=False):
'''Factory function to easily create a trajectory object'''
mode = 'w'
if restart:
mode = 'a'
return Trajectory(first, filename, mode) | [
"def",
"make_trajectory",
"(",
"first",
",",
"filename",
",",
"restart",
"=",
"False",
")",
":",
"mode",
"=",
"'w'",
"if",
"restart",
":",
"mode",
"=",
"'a'",
"return",
"Trajectory",
"(",
"first",
",",
"filename",
",",
"mode",
")"
] | 26.75 | 23 |
def arg_comparitor(name):
"""
    :param name: argument name
    :return: pair containing name, comparator
given an argument name, munge it and return a proper comparitor
>>> arg_comparitor("a")
a, operator.eq
>>> arg_comparitor("a__in")
a, operator.contains
"""
if name.endswith("__in"):
return name[:-4], contains
elif name.endswith("__ge"):
return name[:-4], ge
elif name.endswith("__gt"):
return name[:-4], gt
elif name.endswith("__le"):
return name[:-4], le
elif name.endswith("__lt"):
return name[:-4], lt
if name.endswith("__eq"):
return name[:-4], eq
if name.endswith("__ne"):
return name[:-4], ne
else:
return name, eq | [
"def",
"arg_comparitor",
"(",
"name",
")",
":",
"if",
"name",
".",
"endswith",
"(",
"\"__in\"",
")",
":",
"return",
"name",
"[",
":",
"-",
"4",
"]",
",",
"contains",
"elif",
"name",
".",
"endswith",
"(",
"\"__ge\"",
")",
":",
"return",
"name",
"[",
":",
"-",
"4",
"]",
",",
"ge",
"elif",
"name",
".",
"endswith",
"(",
"\"__gt\"",
")",
":",
"return",
"name",
"[",
":",
"-",
"4",
"]",
",",
"gt",
"elif",
"name",
".",
"endswith",
"(",
"\"__le\"",
")",
":",
"return",
"name",
"[",
":",
"-",
"4",
"]",
",",
"le",
"elif",
"name",
".",
"endswith",
"(",
"\"__lt\"",
")",
":",
"return",
"name",
"[",
":",
"-",
"4",
"]",
",",
"lt",
"if",
"name",
".",
"endswith",
"(",
"\"__eq\"",
")",
":",
"return",
"name",
"[",
":",
"-",
"4",
"]",
",",
"eq",
"if",
"name",
".",
"endswith",
"(",
"\"__ne\"",
")",
":",
"return",
"name",
"[",
":",
"-",
"4",
"]",
",",
"ne",
"else",
":",
"return",
"name",
",",
"eq"
] | 24.689655 | 15.310345 |
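
A usage sketch of the suffix dispatch above, assuming arg_comparitor and its operator imports from the row are in scope:

print(arg_comparitor("age__ge"))    # ('age', <built-in function ge>)
print(arg_comparitor("tags__in"))   # ('tags', <built-in function contains>)
print(arg_comparitor("name"))       # ('name', <built-in function eq>)
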
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
from pandas.core.arrays.categorical import _factorize_from_iterables
codes, levels = _factorize_from_iterables(arrays)
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, codes=codes, sortorder=sortorder,
names=names, verify_integrity=False) | [
"def",
"from_arrays",
"(",
"cls",
",",
"arrays",
",",
"sortorder",
"=",
"None",
",",
"names",
"=",
"None",
")",
":",
"error_msg",
"=",
"\"Input must be a list / sequence of array-likes.\"",
"if",
"not",
"is_list_like",
"(",
"arrays",
")",
":",
"raise",
"TypeError",
"(",
"error_msg",
")",
"elif",
"is_iterator",
"(",
"arrays",
")",
":",
"arrays",
"=",
"list",
"(",
"arrays",
")",
"# Check if elements of array are list-like",
"for",
"array",
"in",
"arrays",
":",
"if",
"not",
"is_list_like",
"(",
"array",
")",
":",
"raise",
"TypeError",
"(",
"error_msg",
")",
"# Check if lengths of all arrays are equal or not,",
"# raise ValueError, if not",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"arrays",
")",
")",
":",
"if",
"len",
"(",
"arrays",
"[",
"i",
"]",
")",
"!=",
"len",
"(",
"arrays",
"[",
"i",
"-",
"1",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'all arrays must be same length'",
")",
"from",
"pandas",
".",
"core",
".",
"arrays",
".",
"categorical",
"import",
"_factorize_from_iterables",
"codes",
",",
"levels",
"=",
"_factorize_from_iterables",
"(",
"arrays",
")",
"if",
"names",
"is",
"None",
":",
"names",
"=",
"[",
"getattr",
"(",
"arr",
",",
"\"name\"",
",",
"None",
")",
"for",
"arr",
"in",
"arrays",
"]",
"return",
"MultiIndex",
"(",
"levels",
"=",
"levels",
",",
"codes",
"=",
"codes",
",",
"sortorder",
"=",
"sortorder",
",",
"names",
"=",
"names",
",",
"verify_integrity",
"=",
"False",
")"
] | 36.966102 | 20.050847 |
def kill_process(modeladmin, request, queryset):
"""
    kills a dedicated process with SIGKILL
:return:
"""
for process in queryset:
process.stop(signum=signal.SIGKILL) | [
"def",
"kill_process",
"(",
"modeladmin",
",",
"request",
",",
"queryset",
")",
":",
"for",
"process",
"in",
"queryset",
":",
"process",
".",
"stop",
"(",
"signum",
"=",
"signal",
".",
"SIGKILL",
")"
] | 25.285714 | 8.428571 |
def do_scan(self, line):
"""
scan [:tablename] [--batch=#] [-{max}] [--count|-c] [--array|-a] [+filter_attribute:filter_value] [attributes,...]
        if filter_value contains '=' it's interpreted as {conditional}={value} where conditional is:
            eq (equal value)
            ne (not equal value)
            le (less than or equal to value)
            lt (less than value)
            ge (greater than or equal to value)
            gt (greater than value)
            :exists (value exists)
            :nexists (value does not exist)
            contains (contains value)
            ncontains (does not contain value)
begin (attribute begins with value)
between (between value1 and value2 - use: between=value1,value2)
otherwise the value must fully match (equal attribute)
"""
table, line = self.get_table_params(line)
args = self.getargs(line)
scan_filter = {}
count = False
as_array = False
max_size = None
batch_size = None
start = None
while args:
arg = args[0]
if arg.startswith('+'):
args.pop(0)
filter_name, filter_value = arg[1:].split(':', 1)
if filter_value.startswith("begin="):
filter_cond = BEGINS_WITH(self.get_typed_value(filter_name, filter_value[6:]))
elif filter_value.startswith("eq="):
filter_cond = EQ(self.get_typed_value(filter_name, filter_value[3:]))
elif filter_value.startswith("ne="):
filter_cond = NE(self.get_typed_value(filter_name, filter_value[3:]))
elif filter_value.startswith("le="):
filter_cond = LE(self.get_typed_value(filter_name, filter_value[3:]))
elif filter_value.startswith("lt="):
filter_cond = LT(self.get_typed_value(filter_name, filter_value[3:]))
elif filter_value.startswith("ge="):
filter_cond = GE(self.get_typed_value(filter_name, filter_value[3:]))
elif filter_value.startswith("gt="):
filter_cond = GT(self.get_typed_value(filter_name, filter_value[3:]))
elif filter_value == ":exists":
filter_cond = NOT_NULL()
elif filter_value == ":nexists":
filter_cond = NULL()
elif filter_value.startswith("contains="):
filter_cond = CONTAINS(self.get_typed_value(filter_name, filter_value[9:]))
elif filter_value.startswith("between="):
parts = filter_value[8:].split(",", 1)
filter_cond = BETWEEN(self.get_typed_value(parts[0]), self.get_typed_value(filter_name, parts[1]))
else:
filter_cond = EQ(self.get_typed_value(filter_name, filter_value))
scan_filter[filter_name] = filter_cond
elif arg.startswith('--batch='):
args.pop(0)
batch_size = int(arg[8:])
elif arg.startswith('--max='):
args.pop(0)
max_size = int(arg[6:])
elif arg.startswith('--start='):
args.pop(0)
start = (arg[8:], )
elif arg == '--next':
args.pop(0)
if self.next_key:
start = self.next_key
else:
print "no next"
return
elif arg in ['--array', '-a']:
args.pop(0)
as_array = True
elif arg in ['--count', '-c']:
args.pop(0)
count = True
elif arg[0] == '-' and arg[1:].isdigit():
args.pop(0)
max_size = int(arg[1:])
elif arg == '--':
args.pop(0)
break
elif arg.startswith('-'):
args.pop(0)
print "invalid argument: %s" % arg
break
else:
break
attr_keys = args[0].split(",") if args else None
attrs = list(set(attr_keys)) if attr_keys else None
#print "scan filter:%s attributes:%s limit:%s max:%s count:%s" % (scan_filter, attrs, batch_size, max, count)
result = table.scan(scan_filter=scan_filter, attributes_to_get=attrs, request_limit=batch_size, max_results=max_size, count=count, exclusive_start_key=start)
if count:
print "count: %s/%s" % (result.scanned_count, result.count)
self.next_key = None
else:
if as_array and attr_keys:
self.print_iterator_array(result, attr_keys)
else:
self.print_iterator(result)
self.next_key = result.last_evaluated_key
if self.consumed:
print "consumed units:", result.consumed_units | [
"def",
"do_scan",
"(",
"self",
",",
"line",
")",
":",
"table",
",",
"line",
"=",
"self",
".",
"get_table_params",
"(",
"line",
")",
"args",
"=",
"self",
".",
"getargs",
"(",
"line",
")",
"scan_filter",
"=",
"{",
"}",
"count",
"=",
"False",
"as_array",
"=",
"False",
"max_size",
"=",
"None",
"batch_size",
"=",
"None",
"start",
"=",
"None",
"while",
"args",
":",
"arg",
"=",
"args",
"[",
"0",
"]",
"if",
"arg",
".",
"startswith",
"(",
"'+'",
")",
":",
"args",
".",
"pop",
"(",
"0",
")",
"filter_name",
",",
"filter_value",
"=",
"arg",
"[",
"1",
":",
"]",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"filter_value",
".",
"startswith",
"(",
"\"begin=\"",
")",
":",
"filter_cond",
"=",
"BEGINS_WITH",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
"[",
"6",
":",
"]",
")",
")",
"elif",
"filter_value",
".",
"startswith",
"(",
"\"eq=\"",
")",
":",
"filter_cond",
"=",
"EQ",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
"[",
"3",
":",
"]",
")",
")",
"elif",
"filter_value",
".",
"startswith",
"(",
"\"ne=\"",
")",
":",
"filter_cond",
"=",
"NE",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
"[",
"3",
":",
"]",
")",
")",
"elif",
"filter_value",
".",
"startswith",
"(",
"\"le=\"",
")",
":",
"filter_cond",
"=",
"LE",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
"[",
"3",
":",
"]",
")",
")",
"elif",
"filter_value",
".",
"startswith",
"(",
"\"lt=\"",
")",
":",
"filter_cond",
"=",
"LT",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
"[",
"3",
":",
"]",
")",
")",
"elif",
"filter_value",
".",
"startswith",
"(",
"\"ge=\"",
")",
":",
"filter_cond",
"=",
"GE",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
"[",
"3",
":",
"]",
")",
")",
"elif",
"filter_value",
".",
"startswith",
"(",
"\"gt=\"",
")",
":",
"filter_cond",
"=",
"GT",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
"[",
"3",
":",
"]",
")",
")",
"elif",
"filter_value",
"==",
"\":exists\"",
":",
"filter_cond",
"=",
"NOT_NULL",
"(",
")",
"elif",
"filter_value",
"==",
"\":nexists\"",
":",
"filter_cond",
"=",
"NULL",
"(",
")",
"elif",
"filter_value",
".",
"startswith",
"(",
"\"contains=\"",
")",
":",
"filter_cond",
"=",
"CONTAINS",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
"[",
"9",
":",
"]",
")",
")",
"elif",
"filter_value",
".",
"startswith",
"(",
"\"between=\"",
")",
":",
"parts",
"=",
"filter_value",
"[",
"8",
":",
"]",
".",
"split",
"(",
"\",\"",
",",
"1",
")",
"filter_cond",
"=",
"BETWEEN",
"(",
"self",
".",
"get_typed_value",
"(",
"parts",
"[",
"0",
"]",
")",
",",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"parts",
"[",
"1",
"]",
")",
")",
"else",
":",
"filter_cond",
"=",
"EQ",
"(",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
")",
")",
"scan_filter",
"[",
"filter_name",
"]",
"=",
"filter_cond",
"elif",
"arg",
".",
"startswith",
"(",
"'--batch='",
")",
":",
"args",
".",
"pop",
"(",
"0",
")",
"batch_size",
"=",
"int",
"(",
"arg",
"[",
"8",
":",
"]",
")",
"elif",
"arg",
".",
"startswith",
"(",
"'--max='",
")",
":",
"args",
".",
"pop",
"(",
"0",
")",
"max_size",
"=",
"int",
"(",
"arg",
"[",
"6",
":",
"]",
")",
"elif",
"arg",
".",
"startswith",
"(",
"'--start='",
")",
":",
"args",
".",
"pop",
"(",
"0",
")",
"start",
"=",
"(",
"arg",
"[",
"8",
":",
"]",
",",
")",
"elif",
"arg",
"==",
"'--next'",
":",
"args",
".",
"pop",
"(",
"0",
")",
"if",
"self",
".",
"next_key",
":",
"start",
"=",
"self",
".",
"next_key",
"else",
":",
"print",
"\"no next\"",
"return",
"elif",
"arg",
"in",
"[",
"'--array'",
",",
"'-a'",
"]",
":",
"args",
".",
"pop",
"(",
"0",
")",
"as_array",
"=",
"True",
"elif",
"arg",
"in",
"[",
"'--count'",
",",
"'-c'",
"]",
":",
"args",
".",
"pop",
"(",
"0",
")",
"count",
"=",
"True",
"elif",
"arg",
"[",
"0",
"]",
"==",
"'-'",
"and",
"arg",
"[",
"1",
":",
"]",
".",
"isdigit",
"(",
")",
":",
"args",
".",
"pop",
"(",
"0",
")",
"max_size",
"=",
"int",
"(",
"arg",
"[",
"1",
":",
"]",
")",
"elif",
"arg",
"==",
"'--'",
":",
"args",
".",
"pop",
"(",
"0",
")",
"break",
"elif",
"arg",
".",
"startswith",
"(",
"'-'",
")",
":",
"args",
".",
"pop",
"(",
"0",
")",
"print",
"\"invalid argument: %s\"",
"%",
"arg",
"break",
"else",
":",
"break",
"attr_keys",
"=",
"args",
"[",
"0",
"]",
".",
"split",
"(",
"\",\"",
")",
"if",
"args",
"else",
"None",
"attrs",
"=",
"list",
"(",
"set",
"(",
"attr_keys",
")",
")",
"if",
"attr_keys",
"else",
"None",
"#print \"scan filter:%s attributes:%s limit:%s max:%s count:%s\" % (scan_filter, attrs, batch_size, max, count)",
"result",
"=",
"table",
".",
"scan",
"(",
"scan_filter",
"=",
"scan_filter",
",",
"attributes_to_get",
"=",
"attrs",
",",
"request_limit",
"=",
"batch_size",
",",
"max_results",
"=",
"max_size",
",",
"count",
"=",
"count",
",",
"exclusive_start_key",
"=",
"start",
")",
"if",
"count",
":",
"print",
"\"count: %s/%s\"",
"%",
"(",
"result",
".",
"scanned_count",
",",
"result",
".",
"count",
")",
"self",
".",
"next_key",
"=",
"None",
"else",
":",
"if",
"as_array",
"and",
"attr_keys",
":",
"self",
".",
"print_iterator_array",
"(",
"result",
",",
"attr_keys",
")",
"else",
":",
"self",
".",
"print_iterator",
"(",
"result",
")",
"self",
".",
"next_key",
"=",
"result",
".",
"last_evaluated_key",
"if",
"self",
".",
"consumed",
":",
"print",
"\"consumed units:\"",
",",
"result",
".",
"consumed_units"
] | 37.236641 | 22.442748 |
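
A standalone sketch of the '+attribute:condition=value' parsing convention used by the row above; the values here are made up:

arg = '+price:between=10,20'
filter_name, filter_value = arg[1:].split(':', 1)
low, high = filter_value[len('between='):].split(',', 1)
print(filter_name, low, high)   # price 10 20
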
def expand_as_args(args):
"""Returns `True` if `args` should be expanded as `*args`."""
return (isinstance(args, collections.Sequence) and
not _is_namedtuple(args) and not _force_leaf(args)) | [
"def",
"expand_as_args",
"(",
"args",
")",
":",
"return",
"(",
"isinstance",
"(",
"args",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"_is_namedtuple",
"(",
"args",
")",
"and",
"not",
"_force_leaf",
"(",
"args",
")",
")"
] | 50.25 | 12 |
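
A self-contained sketch of the check above; _force_leaf is omitted for brevity, and note that Sequence now lives in collections.abc on modern Pythons:

import collections
try:
    from collections.abc import Sequence   # Python 3.3+
except ImportError:
    from collections import Sequence       # older Pythons, as in the row

Pair = collections.namedtuple('Pair', ['a', 'b'])

def _is_namedtuple(x):
    # common heuristic: a tuple subclass exposing _fields
    return isinstance(x, tuple) and hasattr(x, '_fields')

def expand_as_args(args):
    return isinstance(args, Sequence) and not _is_namedtuple(args)

print(expand_as_args([1, 2, 3]))    # True  -> call f(*args)
print(expand_as_args(Pair(1, 2)))   # False -> pass the namedtuple whole
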