Columns:
  text           stringlengths (89 to 104k)
  code_tokens    sequence
  avg_line_len   float64 (7.91 to 980)
  score          float64 (0 to 630)
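The avg_line_len column is not documented beyond its dtype; a plausible reading is the mean character length of the lines in each row's text field. A minimal sketch under that assumption (the helper and the metric are hypothetical reconstructions, not the pipeline that produced this dump):

```python
# Hypothetical reconstruction of the avg_line_len column: mean character
# length of the lines in a row's "text" field. The real extraction
# pipeline behind this dump is not shown here.
def avg_line_len(text: str) -> float:
    lines = text.splitlines() or [""]
    return sum(len(line) for line in lines) / len(lines)

sample = "def spy(self):\n    spy = Spy(self)\n    return spy"
print(round(avg_line_len(sample), 2))  # 15.67
```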
def help_info(self, exc, decorated_function, arg_name, arg_spec): """ Print debug information to stderr. (Do nothing if object was constructed with silent_checks=True) :param exc: raised exception :param decorated_function: target function (function to decorate) :param arg_name: function parameter name :param arg_spec: function parameter specification :return: None """ if self._silent_checks is not True: print('Exception raised:', file=sys.stderr) print(str(exc), file=sys.stderr) fn_name = Verifier.function_name(decorated_function) print('Decorated function: %s' % fn_name, file=sys.stderr) if decorated_function.__doc__ is not None: print('Decorated function docstrings:', file=sys.stderr) print(decorated_function.__doc__, file=sys.stderr) print('Argument "%s" specification:' % arg_name, file=sys.stderr) if isfunction(arg_spec): print(getsource(arg_spec), file=sys.stderr) else: print(str(arg_spec), file=sys.stderr) print('', file=sys.stderr)
[ "def", "help_info", "(", "self", ",", "exc", ",", "decorated_function", ",", "arg_name", ",", "arg_spec", ")", ":", "if", "self", ".", "_silent_checks", "is", "not", "True", ":", "print", "(", "'Exception raised:'", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "str", "(", "exc", ")", ",", "file", "=", "sys", ".", "stderr", ")", "fn_name", "=", "Verifier", ".", "function_name", "(", "decorated_function", ")", "print", "(", "'Decorated function: %s'", "%", "fn_name", ",", "file", "=", "sys", ".", "stderr", ")", "if", "decorated_function", ".", "__doc__", "is", "not", "None", ":", "print", "(", "'Decorated function docstrings:'", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "decorated_function", ".", "__doc__", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "'Argument \"%s\" specification:'", "%", "arg_name", ",", "file", "=", "sys", ".", "stderr", ")", "if", "isfunction", "(", "arg_spec", ")", ":", "print", "(", "getsource", "(", "arg_spec", ")", ",", "file", "=", "sys", ".", "stderr", ")", "else", ":", "print", "(", "str", "(", "arg_spec", ")", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "''", ",", "file", "=", "sys", ".", "stderr", ")" ]
41.291667
15.041667
def backend_run(self): """Check for cached results; then run the model if needed.""" key = self.model.hash if self.use_memory_cache and self.get_memory_cache(key): return self._results if self.use_disk_cache and self.get_disk_cache(key): return self._results results = self._backend_run() if self.use_memory_cache: self.set_memory_cache(results, key) if self.use_disk_cache: self.set_disk_cache(results, key) return results
[ "def", "backend_run", "(", "self", ")", ":", "key", "=", "self", ".", "model", ".", "hash", "if", "self", ".", "use_memory_cache", "and", "self", ".", "get_memory_cache", "(", "key", ")", ":", "return", "self", ".", "_results", "if", "self", ".", "use_disk_cache", "and", "self", ".", "get_disk_cache", "(", "key", ")", ":", "return", "self", ".", "_results", "results", "=", "self", ".", "_backend_run", "(", ")", "if", "self", ".", "use_memory_cache", ":", "self", ".", "set_memory_cache", "(", "results", ",", "key", ")", "if", "self", ".", "use_disk_cache", ":", "self", ".", "set_disk_cache", "(", "results", ",", "key", ")", "return", "results" ]
40.230769
10.615385
def recent(self): ''' List recent wiki. ''' kwd = { 'pager': '', 'title': 'Recent Pages', } self.render('wiki_page/wiki_list.html', view=MWiki.query_recent(), format_date=tools.format_date, kwd=kwd, userinfo=self.userinfo)
[ "def", "recent", "(", "self", ")", ":", "kwd", "=", "{", "'pager'", ":", "''", ",", "'title'", ":", "'Recent Pages'", ",", "}", "self", ".", "render", "(", "'wiki_page/wiki_list.html'", ",", "view", "=", "MWiki", ".", "query_recent", "(", ")", ",", "format_date", "=", "tools", ".", "format_date", ",", "kwd", "=", "kwd", ",", "userinfo", "=", "self", ".", "userinfo", ")" ]
27.846154
16.153846
def arg(name=None, **Config): # wraps the _arg decorator, in order to allow unnamed args r"""A decorator to configure an argument of a task. Config: * name (str): The name of the arg. When omitted the argument will be identified through the order of configuration. * desc (str): The description of the arg (optional). * type (type, CustomType, callable): The type of the arg (optional). Notes: * It always follows a @task or an @arg. """ if name is not None: # allow name as a positional arg Config['name'] = name return lambda decorated: _arg(decorated, **Config)
[ "def", "arg", "(", "name", "=", "None", ",", "*", "*", "Config", ")", ":", "# wraps the _arg decorator, in order to allow unnamed args", "if", "name", "is", "not", "None", ":", "# allow name as a positional arg", "Config", "[", "'name'", "]", "=", "name", "return", "lambda", "decorated", ":", "_arg", "(", "decorated", ",", "*", "*", "Config", ")" ]
39.133333
27
def focus(self, focus: Optional[URIPARM]) -> None: """ Set the focus node(s). If no focus node is specified, the evaluation will occur for all non-BNode graph subjects. Otherwise it can be a string, a URIRef or a list of string/URIRef combinations :param focus: None if focus should be all URIRefs in the graph, otherwise a URI or list of URIs """ self._focus = normalize_uriparm(focus) if focus else None
[ "def", "focus", "(", "self", ",", "focus", ":", "Optional", "[", "URIPARM", "]", ")", "->", "None", ":", "self", ".", "_focus", "=", "normalize_uriparm", "(", "focus", ")", "if", "focus", "else", "None" ]
63.142857
28.714286
def OnContentChange(self, event): """Event handler for updating the content""" self.ignore_changes = True self.SetValue(u"" if event.text is None else event.text) self.ignore_changes = False event.Skip()
[ "def", "OnContentChange", "(", "self", ",", "event", ")", ":", "self", ".", "ignore_changes", "=", "True", "self", ".", "SetValue", "(", "u\"\"", "if", "event", ".", "text", "is", "None", "else", "event", ".", "text", ")", "self", ".", "ignore_changes", "=", "False", "event", ".", "Skip", "(", ")" ]
26.444444
20.222222
def task(self): """ Find the task for this build. Wraps the getTaskInfo RPC. :returns: deferred that when fired returns the Task object, or None if we could not determine the task for this build. """ # If we have no .task_id, this is a no-op to return None. if not self.task_id: return defer.succeed(None) return self.connection.getTaskInfo(self.task_id)
[ "def", "task", "(", "self", ")", ":", "# If we have no .task_id, this is a no-op to return None.", "if", "not", "self", ".", "task_id", ":", "return", "defer", ".", "succeed", "(", "None", ")", "return", "self", ".", "connection", ".", "getTaskInfo", "(", "self", ".", "task_id", ")" ]
33.692308
17.846154
def date(value, allow_empty = False, minimum = None, maximum = None, coerce_value = True, **kwargs): """Validate that ``value`` is a valid date. :param value: The value to validate. :type value: :class:`str <python:str>` / :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / :obj:`None <python:None>` :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :param minimum: If supplied, will make sure that ``value`` is on or after this value. :type minimum: :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / compliant :class:`str <python:str>` / :obj:`None <python:None>` :param maximum: If supplied, will make sure that ``value`` is on or before this value. :type maximum: :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / compliant :class:`str <python:str>` / :obj:`None <python:None>` :param coerce_value: If ``True``, will attempt to coerce ``value`` to a :class:`date <python:datetime.date>` if it is a timestamp value. If ``False``, will not. :type coerce_value: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`date <python:datetime.date>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` cannot be coerced to a :class:`date <python:datetime.date>` and is not :obj:`None <python:None>` :raises MinimumValueError: if ``minimum`` is supplied but ``value`` occurs before ``minimum`` :raises MaximumValueError: if ``maximum`` is supplied but ``value`` occurs after ``maximum`` """ # pylint: disable=too-many-branches if not value and not allow_empty: raise errors.EmptyValueError('value (%s) was empty' % value) elif not value: return None minimum = date(minimum, allow_empty = True, force_run = True) # pylint: disable=E1123 maximum = date(maximum, allow_empty = True, force_run = True) # pylint: disable=E1123 if not isinstance(value, date_types): raise errors.CannotCoerceError( 'value (%s) must be a date object, datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp, but was %s' % (value, type(value)) ) elif isinstance(value, datetime_.datetime) and not coerce_value: raise errors.CannotCoerceError( 'value (%s) must be a date object, or ' 'ISO 8601-formatted string, ' 'but was %s' % (value, type(value)) ) elif isinstance(value, datetime_.datetime) and coerce_value: value = value.date() elif isinstance(value, timestamp_types) and coerce_value: try: value = datetime_.date.fromtimestamp(value) except ValueError: raise errors.CannotCoerceError( 'value (%s) must be a date object, datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp, but was %s' % (value, type(value)) ) elif isinstance(value, str): try: value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f') if coerce_value: value = value.date() else: raise errors.CannotCoerceError( 'value (%s) must be a date object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp, but was %s' % (value, type(value)) ) except ValueError: if len(value) > 10 and not coerce_value: raise errors.CannotCoerceError( 'value (%s) must be a date object, or ' 'ISO 8601-formatted string, ' 'but was %s' % (value, type(value)) ) if ' ' in value: value = value.split(' ')[0] if 'T' in value: value = value.split('T')[0] if len(value) != 10: raise errors.CannotCoerceError( 'value (%s) must be a date object, datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp, but was %s' % (value, type(value)) ) try: year = int(value[:4]) month = int(value[5:7]) day = int(value[-2:]) value = datetime_.date(year, month, day) except (ValueError, TypeError): raise errors.CannotCoerceError( 'value (%s) must be a date object, datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp, but was %s' % (value, type(value)) ) elif isinstance(value, numeric_types) and not coerce_value: raise errors.CannotCoerceError( 'value (%s) must be a date object, or ' 'ISO 8601-formatted string, ' 'but was %s' % (value, type(value)) ) if minimum and value and value < minimum: raise errors.MinimumValueError( 'value (%s) is before the minimum given (%s)' % (value.isoformat(), minimum.isoformat()) ) if maximum and value and value > maximum: raise errors.MaximumValueError( 'value (%s) is after the maximum given (%s)' % (value.isoformat(), maximum.isoformat()) ) return value
[ "def", "date", "(", "value", ",", "allow_empty", "=", "False", ",", "minimum", "=", "None", ",", "maximum", "=", "None", ",", "coerce_value", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=too-many-branches", "if", "not", "value", "and", "not", "allow_empty", ":", "raise", "errors", ".", "EmptyValueError", "(", "'value (%s) was empty'", "%", "value", ")", "elif", "not", "value", ":", "return", "None", "minimum", "=", "date", "(", "minimum", ",", "allow_empty", "=", "True", ",", "force_run", "=", "True", ")", "# pylint: disable=E1123", "maximum", "=", "date", "(", "maximum", ",", "allow_empty", "=", "True", ",", "force_run", "=", "True", ")", "# pylint: disable=E1123", "if", "not", "isinstance", "(", "value", ",", "date_types", ")", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp, but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "elif", "isinstance", "(", "value", ",", "datetime_", ".", "datetime", ")", "and", "not", "coerce_value", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, or '", "'ISO 8601-formatted string, '", "'but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "elif", "isinstance", "(", "value", ",", "datetime_", ".", "datetime", ")", "and", "coerce_value", ":", "value", "=", "value", ".", "date", "(", ")", "elif", "isinstance", "(", "value", ",", "timestamp_types", ")", "and", "coerce_value", ":", "try", ":", "value", "=", "datetime_", ".", "date", ".", "fromtimestamp", "(", "value", ")", "except", "ValueError", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp, but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "try", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%dT%H:%M:%S.%f'", ")", "if", "coerce_value", ":", "value", "=", "value", ".", "date", "(", ")", "else", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp, but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "except", "ValueError", ":", "if", "len", "(", "value", ")", ">", "10", "and", "not", "coerce_value", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, or '", "'ISO 8601-formatted string, '", "'but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "if", "' '", "in", "value", ":", "value", "=", "value", ".", "split", "(", "' '", ")", "[", "0", "]", "if", "'T'", "in", "value", ":", "value", "=", "value", ".", "split", "(", "'T'", ")", "[", "0", "]", "if", "len", "(", "value", ")", "!=", "10", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp, but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "try", ":", "year", "=", "int", "(", "value", "[", ":", "4", "]", ")", "month", "=", "int", "(", "value", "[", "5", ":", "7", "]", ")", "day", "=", "int", "(", "value", "[", "-", "2", ":", "]", ")", "value", "=", "datetime_", ".", "date", "(", "year", ",", "month", ",", "day", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", 
"raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp, but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "elif", "isinstance", "(", "value", ",", "numeric_types", ")", "and", "not", "coerce_value", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, or '", "'ISO 8601-formatted string, '", "'but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "if", "minimum", "and", "value", "and", "value", "<", "minimum", ":", "raise", "errors", ".", "MinimumValueError", "(", "'value (%s) is before the minimum given (%s)'", "%", "(", "value", ".", "isoformat", "(", ")", ",", "minimum", ".", "isoformat", "(", ")", ")", ")", "if", "maximum", "and", "value", "and", "value", ">", "maximum", ":", "raise", "errors", ".", "MaximumValueError", "(", "'value (%s) is after the maximum given (%s)'", "%", "(", "value", ".", "isoformat", "(", ")", ",", "maximum", ".", "isoformat", "(", ")", ")", ")", "return", "value" ]
42.42029
21.855072
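A usage sketch for the date() validator in the row above. It assumes the function is the one shipped by the validator-collection package; the import path is an assumption, since the row only shows the function body:

```python
# Usage sketch, assuming the date() validator above is exposed as
# validator_collection.validators.date (import path is an assumption).
import datetime
from validator_collection import validators, errors

print(validators.date('2013-11-09'))            # datetime.date(2013, 11, 9)
print(validators.date(None, allow_empty=True))  # None

try:
    validators.date('2013-11-09', minimum=datetime.date(2014, 1, 1))
except errors.MinimumValueError as exc:
    print(exc)  # value ... is before the minimum given ...
```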
def spacing_file(path): """ Perform paranoid text spacing from file. """ # TODO: read line by line with open(os.path.abspath(path)) as f: return spacing_text(f.read())
[ "def", "spacing_file", "(", "path", ")", ":", "# TODO: read line by line", "with", "open", "(", "os", ".", "path", ".", "abspath", "(", "path", ")", ")", "as", "f", ":", "return", "spacing_text", "(", "f", ".", "read", "(", ")", ")" ]
27
5.285714
def _get_recursive_difference(self, type): '''Returns the recursive diff between dict values''' if type == 'intersect': return [recursive_diff(item['old'], item['new']) for item in self._intersect] elif type == 'added': return [recursive_diff({}, item) for item in self._added] elif type == 'removed': return [recursive_diff(item, {}, ignore_missing_keys=False) for item in self._removed] elif type == 'all': recursive_list = [] recursive_list.extend([recursive_diff(item['old'], item['new']) for item in self._intersect]) recursive_list.extend([recursive_diff({}, item) for item in self._added]) recursive_list.extend([recursive_diff(item, {}, ignore_missing_keys=False) for item in self._removed]) return recursive_list else: raise ValueError('The given type for recursive list matching ' 'is not supported.')
[ "def", "_get_recursive_difference", "(", "self", ",", "type", ")", ":", "if", "type", "==", "'intersect'", ":", "return", "[", "recursive_diff", "(", "item", "[", "'old'", "]", ",", "item", "[", "'new'", "]", ")", "for", "item", "in", "self", ".", "_intersect", "]", "elif", "type", "==", "'added'", ":", "return", "[", "recursive_diff", "(", "{", "}", ",", "item", ")", "for", "item", "in", "self", ".", "_added", "]", "elif", "type", "==", "'removed'", ":", "return", "[", "recursive_diff", "(", "item", ",", "{", "}", ",", "ignore_missing_keys", "=", "False", ")", "for", "item", "in", "self", ".", "_removed", "]", "elif", "type", "==", "'all'", ":", "recursive_list", "=", "[", "]", "recursive_list", ".", "extend", "(", "[", "recursive_diff", "(", "item", "[", "'old'", "]", ",", "item", "[", "'new'", "]", ")", "for", "item", "in", "self", ".", "_intersect", "]", ")", "recursive_list", ".", "extend", "(", "[", "recursive_diff", "(", "{", "}", ",", "item", ")", "for", "item", "in", "self", ".", "_added", "]", ")", "recursive_list", ".", "extend", "(", "[", "recursive_diff", "(", "item", ",", "{", "}", ",", "ignore_missing_keys", "=", "False", ")", "for", "item", "in", "self", ".", "_removed", "]", ")", "return", "recursive_list", "else", ":", "raise", "ValueError", "(", "'The given type for recursive list matching '", "'is not supported.'", ")" ]
54.1
22.6
def QA_SU_save_etf_day(engine, client=DATABASE): """save etf_day Arguments: engine {[type]} -- [description] Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ engine = select_save_engine(engine) engine.QA_SU_save_etf_day(client=client)
[ "def", "QA_SU_save_etf_day", "(", "engine", ",", "client", "=", "DATABASE", ")", ":", "engine", "=", "select_save_engine", "(", "engine", ")", "engine", ".", "QA_SU_save_etf_day", "(", "client", "=", "client", ")" ]
24.583333
16.583333
def _prior_headerfooter(self): """|_Header| proxy on prior sectPr element or None if this is first section.""" preceding_sectPr = self._sectPr.preceding_sectPr return ( None if preceding_sectPr is None else _Header(preceding_sectPr, self._document_part, self._hdrftr_index) )
[ "def", "_prior_headerfooter", "(", "self", ")", ":", "preceding_sectPr", "=", "self", ".", "_sectPr", ".", "preceding_sectPr", "return", "(", "None", "if", "preceding_sectPr", "is", "None", "else", "_Header", "(", "preceding_sectPr", ",", "self", ".", "_document_part", ",", "self", ".", "_hdrftr_index", ")", ")" ]
46.428571
18.285714
def dt_from_rfc8601(date_str): """Convert 8601 (ISO) date string to datetime object. Handles "Z" and milliseconds transparently. :param date_str: Date string. :type date_str: ``string`` :return: Date time. :rtype: :class:`datetime.datetime` """ # Normalize string and adjust for milliseconds. Note that Python 2.6+ has # ".%f" format, but we're going for Python 2.5, so truncate the portion. date_str = date_str.rstrip('Z').split('.')[0] # Format string. (2010-04-13T14:02:48.000Z) fmt = "%Y-%m-%dT%H:%M:%S" # Python 2.6+: Could format and handle milliseconds. # if date_str.find('.') >= 0: # fmt += ".%f" return datetime.strptime(date_str, fmt)
[ "def", "dt_from_rfc8601", "(", "date_str", ")", ":", "# Normalize string and adjust for milliseconds. Note that Python 2.6+ has", "# \".%f\" format, but we're going for Python 2.5, so truncate the portion.", "date_str", "=", "date_str", ".", "rstrip", "(", "'Z'", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "# Format string. (2010-04-13T14:02:48.000Z)", "fmt", "=", "\"%Y-%m-%dT%H:%M:%S\"", "# Python 2.6+: Could format and handle milliseconds.", "# if date_str.find('.') >= 0:", "# fmt += \".%f\"", "return", "datetime", ".", "strptime", "(", "date_str", ",", "fmt", ")" ]
33.238095
16.952381
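The dt_from_rfc8601 row above strips the trailing "Z" and the millisecond suffix before parsing; inlined here as a runnable check:

```python
# Runnable check of the row above: "Z" and the ".000" millisecond
# suffix are stripped, then the remainder is parsed with strptime.
from datetime import datetime

def dt_from_rfc8601(date_str):
    date_str = date_str.rstrip('Z').split('.')[0]
    return datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S")

print(dt_from_rfc8601("2010-04-13T14:02:48.000Z"))  # 2010-04-13 14:02:48
```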
def internal_get_frame(dbg, seq, thread_id, frame_id): ''' Converts request into python variable ''' try: frame = dbg.find_frame(thread_id, frame_id) if frame is not None: hidden_ns = pydevconsole.get_ipython_hidden_vars() xml = "<xml>" xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns) del frame xml += "</xml>" cmd = dbg.cmd_factory.make_get_frame_message(seq, xml) dbg.writer.add_command(cmd) else: # pydevd_vars.dump_frames(thread_id) # don't print this error: frame not found: means that the client is not synchronized (but that's ok) cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id)) dbg.writer.add_command(cmd) except: cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id)) dbg.writer.add_command(cmd)
[ "def", "internal_get_frame", "(", "dbg", ",", "seq", ",", "thread_id", ",", "frame_id", ")", ":", "try", ":", "frame", "=", "dbg", ".", "find_frame", "(", "thread_id", ",", "frame_id", ")", "if", "frame", "is", "not", "None", ":", "hidden_ns", "=", "pydevconsole", ".", "get_ipython_hidden_vars", "(", ")", "xml", "=", "\"<xml>\"", "xml", "+=", "pydevd_xml", ".", "frame_vars_to_xml", "(", "frame", ".", "f_locals", ",", "hidden_ns", ")", "del", "frame", "xml", "+=", "\"</xml>\"", "cmd", "=", "dbg", ".", "cmd_factory", ".", "make_get_frame_message", "(", "seq", ",", "xml", ")", "dbg", ".", "writer", ".", "add_command", "(", "cmd", ")", "else", ":", "# pydevd_vars.dump_frames(thread_id)", "# don't print this error: frame not found: means that the client is not synchronized (but that's ok)", "cmd", "=", "dbg", ".", "cmd_factory", ".", "make_error_message", "(", "seq", ",", "\"Frame not found: %s from thread: %s\"", "%", "(", "frame_id", ",", "thread_id", ")", ")", "dbg", ".", "writer", ".", "add_command", "(", "cmd", ")", "except", ":", "cmd", "=", "dbg", ".", "cmd_factory", ".", "make_error_message", "(", "seq", ",", "\"Error resolving frame: %s from thread: %s\"", "%", "(", "frame_id", ",", "thread_id", ")", ")", "dbg", ".", "writer", ".", "add_command", "(", "cmd", ")" ]
50.25
25.55
def spy(self): ''' Add a spy to this stub. Return the spy. ''' spy = Spy(self) self._expectations.append(spy) return spy
[ "def", "spy", "(", "self", ")", ":", "spy", "=", "Spy", "(", "self", ")", "self", ".", "_expectations", ".", "append", "(", "spy", ")", "return", "spy" ]
23.142857
18.857143
def interpolate(self, factor, minKerning, maxKerning, round=True, suppressError=True): """ Interpolates all pairs between two :class:`BaseKerning` objects: **minKerning** and **maxKerning**. The interpolation occurs on a 0 to 1.0 range where **minKerning** is located at 0 and **maxKerning** is located at 1.0. The kerning data is replaced by the interpolated kerning. * **factor** is the interpolation value. It may be less than 0 and greater than 1.0. It may be an :ref:`type-int-float`, ``tuple`` or ``list``. If it is a ``tuple`` or ``list``, the first number indicates the x factor and the second number indicates the y factor. * **round** is a ``bool`` indicating if the result should be rounded to ``int``\s. The default behavior is to round interpolated kerning. * **suppressError** is a ``bool`` indicating if incompatible data should be ignored or if an error should be raised when such incompatibilities are found. The default behavior is to ignore incompatible data. >>> myKerning.interpolate(kerningOne, kerningTwo) """ factor = normalizers.normalizeInterpolationFactor(factor) if not isinstance(minKerning, BaseKerning): raise TypeError(("Interpolation to an instance of %r can not be " "performed from an instance of %r.") % ( self.__class__.__name__, minKerning.__class__.__name__)) if not isinstance(maxKerning, BaseKerning): raise TypeError(("Interpolation to an instance of %r can not be " "performed from an instance of %r.") % ( self.__class__.__name__, maxKerning.__class__.__name__)) round = normalizers.normalizeBoolean(round) suppressError = normalizers.normalizeBoolean(suppressError) self._interpolate(factor, minKerning, maxKerning, round=round, suppressError=suppressError)
[ "def", "interpolate", "(", "self", ",", "factor", ",", "minKerning", ",", "maxKerning", ",", "round", "=", "True", ",", "suppressError", "=", "True", ")", ":", "factor", "=", "normalizers", ".", "normalizeInterpolationFactor", "(", "factor", ")", "if", "not", "isinstance", "(", "minKerning", ",", "BaseKerning", ")", ":", "raise", "TypeError", "(", "(", "\"Interpolation to an instance of %r can not be \"", "\"performed from an instance of %r.\"", ")", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "minKerning", ".", "__class__", ".", "__name__", ")", ")", "if", "not", "isinstance", "(", "maxKerning", ",", "BaseKerning", ")", ":", "raise", "TypeError", "(", "(", "\"Interpolation to an instance of %r can not be \"", "\"performed from an instance of %r.\"", ")", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "maxKerning", ".", "__class__", ".", "__name__", ")", ")", "round", "=", "normalizers", ".", "normalizeBoolean", "(", "round", ")", "suppressError", "=", "normalizers", ".", "normalizeBoolean", "(", "suppressError", ")", "self", ".", "_interpolate", "(", "factor", ",", "minKerning", ",", "maxKerning", ",", "round", "=", "round", ",", "suppressError", "=", "suppressError", ")" ]
57.742857
27.057143
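A toy illustration (not fontParts itself) of the 0 to 1.0 factor described in the interpolate() docstring above, applied to a single kerning value:

```python
# Toy sketch of the 0-to-1 interpolation the docstring describes:
# factor 0 gives minKerning's value, factor 1 gives maxKerning's,
# and results are rounded to int by default.
def interpolate_value(factor, vmin, vmax, round_result=True):
    result = vmin + factor * (vmax - vmin)
    return int(round(result)) if round_result else result

print(interpolate_value(0.0, -40, -10))  # -40
print(interpolate_value(0.5, -40, -10))  # -25
print(interpolate_value(1.0, -40, -10))  # -10
```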
def host_impl(self, run, tool): """Returns available hosts for the run and tool in the log directory. In the plugin log directory, each directory contains profile data for a single run (identified by the directory name), and files in the run directory contains data for different tools and hosts. The file that contains profile for a specific tool "x" will have a prefix name TOOLS["x"]. Example: log/ run1/ plugins/ profile/ host1.trace host2.trace run2/ plugins/ profile/ host1.trace host2.trace Returns: A list of host names e.g. {"host1", "host2", "host3"} for the example. """ hosts = {} run_dir = self._run_dir(run) if not run_dir: logger.warn("Cannot find asset directory for: %s", run) return hosts tool_pattern = '*' + TOOLS[tool] try: files = tf.io.gfile.glob(os.path.join(run_dir, tool_pattern)) hosts = [os.path.basename(f).replace(TOOLS[tool], '') for f in files] except tf.errors.OpError as e: logger.warn("Cannot read asset directory: %s, OpError %s", run_dir, e) return hosts
[ "def", "host_impl", "(", "self", ",", "run", ",", "tool", ")", ":", "hosts", "=", "{", "}", "run_dir", "=", "self", ".", "_run_dir", "(", "run", ")", "if", "not", "run_dir", ":", "logger", ".", "warn", "(", "\"Cannot find asset directory for: %s\"", ",", "run", ")", "return", "hosts", "tool_pattern", "=", "'*'", "+", "TOOLS", "[", "tool", "]", "try", ":", "files", "=", "tf", ".", "io", ".", "gfile", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "run_dir", ",", "tool_pattern", ")", ")", "hosts", "=", "[", "os", ".", "path", ".", "basename", "(", "f", ")", ".", "replace", "(", "TOOLS", "[", "tool", "]", ",", "''", ")", "for", "f", "in", "files", "]", "except", "tf", ".", "errors", ".", "OpError", "as", "e", ":", "logger", ".", "warn", "(", "\"Cannot read asset directory: %s, OpError %s\"", ",", "run_dir", ",", "e", ")", "return", "hosts" ]
31.710526
21.815789
def initialize(self): """ Initialize instance attributes. You can override this method in the subclasses. """ self.main_pid = os.getpid() self.processes.extend(self.init_service_processes()) self.processes.extend(self.init_tornado_workers())
[ "def", "initialize", "(", "self", ")", ":", "self", ".", "main_pid", "=", "os", ".", "getpid", "(", ")", "self", ".", "processes", ".", "extend", "(", "self", ".", "init_service_processes", "(", ")", ")", "self", ".", "processes", ".", "extend", "(", "self", ".", "init_tornado_workers", "(", ")", ")" ]
36.25
13.75
def monkey_patch(): """ Monkey patches `zmq.Context` and `zmq.Socket` If test_suite is True, the pyzmq test suite will be patched for compatibility as well. """ ozmq = __import__('zmq') ozmq.Socket = zmq.Socket ozmq.Context = zmq.Context ozmq.Poller = zmq.Poller ioloop = __import__('zmq.eventloop.ioloop') ioloop.Poller = zmq.Poller
[ "def", "monkey_patch", "(", ")", ":", "ozmq", "=", "__import__", "(", "'zmq'", ")", "ozmq", ".", "Socket", "=", "zmq", ".", "Socket", "ozmq", ".", "Context", "=", "zmq", ".", "Context", "ozmq", ".", "Poller", "=", "zmq", ".", "Poller", "ioloop", "=", "__import__", "(", "'zmq.eventloop.ioloop'", ")", "ioloop", ".", "Poller", "=", "zmq", ".", "Poller" ]
28.153846
13.384615
def lineOffsetWithMinimumDistanceToPoint(point, line_start, line_end, perpendicular=False): """Return the offset from line (line_start, line_end) where the distance to point is minimal""" p = point p1 = line_start p2 = line_end l = distance(p1, p2) u = ((p[0] - p1[0]) * (p2[0] - p1[0])) + ((p[1] - p1[1]) * (p2[1] - p1[1])) if l == 0 or u < 0.0 or u > l * l: if perpendicular: return INVALID_DISTANCE if u < 0: return 0 return l return u / l
[ "def", "lineOffsetWithMinimumDistanceToPoint", "(", "point", ",", "line_start", ",", "line_end", ",", "perpendicular", "=", "False", ")", ":", "p", "=", "point", "p1", "=", "line_start", "p2", "=", "line_end", "l", "=", "distance", "(", "p1", ",", "p2", ")", "u", "=", "(", "(", "p", "[", "0", "]", "-", "p1", "[", "0", "]", ")", "*", "(", "p2", "[", "0", "]", "-", "p1", "[", "0", "]", ")", ")", "+", "(", "(", "p", "[", "1", "]", "-", "p1", "[", "1", "]", ")", "*", "(", "p2", "[", "1", "]", "-", "p1", "[", "1", "]", ")", ")", "if", "l", "==", "0", "or", "u", "<", "0.0", "or", "u", ">", "l", "*", "l", ":", "if", "perpendicular", ":", "return", "INVALID_DISTANCE", "if", "u", "<", "0", ":", "return", "0", "return", "l", "return", "u", "/", "l" ]
34.133333
19.333333
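A worked check of the projection math in the row above, assuming distance() is plain Euclidean distance (the row imports it from elsewhere): for point (1, 2) against the segment (0, 0)-(4, 0), u = 4 and l = 4, so the offset along the line is 1.0:

```python
# Worked check of lineOffsetWithMinimumDistanceToPoint's math,
# assuming distance() is Euclidean (an assumption; the row imports it).
import math

def distance(p1, p2):
    return math.hypot(p2[0] - p1[0], p2[1] - p1[1])

p, p1, p2 = (1, 2), (0, 0), (4, 0)
l = distance(p1, p2)  # 4.0
u = ((p[0] - p1[0]) * (p2[0] - p1[0])) + ((p[1] - p1[1]) * (p2[1] - p1[1]))
print(u / l)  # 1.0 -- u is within [0, l*l], so the offset is u / l
```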
def create_package_file(root, master_package, subroot, py_files, opts, subs): """Build the text of the file and write the file.""" package = os.path.split(root)[-1] text = format_heading(1, '%s Package' % package) # add each package's module for py_file in py_files: if shall_skip(os.path.join(root, py_file)): continue is_package = py_file == INIT py_file = os.path.splitext(py_file)[0] py_path = makename(subroot, py_file) if is_package: heading = ':mod:`%s` Package' % package else: heading = ':mod:`%s` Module' % py_file text += format_heading(2, heading) text += format_directive(is_package and subroot or py_path, master_package) text += '\n' # build a list of directories that are packages (they contain an INIT file) subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))] # if there are some package directories, add a TOC for theses subpackages if subs: text += format_heading(2, 'Subpackages') text += '.. toctree::\n\n' for sub in subs: text += ' %s.%s\n' % (makename(master_package, subroot), sub) text += '\n' write_file(makename(master_package, subroot), text, opts)
[ "def", "create_package_file", "(", "root", ",", "master_package", ",", "subroot", ",", "py_files", ",", "opts", ",", "subs", ")", ":", "package", "=", "os", ".", "path", ".", "split", "(", "root", ")", "[", "-", "1", "]", "text", "=", "format_heading", "(", "1", ",", "'%s Package'", "%", "package", ")", "# add each package's module", "for", "py_file", "in", "py_files", ":", "if", "shall_skip", "(", "os", ".", "path", ".", "join", "(", "root", ",", "py_file", ")", ")", ":", "continue", "is_package", "=", "py_file", "==", "INIT", "py_file", "=", "os", ".", "path", ".", "splitext", "(", "py_file", ")", "[", "0", "]", "py_path", "=", "makename", "(", "subroot", ",", "py_file", ")", "if", "is_package", ":", "heading", "=", "':mod:`%s` Package'", "%", "package", "else", ":", "heading", "=", "':mod:`%s` Module'", "%", "py_file", "text", "+=", "format_heading", "(", "2", ",", "heading", ")", "text", "+=", "format_directive", "(", "is_package", "and", "subroot", "or", "py_path", ",", "master_package", ")", "text", "+=", "'\\n'", "# build a list of directories that are packages (they contain an INIT file)", "subs", "=", "[", "sub", "for", "sub", "in", "subs", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "root", ",", "sub", ",", "INIT", ")", ")", "]", "# if there are some package directories, add a TOC for theses subpackages", "if", "subs", ":", "text", "+=", "format_heading", "(", "2", ",", "'Subpackages'", ")", "text", "+=", "'.. toctree::\\n\\n'", "for", "sub", "in", "subs", ":", "text", "+=", "' %s.%s\\n'", "%", "(", "makename", "(", "master_package", ",", "subroot", ")", ",", "sub", ")", "text", "+=", "'\\n'", "write_file", "(", "makename", "(", "master_package", ",", "subroot", ")", ",", "text", ",", "opts", ")" ]
42.366667
19.366667
def present(name, timespec, tag=None, user=None, job=None, unique_tag=False): ''' .. versionchanged:: 2017.7.0 Add a job to queue. job : string Command to run. timespec : string The 'timespec' follows the format documented in the at(1) manpage. tag : string Make a tag for the job. user : string The user to run the at job .. versionadded:: 2014.1.4 unique_tag : boolean If set to True job will not be added if a job with the tag exists. .. versionadded:: 2017.7.0 .. code-block:: yaml rose: at.present: - job: 'echo "I love saltstack" > love' - timespec: '9:09 11/09/13' - tag: love - user: jam ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} # if job is missing, use name if not job: job = name # quick return on test=True if __opts__['test']: ret['result'] = None ret['comment'] = 'job {0} added and will run on {1}'.format( job, timespec, ) return ret # quick return if unique_tag and job exists if unique_tag: if not tag: ret['result'] = False ret['comment'] = 'no tag provided and unique_tag is set to True' return ret elif __salt__['at.jobcheck'](tag=tag)['jobs']: ret['comment'] = 'atleast one job with tag {tag} exists.'.format( tag=tag ) return ret # create job if user: luser = __salt__['user.info'](user) if not luser: ret['result'] = False ret['comment'] = 'user {0} does not exists'.format(user) return ret ret['comment'] = 'job {0} added and will run as {1} on {2}'.format( job, user, timespec, ) res = __salt__['at.at']( timespec, job, tag=tag, runas=user, ) else: ret['comment'] = 'job {0} added and will run on {1}'.format( job, timespec, ) res = __salt__['at.at']( timespec, job, tag=tag, ) # set ret['changes'] if res.get('jobs'): ret['changes'] = res['jobs'][0] if 'error' in res: ret['result'] = False ret['comment'] = res['error'] return ret
[ "def", "present", "(", "name", ",", "timespec", ",", "tag", "=", "None", ",", "user", "=", "None", ",", "job", "=", "None", ",", "unique_tag", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "# if job is missing, use name", "if", "not", "job", ":", "job", "=", "name", "# quick return on test=True", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'job {0} added and will run on {1}'", ".", "format", "(", "job", ",", "timespec", ",", ")", "return", "ret", "# quick return if unique_tag and job exists", "if", "unique_tag", ":", "if", "not", "tag", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'no tag provided and unique_tag is set to True'", "return", "ret", "elif", "__salt__", "[", "'at.jobcheck'", "]", "(", "tag", "=", "tag", ")", "[", "'jobs'", "]", ":", "ret", "[", "'comment'", "]", "=", "'atleast one job with tag {tag} exists.'", ".", "format", "(", "tag", "=", "tag", ")", "return", "ret", "# create job", "if", "user", ":", "luser", "=", "__salt__", "[", "'user.info'", "]", "(", "user", ")", "if", "not", "luser", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'user {0} does not exists'", ".", "format", "(", "user", ")", "return", "ret", "ret", "[", "'comment'", "]", "=", "'job {0} added and will run as {1} on {2}'", ".", "format", "(", "job", ",", "user", ",", "timespec", ",", ")", "res", "=", "__salt__", "[", "'at.at'", "]", "(", "timespec", ",", "job", ",", "tag", "=", "tag", ",", "runas", "=", "user", ",", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'job {0} added and will run on {1}'", ".", "format", "(", "job", ",", "timespec", ",", ")", "res", "=", "__salt__", "[", "'at.at'", "]", "(", "timespec", ",", "job", ",", "tag", "=", "tag", ",", ")", "# set ret['changes']", "if", "res", ".", "get", "(", "'jobs'", ")", ":", "ret", "[", "'changes'", "]", "=", "res", "[", "'jobs'", "]", "[", "0", "]", "if", "'error'", "in", "res", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "res", "[", "'error'", "]", "return", "ret" ]
24.06
22.58
def enable_reporting(self): """Call this method to explicitly enable reporting. The current report will be uploaded, plus the previously recorded ones, and the configuration will be updated so that future runs also upload automatically. """ if self.status == Stats.ENABLED: return if not self.enableable: logger.critical("Can't enable reporting") return self.status = Stats.ENABLED self.write_config(self.status)
[ "def", "enable_reporting", "(", "self", ")", ":", "if", "self", ".", "status", "==", "Stats", ".", "ENABLED", ":", "return", "if", "not", "self", ".", "enableable", ":", "logger", ".", "critical", "(", "\"Can't enable reporting\"", ")", "return", "self", ".", "status", "=", "Stats", ".", "ENABLED", "self", ".", "write_config", "(", "self", ".", "status", ")" ]
36.285714
15.714286
def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0): """ Move to the indicated HDU by name In general, it is not necessary to use this method explicitly. returns the one-offset extension number """ extname = mks(extname) hdu = self._FITS.movnam_hdu(hdutype, extname, extver) return hdu
[ "def", "movnam_hdu", "(", "self", ",", "extname", ",", "hdutype", "=", "ANY_HDU", ",", "extver", "=", "0", ")", ":", "extname", "=", "mks", "(", "extname", ")", "hdu", "=", "self", ".", "_FITS", ".", "movnam_hdu", "(", "hdutype", ",", "extname", ",", "extver", ")", "return", "hdu" ]
31.454545
17.090909
def to_yaml(self, str_or_buffer=None): """ Save a model representation to YAML. Parameters ---------- str_or_buffer : str or file like, optional By default a YAML string is returned. If a string is given here the YAML will be written to that file. If an object with a ``.write`` method is given the YAML will be written to that object. Returns ------- j : str YAML string if `str_or_buffer` is not given. """ logger.debug('serializing LCM model {} to YAML'.format(self.name)) if (not isinstance(self.probability_mode, str) or not isinstance(self.choice_mode, str)): raise TypeError( 'Cannot serialize model with non-string probability_mode ' 'or choice_mode attributes.') return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
[ "def", "to_yaml", "(", "self", ",", "str_or_buffer", "=", "None", ")", ":", "logger", ".", "debug", "(", "'serializing LCM model {} to YAML'", ".", "format", "(", "self", ".", "name", ")", ")", "if", "(", "not", "isinstance", "(", "self", ".", "probability_mode", ",", "str", ")", "or", "not", "isinstance", "(", "self", ".", "choice_mode", ",", "str", ")", ")", ":", "raise", "TypeError", "(", "'Cannot serialize model with non-string probability_mode '", "'or choice_mode attributes.'", ")", "return", "yamlio", ".", "convert_to_yaml", "(", "self", ".", "to_dict", "(", ")", ",", "str_or_buffer", ")" ]
37.24
19.8
def write_modulators_to_project_file(self, tree=None): """ :type modulators: list of Modulator :return: """ if self.project_file is None or not self.modulators: return if tree is None: tree = ET.parse(self.project_file) root = tree.getroot() root.append(Modulator.modulators_to_xml_tag(self.modulators)) tree.write(self.project_file)
[ "def", "write_modulators_to_project_file", "(", "self", ",", "tree", "=", "None", ")", ":", "if", "self", ".", "project_file", "is", "None", "or", "not", "self", ".", "modulators", ":", "return", "if", "tree", "is", "None", ":", "tree", "=", "ET", ".", "parse", "(", "self", ".", "project_file", ")", "root", "=", "tree", ".", "getroot", "(", ")", "root", ".", "append", "(", "Modulator", ".", "modulators_to_xml_tag", "(", "self", ".", "modulators", ")", ")", "tree", ".", "write", "(", "self", ".", "project_file", ")" ]
27.866667
17.866667
def find_autorest_generated_folder(module_prefix="azure"): """Find all Autorest generated code in that module prefix. This actually looks for a "models" package only (not file). We could be smarter if necessary. """ _LOGGER.info(f"Looking for Autorest generated package in {module_prefix}") # Manually skip some namespaces for now if module_prefix in ["azure.cli", "azure.storage", "azure.servicemanagement", "azure.servicebus"]: _LOGGER.info(f"Skip {module_prefix}") return [] result = [] try: _LOGGER.debug(f"Try {module_prefix}") model_module = importlib.import_module(".models", module_prefix) # If not exception, we MIGHT have found it, but cannot be a file. # Keep continue to try to break it, file module have no __path__ model_module.__path__ _LOGGER.info(f"Found {module_prefix}") result.append(module_prefix) except (ModuleNotFoundError, AttributeError): # No model, might dig deeper prefix_module = importlib.import_module(module_prefix) for _, sub_package, ispkg in pkgutil.iter_modules(prefix_module.__path__, module_prefix+"."): if ispkg: result += find_autorest_generated_folder(sub_package) return result
[ "def", "find_autorest_generated_folder", "(", "module_prefix", "=", "\"azure\"", ")", ":", "_LOGGER", ".", "info", "(", "f\"Looking for Autorest generated package in {module_prefix}\"", ")", "# Manually skip some namespaces for now", "if", "module_prefix", "in", "[", "\"azure.cli\"", ",", "\"azure.storage\"", ",", "\"azure.servicemanagement\"", ",", "\"azure.servicebus\"", "]", ":", "_LOGGER", ".", "info", "(", "f\"Skip {module_prefix}\"", ")", "return", "[", "]", "result", "=", "[", "]", "try", ":", "_LOGGER", ".", "debug", "(", "f\"Try {module_prefix}\"", ")", "model_module", "=", "importlib", ".", "import_module", "(", "\".models\"", ",", "module_prefix", ")", "# If not exception, we MIGHT have found it, but cannot be a file.", "# Keep continue to try to break it, file module have no __path__", "model_module", ".", "__path__", "_LOGGER", ".", "info", "(", "f\"Found {module_prefix}\"", ")", "result", ".", "append", "(", "module_prefix", ")", "except", "(", "ModuleNotFoundError", ",", "AttributeError", ")", ":", "# No model, might dig deeper", "prefix_module", "=", "importlib", ".", "import_module", "(", "module_prefix", ")", "for", "_", ",", "sub_package", ",", "ispkg", "in", "pkgutil", ".", "iter_modules", "(", "prefix_module", ".", "__path__", ",", "module_prefix", "+", "\".\"", ")", ":", "if", "ispkg", ":", "result", "+=", "find_autorest_generated_folder", "(", "sub_package", ")", "return", "result" ]
46.666667
23.444444
def get_priors(self): ''' Returns ------- pd.Series ''' priors = self.priors priors[~np.isfinite(priors)] = 0 priors += self.starting_count return priors
[ "def", "get_priors", "(", "self", ")", ":", "priors", "=", "self", ".", "priors", "priors", "[", "~", "np", ".", "isfinite", "(", "priors", ")", "]", "=", "0", "priors", "+=", "self", ".", "starting_count", "return", "priors" ]
16.2
23.8
def convert_cifar100(directory, output_directory, output_filename='cifar100.hdf5'): """Converts the CIFAR-100 dataset to HDF5. Converts the CIFAR-100 dataset to an HDF5 dataset compatible with :class:`fuel.datasets.CIFAR100`. The converted dataset is saved as 'cifar100.hdf5'. This method assumes the existence of the following file: `cifar-100-python.tar.gz` Parameters ---------- directory : str Directory in which the required input files reside. output_directory : str Directory in which to save the converted dataset. output_filename : str, optional Name of the saved dataset. Defaults to 'cifar100.hdf5'. Returns ------- output_paths : tuple of str Single-element tuple containing the path to the converted dataset. """ output_path = os.path.join(output_directory, output_filename) h5file = h5py.File(output_path, mode="w") input_file = os.path.join(directory, 'cifar-100-python.tar.gz') tar_file = tarfile.open(input_file, 'r:gz') file = tar_file.extractfile('cifar-100-python/train') try: if six.PY3: train = cPickle.load(file, encoding='latin1') else: train = cPickle.load(file) finally: file.close() train_features = train['data'].reshape(train['data'].shape[0], 3, 32, 32) train_coarse_labels = numpy.array(train['coarse_labels'], dtype=numpy.uint8) train_fine_labels = numpy.array(train['fine_labels'], dtype=numpy.uint8) file = tar_file.extractfile('cifar-100-python/test') try: if six.PY3: test = cPickle.load(file, encoding='latin1') else: test = cPickle.load(file) finally: file.close() test_features = test['data'].reshape(test['data'].shape[0], 3, 32, 32) test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8) test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8) data = (('train', 'features', train_features), ('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))), ('train', 'fine_labels', train_fine_labels.reshape((-1, 1))), ('test', 'features', test_features), ('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))), ('test', 'fine_labels', test_fine_labels.reshape((-1, 1)))) fill_hdf5_file(h5file, data) h5file['features'].dims[0].label = 'batch' h5file['features'].dims[1].label = 'channel' h5file['features'].dims[2].label = 'height' h5file['features'].dims[3].label = 'width' h5file['coarse_labels'].dims[0].label = 'batch' h5file['coarse_labels'].dims[1].label = 'index' h5file['fine_labels'].dims[0].label = 'batch' h5file['fine_labels'].dims[1].label = 'index' h5file.flush() h5file.close() return (output_path,)
[ "def", "convert_cifar100", "(", "directory", ",", "output_directory", ",", "output_filename", "=", "'cifar100.hdf5'", ")", ":", "output_path", "=", "os", ".", "path", ".", "join", "(", "output_directory", ",", "output_filename", ")", "h5file", "=", "h5py", ".", "File", "(", "output_path", ",", "mode", "=", "\"w\"", ")", "input_file", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'cifar-100-python.tar.gz'", ")", "tar_file", "=", "tarfile", ".", "open", "(", "input_file", ",", "'r:gz'", ")", "file", "=", "tar_file", ".", "extractfile", "(", "'cifar-100-python/train'", ")", "try", ":", "if", "six", ".", "PY3", ":", "train", "=", "cPickle", ".", "load", "(", "file", ",", "encoding", "=", "'latin1'", ")", "else", ":", "train", "=", "cPickle", ".", "load", "(", "file", ")", "finally", ":", "file", ".", "close", "(", ")", "train_features", "=", "train", "[", "'data'", "]", ".", "reshape", "(", "train", "[", "'data'", "]", ".", "shape", "[", "0", "]", ",", "3", ",", "32", ",", "32", ")", "train_coarse_labels", "=", "numpy", ".", "array", "(", "train", "[", "'coarse_labels'", "]", ",", "dtype", "=", "numpy", ".", "uint8", ")", "train_fine_labels", "=", "numpy", ".", "array", "(", "train", "[", "'fine_labels'", "]", ",", "dtype", "=", "numpy", ".", "uint8", ")", "file", "=", "tar_file", ".", "extractfile", "(", "'cifar-100-python/test'", ")", "try", ":", "if", "six", ".", "PY3", ":", "test", "=", "cPickle", ".", "load", "(", "file", ",", "encoding", "=", "'latin1'", ")", "else", ":", "test", "=", "cPickle", ".", "load", "(", "file", ")", "finally", ":", "file", ".", "close", "(", ")", "test_features", "=", "test", "[", "'data'", "]", ".", "reshape", "(", "test", "[", "'data'", "]", ".", "shape", "[", "0", "]", ",", "3", ",", "32", ",", "32", ")", "test_coarse_labels", "=", "numpy", ".", "array", "(", "test", "[", "'coarse_labels'", "]", ",", "dtype", "=", "numpy", ".", "uint8", ")", "test_fine_labels", "=", "numpy", ".", "array", "(", "test", "[", "'fine_labels'", "]", ",", "dtype", "=", "numpy", ".", "uint8", ")", "data", "=", "(", "(", "'train'", ",", "'features'", ",", "train_features", ")", ",", "(", "'train'", ",", "'coarse_labels'", ",", "train_coarse_labels", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", ")", ",", "(", "'train'", ",", "'fine_labels'", ",", "train_fine_labels", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", ")", ",", "(", "'test'", ",", "'features'", ",", "test_features", ")", ",", "(", "'test'", ",", "'coarse_labels'", ",", "test_coarse_labels", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", ")", ",", "(", "'test'", ",", "'fine_labels'", ",", "test_fine_labels", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", ")", ")", "fill_hdf5_file", "(", "h5file", ",", "data", ")", "h5file", "[", "'features'", "]", ".", "dims", "[", "0", "]", ".", "label", "=", "'batch'", "h5file", "[", "'features'", "]", ".", "dims", "[", "1", "]", ".", "label", "=", "'channel'", "h5file", "[", "'features'", "]", ".", "dims", "[", "2", "]", ".", "label", "=", "'height'", "h5file", "[", "'features'", "]", ".", "dims", "[", "3", "]", ".", "label", "=", "'width'", "h5file", "[", "'coarse_labels'", "]", ".", "dims", "[", "0", "]", ".", "label", "=", "'batch'", "h5file", "[", "'coarse_labels'", "]", ".", "dims", "[", "1", "]", ".", "label", "=", "'index'", "h5file", "[", "'fine_labels'", "]", ".", "dims", "[", "0", "]", ".", "label", "=", "'batch'", "h5file", "[", "'fine_labels'", "]", ".", "dims", "[", "1", "]", ".", "label", "=", "'index'", "h5file", ".", 
"flush", "(", ")", "h5file", ".", "close", "(", ")", "return", "(", "output_path", ",", ")" ]
36.716049
21.691358
def chooseReliableActiveFiringRate(cellsPerAxis, bumpSigma, minimumActiveDiameter=None): """ When a cell is activated by sensory input, this implies that the phase is within a particular small patch of the rhombus. This patch is roughly equivalent to a circle of diameter (1/cellsPerAxis)(2/sqrt(3)), centered on the cell. This 2/sqrt(3) accounts for the fact that when circles are packed into hexagons, there are small uncovered spaces between the circles, so the circles need to expand by a factor of (2/sqrt(3)) to cover this space. This sensory input will activate the phase at the center of this cell. To account for uncertainty of the actual phase that was used during learning, the bump of active cells needs to be sufficiently large for this cell to remain active until the bump has moved by the above diameter. So the diameter of the bump (and, equivalently, the cell's firing field) needs to be at least 2 of the above diameters. @param minimumActiveDiameter (float or None) If specified, this makes sure the bump of active cells is always above a certain size. This is useful for testing scenarios where grid cell modules can only encode location with a limited "readout resolution", matching the biology. @return An "activeFiringRate" for use in the ThresholdedGaussian2DLocationModule. """ firingFieldDiameter = 2 * (1./cellsPerAxis)*(2./math.sqrt(3)) if minimumActiveDiameter: firingFieldDiameter = max(firingFieldDiameter, minimumActiveDiameter) return ThresholdedGaussian2DLocationModule.gaussian( bumpSigma, firingFieldDiameter / 2.)
[ "def", "chooseReliableActiveFiringRate", "(", "cellsPerAxis", ",", "bumpSigma", ",", "minimumActiveDiameter", "=", "None", ")", ":", "firingFieldDiameter", "=", "2", "*", "(", "1.", "/", "cellsPerAxis", ")", "*", "(", "2.", "/", "math", ".", "sqrt", "(", "3", ")", ")", "if", "minimumActiveDiameter", ":", "firingFieldDiameter", "=", "max", "(", "firingFieldDiameter", ",", "minimumActiveDiameter", ")", "return", "ThresholdedGaussian2DLocationModule", ".", "gaussian", "(", "bumpSigma", ",", "firingFieldDiameter", "/", "2.", ")" ]
50.545455
28.787879
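A worked check of the firing-field arithmetic in the docstring above: with cellsPerAxis = 10, one cell's patch has diameter (1/10)(2/sqrt(3)) ≈ 0.1155, and the firing field must span two of them:

```python
# Worked check of the docstring's diameter formula: two patches of
# diameter (1/cellsPerAxis)(2/sqrt(3)) each.
import math

cellsPerAxis = 10
firingFieldDiameter = 2 * (1.0 / cellsPerAxis) * (2.0 / math.sqrt(3))
print(round(firingFieldDiameter, 4))  # 0.2309
```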
def tokenized_texts_to_sequences_generator(self, tok_texts): """Transforms tokenized text to a sequence of integers. Only top "num_words" most frequent words will be taken into account. Only words known by the tokenizer will be taken into account. # Arguments tokenized texts: List[List[str]] # Yields Yields individual sequences. """ for seq in tok_texts: vect = [] for w in seq: # if the word is missing you get oov_index i = self.word_index.get(w, 1) vect.append(i) yield vect
[ "def", "tokenized_texts_to_sequences_generator", "(", "self", ",", "tok_texts", ")", ":", "for", "seq", "in", "tok_texts", ":", "vect", "=", "[", "]", "for", "w", "in", "seq", ":", "# if the word is missing you get oov_index", "i", "=", "self", ".", "word_index", ".", "get", "(", "w", ",", "1", ")", "vect", ".", "append", "(", "i", ")", "yield", "vect" ]
39.3125
14.4375
def _compile_rules(self): """Compile the rules into the internal lexer state.""" for state, table in self.RULES.items(): patterns = list() actions = list() nextstates = list() for i, row in enumerate(table): if len(row) == 2: pattern, _action = row nextstate = None elif len(row) == 3: pattern, _action, nextstate = row else: fstr = "invalid RULES: state {}, row {}" raise CompileError(fstr.format(state, i)) patterns.append(pattern) actions.append(_action) nextstates.append(nextstate) reobj = re.compile('|'.join("(" + p + ")" for p in patterns)) self._rules[state] = (reobj, actions, nextstates)
[ "def", "_compile_rules", "(", "self", ")", ":", "for", "state", ",", "table", "in", "self", ".", "RULES", ".", "items", "(", ")", ":", "patterns", "=", "list", "(", ")", "actions", "=", "list", "(", ")", "nextstates", "=", "list", "(", ")", "for", "i", ",", "row", "in", "enumerate", "(", "table", ")", ":", "if", "len", "(", "row", ")", "==", "2", ":", "pattern", ",", "_action", "=", "row", "nextstate", "=", "None", "elif", "len", "(", "row", ")", "==", "3", ":", "pattern", ",", "_action", ",", "nextstate", "=", "row", "else", ":", "fstr", "=", "\"invalid RULES: state {}, row {}\"", "raise", "CompileError", "(", "fstr", ".", "format", "(", "state", ",", "i", ")", ")", "patterns", ".", "append", "(", "pattern", ")", "actions", ".", "append", "(", "_action", ")", "nextstates", ".", "append", "(", "nextstate", ")", "reobj", "=", "re", ".", "compile", "(", "'|'", ".", "join", "(", "\"(\"", "+", "p", "+", "\")\"", "for", "p", "in", "patterns", ")", ")", "self", ".", "_rules", "[", "state", "]", "=", "(", "reobj", ",", "actions", ",", "nextstates", ")" ]
43.15
10.35
def wait(timeout=None, flush=True): """Wait for an event. Args: timeout (Optional[int]): The time in seconds that this function will wait before giving up and returning None. With the default value of None, this will block forever. flush (bool): If True a call to :any:`tdl.flush` will be made before listening for events. Returns: Type[Event]: An event, or None if the function has timed out. Anything added via :any:`push` will also be returned. """ if timeout is not None: timeout = timeout + _time.clock() # timeout at this time while True: if _eventQueue: return _eventQueue.pop(0) if flush: # a full 'round' of events need to be processed before flushing _tdl.flush() if timeout and _time.clock() >= timeout: return None # return None on timeout _time.sleep(0.001) # sleep 1ms _processEvents()
[ "def", "wait", "(", "timeout", "=", "None", ",", "flush", "=", "True", ")", ":", "if", "timeout", "is", "not", "None", ":", "timeout", "=", "timeout", "+", "_time", ".", "clock", "(", ")", "# timeout at this time", "while", "True", ":", "if", "_eventQueue", ":", "return", "_eventQueue", ".", "pop", "(", "0", ")", "if", "flush", ":", "# a full 'round' of events need to be processed before flushing", "_tdl", ".", "flush", "(", ")", "if", "timeout", "and", "_time", ".", "clock", "(", ")", ">=", "timeout", ":", "return", "None", "# return None on timeout", "_time", ".", "sleep", "(", "0.001", ")", "# sleep 1ms", "_processEvents", "(", ")" ]
36.074074
19.407407
def start(self, stdout=subprocess.PIPE, stderr=subprocess.PIPE): """ Merged copy paste from the inheritance chain with modified stdout/err behaviour """ if self.pre_start_check(): # Some other executor (or process) is running with same config: raise AlreadyRunning(self) if self.process is None: command = self.command if not self._shell: command = self.command_parts env = os.environ.copy() env[ENV_UUID] = self._uuid popen_kwargs = { 'shell': self._shell, 'stdin': subprocess.PIPE, 'stdout': stdout, 'stderr': stderr, 'universal_newlines': True, 'env': env, } if platform.system() != 'Windows': popen_kwargs['preexec_fn'] = os.setsid self.process = subprocess.Popen( command, **popen_kwargs, ) self._set_timeout() self.wait_for(self.check_subprocess) return self
[ "def", "start", "(", "self", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", ":", "if", "self", ".", "pre_start_check", "(", ")", ":", "# Some other executor (or process) is running with same config:", "raise", "AlreadyRunning", "(", "self", ")", "if", "self", ".", "process", "is", "None", ":", "command", "=", "self", ".", "command", "if", "not", "self", ".", "_shell", ":", "command", "=", "self", ".", "command_parts", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "env", "[", "ENV_UUID", "]", "=", "self", ".", "_uuid", "popen_kwargs", "=", "{", "'shell'", ":", "self", ".", "_shell", ",", "'stdin'", ":", "subprocess", ".", "PIPE", ",", "'stdout'", ":", "stdout", ",", "'stderr'", ":", "stderr", ",", "'universal_newlines'", ":", "True", ",", "'env'", ":", "env", ",", "}", "if", "platform", ".", "system", "(", ")", "!=", "'Windows'", ":", "popen_kwargs", "[", "'preexec_fn'", "]", "=", "os", ".", "setsid", "self", ".", "process", "=", "subprocess", ".", "Popen", "(", "command", ",", "*", "*", "popen_kwargs", ",", ")", "self", ".", "_set_timeout", "(", ")", "self", ".", "wait_for", "(", "self", ".", "check_subprocess", ")", "return", "self" ]
33.65625
14
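A trimmed, runnable sketch of the same cross-platform Popen setup used in `start` above: pipes on all streams, text mode, and a new session on POSIX so the whole process group can be signalled later. The command is illustrative and assumes a `python` executable on PATH.

import os
import platform
import subprocess

def spawn(command, shell=False):
    kwargs = {
        "shell": shell,
        "stdin": subprocess.PIPE,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE,
        "universal_newlines": True,  # text-mode pipes
    }
    if platform.system() != "Windows":
        kwargs["preexec_fn"] = os.setsid  # detach into a new session
    return subprocess.Popen(command, **kwargs)

proc = spawn(["python", "-c", "print('ready')"])
print(proc.stdout.readline().strip())  # -> ready
proc.wait()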
def rightsibling(node): """ Return Right Sibling of `node`. >>> from anytree import Node >>> dan = Node("Dan") >>> jet = Node("Jet", parent=dan) >>> jan = Node("Jan", parent=dan) >>> joe = Node("Joe", parent=dan) >>> rightsibling(dan) >>> rightsibling(jet) Node('/Dan/Jan') >>> rightsibling(jan) Node('/Dan/Joe') >>> rightsibling(joe) """ if node.parent: pchildren = node.parent.children idx = pchildren.index(node) try: return pchildren[idx + 1] except IndexError: return None else: return None
[ "def", "rightsibling", "(", "node", ")", ":", "if", "node", ".", "parent", ":", "pchildren", "=", "node", ".", "parent", ".", "children", "idx", "=", "pchildren", ".", "index", "(", "node", ")", "try", ":", "return", "pchildren", "[", "idx", "+", "1", "]", "except", "IndexError", ":", "return", "None", "else", ":", "return", "None" ]
24
13.36
def to_table(self, sort_key="wall_time", stop=None): """Return a table (list of lists) with timer data""" table = [list(AbinitTimerSection.FIELDS), ] ord_sections = self.order_sections(sort_key) if stop is not None: ord_sections = ord_sections[:stop] for osect in ord_sections: row = [str(item) for item in osect.to_tuple()] table.append(row) return table
[ "def", "to_table", "(", "self", ",", "sort_key", "=", "\"wall_time\"", ",", "stop", "=", "None", ")", ":", "table", "=", "[", "list", "(", "AbinitTimerSection", ".", "FIELDS", ")", ",", "]", "ord_sections", "=", "self", ".", "order_sections", "(", "sort_key", ")", "if", "stop", "is", "not", "None", ":", "ord_sections", "=", "ord_sections", "[", ":", "stop", "]", "for", "osect", "in", "ord_sections", ":", "row", "=", "[", "str", "(", "item", ")", "for", "item", "in", "osect", ".", "to_tuple", "(", ")", "]", "table", ".", "append", "(", "row", ")", "return", "table" ]
33.076923
17.538462
def notice_settings(request): """ The notice settings view. Template: :template:`notification/notice_settings.html` Context: notice_types A list of all :model:`notification.NoticeType` objects. notice_settings A dictionary containing ``column_headers`` for each ``NOTICE_MEDIA`` and ``rows`` containing a list of dictionaries: ``notice_type``, a :model:`notification.NoticeType` object and ``cells``, a list of tuples whose first value is suitable for use in forms and the second value is ``True`` or ``False`` depending on a ``request.POST`` variable called ``form_label``, whose valid value is ``on``. """ notice_types = NoticeType.objects.all() settings_table = [] for notice_type in notice_types: settings_row = [] for medium_id, medium_display in NOTICE_MEDIA: form_label = "%s_%s" % (notice_type.label, medium_id) setting = NoticeSetting.for_user(request.user, notice_type, medium_id) if request.method == "POST": if request.POST.get(form_label) == "on": if not setting.send: setting.send = True setting.save() else: if setting.send: setting.send = False setting.save() settings_row.append((form_label, setting.send)) settings_table.append({"notice_type": notice_type, "cells": settings_row}) if request.method == "POST": next_page = request.POST.get("next_page", ".") return HttpResponseRedirect(next_page) settings = { "column_headers": [medium_display for medium_id, medium_display in NOTICE_MEDIA], "rows": settings_table, } return render_to_response("notification/notice_settings.html", { "notice_types": notice_types, "notice_settings": settings, }, context_instance=RequestContext(request))
[ "def", "notice_settings", "(", "request", ")", ":", "notice_types", "=", "NoticeType", ".", "objects", ".", "all", "(", ")", "settings_table", "=", "[", "]", "for", "notice_type", "in", "notice_types", ":", "settings_row", "=", "[", "]", "for", "medium_id", ",", "medium_display", "in", "NOTICE_MEDIA", ":", "form_label", "=", "\"%s_%s\"", "%", "(", "notice_type", ".", "label", ",", "medium_id", ")", "setting", "=", "NoticeSetting", ".", "for_user", "(", "request", ".", "user", ",", "notice_type", ",", "medium_id", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "if", "request", ".", "POST", ".", "get", "(", "form_label", ")", "==", "\"on\"", ":", "if", "not", "setting", ".", "send", ":", "setting", ".", "send", "=", "True", "setting", ".", "save", "(", ")", "else", ":", "if", "setting", ".", "send", ":", "setting", ".", "send", "=", "False", "setting", ".", "save", "(", ")", "settings_row", ".", "append", "(", "(", "form_label", ",", "setting", ".", "send", ")", ")", "settings_table", ".", "append", "(", "{", "\"notice_type\"", ":", "notice_type", ",", "\"cells\"", ":", "settings_row", "}", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "next_page", "=", "request", ".", "POST", ".", "get", "(", "\"next_page\"", ",", "\".\"", ")", "return", "HttpResponseRedirect", "(", "next_page", ")", "settings", "=", "{", "\"column_headers\"", ":", "[", "medium_display", "for", "medium_id", ",", "medium_display", "in", "NOTICE_MEDIA", "]", ",", "\"rows\"", ":", "settings_table", ",", "}", "return", "render_to_response", "(", "\"notification/notice_settings.html\"", ",", "{", "\"notice_types\"", ":", "notice_types", ",", "\"notice_settings\"", ":", "settings", ",", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ")" ]
39.215686
20.627451
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None): """ - Parse HTML and get miniDOM - Extract CSS informations, add default CSS, parse CSS - Handle the document DOM itself and build reportlab story - Return Context object """ global CSSAttrCache CSSAttrCache = {} if xhtml: # TODO: XHTMLParser doesn't see to exist... parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom")) else: parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom")) if isinstance(src, six.text_type): # If an encoding was provided, do not change it. if not encoding: encoding = "utf-8" src = src.encode(encoding) src = pisaTempFile(src, capacity=context.capacity) # # Test for the restrictions of html5lib # if encoding: # # Workaround for html5lib<0.11.1 # if hasattr(inputstream, "isValidEncoding"): # if encoding.strip().lower() == "utf8": # encoding = "utf-8" # if not inputstream.isValidEncoding(encoding): # log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding) # else: # if inputstream.codecName(encoding) is None: # log.error("%r is not a valid encoding", encoding) document = parser.parse( src, ) # encoding=encoding) if xml_output: if encoding: xml_output.write(document.toprettyxml(encoding=encoding)) else: xml_output.write(document.toprettyxml(encoding="utf8")) if default_css: context.addDefaultCSS(default_css) pisaPreLoop(document, context) # try: context.parseCSS() # except: # context.cssText = DEFAULT_CSS # context.parseCSS() # context.debug(9, pprint.pformat(context.css)) pisaLoop(document, context) return context
[ "def", "pisaParser", "(", "src", ",", "context", ",", "default_css", "=", "\"\"", ",", "xhtml", "=", "False", ",", "encoding", "=", "None", ",", "xml_output", "=", "None", ")", ":", "global", "CSSAttrCache", "CSSAttrCache", "=", "{", "}", "if", "xhtml", ":", "# TODO: XHTMLParser doesn't see to exist...", "parser", "=", "html5lib", ".", "XHTMLParser", "(", "tree", "=", "treebuilders", ".", "getTreeBuilder", "(", "\"dom\"", ")", ")", "else", ":", "parser", "=", "html5lib", ".", "HTMLParser", "(", "tree", "=", "treebuilders", ".", "getTreeBuilder", "(", "\"dom\"", ")", ")", "if", "isinstance", "(", "src", ",", "six", ".", "text_type", ")", ":", "# If an encoding was provided, do not change it.", "if", "not", "encoding", ":", "encoding", "=", "\"utf-8\"", "src", "=", "src", ".", "encode", "(", "encoding", ")", "src", "=", "pisaTempFile", "(", "src", ",", "capacity", "=", "context", ".", "capacity", ")", "# # Test for the restrictions of html5lib", "# if encoding:", "# # Workaround for html5lib<0.11.1", "# if hasattr(inputstream, \"isValidEncoding\"):", "# if encoding.strip().lower() == \"utf8\":", "# encoding = \"utf-8\"", "# if not inputstream.isValidEncoding(encoding):", "# log.error(\"%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!\", encoding)", "# else:", "# if inputstream.codecName(encoding) is None:", "# log.error(\"%r is not a valid encoding\", encoding)", "document", "=", "parser", ".", "parse", "(", "src", ",", ")", "# encoding=encoding)", "if", "xml_output", ":", "if", "encoding", ":", "xml_output", ".", "write", "(", "document", ".", "toprettyxml", "(", "encoding", "=", "encoding", ")", ")", "else", ":", "xml_output", ".", "write", "(", "document", ".", "toprettyxml", "(", "encoding", "=", "\"utf8\"", ")", ")", "if", "default_css", ":", "context", ".", "addDefaultCSS", "(", "default_css", ")", "pisaPreLoop", "(", "document", ",", "context", ")", "# try:", "context", ".", "parseCSS", "(", ")", "# except:", "# context.cssText = DEFAULT_CSS", "# context.parseCSS()", "# context.debug(9, pprint.pformat(context.css))", "pisaLoop", "(", "document", ",", "context", ")", "return", "context" ]
32.862069
21.241379
def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]): """displays selected data from (files written by) the class `CMADataLogger`. Arguments --------- `idx` indices corresponding to rows in the data file; if idx is a scalar (int), the first two, then every idx-th, and the last three rows are displayed. Too large index values are removed. Example ------- >>> import cma, numpy as np >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data >>> assert res[1] < 1e-9 >>> assert res[2] < 4400 >>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data >>> l.disp([0,-1]) # first and last >>> l.disp(20) # some first/last and every 20-th line >>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last >>> l.disp(np.r_[0, -10:0]) # first and ten last >>> cma.disp(l.name_prefix, np.r_[0::100, -10:]) # the same as l.disp(...) Details ------- The data line with the best f-value is displayed as last line. :See: `disp()` """ filenameprefix = self.name_prefix def printdatarow(dat, iteration): """print data of iteration i""" i = np.where(dat.f[:, 0] == iteration)[0][0] j = np.where(dat.std[:, 0] == iteration)[0][0] print('%5d' % (int(dat.f[i, 0])) + ' %6d' % (int(dat.f[i, 1])) + ' %.14e' % (dat.f[i, 5]) + ' %5.1e' % (dat.f[i, 3]) + ' %6.2e' % (max(dat.std[j, 5:])) + ' %6.2e' % min(dat.std[j, 5:])) dat = CMADataLogger(filenameprefix).load() ndata = dat.f.shape[0] # map index to iteration number, is difficult if not all iteration numbers exist # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long # otherwise: if idx is None: idx = 100 if isscalar(idx): # idx = np.arange(0, ndata, idx) if idx: idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0] else: idx = np.r_[0, 1, -3:0] idx = array(idx) idx = idx[idx < ndata] idx = idx[-idx <= ndata] iters = dat.f[idx, 0] idxbest = np.argmin(dat.f[:, 5]) iterbest = dat.f[idxbest, 0] if len(iters) == 1: printdatarow(dat, iters[0]) else: self.disp_header() for i in iters: printdatarow(dat, i) self.disp_header() printdatarow(dat, iterbest) sys.stdout.flush()
[ "def", "disp", "(", "self", ",", "idx", "=", "100", ")", ":", "# r_[0:5,1e2:1e9:1e2,-10:0]):", "filenameprefix", "=", "self", ".", "name_prefix", "def", "printdatarow", "(", "dat", ",", "iteration", ")", ":", "\"\"\"print data of iteration i\"\"\"", "i", "=", "np", ".", "where", "(", "dat", ".", "f", "[", ":", ",", "0", "]", "==", "iteration", ")", "[", "0", "]", "[", "0", "]", "j", "=", "np", ".", "where", "(", "dat", ".", "std", "[", ":", ",", "0", "]", "==", "iteration", ")", "[", "0", "]", "[", "0", "]", "print", "(", "'%5d'", "%", "(", "int", "(", "dat", ".", "f", "[", "i", ",", "0", "]", ")", ")", "+", "' %6d'", "%", "(", "int", "(", "dat", ".", "f", "[", "i", ",", "1", "]", ")", ")", "+", "' %.14e'", "%", "(", "dat", ".", "f", "[", "i", ",", "5", "]", ")", "+", "' %5.1e'", "%", "(", "dat", ".", "f", "[", "i", ",", "3", "]", ")", "+", "' %6.2e'", "%", "(", "max", "(", "dat", ".", "std", "[", "j", ",", "5", ":", "]", ")", ")", "+", "' %6.2e'", "%", "min", "(", "dat", ".", "std", "[", "j", ",", "5", ":", "]", ")", ")", "dat", "=", "CMADataLogger", "(", "filenameprefix", ")", ".", "load", "(", ")", "ndata", "=", "dat", ".", "f", ".", "shape", "[", "0", "]", "# map index to iteration number, is difficult if not all iteration numbers exist", "# idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long", "# otherwise:", "if", "idx", "is", "None", ":", "idx", "=", "100", "if", "isscalar", "(", "idx", ")", ":", "# idx = np.arange(0, ndata, idx)", "if", "idx", ":", "idx", "=", "np", ".", "r_", "[", "0", ",", "1", ",", "idx", ":", "ndata", "-", "3", ":", "idx", ",", "-", "3", ":", "0", "]", "else", ":", "idx", "=", "np", ".", "r_", "[", "0", ",", "1", ",", "-", "3", ":", "0", "]", "idx", "=", "array", "(", "idx", ")", "idx", "=", "idx", "[", "idx", "<", "ndata", "]", "idx", "=", "idx", "[", "-", "idx", "<=", "ndata", "]", "iters", "=", "dat", ".", "f", "[", "idx", ",", "0", "]", "idxbest", "=", "np", ".", "argmin", "(", "dat", ".", "f", "[", ":", ",", "5", "]", ")", "iterbest", "=", "dat", ".", "f", "[", "idxbest", ",", "0", "]", "if", "len", "(", "iters", ")", "==", "1", ":", "printdatarow", "(", "dat", ",", "iters", "[", "0", "]", ")", "else", ":", "self", ".", "disp_header", "(", ")", "for", "i", "in", "iters", ":", "printdatarow", "(", "dat", ",", "i", ")", "self", ".", "disp_header", "(", ")", "printdatarow", "(", "dat", ",", "iterbest", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
36.597222
21.819444
def _gt_from_le(self, other): """Return a > b. Computed by @total_ordering from (not a <= b).""" op_result = self.__le__(other) if op_result is NotImplemented: return NotImplemented return not op_result
[ "def", "_gt_from_le", "(", "self", ",", "other", ")", ":", "op_result", "=", "self", ".", "__le__", "(", "other", ")", "if", "op_result", "is", "NotImplemented", ":", "return", "NotImplemented", "return", "not", "op_result" ]
37
8.166667
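This helper is one of the pieces `functools.total_ordering` installs. A small runnable example showing `>` being derived from a user-defined `__le__`; the class is illustrative:

from functools import total_ordering

@total_ordering
class Version:
    def __init__(self, n):
        self.n = n
    def __eq__(self, other):
        return self.n == other.n
    def __le__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self.n <= other.n

# __gt__ is synthesised as "not (a <= b)", as in _gt_from_le above.
print(Version(2) > Version(1))  # -> True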
def wrap(self, text, width=None, indent=None): """Return ``text`` wrapped to ``width`` and indented with ``indent``. By default: * ``width`` is ``self.options.wrap_length`` * ``indent`` is ``self.indentation``. """ width = width if width is not None else self.options.wrap_length indent = indent if indent is not None else self.indentation initial_indent = self.initial_indentation return textwrap.fill(text, width=width, initial_indent=initial_indent, subsequent_indent=indent)
[ "def", "wrap", "(", "self", ",", "text", ",", "width", "=", "None", ",", "indent", "=", "None", ")", ":", "width", "=", "width", "if", "width", "is", "not", "None", "else", "self", ".", "options", ".", "wrap_length", "indent", "=", "indent", "if", "indent", "is", "not", "None", "else", "self", ".", "indentation", "initial_indent", "=", "self", ".", "initial_indentation", "return", "textwrap", ".", "fill", "(", "text", ",", "width", "=", "width", ",", "initial_indent", "=", "initial_indent", ",", "subsequent_indent", "=", "indent", ")" ]
39.8
18.066667
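Underneath, `wrap` is a thin front for `textwrap.fill`. A quick stdlib illustration of the initial/subsequent indent split; the width and indent strings are arbitrary:

import textwrap

text = "a long description that should wrap onto several lines"
print(textwrap.fill(text, width=30,
                    initial_indent="- ",
                    subsequent_indent="  "))
# - a long description that
#   should wrap onto several
#   lines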
def contains_exclusive(self, x, y): """ Return True if the given point is contained within the bounding box, where the bottom and right boundaries are considered exclusive. """ left, bottom, right, top = self._aarect.lbrt() return (left <= x < right) and (bottom < y <= top)
[ "def", "contains_exclusive", "(", "self", ",", "x", ",", "y", ")", ":", "left", ",", "bottom", ",", "right", ",", "top", "=", "self", ".", "_aarect", ".", "lbrt", "(", ")", "return", "(", "left", "<=", "x", "<", "right", ")", "and", "(", "bottom", "<", "y", "<=", "top", ")" ]
40.375
11.625
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15): """ Convert a document into a list of tokens. This lowercases, tokenizes, de-accents (optional). -- the output are final tokens = unicode strings, that won't be processed any further. """ tokens = [ token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore') if min_len <= len(token) <= max_len and not token.startswith('_') ] return tokens
[ "def", "simple_preprocess", "(", "doc", ",", "deacc", "=", "False", ",", "min_len", "=", "2", ",", "max_len", "=", "15", ")", ":", "tokens", "=", "[", "token", "for", "token", "in", "tokenize", "(", "doc", ",", "lower", "=", "True", ",", "deacc", "=", "deacc", ",", "errors", "=", "'ignore'", ")", "if", "min_len", "<=", "len", "(", "token", ")", "<=", "max_len", "and", "not", "token", ".", "startswith", "(", "'_'", ")", "]", "return", "tokens" ]
35.153846
25.461538
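`tokenize` here is gensim's. A rough self-contained stand-in built on the `re` module shows the same lowercase-and-length-filter pipeline; the regex only approximates gensim's alphabetic tokenizer:

import re

def tokenize_lower(doc):
    # Approximation of gensim's tokenize(doc, lower=True):
    # lowercased runs of alphabetic characters.
    return (m.group().lower() for m in re.finditer(r"[^\W\d_]+", doc))

def simple_preprocess_sketch(doc, min_len=2, max_len=15):
    return [t for t in tokenize_lower(doc)
            if min_len <= len(t) <= max_len and not t.startswith("_")]

print(simple_preprocess_sketch("Hello, NLP world! A B"))
# -> ['hello', 'nlp', 'world']  (single-letter tokens are filtered out)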
def splash_url_as(self, *, format='webp', size=2048): """:class:`Asset`: The same operation as :meth:`Guild.splash_url_as`.""" return Asset._from_guild_image(self._state, self.id, self.splash, 'splashes', format=format, size=size)
[ "def", "splash_url_as", "(", "self", ",", "*", ",", "format", "=", "'webp'", ",", "size", "=", "2048", ")", ":", "return", "Asset", ".", "_from_guild_image", "(", "self", ".", "_state", ",", "self", ".", "id", ",", "self", ".", "splash", ",", "'splashes'", ",", "format", "=", "format", ",", "size", "=", "size", ")" ]
81.333333
28
def parse_kal_channel(kal_out): """Parse kal channel scan output.""" scan_band, scan_channel, tgt_freq = determine_band_channel(kal_out) kal_data = {"device": determine_device(kal_out), "sample_rate": determine_sample_rate(kal_out), "gain": determine_scan_gain(kal_out), "band": scan_band, "channel": scan_channel, "frequency": tgt_freq, "avg_absolute_error": determine_avg_absolute_error(kal_out), "measurements" : get_measurements_from_kal_scan(kal_out), "raw_scan_result": kal_out} return kal_data
[ "def", "parse_kal_channel", "(", "kal_out", ")", ":", "scan_band", ",", "scan_channel", ",", "tgt_freq", "=", "determine_band_channel", "(", "kal_out", ")", "kal_data", "=", "{", "\"device\"", ":", "determine_device", "(", "kal_out", ")", ",", "\"sample_rate\"", ":", "determine_sample_rate", "(", "kal_out", ")", ",", "\"gain\"", ":", "determine_scan_gain", "(", "kal_out", ")", ",", "\"band\"", ":", "scan_band", ",", "\"channel\"", ":", "scan_channel", ",", "\"frequency\"", ":", "tgt_freq", ",", "\"avg_absolute_error\"", ":", "determine_avg_absolute_error", "(", "kal_out", ")", ",", "\"measurements\"", ":", "get_measurements_from_kal_scan", "(", "kal_out", ")", ",", "\"raw_scan_result\"", ":", "kal_out", "}", "return", "kal_data" ]
48.615385
14.461538
def _set_ipsec(self, v, load=False): """ Setter method for ipsec, mapped from YANG variable /interface/fortygigabitethernet/ipv6/interface_ospfv3_conf/authentication/ipsec (container) If this variable is read-only (config: false) in the source YANG file, then _set_ipsec is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ipsec() directly. YANG Description: Configure ipsec authentication for the interface """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=ipsec.ipsec, is_container='container', presence=False, yang_name="ipsec", rest_name="ipsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipsec authentication for the interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ipsec must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ipsec.ipsec, is_container='container', presence=False, yang_name="ipsec", rest_name="ipsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipsec authentication for the interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""", }) self.__ipsec = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ipsec", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "ipsec", ".", "ipsec", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"ipsec\"", ",", "rest_name", "=", "\"ipsec\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure ipsec authentication for the interface'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-ospfv3'", ",", "defining_module", "=", "'brocade-ospfv3'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"ipsec must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=ipsec.ipsec, is_container='container', presence=False, yang_name=\"ipsec\", rest_name=\"ipsec\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipsec authentication for the interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__ipsec", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
70.75
35.625
def add_lv_load_area_group(self, lv_load_area_group): """Adds a LV load area group to _lv_load_area_groups if not already present. """ if lv_load_area_group not in self.lv_load_area_groups(): self._lv_load_area_groups.append(lv_load_area_group)
[ "def", "add_lv_load_area_group", "(", "self", ",", "lv_load_area_group", ")", ":", "if", "lv_load_area_group", "not", "in", "self", ".", "lv_load_area_groups", "(", ")", ":", "self", ".", "_lv_load_area_groups", ".", "append", "(", "lv_load_area_group", ")" ]
53
12.2
def view_atype(self, atype): """View the given atype on the atype page :param atype: the atype to view :type atype: :class:`jukeboxcore.djadapter.models.Atype` :returns: None :rtype: None :raises: None """ if not self.cur_prj: return log.debug('Viewing atype %s', atype.name) self.cur_atype = None self.pages_tabw.setCurrentIndex(4) self.atype_name_le.setText(atype.name) self.atype_desc_pte.setPlainText(atype.description) assetrootdata = treemodel.ListItemData(['Name', 'Description']) assetrootitem = treemodel.TreeItem(assetrootdata) self.atype_asset_model = treemodel.TreeModel(assetrootitem) self.atype_asset_treev.setModel(self.atype_asset_model) for a in djadapter.assets.filter(project=self.cur_prj, atype=atype): assetdata = djitemdata.AssetItemData(a) treemodel.TreeItem(assetdata, assetrootitem) self.cur_atype = atype
[ "def", "view_atype", "(", "self", ",", "atype", ")", ":", "if", "not", "self", ".", "cur_prj", ":", "return", "log", ".", "debug", "(", "'Viewing atype %s'", ",", "atype", ".", "name", ")", "self", ".", "cur_atype", "=", "None", "self", ".", "pages_tabw", ".", "setCurrentIndex", "(", "4", ")", "self", ".", "atype_name_le", ".", "setText", "(", "atype", ".", "name", ")", "self", ".", "atype_desc_pte", ".", "setPlainText", "(", "atype", ".", "description", ")", "assetrootdata", "=", "treemodel", ".", "ListItemData", "(", "[", "'Name'", ",", "'Description'", "]", ")", "assetrootitem", "=", "treemodel", ".", "TreeItem", "(", "assetrootdata", ")", "self", ".", "atype_asset_model", "=", "treemodel", ".", "TreeModel", "(", "assetrootitem", ")", "self", ".", "atype_asset_treev", ".", "setModel", "(", "self", ".", "atype_asset_model", ")", "for", "a", "in", "djadapter", ".", "assets", ".", "filter", "(", "project", "=", "self", ".", "cur_prj", ",", "atype", "=", "atype", ")", ":", "assetdata", "=", "djitemdata", ".", "AssetItemData", "(", "a", ")", "treemodel", ".", "TreeItem", "(", "assetdata", ",", "assetrootitem", ")", "self", ".", "cur_atype", "=", "atype" ]
36.888889
18.740741
def get(self, task_id): '''taobao.topats.result.get Get the result of an asynchronous task Usage guide: http://open.taobao.com/doc/detail.htm?id=30 - 1. This API retrieves the result of asynchronous task processing; the task_id passed in must belong to the current appKey - 2. Only results of completed tasks are returned; for tasks still running, the response contains no task result, only the task id and execution status - 3. Each completed task's sub-task results have the same structure as a single task's result, e.g. sub-task results returned by taobao.topats.trades.fullinfo.get are Trade structures.''' request = TOPRequest('taobao.topats.result.get') request['task_id'] = task_id self.create(self.execute(request)['task']) return self
[ "def", "get", "(", "self", ",", "task_id", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.topats.result.get'", ")", "request", "[", "'task_id'", "]", "=", "task_id", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ")", "[", "'task'", "]", ")", "return", "self" ]
43.25
20.916667
def asMIMEString(self): """ Return a mime-multipart representation of: - callgrind profiling statistics (cachegrind.out.pprofile) - any SQL query issued via ZMySQLDA (query_*.sql) - any persistent object load via ZODB.Connection (ZODB_setstate.txt) - any path argument given to unrestrictedTraverse (unrestrictedTraverse_pathlist.txt) - all involved python code, including Python Scripts without hierarchy (the rest) To unpack resulting file, see "unpack a MIME message" in http://docs.python.org/2/library/email-examples.html Or get demultipart from https://pypi.python.org/pypi/demultipart """ result = MIMEMultipart() base_type_dict = { 'application': MIMEApplication, 'text': MIMEText, } encoder_dict = { 'application/x-kcachegrind': encode_quopri, 'text/x-python': 'utf-8', 'text/plain': 'utf-8', } for path, data, mimetype in self._iterOutFiles(): base_type, sub_type = mimetype.split('/') chunk = base_type_dict[base_type]( data, sub_type, encoder_dict.get(mimetype), ) chunk.add_header( 'Content-Disposition', 'attachment', filename=path, ) result.attach(chunk) return result.as_string(), result['content-type']
[ "def", "asMIMEString", "(", "self", ")", ":", "result", "=", "MIMEMultipart", "(", ")", "base_type_dict", "=", "{", "'application'", ":", "MIMEApplication", ",", "'text'", ":", "MIMEText", ",", "}", "encoder_dict", "=", "{", "'application/x-kcachegrind'", ":", "encode_quopri", ",", "'text/x-python'", ":", "'utf-8'", ",", "'text/plain'", ":", "'utf-8'", ",", "}", "for", "path", ",", "data", ",", "mimetype", "in", "self", ".", "_iterOutFiles", "(", ")", ":", "base_type", ",", "sub_type", "=", "mimetype", ".", "split", "(", "'/'", ")", "chunk", "=", "base_type_dict", "[", "base_type", "]", "(", "data", ",", "sub_type", ",", "encoder_dict", ".", "get", "(", "mimetype", ")", ",", ")", "chunk", ".", "add_header", "(", "'Content-Disposition'", ",", "'attachment'", ",", "filename", "=", "path", ",", ")", "result", ".", "attach", "(", "chunk", ")", "return", "result", ".", "as_string", "(", ")", ",", "result", "[", "'content-type'", "]" ]
38.076923
14.74359
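A stdlib-only sketch of the same pack-files-into-multipart idea; the file names and contents are made up. The resulting message can be unpacked with the email package or the demultipart tool mentioned in the docstring:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

result = MIMEMultipart()
for path, data in [("stats.txt", "42 calls"), ("query_1.sql", "SELECT 1;")]:
    chunk = MIMEText(data, "plain", "utf-8")
    # Mark each chunk as an attachment carrying its file name.
    chunk.add_header("Content-Disposition", "attachment", filename=path)
    result.attach(chunk)

print(result["content-type"].split(";")[0])  # -> multipart/mixed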
def qos(self, prefetch_size=0, prefetch_count=0, is_global=False): ''' Set QoS on this channel. ''' args = Writer() args.write_long(prefetch_size).\ write_short(prefetch_count).\ write_bit(is_global) self.send_frame(MethodFrame(self.channel_id, 60, 10, args)) self.channel.add_synchronous_cb(self._recv_qos_ok)
[ "def", "qos", "(", "self", ",", "prefetch_size", "=", "0", ",", "prefetch_count", "=", "0", ",", "is_global", "=", "False", ")", ":", "args", "=", "Writer", "(", ")", "args", ".", "write_long", "(", "prefetch_size", ")", ".", "write_short", "(", "prefetch_count", ")", ".", "write_bit", "(", "is_global", ")", "self", ".", "send_frame", "(", "MethodFrame", "(", "self", ".", "channel_id", ",", "60", ",", "10", ",", "args", ")", ")", "self", ".", "channel", ".", "add_synchronous_cb", "(", "self", ".", "_recv_qos_ok", ")" ]
34.636364
18.454545
def task(obj = None, deps = None): """Decorator for creating a task.""" # The decorator is not used as a function if callable(obj): __task(obj.__name__, obj) return obj # The decorator is used as a function def __decorated(func): __task(obj if obj else func.__name__, deps, func) return func return __decorated
[ "def", "task", "(", "obj", "=", "None", ",", "deps", "=", "None", ")", ":", "# The decorator is not used as a function", "if", "callable", "(", "obj", ")", ":", "__task", "(", "obj", ".", "__name__", ",", "obj", ")", "return", "obj", "# The decorator is used as a function", "def", "__decorated", "(", "func", ")", ":", "__task", "(", "obj", "if", "obj", "else", "obj", ".", "__name__", ",", "deps", ",", "func", ")", "return", "func", "return", "__decorated" ]
24.076923
17.538462
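A self-contained sketch of the dual-use decorator pattern above (bare `@task` vs. `@task("name", deps=[...])`). The registry dict is hypothetical, standing in for the undisclosed `__task` helper, and the fallback name comes from the wrapped function rather than the (possibly None) first argument:

_registry = {}

def task(obj=None, deps=None):
    if callable(obj):                 # bare use: @task
        _registry[obj.__name__] = (obj, [])
        return obj
    def decorated(func):              # parametrised use: @task(...)
        _registry[obj if obj else func.__name__] = (func, deps or [])
        return func
    return decorated

@task
def build(): pass

@task("deploy", deps=["build"])
def deploy_impl(): pass

print(sorted(_registry))  # -> ['build', 'deploy']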
def setup_interpolant(self): """Initializes the z(d) interpolation.""" # for computing nearby (z < 1) redshifts zs = numpy.linspace(0., 1., num=self.numpoints) ds = self.cosmology.luminosity_distance(zs).value self.nearby_d2z = interpolate.interp1d(ds, zs, kind='linear', bounds_error=False) # for computing far away (z > 1) redshifts zs = numpy.logspace(0, numpy.log10(self.default_maxz), num=self.numpoints) ds = self.cosmology.luminosity_distance(zs).value self.faraway_d2z = interpolate.interp1d(ds, zs, kind='linear', bounds_error=False) # store the default maximum distance self.default_maxdist = ds.max()
[ "def", "setup_interpolant", "(", "self", ")", ":", "# for computing nearby (z < 1) redshifts", "zs", "=", "numpy", ".", "linspace", "(", "0.", ",", "1.", ",", "num", "=", "self", ".", "numpoints", ")", "ds", "=", "self", ".", "cosmology", ".", "luminosity_distance", "(", "zs", ")", ".", "value", "self", ".", "nearby_d2z", "=", "interpolate", ".", "interp1d", "(", "ds", ",", "zs", ",", "kind", "=", "'linear'", ",", "bounds_error", "=", "False", ")", "# for computing far away (z > 1) redshifts", "zs", "=", "numpy", ".", "logspace", "(", "0", ",", "numpy", ".", "log10", "(", "self", ".", "default_maxz", ")", ",", "num", "=", "self", ".", "numpoints", ")", "ds", "=", "self", ".", "cosmology", ".", "luminosity_distance", "(", "zs", ")", ".", "value", "self", ".", "faraway_d2z", "=", "interpolate", ".", "interp1d", "(", "ds", ",", "zs", ",", "kind", "=", "'linear'", ",", "bounds_error", "=", "False", ")", "# store the default maximum distance", "self", ".", "default_maxdist", "=", "ds", ".", "max", "(", ")" ]
54
15.133333
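The trick above is inverting a monotonic d(z) by swapping the interpolation axes. A toy version with an illustrative distance law standing in for astropy's cosmology object (requires scipy):

import numpy as np
from scipy import interpolate

z_grid = np.linspace(0.0, 1.0, 500)
d_grid = 3000.0 * z_grid * (1.0 + 0.5 * z_grid)  # made-up monotonic d(z)

# Swap the axes: interpolate z as a function of d.
d_to_z = interpolate.interp1d(d_grid, z_grid, kind="linear",
                              bounds_error=False)

print(float(d_to_z(1500.0)))  # redshift whose toy distance is ~1500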
def _add_tag_files( zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count ): """Generate the tag files and add them to the zip.""" tag_info_list = [] _add_tag_file(zip_file, dir_name, tag_info_list, _gen_bagit_text_file_tup()) _add_tag_file( zip_file, dir_name, tag_info_list, _gen_bag_info_file_tup(payload_byte_count, payload_file_count), ) _add_tag_file( zip_file, dir_name, tag_info_list, _gen_pid_mapping_file_tup(payload_info_list) ) return tag_info_list
[ "def", "_add_tag_files", "(", "zip_file", ",", "dir_name", ",", "payload_info_list", ",", "payload_byte_count", ",", "payload_file_count", ")", ":", "tag_info_list", "=", "[", "]", "_add_tag_file", "(", "zip_file", ",", "dir_name", ",", "tag_info_list", ",", "_gen_bagit_text_file_tup", "(", ")", ")", "_add_tag_file", "(", "zip_file", ",", "dir_name", ",", "tag_info_list", ",", "_gen_bag_info_file_tup", "(", "payload_byte_count", ",", "payload_file_count", ")", ",", ")", "_add_tag_file", "(", "zip_file", ",", "dir_name", ",", "tag_info_list", ",", "_gen_pid_mapping_file_tup", "(", "payload_info_list", ")", ")", "return", "tag_info_list" ]
34.0625
26.875
def name_targets(func): """ Wrap a function such that returning ``'a', 'b', 'c', [1, 2, 3]`` transforms the value into ``dict(a=1, b=2, c=3)``. This is useful in the case where the last parameter is an SCons command. """ def wrap(*a, **kw): ret = func(*a, **kw) return dict(zip(ret[:-1], ret[-1])) return wrap
[ "def", "name_targets", "(", "func", ")", ":", "def", "wrap", "(", "*", "a", ",", "*", "*", "kw", ")", ":", "ret", "=", "func", "(", "*", "a", ",", "*", "*", "kw", ")", "return", "dict", "(", "zip", "(", "ret", "[", ":", "-", "1", "]", ",", "ret", "[", "-", "1", "]", ")", ")", "return", "wrap" ]
31.272727
17.454545
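A usage sketch, assuming the `name_targets` above is in scope; the target names and file names are illustrative:

@name_targets
def build_outputs():
    # names first, the list of values last
    return 'obj', 'lib', 'bin', ['main.o', 'libfoo.a', 'app']

print(build_outputs())
# -> {'obj': 'main.o', 'lib': 'libfoo.a', 'bin': 'app'}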
def _prepare_args(target_log_prob_fn, volatility_fn, state, step_size, target_log_prob=None, grads_target_log_prob=None, volatility=None, grads_volatility_fn=None, diffusion_drift=None, parallel_iterations=10): """Helper which processes input args to meet list-like assumptions.""" state_parts = list(state) if mcmc_util.is_list_like(state) else [state] [ target_log_prob, grads_target_log_prob, ] = mcmc_util.maybe_call_fn_and_grads( target_log_prob_fn, state_parts, target_log_prob, grads_target_log_prob) [ volatility_parts, grads_volatility, ] = _maybe_call_volatility_fn_and_grads( volatility_fn, state_parts, volatility, grads_volatility_fn, distribution_util.prefer_static_shape(target_log_prob), parallel_iterations) step_sizes = (list(step_size) if mcmc_util.is_list_like(step_size) else [step_size]) step_sizes = [ tf.convert_to_tensor( value=s, name='step_size', dtype=target_log_prob.dtype) for s in step_sizes ] if len(step_sizes) == 1: step_sizes *= len(state_parts) if len(state_parts) != len(step_sizes): raise ValueError('There should be exactly one `step_size` or it should ' 'have same length as `current_state`.') if diffusion_drift is None: diffusion_drift_parts = _get_drift(step_sizes, volatility_parts, grads_volatility, grads_target_log_prob) else: diffusion_drift_parts = (list(diffusion_drift) if mcmc_util.is_list_like(diffusion_drift) else [diffusion_drift]) if len(state_parts) != len(diffusion_drift): raise ValueError('There should be exactly one `diffusion_drift` or it ' 'should have same length as list-like `current_state`.') return [ state_parts, step_sizes, target_log_prob, grads_target_log_prob, volatility_parts, grads_volatility, diffusion_drift_parts, ]
[ "def", "_prepare_args", "(", "target_log_prob_fn", ",", "volatility_fn", ",", "state", ",", "step_size", ",", "target_log_prob", "=", "None", ",", "grads_target_log_prob", "=", "None", ",", "volatility", "=", "None", ",", "grads_volatility_fn", "=", "None", ",", "diffusion_drift", "=", "None", ",", "parallel_iterations", "=", "10", ")", ":", "state_parts", "=", "list", "(", "state", ")", "if", "mcmc_util", ".", "is_list_like", "(", "state", ")", "else", "[", "state", "]", "[", "target_log_prob", ",", "grads_target_log_prob", ",", "]", "=", "mcmc_util", ".", "maybe_call_fn_and_grads", "(", "target_log_prob_fn", ",", "state_parts", ",", "target_log_prob", ",", "grads_target_log_prob", ")", "[", "volatility_parts", ",", "grads_volatility", ",", "]", "=", "_maybe_call_volatility_fn_and_grads", "(", "volatility_fn", ",", "state_parts", ",", "volatility", ",", "grads_volatility_fn", ",", "distribution_util", ".", "prefer_static_shape", "(", "target_log_prob", ")", ",", "parallel_iterations", ")", "step_sizes", "=", "(", "list", "(", "step_size", ")", "if", "mcmc_util", ".", "is_list_like", "(", "step_size", ")", "else", "[", "step_size", "]", ")", "step_sizes", "=", "[", "tf", ".", "convert_to_tensor", "(", "value", "=", "s", ",", "name", "=", "'step_size'", ",", "dtype", "=", "target_log_prob", ".", "dtype", ")", "for", "s", "in", "step_sizes", "]", "if", "len", "(", "step_sizes", ")", "==", "1", ":", "step_sizes", "*=", "len", "(", "state_parts", ")", "if", "len", "(", "state_parts", ")", "!=", "len", "(", "step_sizes", ")", ":", "raise", "ValueError", "(", "'There should be exactly one `step_size` or it should '", "'have same length as `current_state`.'", ")", "if", "diffusion_drift", "is", "None", ":", "diffusion_drift_parts", "=", "_get_drift", "(", "step_sizes", ",", "volatility_parts", ",", "grads_volatility", ",", "grads_target_log_prob", ")", "else", ":", "diffusion_drift_parts", "=", "(", "list", "(", "diffusion_drift", ")", "if", "mcmc_util", ".", "is_list_like", "(", "diffusion_drift", ")", "else", "[", "diffusion_drift", "]", ")", "if", "len", "(", "state_parts", ")", "!=", "len", "(", "diffusion_drift", ")", ":", "raise", "ValueError", "(", "'There should be exactly one `diffusion_drift` or it '", "'should have same length as list-like `current_state`.'", ")", "return", "[", "state_parts", ",", "step_sizes", ",", "target_log_prob", ",", "grads_target_log_prob", ",", "volatility_parts", ",", "grads_volatility", ",", "diffusion_drift_parts", ",", "]" ]
33.287879
18.651515
def generational_replacement(random, population, parents, offspring, args): """Performs generational replacement with optional weak elitism. This function performs generational replacement, which means that the entire existing population is replaced by the offspring, truncating to the population size if the number of offspring is larger. Weak elitism may also be specified through the `num_elites` keyword argument in args. If this is used, the best `num_elites` individuals in the current population are allowed to survive if they are better than the worst `num_elites` offspring. .. Arguments: random -- the random number generator object population -- the population of individuals parents -- the list of parent individuals offspring -- the list of offspring individuals args -- a dictionary of keyword arguments Optional keyword arguments in args: - *num_elites* -- number of elites to consider (default 0) """ num_elites = args.setdefault('num_elites', 0) population.sort(reverse=True) offspring.extend(population[:num_elites]) offspring.sort(reverse=True) survivors = offspring[:len(population)] return survivors
[ "def", "generational_replacement", "(", "random", ",", "population", ",", "parents", ",", "offspring", ",", "args", ")", ":", "num_elites", "=", "args", ".", "setdefault", "(", "'num_elites'", ",", "0", ")", "population", ".", "sort", "(", "reverse", "=", "True", ")", "offspring", ".", "extend", "(", "population", "[", ":", "num_elites", "]", ")", "offspring", ".", "sort", "(", "reverse", "=", "True", ")", "survivors", "=", "offspring", "[", ":", "len", "(", "population", ")", "]", "return", "survivors" ]
42.103448
19.034483
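A toy run of the replacer above, assuming it is in scope, with plain numbers as individuals (bigger means fitter). It shows the single elite outliving a weaker offspring cohort:

import random

population = [3, 9, 4, 1]   # toy individuals; sorting compares their values
offspring = [2, 2, 5, 2]

survivors = generational_replacement(
    random.Random(0), list(population), parents=list(population),
    offspring=list(offspring), args={'num_elites': 1})

print(survivors)  # -> [9, 5, 2, 2]; the elite 9 survives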
def dirty(self, value): """ If value is True, set the recipe's dirty flag. If False, clear the dirty flags on the recipe and all of its extensions. """ if value: self._dirty = True else: self._dirty = False for extension in self.recipe_extensions: extension.dirty = False
[ "def", "dirty", "(", "self", ",", "value", ")", ":", "if", "value", ":", "self", ".", "_dirty", "=", "True", "else", ":", "self", ".", "_dirty", "=", "False", "for", "extension", "in", "self", ".", "recipe_extensions", ":", "extension", ".", "dirty", "=", "False" ]
36.777778
11
def _run__cherrypy(app, config, mode): """Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed.""" assert mode == "cherrypy-wsgiserver" try: from cherrypy import wsgiserver from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter _logger.warning("WARNING: cherrypy.wsgiserver is deprecated.") _logger.warning( " Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver" ) _logger.warning(" was moved to the cheroot project.") _logger.warning(" Consider using --server=cheroot.") except ImportError: _logger.error("*" * 78) _logger.error("ERROR: Could not import cherrypy.wsgiserver.") _logger.error( "Try `pip install cherrypy` or specify another server using the --server option." ) _logger.error("Note that starting with CherryPy 9.0, the server was moved to") _logger.error( "the cheroot project, so it is recommended to use `-server=cheroot`" ) _logger.error("and run `pip install cheroot` instead.") _logger.error("*" * 78) raise server_name = "WsgiDAV/{} {} Python/{}".format( __version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION ) wsgiserver.CherryPyWSGIServer.version = server_name # Support SSL ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config) ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config) ssl_certificate_chain = _get_checked_path( config.get("ssl_certificate_chain"), config ) protocol = "http" if ssl_certificate: assert ssl_private_key wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter( ssl_certificate, ssl_private_key, ssl_certificate_chain ) protocol = "https" _logger.info("SSL / HTTPS enabled.") _logger.info("Running {}".format(server_name)) _logger.info( "Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"]) ) server_args = { "bind_addr": (config["host"], config["port"]), "wsgi_app": app, "server_name": server_name, } # Override or add custom args server_args.update(config.get("server_args", {})) server = wsgiserver.CherryPyWSGIServer(**server_args) # If the caller passed a startup event, monkey patch the server to set it # when the request handler loop is entered startup_event = config.get("startup_event") if startup_event: def _patched_tick(): server.tick = org_tick # undo the monkey patch org_tick() _logger.info("CherryPyWSGIServer is ready") startup_event.set() org_tick = server.tick server.tick = _patched_tick try: server.start() except KeyboardInterrupt: _logger.warning("Caught Ctrl-C, shutting down...") finally: server.stop() return
[ "def", "_run__cherrypy", "(", "app", ",", "config", ",", "mode", ")", ":", "assert", "mode", "==", "\"cherrypy-wsgiserver\"", "try", ":", "from", "cherrypy", "import", "wsgiserver", "from", "cherrypy", ".", "wsgiserver", ".", "ssl_builtin", "import", "BuiltinSSLAdapter", "_logger", ".", "warning", "(", "\"WARNING: cherrypy.wsgiserver is deprecated.\"", ")", "_logger", ".", "warning", "(", "\" Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver\"", ")", "_logger", ".", "warning", "(", "\" was moved to the cheroot project.\"", ")", "_logger", ".", "warning", "(", "\" Consider using --server=cheroot.\"", ")", "except", "ImportError", ":", "_logger", ".", "error", "(", "\"*\"", "*", "78", ")", "_logger", ".", "error", "(", "\"ERROR: Could not import cherrypy.wsgiserver.\"", ")", "_logger", ".", "error", "(", "\"Try `pip install cherrypy` or specify another server using the --server option.\"", ")", "_logger", ".", "error", "(", "\"Note that starting with CherryPy 9.0, the server was moved to\"", ")", "_logger", ".", "error", "(", "\"the cheroot project, so it is recommended to use `-server=cheroot`\"", ")", "_logger", ".", "error", "(", "\"and run `pip install cheroot` instead.\"", ")", "_logger", ".", "error", "(", "\"*\"", "*", "78", ")", "raise", "server_name", "=", "\"WsgiDAV/{} {} Python/{}\"", ".", "format", "(", "__version__", ",", "wsgiserver", ".", "CherryPyWSGIServer", ".", "version", ",", "util", ".", "PYTHON_VERSION", ")", "wsgiserver", ".", "CherryPyWSGIServer", ".", "version", "=", "server_name", "# Support SSL", "ssl_certificate", "=", "_get_checked_path", "(", "config", ".", "get", "(", "\"ssl_certificate\"", ")", ",", "config", ")", "ssl_private_key", "=", "_get_checked_path", "(", "config", ".", "get", "(", "\"ssl_private_key\"", ")", ",", "config", ")", "ssl_certificate_chain", "=", "_get_checked_path", "(", "config", ".", "get", "(", "\"ssl_certificate_chain\"", ")", ",", "config", ")", "protocol", "=", "\"http\"", "if", "ssl_certificate", ":", "assert", "ssl_private_key", "wsgiserver", ".", "CherryPyWSGIServer", ".", "ssl_adapter", "=", "BuiltinSSLAdapter", "(", "ssl_certificate", ",", "ssl_private_key", ",", "ssl_certificate_chain", ")", "protocol", "=", "\"https\"", "_logger", ".", "info", "(", "\"SSL / HTTPS enabled.\"", ")", "_logger", ".", "info", "(", "\"Running {}\"", ".", "format", "(", "server_name", ")", ")", "_logger", ".", "info", "(", "\"Serving on {}://{}:{} ...\"", ".", "format", "(", "protocol", ",", "config", "[", "\"host\"", "]", ",", "config", "[", "\"port\"", "]", ")", ")", "server_args", "=", "{", "\"bind_addr\"", ":", "(", "config", "[", "\"host\"", "]", ",", "config", "[", "\"port\"", "]", ")", ",", "\"wsgi_app\"", ":", "app", ",", "\"server_name\"", ":", "server_name", ",", "}", "# Override or add custom args", "server_args", ".", "update", "(", "config", ".", "get", "(", "\"server_args\"", ",", "{", "}", ")", ")", "server", "=", "wsgiserver", ".", "CherryPyWSGIServer", "(", "*", "*", "server_args", ")", "# If the caller passed a startup event, monkey patch the server to set it", "# when the request handler loop is entered", "startup_event", "=", "config", ".", "get", "(", "\"startup_event\"", ")", "if", "startup_event", ":", "def", "_patched_tick", "(", ")", ":", "server", ".", "tick", "=", "org_tick", "# undo the monkey patch", "org_tick", "(", ")", "_logger", ".", "info", "(", "\"CherryPyWSGIServer is ready\"", ")", "startup_event", ".", "set", "(", ")", "org_tick", "=", "server", ".", "tick", "server", ".", "tick", "=", 
"_patched_tick", "try", ":", "server", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "_logger", ".", "warning", "(", "\"Caught Ctrl-C, shutting down...\"", ")", "finally", ":", "server", ".", "stop", "(", ")", "return" ]
35.142857
23.77381
def update(self): """ Redraw the figure to show changed data. This is automatically called after `start()` was run. """ assert threading.current_thread() == threading.main_thread() for axis, line, interface in self.interfaces: line.set_xdata(interface.xdata) line.set_ydata(interface.ydata) axis.set_xlim(0, interface.width or 1, emit=False) axis.set_ylim(0, interface.height or 1, emit=False) self.figure.canvas.draw()
[ "def", "update", "(", "self", ")", ":", "assert", "threading", ".", "current_thread", "(", ")", "==", "threading", ".", "main_thread", "(", ")", "for", "axis", ",", "line", ",", "interface", "in", "self", ".", "interfaces", ":", "line", ".", "set_xdata", "(", "interface", ".", "xdata", ")", "line", ".", "set_ydata", "(", "interface", ".", "ydata", ")", "axis", ".", "set_xlim", "(", "0", ",", "interface", ".", "width", "or", "1", ",", "emit", "=", "False", ")", "axis", ".", "set_ylim", "(", "0", ",", "interface", ".", "height", "or", "1", ",", "emit", "=", "False", ")", "self", ".", "figure", ".", "canvas", ".", "draw", "(", ")" ]
42.666667
13.833333
def update(self, distributor_id, grade_id, session, trade_type=None): '''taobao.fenxiao.cooperation.update Update the cooperation grade A supplier updates the grade of a cooperating distributor.''' request = TOPRequest('taobao.fenxiao.cooperation.update') request['distributor_id'] = distributor_id request['grade_id'] = grade_id if trade_type!=None: request['trade_type'] = trade_type self.create(self.execute(request, session), fields=['is_success']) return self.is_success
[ "def", "update", "(", "self", ",", "distributor_id", ",", "grade_id", ",", "session", ",", "trade_type", "=", "None", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.fenxiao.cooperation.update'", ")", "request", "[", "'distributor_id'", "]", "=", "distributor_id", "request", "[", "'grade_id'", "]", "=", "grade_id", "if", "trade_type", "!=", "None", ":", "request", "[", "'trade_type'", "]", "=", "trade_type", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ",", "session", ")", ",", "fields", "=", "[", "'is_success'", "]", ")", "return", "self", ".", "is_success" ]
47.4
19.4
def get_lib_volume_mounts(base_lib_name, assembled_specs): """ Returns a list of the formatted volume specs for a lib""" volumes = [_get_lib_repo_volume_mount(assembled_specs['libs'][base_lib_name])] volumes.append(get_command_files_volume_mount(base_lib_name, test=True)) for lib_name in assembled_specs['libs'][base_lib_name]['depends']['libs']: lib_spec = assembled_specs['libs'][lib_name] volumes.append(_get_lib_repo_volume_mount(lib_spec)) return volumes
[ "def", "get_lib_volume_mounts", "(", "base_lib_name", ",", "assembled_specs", ")", ":", "volumes", "=", "[", "_get_lib_repo_volume_mount", "(", "assembled_specs", "[", "'libs'", "]", "[", "base_lib_name", "]", ")", "]", "volumes", ".", "append", "(", "get_command_files_volume_mount", "(", "base_lib_name", ",", "test", "=", "True", ")", ")", "for", "lib_name", "in", "assembled_specs", "[", "'libs'", "]", "[", "base_lib_name", "]", "[", "'depends'", "]", "[", "'libs'", "]", ":", "lib_spec", "=", "assembled_specs", "[", "'libs'", "]", "[", "lib_name", "]", "volumes", ".", "append", "(", "_get_lib_repo_volume_mount", "(", "lib_spec", ")", ")", "return", "volumes" ]
61.125
23.5
def from_json(cls, json, _reader=blobstore.BlobReader): """Creates an instance of the InputReader for the given input shard state. Args: json: The InputReader state as a dict-like object. _reader: For dependency injection. Returns: An instance of the InputReader configured using the values of json. """ return cls(json[cls.BLOB_KEY_PARAM], json[cls.START_FILE_INDEX_PARAM], json[cls.END_FILE_INDEX_PARAM], json[cls.OFFSET_PARAM], _reader)
[ "def", "from_json", "(", "cls", ",", "json", ",", "_reader", "=", "blobstore", ".", "BlobReader", ")", ":", "return", "cls", "(", "json", "[", "cls", ".", "BLOB_KEY_PARAM", "]", ",", "json", "[", "cls", ".", "START_FILE_INDEX_PARAM", "]", ",", "json", "[", "cls", ".", "END_FILE_INDEX_PARAM", "]", ",", "json", "[", "cls", ".", "OFFSET_PARAM", "]", ",", "_reader", ")" ]
35
15.733333
def edit(self, id): """ Edit a VRF """ c.action = 'edit' c.edit_vrf = VRF.get(int(id)) # Did we have any action passed to us? if 'action' in request.params: if request.params['action'] == 'edit': if request.params['rt'].strip() == '': c.edit_vrf.rt = None else: c.edit_vrf.rt = request.params['rt'].strip() if request.params['name'].strip() == '': c.edit_vrf.name = None else: c.edit_vrf.name = request.params['name'].strip() c.edit_vrf.description = request.params['description'] c.edit_vrf.save() return render('/vrf_edit.html')
[ "def", "edit", "(", "self", ",", "id", ")", ":", "c", ".", "action", "=", "'edit'", "c", ".", "edit_vrf", "=", "VRF", ".", "get", "(", "int", "(", "id", ")", ")", "# Did we have any action passed to us?", "if", "'action'", "in", "request", ".", "params", ":", "if", "request", ".", "params", "[", "'action'", "]", "==", "'edit'", ":", "if", "request", ".", "params", "[", "'rt'", "]", ".", "strip", "(", ")", "==", "''", ":", "c", ".", "edit_vrf", ".", "rt", "=", "None", "else", ":", "c", ".", "edit_vrf", ".", "rt", "=", "request", ".", "params", "[", "'rt'", "]", ".", "strip", "(", ")", "if", "request", ".", "params", "[", "'name'", "]", ".", "strip", "(", ")", "==", "''", ":", "c", ".", "edit_vrf", ".", "name", "=", "None", "else", ":", "c", ".", "edit_vrf", ".", "name", "=", "request", ".", "params", "[", "'name'", "]", ".", "strip", "(", ")", "c", ".", "edit_vrf", ".", "description", "=", "request", ".", "params", "[", "'description'", "]", "c", ".", "edit_vrf", ".", "save", "(", ")", "return", "render", "(", "'/vrf_edit.html'", ")" ]
31.5
17.375
def post(self, id=None): """ Create a new object resource :json: Object to create :returns: json string representation :rtype: JSON """ try: try: base_object = json_util.loads(self.request.body) except TypeError: base_object = json_util.loads(self.request.body.decode()) #assert not hasattr(base_object, "_id") toa = self.request.headers.get("Caesium-TOA", None) if toa: # Async create flow stack = AsyncSchedulableDocumentRevisionStack(self.client.collection_name, self.settings) revision_id = yield stack.push(base_object, toa=int(toa), meta=self._get_meta_data()) resource = yield stack.preview(revision_id) if isinstance(revision_id, str): self.set_header("Caesium-TOA", toa) self.return_resource(resource.get("snapshot")) else: self.raise_error(404, "Revision not scheduled for object: %s" % id) else: id = yield self.client.insert(base_object) base_object = yield self.client.find_one_by_id(id) self.return_resource(base_object) except ValidationError as vex: self.logger.error("%s validation error" % self.object_name, vex) self.raise_error(400, "Your %s cannot be created because it is missing required fields, see docs" % self.object_name) except ValueError as ex: self.raise_error(400, "Invalid JSON Body, check formatting. %s" % ex[0]) except Exception as ex: self.logger.error(ex) self.raise_error()
[ "def", "post", "(", "self", ",", "id", "=", "None", ")", ":", "try", ":", "try", ":", "base_object", "=", "json_util", ".", "loads", "(", "self", ".", "request", ".", "body", ")", "except", "TypeError", ":", "base_object", "=", "json_util", ".", "loads", "(", "self", ".", "request", ".", "body", ".", "decode", "(", ")", ")", "#assert not hasattr(base_object, \"_id\")", "toa", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "\"Caesium-TOA\"", ",", "None", ")", "if", "toa", ":", "# Async create flow", "stack", "=", "AsyncSchedulableDocumentRevisionStack", "(", "self", ".", "client", ".", "collection_name", ",", "self", ".", "settings", ")", "revision_id", "=", "yield", "stack", ".", "push", "(", "base_object", ",", "toa", "=", "int", "(", "toa", ")", ",", "meta", "=", "self", ".", "_get_meta_data", "(", ")", ")", "resource", "=", "yield", "stack", ".", "preview", "(", "revision_id", ")", "if", "isinstance", "(", "revision_id", ",", "str", ")", ":", "self", ".", "set_header", "(", "\"Caesium-TOA\"", ",", "toa", ")", "self", ".", "return_resource", "(", "resource", ".", "get", "(", "\"snapshot\"", ")", ")", "else", ":", "self", ".", "raise_error", "(", "404", ",", "\"Revision not scheduled for object: %s\"", "%", "id", ")", "else", ":", "id", "=", "yield", "self", ".", "client", ".", "insert", "(", "base_object", ")", "base_object", "=", "yield", "self", ".", "client", ".", "find_one_by_id", "(", "id", ")", "self", ".", "return_resource", "(", "base_object", ")", "except", "ValidationError", "as", "vex", ":", "self", ".", "logger", ".", "error", "(", "\"%s validation error\"", "%", "self", ".", "object_name", ",", "vex", ")", "self", ".", "raise_error", "(", "400", ",", "\"Your %s cannot be created because it is missing required fields, see docs\"", "%", "self", ".", "object_name", ")", "except", "ValueError", "as", "ex", ":", "self", ".", "raise_error", "(", "400", ",", "\"Invalid JSON Body, check formatting. %s\"", "%", "ex", "[", "0", "]", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "error", "(", "ex", ")", "self", ".", "raise_error", "(", ")" ]
35.916667
26.125
def match(self, expr) -> MatchDict: """Match the given expression (recursively) Returns a :class:`MatchDict` instance that maps any wildcard names to the expressions that the corresponding wildcard pattern matches. For (sub-)pattern that have a `mode` attribute other than `Pattern.single`, the wildcard name is mapped to a list of all matched expression. If the match is successful, the resulting :class:`MatchDict` instance will evaluate to True in a boolean context. If the match is not successful, it will evaluate as False, and the reason for failure is available in the `reason` attribute of the :class:`MatchDict` object. """ res = MatchDict() if self._has_non_single_arg: if self._non_single_arg_on_left: res.merge_lists = 1 else: res.merge_lists = -1 if self.head is not None: if not isinstance(expr, self.head): res.reason = ("%s is not an instance of %s" % (repr(expr), self._repr_head())) res.success = False return res for i_cond, condition in enumerate(self.conditions): if not condition(expr): res.reason = ("%s does not meet condition %d" % (repr(expr), i_cond+1)) res.success = False return res try: if self.args is not None: arg_pattern = self.extended_arg_patterns() for arg in self._arg_iterator(expr.args): current_arg_pattern = next(arg_pattern) res.update(match_pattern(current_arg_pattern, arg)) if not res.success: return res # ensure that we have matched all arg patterns try: last_arg_pattern = next(arg_pattern) res.update(self._check_last_arg_pattern( current_arg_pattern, last_arg_pattern)) except StopIteration: pass # expected, if current_arg_pattern was the last one if self.kwargs is not None: for key, arg_pattern in self.kwargs.items(): res.update(match_pattern(arg_pattern, expr.kwargs[key])) if not res.success: return res except AttributeError as exc_info: res.reason = ("%s is a scalar, not an Expression: %s" % (repr(expr), str(exc_info))) res.success = False except ValueError as exc_info: res.reason = "%s: %s" % (repr(expr), str(exc_info)) res.success = False except StopIteration: res.reason = ("%s has an too many positional arguments" % repr(expr)) res.success = False except KeyError as exc_info: if "has already been set" in str(exc_info): res.reason = "Double wildcard: %s" % str(exc_info) else: res.reason = ("%s has no keyword argument %s" % (repr(expr), str(exc_info))) res.success = False if res.success: if self.wc_name is not None: try: if self.mode > self.single: res[self.wc_name] = [expr, ] else: res[self.wc_name] = expr except KeyError as exc_info: res.reason = "Double wildcard: %s" % str(exc_info) res.success = False return res
[ "def", "match", "(", "self", ",", "expr", ")", "->", "MatchDict", ":", "res", "=", "MatchDict", "(", ")", "if", "self", ".", "_has_non_single_arg", ":", "if", "self", ".", "_non_single_arg_on_left", ":", "res", ".", "merge_lists", "=", "1", "else", ":", "res", ".", "merge_lists", "=", "-", "1", "if", "self", ".", "head", "is", "not", "None", ":", "if", "not", "isinstance", "(", "expr", ",", "self", ".", "head", ")", ":", "res", ".", "reason", "=", "(", "\"%s is not an instance of %s\"", "%", "(", "repr", "(", "expr", ")", ",", "self", ".", "_repr_head", "(", ")", ")", ")", "res", ".", "success", "=", "False", "return", "res", "for", "i_cond", ",", "condition", "in", "enumerate", "(", "self", ".", "conditions", ")", ":", "if", "not", "condition", "(", "expr", ")", ":", "res", ".", "reason", "=", "(", "\"%s does not meet condition %d\"", "%", "(", "repr", "(", "expr", ")", ",", "i_cond", "+", "1", ")", ")", "res", ".", "success", "=", "False", "return", "res", "try", ":", "if", "self", ".", "args", "is", "not", "None", ":", "arg_pattern", "=", "self", ".", "extended_arg_patterns", "(", ")", "for", "arg", "in", "self", ".", "_arg_iterator", "(", "expr", ".", "args", ")", ":", "current_arg_pattern", "=", "next", "(", "arg_pattern", ")", "res", ".", "update", "(", "match_pattern", "(", "current_arg_pattern", ",", "arg", ")", ")", "if", "not", "res", ".", "success", ":", "return", "res", "# ensure that we have matched all arg patterns", "try", ":", "last_arg_pattern", "=", "next", "(", "arg_pattern", ")", "res", ".", "update", "(", "self", ".", "_check_last_arg_pattern", "(", "current_arg_pattern", ",", "last_arg_pattern", ")", ")", "except", "StopIteration", ":", "pass", "# expected, if current_arg_pattern was the last one", "if", "self", ".", "kwargs", "is", "not", "None", ":", "for", "key", ",", "arg_pattern", "in", "self", ".", "kwargs", ".", "items", "(", ")", ":", "res", ".", "update", "(", "match_pattern", "(", "arg_pattern", ",", "expr", ".", "kwargs", "[", "key", "]", ")", ")", "if", "not", "res", ".", "success", ":", "return", "res", "except", "AttributeError", "as", "exc_info", ":", "res", ".", "reason", "=", "(", "\"%s is a scalar, not an Expression: %s\"", "%", "(", "repr", "(", "expr", ")", ",", "str", "(", "exc_info", ")", ")", ")", "res", ".", "success", "=", "False", "except", "ValueError", "as", "exc_info", ":", "res", ".", "reason", "=", "\"%s: %s\"", "%", "(", "repr", "(", "expr", ")", ",", "str", "(", "exc_info", ")", ")", "res", ".", "success", "=", "False", "except", "StopIteration", ":", "res", ".", "reason", "=", "(", "\"%s has an too many positional arguments\"", "%", "repr", "(", "expr", ")", ")", "res", ".", "success", "=", "False", "except", "KeyError", "as", "exc_info", ":", "if", "\"has already been set\"", "in", "str", "(", "exc_info", ")", ":", "res", ".", "reason", "=", "\"Double wildcard: %s\"", "%", "str", "(", "exc_info", ")", "else", ":", "res", ".", "reason", "=", "(", "\"%s has no keyword argument %s\"", "%", "(", "repr", "(", "expr", ")", ",", "str", "(", "exc_info", ")", ")", ")", "res", ".", "success", "=", "False", "if", "res", ".", "success", ":", "if", "self", ".", "wc_name", "is", "not", "None", ":", "try", ":", "if", "self", ".", "mode", ">", "self", ".", "single", ":", "res", "[", "self", ".", "wc_name", "]", "=", "[", "expr", ",", "]", "else", ":", "res", "[", "self", ".", "wc_name", "]", "=", "expr", "except", "KeyError", "as", "exc_info", ":", "res", ".", "reason", "=", "\"Double wildcard: %s\"", "%", "str", "(", "exc_info", ")", "res", ".", 
"success", "=", "False", "return", "res" ]
45.9625
16.3625
def validate(self, body, signature): """Check signature. https://devdocs.line.me/en/#webhook-authentication :param str body: Request body (as text) :param str signature: X-Line-Signature value (as text) :rtype: bool :return: result """ gen_signature = hmac.new( self.channel_secret, body.encode('utf-8'), hashlib.sha256 ).digest() return compare_digest( signature.encode('utf-8'), base64.b64encode(gen_signature) )
[ "def", "validate", "(", "self", ",", "body", ",", "signature", ")", ":", "gen_signature", "=", "hmac", ".", "new", "(", "self", ".", "channel_secret", ",", "body", ".", "encode", "(", "'utf-8'", ")", ",", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")", "return", "compare_digest", "(", "signature", ".", "encode", "(", "'utf-8'", ")", ",", "base64", ".", "b64encode", "(", "gen_signature", ")", ")" ]
28.368421
17.947368
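A minimal usage sketch for the signature check above. The channel secret and body are made-up values; the expected signature is derived the same way the LINE platform computes it (HMAC-SHA256 over the raw body, base64-encoded), so validate() should return True for this pair:

import base64
import hashlib
import hmac

channel_secret = b'my-channel-secret'   # hypothetical secret
body = '{"events": []}'                 # raw webhook body as text

# Reproduce the signature the platform would send in X-Line-Signature
signature = base64.b64encode(
    hmac.new(channel_secret, body.encode('utf-8'), hashlib.sha256).digest()
).decode('utf-8')

# validator.validate(body, signature) -> True for a matching pair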
def gnmt_print(*args, **kwargs): """ Wrapper for MLPerf compliance logging calls. All arguments but 'sync' are passed to mlperf_log.gnmt_print function. If 'sync' is set to True then the wrapper will synchronize all distributed workers. 'sync' should be set to True for all compliance tags that require accurate timing (RUN_START, RUN_STOP etc.) """ if kwargs.pop('sync'): barrier() if get_rank() == 0: kwargs['stack_offset'] = 2 mlperf_log.gnmt_print(*args, **kwargs)
[ "def", "gnmt_print", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "pop", "(", "'sync'", ")", ":", "barrier", "(", ")", "if", "get_rank", "(", ")", "==", "0", ":", "kwargs", "[", "'stack_offset'", "]", "=", "2", "mlperf_log", ".", "gnmt_print", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
39.692308
15.230769
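A hedged call sketch; the tag names below assume the mlperf_compliance package's mlperf_log constants, and every distributed worker must reach the sync=True call since it triggers a barrier:

# Timing-sensitive tag: all ranks synchronize, rank 0 logs.
gnmt_print(key=mlperf_log.RUN_START, sync=True)

# Informational tag: no barrier needed.
gnmt_print(key=mlperf_log.INPUT_BATCH_SIZE, value=128, sync=False)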
def parseBranches(self, descendants):
    """
    Parse top level of latex

    :param list descendants: list of source objects
    :return: list of filtered TreeOfContents objects

    >>> toc = TOC.fromLatex(r'\section{h1}\subsection{subh1}\section{h2}\
    ... \subsection{subh2}')
    >>> toc.parseTopDepth(toc.descendants)
    1
    >>> toc.parseBranches(toc.descendants)
    [h1, h2]
    >>> len(toc.branches)
    2
    >>> len(toc.section.branches)
    1
    """
    i, branches = self.parseTopDepth(descendants), []
    for descendant in descendants:
        if self.getHeadingLevel(descendant, self.hierarchy) == i:
            branches.append({'source': descendant})
        if self.getHeadingLevel(descendant, self.hierarchy) > i \
            and branches:
            branches[-1].setdefault('descendants', []).append(descendant)
    return [TOC(str(descendant), depth=i, hierarchy=self.hierarchy,
        **branch) for branch in branches]
[ "def", "parseBranches", "(", "self", ",", "descendants", ")", ":", "i", ",", "branches", "=", "self", ".", "parseTopDepth", "(", "descendants", ")", ",", "[", "]", "for", "descendant", "in", "descendants", ":", "if", "self", ".", "getHeadingLevel", "(", "descendant", ",", "self", ".", "hierarchy", ")", "==", "i", ":", "branches", ".", "append", "(", "{", "'source'", ":", "descendant", "}", ")", "if", "self", ".", "getHeadingLevel", "(", "descendant", ",", "self", ".", "hierarchy", ")", ">", "i", "and", "branches", ":", "branches", "[", "-", "1", "]", ".", "setdefault", "(", "'descendants'", ",", "[", "]", ")", ".", "append", "(", "descendant", ")", "return", "[", "TOC", "(", "str", "(", "descendant", ")", ",", "depth", "=", "i", ",", "hierarchy", "=", "self", ".", "hierarchy", ",", "*", "*", "branch", ")", "for", "branch", "in", "branches", "]" ]
39.192308
17.038462
def create_wiki(self, wiki_create_params, project=None): """CreateWiki. Creates the wiki resource. :param :class:`<WikiCreateParametersV2> <azure.devops.v5_0.wiki.models.WikiCreateParametersV2>` wiki_create_params: Parameters for the wiki creation. :param str project: Project ID or project name :rtype: :class:`<WikiV2> <azure.devops.v5_0.wiki.models.WikiV2>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(wiki_create_params, 'WikiCreateParametersV2') response = self._send(http_method='POST', location_id='288d122c-dbd4-451d-aa5f-7dbbba070728', version='5.0', route_values=route_values, content=content) return self._deserialize('WikiV2', response)
[ "def", "create_wiki", "(", "self", ",", "wiki_create_params", ",", "project", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "wiki_create_params", ",", "'WikiCreateParametersV2'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'288d122c-dbd4-451d-aa5f-7dbbba070728'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'WikiV2'", ",", "response", ")" ]
56.352941
22.647059
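A call sketch, assuming the azure-devops Python package's usual Connection/client layout; the organization URL, token, and project name are placeholders:

from azure.devops.connection import Connection
from azure.devops.v5_0.wiki.models import WikiCreateParametersV2
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication('', 'MY_PERSONAL_ACCESS_TOKEN')  # placeholder token
connection = Connection(base_url='https://dev.azure.com/myorg', creds=credentials)
wiki_client = connection.clients.get_wiki_client()

params = WikiCreateParametersV2(name='team-wiki', project_id='MyProject',
                                type='projectWiki')
wiki = wiki_client.create_wiki(params, project='MyProject')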
def args_match(m_args, m_kwargs, default, *args, **kwargs):
    """
    :param m_args: values to match args against
    :param m_kwargs: values to match kwargs against
    :param args: args to match
    :param kwargs: kwargs to match
    """
    if len(m_args) > len(args):
        return False

    for m_arg, arg in zip(m_args, args):
        matches = arg_match(m_arg, arg, eq)
        if not matches or matches is InvalidArg:
            return False  # bail out

    if m_kwargs:
        for name, m_arg in m_kwargs.items():
            name, comparator = arg_comparitor(name)
            arg = kwargs.get(name)

            if not arg_match(m_arg, arg, comparator, default):
                return False  # bail out

    return True
[ "def", "args_match", "(", "m_args", ",", "m_kwargs", ",", "default", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "m_args", ")", ">", "len", "(", "args", ")", ":", "return", "False", "for", "m_arg", ",", "arg", "in", "zip", "(", "m_args", ",", "args", ")", ":", "matches", "=", "arg_match", "(", "m_arg", ",", "arg", ",", "eq", ")", "if", "not", "matches", "or", "matches", "is", "InvalidArg", ":", "return", "False", "# bail out", "if", "m_kwargs", ":", "for", "name", ",", "m_arg", "in", "m_kwargs", ".", "items", "(", ")", ":", "name", ",", "comparator", "=", "arg_comparitor", "(", "name", ")", "arg", "=", "kwargs", ".", "get", "(", "name", ")", "if", "not", "arg_match", "(", "m_arg", ",", "arg", ",", "comparator", ",", "default", ")", ":", "return", "False", "# bail out", "return", "True" ]
33.952381
11.190476
def get_histograms(self, request):
    """ Get histograms of requested query from log service.
    Unsuccessful operation will cause a LogException.

    :type request: GetHistogramsRequest
    :param request: the GetHistograms request parameters class.

    :return: GetHistogramsResponse

    :raise: LogException
    """
    headers = {}
    params = {}
    if request.get_topic() is not None:
        params['topic'] = request.get_topic()
    if request.get_from() is not None:
        params['from'] = request.get_from()
    if request.get_to() is not None:
        params['to'] = request.get_to()
    if request.get_query() is not None:
        params['query'] = request.get_query()
    params['type'] = 'histogram'
    logstore = request.get_logstore()
    project = request.get_project()
    resource = "/logstores/" + logstore
    (resp, header) = self._send("GET", project, None, resource, params, headers)
    return GetHistogramsResponse(resp, header)
[ "def", "get_histograms", "(", "self", ",", "request", ")", ":", "headers", "=", "{", "}", "params", "=", "{", "}", "if", "request", ".", "get_topic", "(", ")", "is", "not", "None", ":", "params", "[", "'topic'", "]", "=", "request", ".", "get_topic", "(", ")", "if", "request", ".", "get_from", "(", ")", "is", "not", "None", ":", "params", "[", "'from'", "]", "=", "request", ".", "get_from", "(", ")", "if", "request", ".", "get_to", "(", ")", "is", "not", "None", ":", "params", "[", "'to'", "]", "=", "request", ".", "get_to", "(", ")", "if", "request", ".", "get_query", "(", ")", "is", "not", "None", ":", "params", "[", "'query'", "]", "=", "request", ".", "get_query", "(", ")", "params", "[", "'type'", "]", "=", "'histogram'", "logstore", "=", "request", ".", "get_logstore", "(", ")", "project", "=", "request", ".", "get_project", "(", ")", "resource", "=", "\"/logstores/\"", "+", "logstore", "(", "resp", ",", "header", ")", "=", "self", ".", "_send", "(", "\"GET\"", ",", "project", ",", "None", ",", "resource", ",", "params", ",", "headers", ")", "return", "GetHistogramsResponse", "(", "resp", ",", "header", ")" ]
40
11.407407
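A usage sketch against the Aliyun Log Service SDK, assuming its usual LogClient and GetHistogramsRequest constructor names; endpoint, keys, and the time range are placeholders:

from aliyun.log import LogClient, GetHistogramsRequest

client = LogClient('cn-hangzhou.log.aliyuncs.com',      # placeholder endpoint
                   'ACCESS_KEY_ID', 'ACCESS_KEY_SECRET')
req = GetHistogramsRequest(project='my-project', logstore='my-logstore',
                           fromTime=1546300800, toTime=1546304400,
                           topic='', query='status: 500')
res = client.get_histograms(req)
print(res.get_total_count())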
def join(self, href): """Given a href relative to this link, return the :class:`Link` of the absolute url. :param href: A string-like path relative to this link. """ return self.wrap(urlparse.urljoin(self.url, href))
[ "def", "join", "(", "self", ",", "href", ")", ":", "return", "self", ".", "wrap", "(", "urlparse", ".", "urljoin", "(", "self", ".", "url", ",", "href", ")", ")" ]
38
15.166667
def list_present(name, acl_type, acl_names=None, perms='', recurse=False, force=False):
    '''
    Ensure a Linux ACL list is present

    Takes a list of acl names and adds them to the given path

    name
        The acl path

    acl_type
        The type of the ACL; it can be 'user' or 'group'

    acl_names
        The list of users or groups

    perms
        Set the permissions, e.g. rwx

    recurse
        Set the permissions recursively in the path

    force
        Wipe out old permissions and ensure only the new permissions are set
    '''
    if acl_names is None:
        acl_names = []
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0}
    _octal_perms = sum([_octal.get(i, i) for i in perms])
    if not os.path.exists(name):
        ret['comment'] = '{0} does not exist'.format(name)
        ret['result'] = False
        return ret

    __current_perms = __salt__['acl.getfacl'](name)

    if acl_type.startswith(('d:', 'default:')):
        _acl_type = ':'.join(acl_type.split(':')[1:])
        _current_perms = __current_perms[name].get('defaults', {})
        _default = True
    else:
        _acl_type = acl_type
        _current_perms = __current_perms[name]
        _default = False

    _origin_group = _current_perms.get('comment', {}).get('group', None)
    _origin_owner = _current_perms.get('comment', {}).get('owner', None)

    _current_acl_types = []
    diff_perms = False
    for key in _current_perms[acl_type]:
        for current_acl_name in key.keys():
            _current_acl_types.append(current_acl_name.encode('utf-8'))
            diff_perms = _octal_perms == key[current_acl_name]['octal']
    if acl_type == 'user':
        try:
            _current_acl_types.remove(_origin_owner)
        except ValueError:
            pass
    else:
        try:
            _current_acl_types.remove(_origin_group)
        except ValueError:
            pass
    diff_acls = set(_current_acl_types) ^ set(acl_names)
    if not diff_acls and diff_perms and not force:
        ret = {'name': name,
               'result': True,
               'changes': {},
               'comment': 'Permissions and {}s are in the desired state'.format(acl_type)}
        return ret
    # The getfacl execution module lists default with empty names as being
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_name is empty, we really want to search for root
    # but still uses '' for other
    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_name is empty.
if acl_names == '': _search_names = __current_perms[name].get('comment').get(_acl_type, '') else: _search_names = acl_names if _current_perms.get(_acl_type, None) or _default: try: users = {} for i in _current_perms[_acl_type]: if i and next(six.iterkeys(i)) in _search_names: users.update(i) except (AttributeError, KeyError): users = None if users: changes = {} for count, search_name in enumerate(_search_names): if search_name in users: if users[search_name]['octal'] == sum([_octal.get(i, i) for i in perms]): ret['comment'] = 'Permissions are in the desired state' else: changes.update({'new': {'acl_name': ', '.join(acl_names), 'acl_type': acl_type, 'perms': _octal_perms}, 'old': {'acl_name': ', '.join(acl_names), 'acl_type': acl_type, 'perms': six.text_type(users[search_name]['octal'])}}) if __opts__['test']: ret.update({'comment': 'Updated permissions will be applied for ' '{0}: {1} -> {2}'.format( acl_names, six.text_type(users[search_name]['octal']), perms), 'result': None, 'changes': changes}) return ret try: if force: __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True) for acl_name in acl_names: __salt__['acl.modfacl'](acl_type, acl_name, perms, name, recursive=recurse, raise_err=True) ret.update({'comment': 'Updated permissions for ' '{0}'.format(acl_names), 'result': True, 'changes': changes}) except CommandExecutionError as exc: ret.update({'comment': 'Error updating permissions for ' '{0}: {1}'.format(acl_names, exc.strerror), 'result': False}) else: changes = {'new': {'acl_name': ', '.join(acl_names), 'acl_type': acl_type, 'perms': perms}} if __opts__['test']: ret.update({'comment': 'New permissions will be applied for ' '{0}: {1}'.format(acl_names, perms), 'result': None, 'changes': changes}) ret['result'] = None return ret try: if force: __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True) for acl_name in acl_names: __salt__['acl.modfacl'](acl_type, acl_name, perms, name, recursive=recurse, raise_err=True) ret.update({'comment': 'Applied new permissions for ' '{0}'.format(', '.join(acl_names)), 'result': True, 'changes': changes}) except CommandExecutionError as exc: ret.update({'comment': 'Error updating permissions for {0}: ' '{1}'.format(acl_names, exc.strerror), 'result': False}) else: changes = {'new': {'acl_name': ', '.join(acl_names), 'acl_type': acl_type, 'perms': perms}} if __opts__['test']: ret.update({'comment': 'New permissions will be applied for ' '{0}: {1}'.format(acl_names, perms), 'result': None, 'changes': changes}) ret['result'] = None return ret try: if force: __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True) for acl_name in acl_names: __salt__['acl.modfacl'](acl_type, acl_name, perms, name, recursive=recurse, raise_err=True) ret.update({'comment': 'Applied new permissions for ' '{0}'.format(', '.join(acl_names)), 'result': True, 'changes': changes}) except CommandExecutionError as exc: ret.update({'comment': 'Error updating permissions for {0}: ' '{1}'.format(acl_names, exc.strerror), 'result': False}) else: ret['comment'] = 'ACL Type does not exist' ret['result'] = False return ret
[ "def", "list_present", "(", "name", ",", "acl_type", ",", "acl_names", "=", "None", ",", "perms", "=", "''", ",", "recurse", "=", "False", ",", "force", "=", "False", ")", ":", "if", "acl_names", "is", "None", ":", "acl_names", "=", "[", "]", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "_octal", "=", "{", "'r'", ":", "4", ",", "'w'", ":", "2", ",", "'x'", ":", "1", ",", "'-'", ":", "0", "}", "_octal_perms", "=", "sum", "(", "[", "_octal", ".", "get", "(", "i", ",", "i", ")", "for", "i", "in", "perms", "]", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "name", ")", ":", "ret", "[", "'comment'", "]", "=", "'{0} does not exist'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "__current_perms", "=", "__salt__", "[", "'acl.getfacl'", "]", "(", "name", ")", "if", "acl_type", ".", "startswith", "(", "(", "'d:'", ",", "'default:'", ")", ")", ":", "_acl_type", "=", "':'", ".", "join", "(", "acl_type", ".", "split", "(", "':'", ")", "[", "1", ":", "]", ")", "_current_perms", "=", "__current_perms", "[", "name", "]", ".", "get", "(", "'defaults'", ",", "{", "}", ")", "_default", "=", "True", "else", ":", "_acl_type", "=", "acl_type", "_current_perms", "=", "__current_perms", "[", "name", "]", "_default", "=", "False", "_origin_group", "=", "_current_perms", ".", "get", "(", "'comment'", ",", "{", "}", ")", ".", "get", "(", "'group'", ",", "None", ")", "_origin_owner", "=", "_current_perms", ".", "get", "(", "'comment'", ",", "{", "}", ")", ".", "get", "(", "'owner'", ",", "None", ")", "_current_acl_types", "=", "[", "]", "diff_perms", "=", "False", "for", "key", "in", "_current_perms", "[", "acl_type", "]", ":", "for", "current_acl_name", "in", "key", ".", "keys", "(", ")", ":", "_current_acl_types", ".", "append", "(", "current_acl_name", ".", "encode", "(", "'utf-8'", ")", ")", "diff_perms", "=", "_octal_perms", "==", "key", "[", "current_acl_name", "]", "[", "'octal'", "]", "if", "acl_type", "==", "'user'", ":", "try", ":", "_current_acl_types", ".", "remove", "(", "_origin_owner", ")", "except", "ValueError", ":", "pass", "else", ":", "try", ":", "_current_acl_types", ".", "remove", "(", "_origin_group", ")", "except", "ValueError", ":", "pass", "diff_acls", "=", "set", "(", "_current_acl_types", ")", "^", "set", "(", "acl_names", ")", "if", "not", "diff_acls", "and", "diff_perms", "and", "not", "force", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "'Permissions and {}s are in the desired state'", ".", "format", "(", "acl_type", ")", "}", "return", "ret", "# The getfacl execution module lists default with empty names as being", "# applied to the user/group that owns the file, e.g.,", "# default:group::rwx would be listed as default:group:root:rwx", "# In this case, if acl_name is empty, we really want to search for root", "# but still uses '' for other", "# We search through the dictionary getfacl returns for the owner of the", "# file if acl_name is empty.", "if", "acl_names", "==", "''", ":", "_search_names", "=", "__current_perms", "[", "name", "]", ".", "get", "(", "'comment'", ")", ".", "get", "(", "_acl_type", ",", "''", ")", "else", ":", "_search_names", "=", "acl_names", "if", "_current_perms", ".", "get", "(", "_acl_type", ",", "None", ")", "or", "_default", ":", "try", ":", "users", "=", "{", "}", "for", "i", "in", "_current_perms", "[", "_acl_type", 
"]", ":", "if", "i", "and", "next", "(", "six", ".", "iterkeys", "(", "i", ")", ")", "in", "_search_names", ":", "users", ".", "update", "(", "i", ")", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "users", "=", "None", "if", "users", ":", "changes", "=", "{", "}", "for", "count", ",", "search_name", "in", "enumerate", "(", "_search_names", ")", ":", "if", "search_name", "in", "users", ":", "if", "users", "[", "search_name", "]", "[", "'octal'", "]", "==", "sum", "(", "[", "_octal", ".", "get", "(", "i", ",", "i", ")", "for", "i", "in", "perms", "]", ")", ":", "ret", "[", "'comment'", "]", "=", "'Permissions are in the desired state'", "else", ":", "changes", ".", "update", "(", "{", "'new'", ":", "{", "'acl_name'", ":", "', '", ".", "join", "(", "acl_names", ")", ",", "'acl_type'", ":", "acl_type", ",", "'perms'", ":", "_octal_perms", "}", ",", "'old'", ":", "{", "'acl_name'", ":", "', '", ".", "join", "(", "acl_names", ")", ",", "'acl_type'", ":", "acl_type", ",", "'perms'", ":", "six", ".", "text_type", "(", "users", "[", "search_name", "]", "[", "'octal'", "]", ")", "}", "}", ")", "if", "__opts__", "[", "'test'", "]", ":", "ret", ".", "update", "(", "{", "'comment'", ":", "'Updated permissions will be applied for '", "'{0}: {1} -> {2}'", ".", "format", "(", "acl_names", ",", "six", ".", "text_type", "(", "users", "[", "search_name", "]", "[", "'octal'", "]", ")", ",", "perms", ")", ",", "'result'", ":", "None", ",", "'changes'", ":", "changes", "}", ")", "return", "ret", "try", ":", "if", "force", ":", "__salt__", "[", "'acl.wipefacls'", "]", "(", "name", ",", "recursive", "=", "recurse", ",", "raise_err", "=", "True", ")", "for", "acl_name", "in", "acl_names", ":", "__salt__", "[", "'acl.modfacl'", "]", "(", "acl_type", ",", "acl_name", ",", "perms", ",", "name", ",", "recursive", "=", "recurse", ",", "raise_err", "=", "True", ")", "ret", ".", "update", "(", "{", "'comment'", ":", "'Updated permissions for '", "'{0}'", ".", "format", "(", "acl_names", ")", ",", "'result'", ":", "True", ",", "'changes'", ":", "changes", "}", ")", "except", "CommandExecutionError", "as", "exc", ":", "ret", ".", "update", "(", "{", "'comment'", ":", "'Error updating permissions for '", "'{0}: {1}'", ".", "format", "(", "acl_names", ",", "exc", ".", "strerror", ")", ",", "'result'", ":", "False", "}", ")", "else", ":", "changes", "=", "{", "'new'", ":", "{", "'acl_name'", ":", "', '", ".", "join", "(", "acl_names", ")", ",", "'acl_type'", ":", "acl_type", ",", "'perms'", ":", "perms", "}", "}", "if", "__opts__", "[", "'test'", "]", ":", "ret", ".", "update", "(", "{", "'comment'", ":", "'New permissions will be applied for '", "'{0}: {1}'", ".", "format", "(", "acl_names", ",", "perms", ")", ",", "'result'", ":", "None", ",", "'changes'", ":", "changes", "}", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "try", ":", "if", "force", ":", "__salt__", "[", "'acl.wipefacls'", "]", "(", "name", ",", "recursive", "=", "recurse", ",", "raise_err", "=", "True", ")", "for", "acl_name", "in", "acl_names", ":", "__salt__", "[", "'acl.modfacl'", "]", "(", "acl_type", ",", "acl_name", ",", "perms", ",", "name", ",", "recursive", "=", "recurse", ",", "raise_err", "=", "True", ")", "ret", ".", "update", "(", "{", "'comment'", ":", "'Applied new permissions for '", "'{0}'", ".", "format", "(", "', '", ".", "join", "(", "acl_names", ")", ")", ",", "'result'", ":", "True", ",", "'changes'", ":", "changes", "}", ")", "except", "CommandExecutionError", "as", "exc", ":", "ret", ".", "update", 
"(", "{", "'comment'", ":", "'Error updating permissions for {0}: '", "'{1}'", ".", "format", "(", "acl_names", ",", "exc", ".", "strerror", ")", ",", "'result'", ":", "False", "}", ")", "else", ":", "changes", "=", "{", "'new'", ":", "{", "'acl_name'", ":", "', '", ".", "join", "(", "acl_names", ")", ",", "'acl_type'", ":", "acl_type", ",", "'perms'", ":", "perms", "}", "}", "if", "__opts__", "[", "'test'", "]", ":", "ret", ".", "update", "(", "{", "'comment'", ":", "'New permissions will be applied for '", "'{0}: {1}'", ".", "format", "(", "acl_names", ",", "perms", ")", ",", "'result'", ":", "None", ",", "'changes'", ":", "changes", "}", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "try", ":", "if", "force", ":", "__salt__", "[", "'acl.wipefacls'", "]", "(", "name", ",", "recursive", "=", "recurse", ",", "raise_err", "=", "True", ")", "for", "acl_name", "in", "acl_names", ":", "__salt__", "[", "'acl.modfacl'", "]", "(", "acl_type", ",", "acl_name", ",", "perms", ",", "name", ",", "recursive", "=", "recurse", ",", "raise_err", "=", "True", ")", "ret", ".", "update", "(", "{", "'comment'", ":", "'Applied new permissions for '", "'{0}'", ".", "format", "(", "', '", ".", "join", "(", "acl_names", ")", ")", ",", "'result'", ":", "True", ",", "'changes'", ":", "changes", "}", ")", "except", "CommandExecutionError", "as", "exc", ":", "ret", ".", "update", "(", "{", "'comment'", ":", "'Error updating permissions for {0}: '", "'{1}'", ".", "format", "(", "acl_names", ",", "exc", ".", "strerror", ")", ",", "'result'", ":", "False", "}", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'ACL Type does not exist'", "ret", "[", "'result'", "]", "=", "False", "return", "ret" ]
43.615789
25.394737
def get(key, default=-1): """Backport support for original codes.""" if isinstance(key, int): return HI_Algorithm(key) if key not in HI_Algorithm._member_map_: extend_enum(HI_Algorithm, key, default) return HI_Algorithm[key]
[ "def", "get", "(", "key", ",", "default", "=", "-", "1", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "return", "HI_Algorithm", "(", "key", ")", "if", "key", "not", "in", "HI_Algorithm", ".", "_member_map_", ":", "extend_enum", "(", "HI_Algorithm", ",", "key", ",", "default", ")", "return", "HI_Algorithm", "[", "key", "]" ]
39.142857
7.714286
def statistics(self, start=None, end=None, namespace=None): """Get write statistics for the specified namespace and date range""" return self.make_context(start=start, end=end, namespace=namespace).statistics()
[ "def", "statistics", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "namespace", "=", "None", ")", ":", "return", "self", ".", "make_context", "(", "start", "=", "start", ",", "end", "=", "end", ",", "namespace", "=", "namespace", ")", ".", "statistics", "(", ")" ]
64
14.75
def check_link_tag(self):
    """\
    checks to see if we can find a link tag with rel="image_src"
    on this page
    """
    node = self.article.raw_doc
    meta = self.parser.getElementsByTag(node, tag='link', attr='rel', value='image_src')
    for item in meta:
        src = self.parser.getAttribute(item, attr='href')
        if src:
            return self.get_image(src, extraction_type='linktag')
    return None
[ "def", "check_link_tag", "(", "self", ")", ":", "node", "=", "self", ".", "article", ".", "raw_doc", "meta", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "node", ",", "tag", "=", "'link'", ",", "attr", "=", "'rel'", ",", "value", "=", "'image_src'", ")", "for", "item", "in", "meta", ":", "src", "=", "self", ".", "parser", ".", "getAttribute", "(", "item", ",", "attr", "=", "'href'", ")", "if", "src", ":", "return", "self", ".", "get_image", "(", "src", ",", "extraction_type", "=", "'linktag'", ")", "return", "None" ]
37.25
15
def ApplyEdits(self, adds=None, updates=None, deletes=None):
    """This operation adds, updates and deletes features to the
       associated feature layer or table in a single call (POST only).
       The apply edits operation is performed on a feature service layer
       resource. The results of this operation are 3 arrays of edit
       results (for adds, updates and deletes respectively). Each edit
       result identifies a single feature and indicates whether the edit
       was successful or not. If not, it also includes an error code and
       an error description."""
    add_str, update_str = None, None
    if adds:
        add_str = ",".join(json.dumps(
                                feature._json_struct_for_featureset)
                           for feature in adds)
    if updates:
        update_str = ",".join(json.dumps(
                                  feature._json_struct_for_featureset)
                              for feature in updates)
    return self._get_subfolder("./applyEdits", JsonPostResult,
                               {'adds': add_str,
                                'updates': update_str,
                                'deletes': deletes
                               })
[ "def", "ApplyEdits", "(", "self", ",", "adds", "=", "None", ",", "updates", "=", "None", ",", "deletes", "=", "None", ")", ":", "add_str", ",", "update_str", "=", "None", ",", "None", "if", "adds", ":", "add_str", "=", "\",\"", ".", "join", "(", "json", ".", "dumps", "(", "feature", ".", "_json_struct_for_featureset", ")", "for", "feature", "in", "adds", ")", "if", "updates", ":", "update_str", "=", "\",\"", ".", "join", "(", "json", ".", "dumps", "(", "feature", ".", "_json_struct_for_featureset", ")", "for", "feature", "in", "updates", ")", "return", "self", ".", "_get_subfolder", "(", "\"./applyEdits\"", ",", "JsonPostResult", ",", "{", "'adds'", ":", "add_str", ",", "'updates'", ":", "update_str", ",", "'deletes'", ":", "deletes", "}", ")" ]
64.92
25.6
def parse(self): """Parse pattern list.""" result = [''] negative = False p = util.norm_pattern(self.pattern, not self.unix, self.raw_chars) p = p.decode('latin-1') if self.is_bytes else p if is_negative(p, self.flags): negative = True p = p[1:] self.root(p, result) case_flag = 'i' if not self.case_sensitive else '' if util.PY36: pattern = ( r'^(?!(?s%s:%s)$).*?$' if negative and not self.globstar_capture else r'^(?s%s:%s)$' ) % (case_flag, ''.join(result)) else: pattern = ( r'(?s%s)^(?!(?:%s)$).*?$' if negative and not self.globstar_capture else r'(?s%s)^(?:%s)$' ) % (case_flag, ''.join(result)) if self.is_bytes: pattern = pattern.encode('latin-1') return pattern
[ "def", "parse", "(", "self", ")", ":", "result", "=", "[", "''", "]", "negative", "=", "False", "p", "=", "util", ".", "norm_pattern", "(", "self", ".", "pattern", ",", "not", "self", ".", "unix", ",", "self", ".", "raw_chars", ")", "p", "=", "p", ".", "decode", "(", "'latin-1'", ")", "if", "self", ".", "is_bytes", "else", "p", "if", "is_negative", "(", "p", ",", "self", ".", "flags", ")", ":", "negative", "=", "True", "p", "=", "p", "[", "1", ":", "]", "self", ".", "root", "(", "p", ",", "result", ")", "case_flag", "=", "'i'", "if", "not", "self", ".", "case_sensitive", "else", "''", "if", "util", ".", "PY36", ":", "pattern", "=", "(", "r'^(?!(?s%s:%s)$).*?$'", "if", "negative", "and", "not", "self", ".", "globstar_capture", "else", "r'^(?s%s:%s)$'", ")", "%", "(", "case_flag", ",", "''", ".", "join", "(", "result", ")", ")", "else", ":", "pattern", "=", "(", "r'(?s%s)^(?!(?:%s)$).*?$'", "if", "negative", "and", "not", "self", ".", "globstar_capture", "else", "r'(?s%s)^(?:%s)$'", ")", "%", "(", "case_flag", ",", "''", ".", "join", "(", "result", ")", ")", "if", "self", ".", "is_bytes", ":", "pattern", "=", "pattern", ".", "encode", "(", "'latin-1'", ")", "return", "pattern" ]
29.758621
24.344828
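To make the emitted pattern concrete: for a case-insensitive, non-negative match, the Python >= 3.6 branch above produces a scoped-flags regex of the shape below. The inner body here is illustrative, since the real one comes from root():

import re

# Shape of the emitted pattern for something like '*.txt' with ignore-case:
pattern = r'^(?si:.*?\.txt)$'
print(bool(re.match(pattern, 'NOTES.TXT')))   # True
print(bool(re.match(pattern, 'notes.txt~')))  # False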
def parse_cgmlst_alleles(cgmlst_fasta): """Parse cgMLST alleles from fasta file cgMLST FASTA file must have a header format of ">{marker name}|{allele name}" Args: cgmlst_fasta (str): cgMLST fasta file path Returns: dict of list: Marker name to list of allele sequences """ out = defaultdict(list) for header, seq in parse_fasta(cgmlst_fasta): if not '|' in header: raise Exception('Unexpected format for cgMLST fasta file header. No "|" (pipe) delimiter present! Header="{}"'.format(header)) marker_name, allele_name = header.split('|') out[marker_name].append(seq) return out
[ "def", "parse_cgmlst_alleles", "(", "cgmlst_fasta", ")", ":", "out", "=", "defaultdict", "(", "list", ")", "for", "header", ",", "seq", "in", "parse_fasta", "(", "cgmlst_fasta", ")", ":", "if", "not", "'|'", "in", "header", ":", "raise", "Exception", "(", "'Unexpected format for cgMLST fasta file header. No \"|\" (pipe) delimiter present! Header=\"{}\"'", ".", "format", "(", "header", ")", ")", "marker_name", ",", "allele_name", "=", "header", ".", "split", "(", "'|'", ")", "out", "[", "marker_name", "]", ".", "append", "(", "seq", ")", "return", "out" ]
38.058824
22.647059
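A self-contained sketch of the expected input/output, with a stand-in parse_fasta so the snippet runs on its own; the two-marker FASTA below is illustrative:

from collections import defaultdict

def parse_fasta(text):
    # Stand-in for the real parser: yield (header, sequence) pairs.
    header, seq = None, []
    for line in text.splitlines():
        if line.startswith('>'):
            if header is not None:
                yield header, ''.join(seq)
            header, seq = line[1:], []
        else:
            seq.append(line)
    if header is not None:
        yield header, ''.join(seq)

fasta = ">markerA|1\nACGT\n>markerA|2\nACGA\n>markerB|1\nTTGC\n"
out = defaultdict(list)
for header, seq in parse_fasta(fasta):
    marker_name, allele_name = header.split('|')
    out[marker_name].append(seq)
# out == {'markerA': ['ACGT', 'ACGA'], 'markerB': ['TTGC']}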
def _init_io_container(self, init_value):
    """Initialize container to hold lob data.
    Here either a cStringIO or an io.StringIO class is used depending on the Python version.
    For CLOBs ensure that an initial unicode value only contains valid ascii chars.
    """
    if isinstance(init_value, CLOB_STRING_IO_CLASSES):
        # already a valid StringIO instance, just use it as it is
        v = init_value
    else:
        # works for strings and unicodes. However unicodes must only contain valid ascii chars!
        if PY3:
            # an io.StringIO also accepts any unicode characters, but we must be sure that only
            # ascii chars are contained. In PY2 we use a cStringIO class which complains by itself
            # if it catches this case, so in PY2 no extra check needs to be performed here.
            init_value.encode('ascii')  # this is just a check, result not needed!
        v = CLOB_STRING_IO(init_value)
    return v
[ "def", "_init_io_container", "(", "self", ",", "init_value", ")", ":", "if", "isinstance", "(", "init_value", ",", "CLOB_STRING_IO_CLASSES", ")", ":", "# already a valid StringIO instance, just use it as it is", "v", "=", "init_value", "else", ":", "# works for strings and unicodes. However unicodes must only contain valid ascii chars!", "if", "PY3", ":", "# a io.StringIO also accepts any unicode characters, but we must be sure that only", "# ascii chars are contained. In PY2 we use a cStringIO class which complains by itself", "# if it catches this case, so in PY2 no extra check needs to be performed here.", "init_value", ".", "encode", "(", "'ascii'", ")", "# this is just a check, result not needed!", "v", "=", "CLOB_STRING_IO", "(", "init_value", ")", "return", "v" ]
59.176471
30.470588
def chart_json(start, end, period, symbol):
    """
    Requests chart data from Poloniex API

    Args:
        start: Int epoch date to START getting market stats from.
            Note that this epoch is FURTHER from the current date.
        end: Int epoch date to STOP getting market stats from.
            Note that this epoch is CLOSER to the current date.
        period: Int defining width of each chart candlestick in seconds.
            Valid values: 300, 900, 1800, 7200, 14400, 86400
        symbol: String of currency pair, like a ticker symbol.

    Returns:
        Tuple of (JSON data, URL to JSON).
        JSON data as a list of dict dates, where the keys are the raw
        market statistics.
        String URL to Poloniex API representing the given JSON.
    """
    url = ('https://poloniex.com/public?command'
           '=returnChartData&currencyPair={0}&start={1}'
           '&end={2}&period={3}').format(symbol, start, end, period)
    logger.debug(' HTTP Request URL:\n{0}'.format(url))
    json = requests.get(url).json()
    logger.debug(' JSON:\n{0}'.format(json))
    if 'error' in json:
        logger.error(' Invalid parameters in URL for HTTP response')
        raise SystemExit
    elif all(val == 0 for val in json[0]):
        logger.error(' Bad HTTP response. Time unit too short?')
        raise SystemExit
    elif len(json) < 1:  # time too short
        logger.error(' Not enough dates to calculate changes')
        raise SystemExit
    return json, url
[ "def", "chart_json", "(", "start", ",", "end", ",", "period", ",", "symbol", ")", ":", "url", "=", "(", "'https://poloniex.com/public?command'", "'=returnChartData&currencyPair={0}&start={1}'", "'&end={2}&period={3}'", ")", ".", "format", "(", "symbol", ",", "start", ",", "end", ",", "period", ")", "logger", ".", "debug", "(", "' HTTP Request URL:\\n{0}'", ".", "format", "(", "url", ")", ")", "json", "=", "requests", ".", "get", "(", "url", ")", ".", "json", "(", ")", "logger", ".", "debug", "(", "' JSON:\\n{0}'", ".", "format", "(", "json", ")", ")", "if", "'error'", "in", "json", ":", "logger", ".", "error", "(", "' Invalid parameters in URL for HTTP response'", ")", "raise", "SystemExit", "elif", "all", "(", "val", "==", "0", "for", "val", "in", "json", "[", "0", "]", ")", ":", "logger", ".", "error", "(", "' Bad HTTP response. Time unit too short?'", ")", "raise", "SystemExit", "elif", "len", "(", "json", ")", "<", "1", ":", "# time to short", "logger", ".", "error", "(", "' Not enough dates to calculate changes'", ")", "raise", "SystemExit", "return", "json", ",", "url" ]
40.777778
18.888889
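A hedged call sketch; the epoch bounds and pair symbol are placeholders, and period=1800 is one of the candlestick widths the docstring lists. The field names in the result follow Poloniex's returnChartData payload:

import time

end = int(time.time())
start = end - 7 * 24 * 3600                           # one week back
data, url = chart_json(start, end, 1800, 'BTC_ETH')   # pair symbol is illustrative
print(url)
print(data[0]['close'], data[0]['volume'])            # raw OHLC/volume per candlestick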
def contigsub(a,b): 'find longest common substring. return its slice coordinates (in a and b; see last line) or None if not found' 'a and b are token lists' common=commonelts(a,b); groupsa=groupelts(a,common); groupsb=groupelts(b,common) bestmatch=[None,None,0]; bslice=None for i in range(len(groupsb)): if not groupsb[i][0]: continue if len(groupsb[i])-1<=bestmatch[2]: continue # this whole segment can't beat our best match so far for j in range(len(groupsb[i])): match=seqingroups(groupsa,groupsb[i][j:]) if match and match[2]>bestmatch[2]: bestmatch=match; bslice=[i,j,match[2]] if match and match[2]>=(len(groupsb[i])/2.0): break # i.e. this is as good as we're going to get for groupsb[i], skip the rest. TODO: write a test for this return None if not bestmatch[2] else (ungroupslice(groupsa,bestmatch),ungroupslice(groupsb,bslice))
[ "def", "contigsub", "(", "a", ",", "b", ")", ":", "'a and b are token lists'", "common", "=", "commonelts", "(", "a", ",", "b", ")", "groupsa", "=", "groupelts", "(", "a", ",", "common", ")", "groupsb", "=", "groupelts", "(", "b", ",", "common", ")", "bestmatch", "=", "[", "None", ",", "None", ",", "0", "]", "bslice", "=", "None", "for", "i", "in", "range", "(", "len", "(", "groupsb", ")", ")", ":", "if", "not", "groupsb", "[", "i", "]", "[", "0", "]", ":", "continue", "if", "len", "(", "groupsb", "[", "i", "]", ")", "-", "1", "<=", "bestmatch", "[", "2", "]", ":", "continue", "# this whole segment can't beat our best match so far\r", "for", "j", "in", "range", "(", "len", "(", "groupsb", "[", "i", "]", ")", ")", ":", "match", "=", "seqingroups", "(", "groupsa", ",", "groupsb", "[", "i", "]", "[", "j", ":", "]", ")", "if", "match", "and", "match", "[", "2", "]", ">", "bestmatch", "[", "2", "]", ":", "bestmatch", "=", "match", "bslice", "=", "[", "i", ",", "j", ",", "match", "[", "2", "]", "]", "if", "match", "and", "match", "[", "2", "]", ">=", "(", "len", "(", "groupsb", "[", "i", "]", ")", "/", "2.0", ")", ":", "break", "# i.e. this is as good as we're going to get for groupsb[i], skip the rest. TODO: write a test for this\r", "return", "None", "if", "not", "bestmatch", "[", "2", "]", "else", "(", "ungroupslice", "(", "groupsa", ",", "bestmatch", ")", ",", "ungroupslice", "(", "groupsb", ",", "bslice", ")", ")" ]
67.846154
35.384615
def geojson_to_gml(gj, set_srs=True): """Given a dict deserialized from a GeoJSON object, returns an lxml Element of the corresponding GML geometry.""" tag = G(gj['type']) if set_srs: tag.set('srsName', 'urn:ogc:def:crs:EPSG::4326') if gj['type'] == 'Point': tag.append(G.pos(_reverse_geojson_coords(gj['coordinates']))) elif gj['type'] == 'LineString': tag.append(G.posList(' '.join(_reverse_geojson_coords(ll) for ll in gj['coordinates']))) elif gj['type'] == 'Polygon': rings = [ G.LinearRing( G.posList(' '.join(_reverse_geojson_coords(ll) for ll in ring)) ) for ring in gj['coordinates'] ] tag.append(G.exterior(rings.pop(0))) for ring in rings: tag.append(G.interior(ring)) elif gj['type'] in ('MultiPoint', 'MultiLineString', 'MultiPolygon'): single_type = gj['type'][5:] member_tag = single_type[0].lower() + single_type[1:] + 'Member' for coord in gj['coordinates']: tag.append( G(member_tag, geojson_to_gml({'type': single_type, 'coordinates': coord}, set_srs=False)) ) else: raise NotImplementedError return tag
[ "def", "geojson_to_gml", "(", "gj", ",", "set_srs", "=", "True", ")", ":", "tag", "=", "G", "(", "gj", "[", "'type'", "]", ")", "if", "set_srs", ":", "tag", ".", "set", "(", "'srsName'", ",", "'urn:ogc:def:crs:EPSG::4326'", ")", "if", "gj", "[", "'type'", "]", "==", "'Point'", ":", "tag", ".", "append", "(", "G", ".", "pos", "(", "_reverse_geojson_coords", "(", "gj", "[", "'coordinates'", "]", ")", ")", ")", "elif", "gj", "[", "'type'", "]", "==", "'LineString'", ":", "tag", ".", "append", "(", "G", ".", "posList", "(", "' '", ".", "join", "(", "_reverse_geojson_coords", "(", "ll", ")", "for", "ll", "in", "gj", "[", "'coordinates'", "]", ")", ")", ")", "elif", "gj", "[", "'type'", "]", "==", "'Polygon'", ":", "rings", "=", "[", "G", ".", "LinearRing", "(", "G", ".", "posList", "(", "' '", ".", "join", "(", "_reverse_geojson_coords", "(", "ll", ")", "for", "ll", "in", "ring", ")", ")", ")", "for", "ring", "in", "gj", "[", "'coordinates'", "]", "]", "tag", ".", "append", "(", "G", ".", "exterior", "(", "rings", ".", "pop", "(", "0", ")", ")", ")", "for", "ring", "in", "rings", ":", "tag", ".", "append", "(", "G", ".", "interior", "(", "ring", ")", ")", "elif", "gj", "[", "'type'", "]", "in", "(", "'MultiPoint'", ",", "'MultiLineString'", ",", "'MultiPolygon'", ")", ":", "single_type", "=", "gj", "[", "'type'", "]", "[", "5", ":", "]", "member_tag", "=", "single_type", "[", "0", "]", ".", "lower", "(", ")", "+", "single_type", "[", "1", ":", "]", "+", "'Member'", "for", "coord", "in", "gj", "[", "'coordinates'", "]", ":", "tag", ".", "append", "(", "G", "(", "member_tag", ",", "geojson_to_gml", "(", "{", "'type'", ":", "single_type", ",", "'coordinates'", ":", "coord", "}", ",", "set_srs", "=", "False", ")", ")", ")", "else", ":", "raise", "NotImplementedError", "return", "tag" ]
39.16129
20
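A call sketch; G and _reverse_geojson_coords are module-level helpers in the original, so the ElementMaker below is a plausible stand-in bound to the GML namespace. Note the lon/lat order is flipped in the output, per the coordinate-reversal helper:

from lxml import etree
from lxml.builder import ElementMaker

GML_NS = 'http://www.opengis.net/gml'
G = ElementMaker(namespace=GML_NS, nsmap={'gml': GML_NS})

point = geojson_to_gml({'type': 'Point', 'coordinates': [-0.1276, 51.5072]})
print(etree.tostring(point, pretty_print=True).decode())
# <gml:Point srsName="urn:ogc:def:crs:EPSG::4326">
#   <gml:pos>51.5072 -0.1276</gml:pos>
# </gml:Point>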
def get_prices(self) -> List[PriceModel]: """ Returns all available prices for security """ # return self.security.prices.order_by(Price.date) from pricedb.dal import Price pricedb = PriceDbApplication() repo = pricedb.get_price_repository() query = (repo.query(Price) .filter(Price.namespace == self.security.namespace) .filter(Price.symbol == self.security.mnemonic) .orderby_desc(Price.date) ) return query.all()
[ "def", "get_prices", "(", "self", ")", "->", "List", "[", "PriceModel", "]", ":", "# return self.security.prices.order_by(Price.date)", "from", "pricedb", ".", "dal", "import", "Price", "pricedb", "=", "PriceDbApplication", "(", ")", "repo", "=", "pricedb", ".", "get_price_repository", "(", ")", "query", "=", "(", "repo", ".", "query", "(", "Price", ")", ".", "filter", "(", "Price", ".", "namespace", "==", "self", ".", "security", ".", "namespace", ")", ".", "filter", "(", "Price", ".", "symbol", "==", "self", ".", "security", ".", "mnemonic", ")", ".", "orderby_desc", "(", "Price", ".", "date", ")", ")", "return", "query", ".", "all", "(", ")" ]
38.769231
12.692308
def build_index_from_design(df, design, remove_prefix=None, types=None, axis=1, auto_convert_numeric=True, unmatched_columns='index'):
    """
    Build a MultiIndex from a design table.

    Supply with a table with column headings for the new multiindex
    and an index containing the labels to search for in the data.

    :param df:
    :param design:
    :param remove_prefix:
    :param types:
    :param axis:
    :param auto_convert_numeric:
    :return:
    """
    df = df.copy()
    if 'Label' not in design.index.names:
        design = design.set_index('Label')

    if remove_prefix is None:
        remove_prefix = []

    if type(remove_prefix) is str:
        remove_prefix=[remove_prefix]

    unmatched_for_index = []
    names = design.columns.values
    idx_levels = len(names)
    indexes = []

    # Convert numeric only columns_to_combine; except index
    if auto_convert_numeric:
        design = design.apply(pd.to_numeric, errors="ignore")

    # The match columns are always strings, so the index must also be
    design.index = design.index.astype(str)

    # Apply type settings
    if types:
        for n, t in types.items():
            if n in design.columns.values:
                design[n] = design[n].astype(t)

    # Build the index
    for lo in df.columns.values:
        l = copy(lo)
        for s in remove_prefix:
            l = l.replace(s, '')

        # Remove trailing/forward spaces
        l = l.strip()
        # Convert to numeric if possible
        l = numeric(l)
        # Attempt to match to the labels
        try:
            # Index
            idx = design.loc[str(l)]

        except:
            if unmatched_columns:
                unmatched_for_index.append(lo)
            else:
                # No match, fill with None
                idx = tuple([None] * idx_levels)
                indexes.append(idx)

        else:
            # We have a matched row, store it
            idx = tuple(idx.values)
            indexes.append(idx)

    if axis == 0:
        df.index = pd.MultiIndex.from_tuples(indexes, names=names)
    else:
        # If using unmatched for index, append
        if unmatched_columns == 'index':
            df = df.set_index(unmatched_for_index, append=True)
        elif unmatched_columns == 'drop':
            df = df.drop(unmatched_for_index, axis=1)

        df.columns = pd.MultiIndex.from_tuples(indexes, names=names)

    df = df.sort_index(axis=1)

    return df
[ "def", "build_index_from_design", "(", "df", ",", "design", ",", "remove_prefix", "=", "None", ",", "types", "=", "None", ",", "axis", "=", "1", ",", "auto_convert_numeric", "=", "True", ",", "unmatched_columns", "=", "'index'", ")", ":", "df", "=", "df", ".", "copy", "(", ")", "if", "'Label'", "not", "in", "design", ".", "index", ".", "names", ":", "design", "=", "design", ".", "set_index", "(", "'Label'", ")", "if", "remove_prefix", "is", "None", ":", "remove_prefix", "=", "[", "]", "if", "type", "(", "remove_prefix", ")", "is", "str", ":", "remove_prefix", "=", "[", "remove_prefix", "]", "unmatched_for_index", "=", "[", "]", "names", "=", "design", ".", "columns", ".", "values", "idx_levels", "=", "len", "(", "names", ")", "indexes", "=", "[", "]", "# Convert numeric only columns_to_combine; except index", "if", "auto_convert_numeric", ":", "design", "=", "design", ".", "apply", "(", "pd", ".", "to_numeric", ",", "errors", "=", "\"ignore\"", ")", "# The match columns are always strings, so the index must also be", "design", ".", "index", "=", "design", ".", "index", ".", "astype", "(", "str", ")", "# Apply type settings", "if", "types", ":", "for", "n", ",", "t", "in", "types", ".", "items", "(", ")", ":", "if", "n", "in", "design", ".", "columns", ".", "values", ":", "design", "[", "n", "]", "=", "design", "[", "n", "]", ".", "astype", "(", "t", ")", "# Build the index", "for", "lo", "in", "df", ".", "columns", ".", "values", ":", "l", "=", "copy", "(", "lo", ")", "for", "s", "in", "remove_prefix", ":", "l", "=", "l", ".", "replace", "(", "s", ",", "''", ")", "# Remove trailing/forward spaces", "l", "=", "l", ".", "strip", "(", ")", "# Convert to numeric if possible", "l", "=", "numeric", "(", "l", ")", "# Attempt to match to the labels", "try", ":", "# Index", "idx", "=", "design", ".", "loc", "[", "str", "(", "l", ")", "]", "except", ":", "if", "unmatched_columns", ":", "unmatched_for_index", ".", "append", "(", "lo", ")", "else", ":", "# No match, fill with None", "idx", "=", "tuple", "(", "[", "None", "]", "*", "idx_levels", ")", "indexes", ".", "append", "(", "idx", ")", "else", ":", "# We have a matched row, store it", "idx", "=", "tuple", "(", "idx", ".", "values", ")", "indexes", ".", "append", "(", "idx", ")", "if", "axis", "==", "0", ":", "df", ".", "index", "=", "pd", ".", "MultiIndex", ".", "from_tuples", "(", "indexes", ",", "names", "=", "names", ")", "else", ":", "# If using unmatched for index, append", "if", "unmatched_columns", "==", "'index'", ":", "df", "=", "df", ".", "set_index", "(", "unmatched_for_index", ",", "append", "=", "True", ")", "elif", "unmatched_columns", "==", "'drop'", ":", "df", "=", "df", ".", "drop", "(", "unmatched_for_index", ",", "axis", "=", "1", ")", "df", ".", "columns", "=", "pd", ".", "MultiIndex", ".", "from_tuples", "(", "indexes", ",", "names", "=", "names", ")", "df", "=", "df", ".", "sort_index", "(", "axis", "=", "1", ")", "return", "df" ]
26.88764
20.41573
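A small, self-contained illustration of the design-table idea; the column labels, groups, and prefix below are made up. After the call, the frame's columns carry the design's levels:

import pandas as pd

df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
                  columns=['Intensity S1', 'Intensity S2'])
design = pd.DataFrame({'Label': ['S1', 'S2'],
                       'Group': ['control', 'treated'],
                       'Replicate': [1, 1]})

indexed = build_index_from_design(df, design, remove_prefix='Intensity ')
# indexed.columns is now a MultiIndex with levels ('Group', 'Replicate')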
def execute(self): """ Execute the job, that is, execute all of its tasks. Each produced sync map will be stored inside the corresponding task object. :raises: :class:`~aeneas.executejob.ExecuteJobExecutionError`: if there is a problem during the job execution """ self.log(u"Executing job") if self.job is None: self.log_exc(u"The job object is None", None, True, ExecuteJobExecutionError) if len(self.job) == 0: self.log_exc(u"The job has no tasks", None, True, ExecuteJobExecutionError) job_max_tasks = self.rconf[RuntimeConfiguration.JOB_MAX_TASKS] if (job_max_tasks > 0) and (len(self.job) > job_max_tasks): self.log_exc(u"The Job has %d Tasks, more than the maximum allowed (%d)." % (len(self.job), job_max_tasks), None, True, ExecuteJobExecutionError) self.log([u"Number of tasks: '%d'", len(self.job)]) for task in self.job.tasks: try: custom_id = task.configuration["custom_id"] self.log([u"Executing task '%s'...", custom_id]) executor = ExecuteTask(task, rconf=self.rconf, logger=self.logger) executor.execute() self.log([u"Executing task '%s'... done", custom_id]) except Exception as exc: self.log_exc(u"Error while executing task '%s'" % (custom_id), exc, True, ExecuteJobExecutionError) self.log(u"Executing task: succeeded") self.log(u"Executing job: succeeded")
[ "def", "execute", "(", "self", ")", ":", "self", ".", "log", "(", "u\"Executing job\"", ")", "if", "self", ".", "job", "is", "None", ":", "self", ".", "log_exc", "(", "u\"The job object is None\"", ",", "None", ",", "True", ",", "ExecuteJobExecutionError", ")", "if", "len", "(", "self", ".", "job", ")", "==", "0", ":", "self", ".", "log_exc", "(", "u\"The job has no tasks\"", ",", "None", ",", "True", ",", "ExecuteJobExecutionError", ")", "job_max_tasks", "=", "self", ".", "rconf", "[", "RuntimeConfiguration", ".", "JOB_MAX_TASKS", "]", "if", "(", "job_max_tasks", ">", "0", ")", "and", "(", "len", "(", "self", ".", "job", ")", ">", "job_max_tasks", ")", ":", "self", ".", "log_exc", "(", "u\"The Job has %d Tasks, more than the maximum allowed (%d).\"", "%", "(", "len", "(", "self", ".", "job", ")", ",", "job_max_tasks", ")", ",", "None", ",", "True", ",", "ExecuteJobExecutionError", ")", "self", ".", "log", "(", "[", "u\"Number of tasks: '%d'\"", ",", "len", "(", "self", ".", "job", ")", "]", ")", "for", "task", "in", "self", ".", "job", ".", "tasks", ":", "try", ":", "custom_id", "=", "task", ".", "configuration", "[", "\"custom_id\"", "]", "self", ".", "log", "(", "[", "u\"Executing task '%s'...\"", ",", "custom_id", "]", ")", "executor", "=", "ExecuteTask", "(", "task", ",", "rconf", "=", "self", ".", "rconf", ",", "logger", "=", "self", ".", "logger", ")", "executor", ".", "execute", "(", ")", "self", ".", "log", "(", "[", "u\"Executing task '%s'... done\"", ",", "custom_id", "]", ")", "except", "Exception", "as", "exc", ":", "self", ".", "log_exc", "(", "u\"Error while executing task '%s'\"", "%", "(", "custom_id", ")", ",", "exc", ",", "True", ",", "ExecuteJobExecutionError", ")", "self", ".", "log", "(", "u\"Executing task: succeeded\"", ")", "self", ".", "log", "(", "u\"Executing job: succeeded\"", ")" ]
47.875
27.75
def add_output_option(parser):
    """output option"""
    parser.add_argument("-o", "--outdir", dest="outdir", type=str, default='GSEApy_reports',
                        metavar='', action="store",
                        help="The GSEApy output directory. Default: the current working directory")
    parser.add_argument("-f", "--format", dest="format", type=str, metavar='', action="store",
                        choices=("pdf", "png", "jpeg", "eps", "svg"), default="pdf",
                        help="File extensions supported by Matplotlib active backend,\
                        choose from {'pdf', 'png', 'jpeg', 'eps', 'svg'}. Default: 'pdf'.")
    parser.add_argument("--fs", "--figsize", action='store', nargs=2, dest='figsize',
                        metavar=('width', 'height'), type=float, default=(6.5, 6),
                        help="The figsize keyword argument needs two parameters to define it. Default: (6.5, 6)")
    parser.add_argument("--graph", dest="graph", action="store", type=int, default=20, metavar='int',
                        help="Numbers of top graphs produced. Default: 20")
    parser.add_argument("--no-plot", action='store_true', dest='noplot', default=False,
                        help="Speed up computing by suppressing the plot output."+\
                             "This is useful if only the data are of interest. Default: False.")
    parser.add_argument("-v", "--verbose", action="store_true", default=False,
                        dest='verbose', help="Increase output verbosity, print out progress of your job", )
[ "def", "add_output_option", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "\"-o\"", ",", "\"--outdir\"", ",", "dest", "=", "\"outdir\"", ",", "type", "=", "str", ",", "default", "=", "'GSEApy_reports'", ",", "metavar", "=", "''", ",", "action", "=", "\"store\"", ",", "help", "=", "\"The GSEApy output directory. Default: the current working directory\"", ")", "parser", ".", "add_argument", "(", "\"-f\"", ",", "\"--format\"", ",", "dest", "=", "\"format\"", ",", "type", "=", "str", ",", "metavar", "=", "''", ",", "action", "=", "\"store\"", ",", "choices", "=", "(", "\"pdf\"", ",", "\"png\"", ",", "\"jpeg\"", ",", "\"eps\"", ",", "\"svg\"", ")", ",", "default", "=", "\"pdf\"", ",", "help", "=", "\"File extensions supported by Matplotlib active backend,\\\n choose from {'pdf', 'png', 'jpeg','ps', 'eps','svg'}. Default: 'pdf'.\"", ")", "parser", ".", "add_argument", "(", "\"--fs\"", ",", "\"--figsize\"", ",", "action", "=", "'store'", ",", "nargs", "=", "2", ",", "dest", "=", "'figsize'", ",", "metavar", "=", "(", "'width'", ",", "'height'", ")", ",", "type", "=", "float", ",", "default", "=", "(", "6.5", ",", "6", ")", ",", "help", "=", "\"The figsize keyword argument need two parameters to define. Default: (6.5, 6)\"", ")", "parser", ".", "add_argument", "(", "\"--graph\"", ",", "dest", "=", "\"graph\"", ",", "action", "=", "\"store\"", ",", "type", "=", "int", ",", "default", "=", "20", ",", "metavar", "=", "'int'", ",", "help", "=", "\"Numbers of top graphs produced. Default: 20\"", ")", "parser", ".", "add_argument", "(", "\"--no-plot\"", ",", "action", "=", "'store_true'", ",", "dest", "=", "'noplot'", ",", "default", "=", "False", ",", "help", "=", "\"Speed up computing by suppressing the plot output.\"", "+", "\"This is useful only if data are interested. Default: False.\"", ")", "parser", ".", "add_argument", "(", "\"-v\"", ",", "\"--verbose\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "dest", "=", "'verbose'", ",", "help", "=", "\"Increase output verbosity, print out progress of your job\"", ",", ")" ]
77.95
43.8
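How the option group is typically wired into a CLI, sketched with a bare ArgumentParser; the argv values are made up:

import argparse

parser = argparse.ArgumentParser(prog='gseapy')
add_output_option(parser)

args = parser.parse_args(['-o', 'reports', '--format', 'png', '--graph', '10'])
print(args.outdir, args.format, args.graph)   # -> reports png 10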
def check_application_state(self, request, callback):
    "Check optional state parameter."
    stored = request.session.get(self.session_key, None)
    returned = request.GET.get('state', None)
    check = False
    if stored is not None:
        if returned is not None:
            check = constant_time_compare(stored, returned)
        else:
            logger.error('No state parameter returned by the provider.')
    else:
        logger.error('No state stored in the session.')
    return check
[ "def", "check_application_state", "(", "self", ",", "request", ",", "callback", ")", ":", "stored", "=", "request", ".", "session", ".", "get", "(", "self", ".", "session_key", ",", "None", ")", "returned", "=", "request", ".", "GET", ".", "get", "(", "'state'", ",", "None", ")", "check", "=", "False", "if", "stored", "is", "not", "None", ":", "if", "returned", "is", "not", "None", ":", "check", "=", "constant_time_compare", "(", "stored", ",", "returned", ")", "else", ":", "logger", ".", "error", "(", "'No state parameter returned by the provider.'", ")", "else", ":", "logger", ".", "error", "(", "'No state stored in the sesssion.'", ")", "return", "check" ]
41.461538
17.307692
def _get_revision(self, revision):
    """
    For git backend we always return integer here. This way we ensure
    that changeset's revision attribute becomes an integer.
    """
    is_null = lambda o: len(o) == revision.count('0')

    try:
        self.revisions[0]
    except (KeyError, IndexError):
        raise EmptyRepositoryError("There are no changesets yet")

    if revision in (None, '', 'tip', 'HEAD', 'head', -1):
        return self.revisions[-1]

    is_bstr = isinstance(revision, (str, unicode))
    if ((is_bstr and revision.isdigit() and len(revision) < 12)
        or isinstance(revision, int) or is_null(revision)):
        try:
            revision = self.revisions[int(revision)]
        except Exception:
            raise ChangesetDoesNotExistError("Revision %s does not exist "
                "for this repository" % (revision))

    elif is_bstr:
        # get by branch/tag name
        _ref_revision = self._parsed_refs.get(revision)
        if _ref_revision:  # and _ref_revision[1] in ['H', 'RH', 'T']:
            return _ref_revision[0]

        _tags_shas = self.tags.values()
        # maybe it's a tag ? we don't have them in self.revisions
        if revision in _tags_shas:
            return _tags_shas[_tags_shas.index(revision)]

        elif not SHA_PATTERN.match(revision) or revision not in self.revisions:
            raise ChangesetDoesNotExistError("Revision %s does not exist "
                "for this repository" % (revision))

    # Ensure we return full id
    if not SHA_PATTERN.match(str(revision)):
        raise ChangesetDoesNotExistError("Given revision %s not recognized"
            % revision)
    return revision
[ "def", "_get_revision", "(", "self", ",", "revision", ")", ":", "is_null", "=", "lambda", "o", ":", "len", "(", "o", ")", "==", "revision", ".", "count", "(", "'0'", ")", "try", ":", "self", ".", "revisions", "[", "0", "]", "except", "(", "KeyError", ",", "IndexError", ")", ":", "raise", "EmptyRepositoryError", "(", "\"There are no changesets yet\"", ")", "if", "revision", "in", "(", "None", ",", "''", ",", "'tip'", ",", "'HEAD'", ",", "'head'", ",", "-", "1", ")", ":", "return", "self", ".", "revisions", "[", "-", "1", "]", "is_bstr", "=", "isinstance", "(", "revision", ",", "(", "str", ",", "unicode", ")", ")", "if", "(", "(", "is_bstr", "and", "revision", ".", "isdigit", "(", ")", "and", "len", "(", "revision", ")", "<", "12", ")", "or", "isinstance", "(", "revision", ",", "int", ")", "or", "is_null", "(", "revision", ")", ")", ":", "try", ":", "revision", "=", "self", ".", "revisions", "[", "int", "(", "revision", ")", "]", "except", "Exception", ":", "raise", "ChangesetDoesNotExistError", "(", "\"Revision %s does not exist \"", "\"for this repository\"", "%", "(", "revision", ")", ")", "elif", "is_bstr", ":", "# get by branch/tag name", "_ref_revision", "=", "self", ".", "_parsed_refs", ".", "get", "(", "revision", ")", "if", "_ref_revision", ":", "# and _ref_revision[1] in ['H', 'RH', 'T']:", "return", "_ref_revision", "[", "0", "]", "_tags_shas", "=", "self", ".", "tags", ".", "values", "(", ")", "# maybe it's a tag ? we don't have them in self.revisions", "if", "revision", "in", "_tags_shas", ":", "return", "_tags_shas", "[", "_tags_shas", ".", "index", "(", "revision", ")", "]", "elif", "not", "SHA_PATTERN", ".", "match", "(", "revision", ")", "or", "revision", "not", "in", "self", ".", "revisions", ":", "raise", "ChangesetDoesNotExistError", "(", "\"Revision %s does not exist \"", "\"for this repository\"", "%", "(", "revision", ")", ")", "# Ensure we return full id", "if", "not", "SHA_PATTERN", ".", "match", "(", "str", "(", "revision", ")", ")", ":", "raise", "ChangesetDoesNotExistError", "(", "\"Given revision %s not recognized\"", "%", "revision", ")", "return", "revision" ]
39.577778
21.622222
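The dispatch order in _get_revision is easier to see laid out; the repository state below is invented purely to annotate which branch handles which input shape.

# Invented revision list; only the shapes of the inputs matter here.
revisions = ["0" * 40, "a" * 40, "b" * 40]            # oldest -> newest

tip_aliases  = (None, "", "tip", "HEAD", "head", -1)  # -> revisions[-1]
index_inputs = ("0", "1", 2)                          # -> revisions[int(x)]
ref_names    = ("master", "v1.0")                     # -> _parsed_refs / tags lookup
full_shas    = ("b" * 40,)                            # -> returned as-is if known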
def is_auth(nodes): ''' Check if nodes are already authorized nodes a list of nodes to be checked for authorization to the cluster CLI Example: .. code-block:: bash salt '*' pcs.is_auth nodes='[node1.example.org node2.example.org]' ''' cmd = ['pcs', 'cluster', 'auth'] cmd += nodes return __salt__['cmd.run_all'](cmd, stdin='\n\n', output_loglevel='trace', python_shell=False)
[ "def", "is_auth", "(", "nodes", ")", ":", "cmd", "=", "[", "'pcs'", ",", "'cluster'", ",", "'auth'", "]", "cmd", "+=", "nodes", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "stdin", "=", "'\\n\\n'", ",", "output_loglevel", "=", "'trace'", ",", "python_shell", "=", "False", ")" ]
24.529412
29.941176
def ticket_tags(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/tags#show-tags" api_path = "/api/v2/tickets/{id}/tags.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
[ "def", "ticket_tags", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/tickets/{id}/tags.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
49
10.6
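This method shape matches the zdesk-style generated Zendesk bindings; a hedged call sketch follows, and the constructor arguments should be treated as assumptions about that package rather than documented fact.

from zdesk import Zendesk  # assumed package; adjust to your client class

client = Zendesk("https://example.zendesk.com",
                 "agent@example.com", "api-token", True)
tags = client.ticket_tags(id=123)   # GET /api/v2/tickets/123/tags.json
print(tags.get("tags", []))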
def _insert_breathe_configs(c, *, project_name, doxygen_xml_dirname): """Add breathe extension configurations to the state. """ if doxygen_xml_dirname is not None: c['breathe_projects'] = {project_name: doxygen_xml_dirname} c['breathe_default_project'] = project_name return c
[ "def", "_insert_breathe_configs", "(", "c", ",", "*", ",", "project_name", ",", "doxygen_xml_dirname", ")", ":", "if", "doxygen_xml_dirname", "is", "not", "None", ":", "c", "[", "'breathe_projects'", "]", "=", "{", "project_name", ":", "doxygen_xml_dirname", "}", "c", "[", "'breathe_default_project'", "]", "=", "project_name", "return", "c" ]
43.142857
13.714286
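A sketch of how such a helper slots into Sphinx configuration assembly; build_config is invented for illustration, and _insert_breathe_configs is assumed to be in scope from the record above.

def build_config(project_name, doxygen_xml_dirname=None):
    # Assemble a conf.py-style dict, then let the helper add breathe keys.
    c = {"project": project_name}
    return _insert_breathe_configs(
        c,
        project_name=project_name,
        doxygen_xml_dirname=doxygen_xml_dirname,
    )

conf = build_config("mypackage", "_doxygen/xml")
assert conf["breathe_projects"] == {"mypackage": "_doxygen/xml"}
assert conf["breathe_default_project"] == "mypackage"

no_doxygen = build_config("mypackage")      # None: breathe keys stay absent
assert "breathe_projects" not in no_doxygen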
def set(self, section, key, value):
    """set function sets a particular value for the specified key in the
    specified section and writes it to the config file.

    Parameters:
        * **section (string):** the section under which the config should be
          saved. Only accepted values are 'oxd' and 'client'
        * **key (string):** the key/name of the config value
        * **value (string):** the value which needs to be stored as a string

    Returns:
        **success (bool):** a boolean indication of whether the value was
        stored successfully in the file
    """
    if not self.parser.has_section(section):
        logger.warning("Invalid config section: %s", section)
        return False

    self.parser.set(section, key, value)
    with open(self.config_file, 'wb') as cfile:
        self.parser.write(cfile)
    return True
[ "def", "set", "(", "self", ",", "section", ",", "key", ",", "value", ")", ":", "if", "not", "self", ".", "parser", ".", "has_section", "(", "section", ")", ":", "logger", ".", "warning", "(", "\"Invalid config section: %s\"", ",", "section", ")", "return", "False", "self", ".", "parser", ".", "set", "(", "section", ",", "key", ",", "value", ")", "with", "open", "(", "self", ".", "config_file", ",", "'wb'", ")", "as", "cfile", ":", "self", ".", "parser", ".", "write", "(", "cfile", ")", "return", "True" ]
36.75
27.375
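A minimal standalone round-trip of the same guarded write, in Python 3 syntax; note the record above opens the file in 'wb', a Python 2 idiom that configparser under Python 3 would reject.

import configparser

parser = configparser.ConfigParser()
parser.add_section("oxd")

if parser.has_section("oxd"):
    parser.set("oxd", "port", "8099")    # values must already be strings
    with open("oxd.cfg", "w") as cfile:  # text mode in Python 3
        parser.write(cfile)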
def update(self): """Update |QFactor| based on |FT| and the current simulation step size. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('1d') >>> ft(10.0) >>> derived.qfactor.update() >>> derived.qfactor qfactor(0.115741) """ con = self.subpars.pars.control self(con.ft*1000./self.simulationstep.seconds)
[ "def", "update", "(", "self", ")", ":", "con", "=", "self", ".", "subpars", ".", "pars", ".", "control", "self", "(", "con", ".", "ft", "*", "1000.", "/", "self", ".", "simulationstep", ".", "seconds", ")" ]
31.923077
11.615385
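A worked check of the doctest above: with a one-day simulation step (86400 seconds) and ft = 10.0, the factor comes out to the 0.115741 shown. Reading the units, this is consistent with converting a depth in mm per step over an area in km² to m³/s, though that interpretation is an inference from the formula, not stated in the record.

ft = 10.0                    # km^2, as in the doctest
seconds = 24 * 60 * 60       # one-day simulation step
qfactor = ft * 1000.0 / seconds
print(round(qfactor, 6))     # 0.115741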
def proto_0202(theABF):
    """protocol: MTIV."""
    abf=ABF(theABF)
    abf.log.info("analyzing as MTIV")
    plot=ABFplot(abf)
    plot.figure_height,plot.figure_width=SQUARESIZE,SQUARESIZE
    plot.title=""
    plot.kwargs["alpha"]=.6
    plot.figure_sweeps()

    # frame to upper/lower bounds, ignoring peaks from capacitive transients
    abf.setsweep(0)
    plt.axis([None,None,abf.average(.9,1)-100,None])
    abf.setsweep(-1)
    plt.axis([None,None,None,abf.average(.9,1)+100])

    # save it
    plt.tight_layout()
    frameAndSave(abf,"MTIV")
    plt.close('all')
[ "def", "proto_0202", "(", "theABF", ")", ":", "abf", "=", "ABF", "(", "theABF", ")", "abf", ".", "log", ".", "info", "(", "\"analyzing as MTIV\"", ")", "plot", "=", "ABFplot", "(", "abf", ")", "plot", ".", "figure_height", ",", "plot", ".", "figure_width", "=", "SQUARESIZE", ",", "SQUARESIZE", "plot", ".", "title", "=", "\"\"", "plot", ".", "kwargs", "[", "\"alpha\"", "]", "=", ".6", "plot", ".", "figure_sweeps", "(", ")", "# frame to upper/lower bounds, ignoring peaks from capacitive transients", "abf", ".", "setsweep", "(", "0", ")", "plt", ".", "axis", "(", "[", "None", ",", "None", ",", "abf", ".", "average", "(", ".9", ",", "1", ")", "-", "100", ",", "None", "]", ")", "abf", ".", "setsweep", "(", "-", "1", ")", "plt", ".", "axis", "(", "[", "None", ",", "None", ",", "None", ",", "abf", ".", "average", "(", ".9", ",", "1", ")", "+", "100", "]", ")", "# save it", "plt", ".", "tight_layout", "(", ")", "frameAndSave", "(", "abf", ",", "\"MTIV\"", ")", "plt", ".", "close", "(", "'all'", ")" ]
27.9
19.65
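The plt.axis([...]) calls above rely on None entries leaving a bound untouched; the explicit set_ylim form below is equivalent and less version-sensitive (the plotted data is invented, matplotlib assumed available).

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(range(100), [v % 17 for v in range(100)])
ax.set_ylim(bottom=-100)   # clamp only the lower bound, keep the upper auto
ax.set_ylim(top=120)       # clamp only the upper bound
plt.close(fig)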
def create_from_params(cls, template_model, datastore_name, vm_cluster_model, ip_regex, refresh_ip_timeout, auto_power_on, auto_power_off, wait_for_ip, auto_delete): """ :param VCenterTemplateModel template_model: :param str datastore_name: :param VMClusterModel vm_cluster_model: :param str ip_regex: Custom regex to filter IP addresses :param refresh_ip_timeout: :param bool auto_power_on: :param bool auto_power_off: :param bool wait_for_ip: :param bool auto_delete: """ dic = { 'template_model': template_model, 'datastore_name': datastore_name, 'vm_cluster_model': vm_cluster_model, 'ip_regex': ip_regex, 'refresh_ip_timeout': refresh_ip_timeout, 'auto_power_on': auto_power_on, 'auto_power_off': auto_power_off, 'wait_for_ip': wait_for_ip, 'auto_delete': auto_delete } return cls(dic)
[ "def", "create_from_params", "(", "cls", ",", "template_model", ",", "datastore_name", ",", "vm_cluster_model", ",", "ip_regex", ",", "refresh_ip_timeout", ",", "auto_power_on", ",", "auto_power_off", ",", "wait_for_ip", ",", "auto_delete", ")", ":", "dic", "=", "{", "'template_model'", ":", "template_model", ",", "'datastore_name'", ":", "datastore_name", ",", "'vm_cluster_model'", ":", "vm_cluster_model", ",", "'ip_regex'", ":", "ip_regex", ",", "'refresh_ip_timeout'", ":", "refresh_ip_timeout", ",", "'auto_power_on'", ":", "auto_power_on", ",", "'auto_power_off'", ":", "auto_power_off", ",", "'wait_for_ip'", ":", "wait_for_ip", ",", "'auto_delete'", ":", "auto_delete", "}", "return", "cls", "(", "dic", ")" ]
40.52
12.6
def find(self,cell_designation,cell_filter=lambda x,c: 'c' in x and x['c'] == c):
    """
    Finds spike containers in the offspring of a multi-spike-container collection.
    """
    if 'parent' in self.meta:
        return (self.meta['parent'],self.meta['parent'].find(cell_designation,cell_filter=cell_filter))
[ "def", "find", "(", "self", ",", "cell_designation", ",", "cell_filter", "=", "lambda", "x", ",", "c", ":", "'c'", "in", "x", "and", "x", "[", "'c'", "]", "==", "c", ")", ":", "if", "'parent'", "in", "self", ".", "meta", ":", "return", "(", "self", ".", "meta", "[", "'parent'", "]", ",", "self", ".", "meta", "[", "'parent'", "]", ".", "find", "(", "cell_designation", ",", "cell_filter", "=", "cell_filter", ")", ")" ]
54
26
def fill_concentric_circles(size, center, radius):
    """ Returns a path that fills the circle of the given radius and center
    with concentric rings.

    :param size: The size of the image, used to skip points that are out of bounds.
    :param center: The (x, y) center of the circle.
    :param radius: The number of concentric rings to generate.
    :return: Yields iterators, where each iterator yields (x,y) coordinates of points
        about the circle. The path moves outwards from the center of the circle.
        Points that fall outside `size` are skipped.
    """
    for r in range(radius):
        yield concentric_circle(center, r, size=size)
[ "def", "fill_concentric_circles", "(", "size", ",", "center", ",", "radius", ")", ":", "for", "r", "in", "range", "(", "radius", ")", ":", "yield", "concentric_circle", "(", "center", ",", "r", ",", "size", "=", "size", ")" ]
48.083333
23.083333
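Consuming the generator-of-generators above; concentric_circle is the helper the function delegates to and is assumed to be importable alongside it.

# Visit pixels ring by ring, moving outwards from the center.
for ring in fill_concentric_circles(size=(64, 64), center=(32, 32), radius=5):
    for x, y in ring:
        pass  # e.g. sample or paint the pixel at (x, y)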
def request_pdu(self): """ Build request PDU to read coils. :return: Byte array of 5 bytes with PDU. """ if None in [self.starting_address, self.quantity]: # TODO Raise proper exception. raise Exception return struct.pack('>BHH', self.function_code, self.starting_address, self.quantity)
[ "def", "request_pdu", "(", "self", ")", ":", "if", "None", "in", "[", "self", ".", "starting_address", ",", "self", ".", "quantity", "]", ":", "# TODO Raise proper exception.", "raise", "Exception", "return", "struct", ".", "pack", "(", "'>BHH'", ",", "self", ".", "function_code", ",", "self", ".", "starting_address", ",", "self", ".", "quantity", ")" ]
33.636364
16.090909
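A worked example of the pack above for the Modbus "read coils" request (function code 0x01), starting address 100, quantity 3; '>BHH' is a big-endian unsigned byte followed by two unsigned 16-bit integers.

import struct

pdu = struct.pack(">BHH", 0x01, 100, 3)
assert pdu == b"\x01\x00\x64\x00\x03"
assert len(pdu) == 5   # matches the "5 bytes" noted in the docstring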