column              type            value range
nwo                 stringlengths   5 .. 106
sha                 stringlengths   40 .. 40
path                stringlengths   4 .. 174
language            stringclasses   1 value
identifier          stringlengths   1 .. 140
parameters          stringlengths   0 .. 87.7k
argument_list       stringclasses   1 value
return_statement    stringlengths   0 .. 426k
docstring           stringlengths   0 .. 64.3k
docstring_summary   stringlengths   0 .. 26.3k
docstring_tokens    sequence
function            stringlengths   18 .. 4.83M
function_tokens     sequence
url                 stringlengths   83 .. 304
baidu/CUP
79ab2f3ad6eaab1461aa3b4cca37d3262240194a
cup/storage/obj.py
python
ObjectInterface.put
(self, dest, localfile)
:param dest: system path :param localfile: localfile :return: { 'returncode': 0 for success, others for failure, 'msg': 'if any' }
:param dest: system path :param localfile: localfile
[ ":", "param", "dest", ":", "system", "path", ":", "param", "localfile", ":", "localfile" ]
def put(self, dest, localfile):
    """
    :param dest: system path
    :param localfile: localfile

    :return:
        {
            'returncode': 0 for success, others for failure,
            'msg': 'if any'
        }
    """
[ "def", "put", "(", "self", ",", "dest", ",", "localfile", ")", ":" ]
https://github.com/baidu/CUP/blob/79ab2f3ad6eaab1461aa3b4cca37d3262240194a/cup/storage/obj.py#L56-L68
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
lms/djangoapps/instructor_task/subtasks.py
python
SubtaskStatus.__repr__
(self)
return f'SubtaskStatus<{self.to_dict()!r}>'
Return print representation of a SubtaskStatus object.
Return print representation of a SubtaskStatus object.
[ "Return", "print", "representation", "of", "a", "SubtaskStatus", "object", "." ]
def __repr__(self):
    """Return print representation of a SubtaskStatus object."""
    return f'SubtaskStatus<{self.to_dict()!r}>'
[ "def", "__repr__", "(", "self", ")", ":", "return", "f'SubtaskStatus<{self.to_dict()!r}>'" ]
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/lms/djangoapps/instructor_task/subtasks.py#L203-L205
enthought/traitsui
b7c38c7a47bf6ae7971f9ddab70c8a358647dd25
traitsui/qt4/button_editor.py
python
SimpleEditor.dispose
(self)
Disposes of the contents of an editor.
Disposes of the contents of an editor.
[ "Disposes", "of", "the", "contents", "of", "an", "editor", "." ]
def dispose(self):
    """Disposes of the contents of an editor."""
    if self.factory.values_trait:
        self.object.observe(
            self._update_menu,
            self.factory.values_trait + ".items",
            remove=True,
        )
    if self.control is not None:
        self.control.clicked.disconnect(self.update_object)
    super().dispose()
[ "def", "dispose", "(", "self", ")", ":", "if", "self", ".", "factory", ".", "values_trait", ":", "self", ".", "object", ".", "observe", "(", "self", ".", "_update_menu", ",", "self", ".", "factory", ".", "values_trait", "+", "\".items\"", ",", "remove", "=", "True", ",", ")", "if", "self", ".", "control", "is", "not", "None", ":", "self", ".", "control", ".", "clicked", ".", "disconnect", "(", "self", ".", "update_object", ")", "super", "(", ")", ".", "dispose", "(", ")" ]
https://github.com/enthought/traitsui/blob/b7c38c7a47bf6ae7971f9ddab70c8a358647dd25/traitsui/qt4/button_editor.py#L85-L97
SigmaHQ/sigma
6f7d28b52a6468b2430e8d7dfefb79dc01e2f1af
tools/sigma/backends/humio.py
python
HumioBackend.generateANDNode
(self, node)
[]
def generateANDNode(self, node):
    generated = [self.generateNode(val) for val in node]
    filtered = [g for g in generated if g is not None]
    if filtered:
        if self.sort_condition_lists:
            filtered = sorted(filtered)
        if any([item for item in filtered if "regex" in item]):
            res = ""
            for item in filtered:
                if item.startswith("regex"):
                    if res.endswith(" | "):
                        res = res.rstrip(" | ")
                    res += " | %s | " % item.strip(" | ")
                else:
                    res += item
            return res.strip(" | ")
        return self.andToken.join(filtered)
    else:
        return None
[ "def", "generateANDNode", "(", "self", ",", "node", ")", ":", "generated", "=", "[", "self", ".", "generateNode", "(", "val", ")", "for", "val", "in", "node", "]", "filtered", "=", "[", "g", "for", "g", "in", "generated", "if", "g", "is", "not", "None", "]", "if", "filtered", ":", "if", "self", ".", "sort_condition_lists", ":", "filtered", "=", "sorted", "(", "filtered", ")", "if", "any", "(", "[", "item", "for", "item", "in", "filtered", "if", "\"regex\"", "in", "item", "]", ")", ":", "res", "=", "\"\"", "for", "item", "in", "filtered", ":", "if", "item", ".", "startswith", "(", "\"regex\"", ")", ":", "if", "res", ".", "endswith", "(", "\" | \"", ")", ":", "res", "=", "res", ".", "rstrip", "(", "\" | \"", ")", "res", "+=", "\" | %s | \"", "%", "item", ".", "strip", "(", "\" | \"", ")", "else", ":", "res", "+=", "item", "return", "res", ".", "strip", "(", "\" | \"", ")", "return", "self", ".", "andToken", ".", "join", "(", "filtered", ")", "else", ":", "return", "None" ]
https://github.com/SigmaHQ/sigma/blob/6f7d28b52a6468b2430e8d7dfefb79dc01e2f1af/tools/sigma/backends/humio.py#L63-L81
pycket/pycket
05ebd9885efa3a0ae54e77c1a1f07ea441b445c6
pycket/small_list.py
python
inline_small_list
(sizemax=11, sizemin=0, immutable=False, unbox_num=False, nonull=False, attrname="list", factoryname="make", listgettername="_get_full_list", listsizename="_get_size_list", gettername="_get_list", settername="_set_list")
return wrapper
This function is helpful if you have a class with a field storing a list and the list is often very small. Calling this function will inline the list into instances for the small sizes. This works by adding the following methods (names customizable) to the class: _get_list(self, i): return ith element of the list _set_list(self, i, val): set ith element of the list _get_full_list(self): returns a copy of the full list _get_size_list(self): returns the length of the list @staticmethod make(listcontent, *args): makes a new instance with the list's content set to listcontent
This function is helpful if you have a class with a field storing a list and the list is often very small. Calling this function will inline the list into instances for the small sizes. This works by adding the following methods (names customizable) to the class:
[ "This", "function", "is", "helpful", "if", "you", "have", "a", "class", "with", "a", "field", "storing", "a", "list", "and", "the", "list", "is", "often", "very", "small", ".", "Calling", "this", "function", "will", "inline", "the", "list", "into", "instances", "for", "the", "small", "sizes", ".", "This", "works", "by", "adding", "the", "following", "methods", "(", "names", "customizable", ")", "to", "the", "class", ":" ]
def inline_small_list(sizemax=11, sizemin=0, immutable=False, unbox_num=False,
                      nonull=False, attrname="list", factoryname="make",
                      listgettername="_get_full_list",
                      listsizename="_get_size_list",
                      gettername="_get_list", settername="_set_list"):
    """
    This function is helpful if you have a class with a field storing a
    list and the list is often very small. Calling this function will
    inline the list into instances for the small sizes. This works by
    adding the following methods (names customizable) to the class:

    _get_list(self, i): return ith element of the list

    _set_list(self, i, val): set ith element of the list

    _get_full_list(self): returns a copy of the full list

    _get_size_list(self): returns the length of the list

    @staticmethod
    make(listcontent, *args): makes a new instance with the list's
    content set to listcontent
    """
    if not config.type_size_specialization:
        sizemin = sizemax = 0
        unbox_num = False

    def wrapper(cls):
        _immutable_ = getattr(cls, "_immutable_", False)

        def make_class(size):
            attrs = ["_%s_%s" % (attrname, i) for i in range(size)]
            unrolling_enumerate_attrs = unrolling_iterable(enumerate(attrs))

            def _get_size_list(self):
                return size

            def _get_list(self, i):
                for j, attr in unrolling_enumerate_attrs:
                    if j == i:
                        result = getattr(self, attr)
                        if nonull:
                            debug.check_annotation(result, _not_null)
                        return result
                raise IndexError

            def _get_full_list(self):
                res = [None] * size
                for i, attr in unrolling_enumerate_attrs:
                    elem = getattr(self, attr)
                    if nonull:
                        debug.check_annotation(elem, _not_null)
                    res[i] = getattr(self, attr)
                return res

            def _set_list(self, i, val):
                if nonull:
                    assert val is not None
                for j, attr in unrolling_enumerate_attrs:
                    if j == i:
                        setattr(self, attr, val)
                        return
                raise IndexError

            def _init(self, elems, *args):
                assert len(elems) == size
                for i, attr in unrolling_enumerate_attrs:
                    val = elems[i]
                    if nonull:
                        assert val is not None
                    setattr(self, attr, elems[i])
                cls.__init__(self, *args)

            # Methods for the new class being built
            methods = {
                gettername: _get_list,
                listsizename: _get_size_list,
                listgettername: _get_full_list,
                settername: _set_list,
                "__init__": _init,
            }
            newcls = type(cls)("%sSize%s" % (cls.__name__, size), (cls, ), methods)
            if _immutable_:
                setattr(newcls, "_immutable_", True)
                newcls = add_clone_method(newcls)
            if immutable:
                setattr(newcls, "_immutable_fields_", attrs)
                newcls = add_clone_method(newcls)
            if "_attrs_" in cls.__dict__:
                setattr(newcls, "_attrs_", attrs)
            return newcls

        classes = map(make_class, range(sizemin, sizemax))

        # Build the arbitrary sized variant
        def _get_arbitrary(self, i):
            return getattr(self, attrname)[i]

        def _get_size_list_arbitrary(self):
            return len(getattr(self, attrname))

        def _get_list_arbitrary(self):
            return getattr(self, attrname)

        def _set_arbitrary(self, i, val):
            if nonull:
                assert val is not None
            getattr(self, attrname)[i] = val

        def _init(self, elems, *args):
            debug.make_sure_not_resized(elems)
            setattr(self, attrname, elems)
            cls.__init__(self, *args)

        methods = {
            gettername: _get_arbitrary,
            listsizename: _get_size_list_arbitrary,
            listgettername: _get_list_arbitrary,
            settername: _set_arbitrary,
            "__init__": _init,
        }
        cls_arbitrary = type(cls)("%sArbitrary" % cls.__name__, (cls, ), methods)
        if _immutable_:
            setattr(cls_arbitrary, "_immutable_", True)
            cls_arbitrary = add_clone_method(cls_arbitrary)
        if immutable:
            setattr(cls_arbitrary, "_immutable_fields_", ["%s[*]" % (attrname,)])
            cls_arbitrary = add_clone_method(cls_arbitrary)
        if "_attrs_" in cls.__dict__:
            setattr(cls_arbitrary, "_attrs_", attrname)

        def make(elems, *args):
            if classes:
                if (elems is None or len(elems) == 0):
                    return make0(*args)
            else:
                if elems is None:
                    elems = []
            if sizemin <= len(elems) < sizemax:
                cls = classes[len(elems) - sizemin]
            else:
                cls = cls_arbitrary
            return cls(elems, *args)

        # XXX those could be done more nicely
        def make0(*args):
            if not classes:  # no type specialization
                return make([], *args)
            result = objectmodel.instantiate(classes[0])
            cls.__init__(result, *args)
            return result

        def make1(elem, *args):
            if not classes:  # no type specialization
                return make([elem], *args)
            result = objectmodel.instantiate(classes[1])
            result._set_list(0, elem)
            cls.__init__(result, *args)
            return result

        def make2(elem1, elem2, *args):
            if not classes:  # no type specialization
                return make([elem1, elem2], *args)
            result = objectmodel.instantiate(classes[2])
            result._set_list(0, elem1)
            result._set_list(1, elem2)
            cls.__init__(result, *args)
            return result

        def make_n(size, *args):
            if sizemin <= size < sizemax:
                subcls = classes[size - sizemin]
            else:
                subcls = cls_arbitrary
            result = objectmodel.instantiate(subcls)
            if subcls is cls_arbitrary:
                assert isinstance(result, subcls)
                setattr(result, attrname, [None] * size)
            cls.__init__(result, *args)
            return result

        if unbox_num:
            assert _immutable_ or immutable, "unboxing is only supported for immutable objects"
            make, make1, make2 = _add_num_classes(cls, make, make0, make1, make2, immut=_immutable_)

        setattr(cls, factoryname, staticmethod(make))
        setattr(cls, factoryname + "0", staticmethod(make0))
        setattr(cls, factoryname + "1", staticmethod(make1))
        setattr(cls, factoryname + "2", staticmethod(make2))
        setattr(cls, factoryname + "_n", staticmethod(make_n))
        return cls
    return wrapper
[ "def", "inline_small_list", "(", "sizemax", "=", "11", ",", "sizemin", "=", "0", ",", "immutable", "=", "False", ",", "unbox_num", "=", "False", ",", "nonull", "=", "False", ",", "attrname", "=", "\"list\"", ",", "factoryname", "=", "\"make\"", ",", "listgettername", "=", "\"_get_full_list\"", ",", "listsizename", "=", "\"_get_size_list\"", ",", "gettername", "=", "\"_get_list\"", ",", "settername", "=", "\"_set_list\"", ")", ":", "if", "not", "config", ".", "type_size_specialization", ":", "sizemin", "=", "sizemax", "=", "0", "unbox_num", "=", "False", "def", "wrapper", "(", "cls", ")", ":", "_immutable_", "=", "getattr", "(", "cls", ",", "\"_immutable_\"", ",", "False", ")", "def", "make_class", "(", "size", ")", ":", "attrs", "=", "[", "\"_%s_%s\"", "%", "(", "attrname", ",", "i", ")", "for", "i", "in", "range", "(", "size", ")", "]", "unrolling_enumerate_attrs", "=", "unrolling_iterable", "(", "enumerate", "(", "attrs", ")", ")", "def", "_get_size_list", "(", "self", ")", ":", "return", "size", "def", "_get_list", "(", "self", ",", "i", ")", ":", "for", "j", ",", "attr", "in", "unrolling_enumerate_attrs", ":", "if", "j", "==", "i", ":", "result", "=", "getattr", "(", "self", ",", "attr", ")", "if", "nonull", ":", "debug", ".", "check_annotation", "(", "result", ",", "_not_null", ")", "return", "result", "raise", "IndexError", "def", "_get_full_list", "(", "self", ")", ":", "res", "=", "[", "None", "]", "*", "size", "for", "i", ",", "attr", "in", "unrolling_enumerate_attrs", ":", "elem", "=", "getattr", "(", "self", ",", "attr", ")", "if", "nonull", ":", "debug", ".", "check_annotation", "(", "elem", ",", "_not_null", ")", "res", "[", "i", "]", "=", "getattr", "(", "self", ",", "attr", ")", "return", "res", "def", "_set_list", "(", "self", ",", "i", ",", "val", ")", ":", "if", "nonull", ":", "assert", "val", "is", "not", "None", "for", "j", ",", "attr", "in", "unrolling_enumerate_attrs", ":", "if", "j", "==", "i", ":", "setattr", "(", "self", ",", "attr", ",", "val", ")", "return", "raise", "IndexError", "def", "_init", "(", "self", ",", "elems", ",", "*", "args", ")", ":", "assert", "len", "(", "elems", ")", "==", "size", "for", "i", ",", "attr", "in", "unrolling_enumerate_attrs", ":", "val", "=", "elems", "[", "i", "]", "if", "nonull", ":", "assert", "val", "is", "not", "None", "setattr", "(", "self", ",", "attr", ",", "elems", "[", "i", "]", ")", "cls", ".", "__init__", "(", "self", ",", "*", "args", ")", "# Methods for the new class being built", "methods", "=", "{", "gettername", ":", "_get_list", ",", "listsizename", ":", "_get_size_list", ",", "listgettername", ":", "_get_full_list", ",", "settername", ":", "_set_list", ",", "\"__init__\"", ":", "_init", ",", "}", "newcls", "=", "type", "(", "cls", ")", "(", "\"%sSize%s\"", "%", "(", "cls", ".", "__name__", ",", "size", ")", ",", "(", "cls", ",", ")", ",", "methods", ")", "if", "_immutable_", ":", "setattr", "(", "newcls", ",", "\"_immutable_\"", ",", "True", ")", "newcls", "=", "add_clone_method", "(", "newcls", ")", "if", "immutable", ":", "setattr", "(", "newcls", ",", "\"_immutable_fields_\"", ",", "attrs", ")", "newcls", "=", "add_clone_method", "(", "newcls", ")", "if", "\"_attrs_\"", "in", "cls", ".", "__dict__", ":", "setattr", "(", "newcls", ",", "\"_attrs_\"", ",", "attrs", ")", "return", "newcls", "classes", "=", "map", "(", "make_class", ",", "range", "(", "sizemin", ",", "sizemax", ")", ")", "# Build the arbitrary sized variant", "def", "_get_arbitrary", "(", "self", ",", "i", ")", ":", "return", "getattr", "(", 
"self", ",", "attrname", ")", "[", "i", "]", "def", "_get_size_list_arbitrary", "(", "self", ")", ":", "return", "len", "(", "getattr", "(", "self", ",", "attrname", ")", ")", "def", "_get_list_arbitrary", "(", "self", ")", ":", "return", "getattr", "(", "self", ",", "attrname", ")", "def", "_set_arbitrary", "(", "self", ",", "i", ",", "val", ")", ":", "if", "nonull", ":", "assert", "val", "is", "not", "None", "getattr", "(", "self", ",", "attrname", ")", "[", "i", "]", "=", "val", "def", "_init", "(", "self", ",", "elems", ",", "*", "args", ")", ":", "debug", ".", "make_sure_not_resized", "(", "elems", ")", "setattr", "(", "self", ",", "attrname", ",", "elems", ")", "cls", ".", "__init__", "(", "self", ",", "*", "args", ")", "methods", "=", "{", "gettername", ":", "_get_arbitrary", ",", "listsizename", ":", "_get_size_list_arbitrary", ",", "listgettername", ":", "_get_list_arbitrary", ",", "settername", ":", "_set_arbitrary", ",", "\"__init__\"", ":", "_init", ",", "}", "cls_arbitrary", "=", "type", "(", "cls", ")", "(", "\"%sArbitrary\"", "%", "cls", ".", "__name__", ",", "(", "cls", ",", ")", ",", "methods", ")", "if", "_immutable_", ":", "setattr", "(", "cls_arbitrary", ",", "\"_immutable_\"", ",", "True", ")", "cls_arbitrary", "=", "add_clone_method", "(", "cls_arbitrary", ")", "if", "immutable", ":", "setattr", "(", "cls_arbitrary", ",", "\"_immutable_fields_\"", ",", "[", "\"%s[*]\"", "%", "(", "attrname", ",", ")", "]", ")", "cls_arbitrary", "=", "add_clone_method", "(", "cls_arbitrary", ")", "if", "\"_attrs_\"", "in", "cls", ".", "__dict__", ":", "setattr", "(", "cls_arbitrary", ",", "\"_attrs_\"", ",", "attrname", ")", "def", "make", "(", "elems", ",", "*", "args", ")", ":", "if", "classes", ":", "if", "(", "elems", "is", "None", "or", "len", "(", "elems", ")", "==", "0", ")", ":", "return", "make0", "(", "*", "args", ")", "else", ":", "if", "elems", "is", "None", ":", "elems", "=", "[", "]", "if", "sizemin", "<=", "len", "(", "elems", ")", "<", "sizemax", ":", "cls", "=", "classes", "[", "len", "(", "elems", ")", "-", "sizemin", "]", "else", ":", "cls", "=", "cls_arbitrary", "return", "cls", "(", "elems", ",", "*", "args", ")", "# XXX those could be done more nicely", "def", "make0", "(", "*", "args", ")", ":", "if", "not", "classes", ":", "# no type specialization", "return", "make", "(", "[", "]", ",", "*", "args", ")", "result", "=", "objectmodel", ".", "instantiate", "(", "classes", "[", "0", "]", ")", "cls", ".", "__init__", "(", "result", ",", "*", "args", ")", "return", "result", "def", "make1", "(", "elem", ",", "*", "args", ")", ":", "if", "not", "classes", ":", "# no type specialization", "return", "make", "(", "[", "elem", "]", ",", "*", "args", ")", "result", "=", "objectmodel", ".", "instantiate", "(", "classes", "[", "1", "]", ")", "result", ".", "_set_list", "(", "0", ",", "elem", ")", "cls", ".", "__init__", "(", "result", ",", "*", "args", ")", "return", "result", "def", "make2", "(", "elem1", ",", "elem2", ",", "*", "args", ")", ":", "if", "not", "classes", ":", "# no type specialization", "return", "make", "(", "[", "elem1", ",", "elem2", "]", ",", "*", "args", ")", "result", "=", "objectmodel", ".", "instantiate", "(", "classes", "[", "2", "]", ")", "result", ".", "_set_list", "(", "0", ",", "elem1", ")", "result", ".", "_set_list", "(", "1", ",", "elem2", ")", "cls", ".", "__init__", "(", "result", ",", "*", "args", ")", "return", "result", "def", "make_n", "(", "size", ",", "*", "args", ")", ":", "if", "sizemin", "<=", "size", "<", "sizemax", ":", "subcls", 
"=", "classes", "[", "size", "-", "sizemin", "]", "else", ":", "subcls", "=", "cls_arbitrary", "result", "=", "objectmodel", ".", "instantiate", "(", "subcls", ")", "if", "subcls", "is", "cls_arbitrary", ":", "assert", "isinstance", "(", "result", ",", "subcls", ")", "setattr", "(", "result", ",", "attrname", ",", "[", "None", "]", "*", "size", ")", "cls", ".", "__init__", "(", "result", ",", "*", "args", ")", "return", "result", "if", "unbox_num", ":", "assert", "_immutable_", "or", "immutable", ",", "\"unboxing is only supported for immutable objects\"", "make", ",", "make1", ",", "make2", "=", "_add_num_classes", "(", "cls", ",", "make", ",", "make0", ",", "make1", ",", "make2", ",", "immut", "=", "_immutable_", ")", "setattr", "(", "cls", ",", "factoryname", ",", "staticmethod", "(", "make", ")", ")", "setattr", "(", "cls", ",", "factoryname", "+", "\"0\"", ",", "staticmethod", "(", "make0", ")", ")", "setattr", "(", "cls", ",", "factoryname", "+", "\"1\"", ",", "staticmethod", "(", "make1", ")", ")", "setattr", "(", "cls", ",", "factoryname", "+", "\"2\"", ",", "staticmethod", "(", "make2", ")", ")", "setattr", "(", "cls", ",", "factoryname", "+", "\"_n\"", ",", "staticmethod", "(", "make_n", ")", ")", "return", "cls", "return", "wrapper" ]
https://github.com/pycket/pycket/blob/05ebd9885efa3a0ae54e77c1a1f07ea441b445c6/pycket/small_list.py#L13-L200
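For illustration, a hypothetical usage sketch based on the docstring above (the Node class, its field, and the values are invented; this assumes pycket's RPython environment, where the decorator is defined):

from pycket.small_list import inline_small_list

@inline_small_list(sizemax=5, immutable=True)
class Node(object):
    def __init__(self, name):
        self.name = name

# `make` is the generated factory; trailing args go to Node.__init__.
n = Node.make([1, 2, 3], "leaf")
assert n._get_size_list() == 3
assert n._get_list(0) == 1
assert n._get_full_list() == [1, 2, 3]
assert n.name == "leaf"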
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/快递速查/requests/sessions.py
python
Session.request
(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None)
return resp
Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) Float describing the timeout of the request. :param allow_redirects: (optional) Boolean. Set to True by default. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object.
[ "Constructs", "a", ":", "class", ":", "Request", "<Request", ">", "prepares", "it", "and", "sends", "it", ".", "Returns", ":", "class", ":", "Response", "<Response", ">", "object", "." ]
def request(self, method, url,
    params=None, data=None, headers=None, cookies=None, files=None,
    auth=None, timeout=None, allow_redirects=True, proxies=None,
    hooks=None, stream=None, verify=None, cert=None):
    """Constructs a :class:`Request <Request>`, prepares it and sends it.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query
        string for the :class:`Request`.
    :param data: (optional) Dictionary or bytes to send in the body of the
        :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the
        :class:`Request`.
    :param files: (optional) Dictionary of 'filename': file-like-objects
        for multipart encoding upload.
    :param auth: (optional) Auth tuple or callable to enable
        Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) Float describing the timeout of the request.
    :param allow_redirects: (optional) Boolean. Set to True by default.
    :param proxies: (optional) Dictionary mapping protocol to the URL of
        the proxy.
    :param stream: (optional) whether to immediately download the response
        content. Defaults to ``False``.
    :param verify: (optional) if ``True``, the SSL cert will be verified.
        A CA_BUNDLE path can also be provided.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    """
    cookies = cookies or {}
    proxies = proxies or {}

    # Bootstrap CookieJar.
    if not isinstance(cookies, cookielib.CookieJar):
        cookies = cookiejar_from_dict(cookies)

    # Merge with session cookies
    merged_cookies = self.cookies.copy()
    merged_cookies.update(cookies)
    cookies = merged_cookies

    # Gather clues from the surrounding environment.
    if self.trust_env:
        # Set environment's proxies.
        env_proxies = get_environ_proxies(url) or {}
        for (k, v) in env_proxies.items():
            proxies.setdefault(k, v)

        # Set environment's basic authentication.
        if not auth:
            auth = get_netrc_auth(url)

        # Look for configuration.
        if not verify and verify is not False:
            verify = os.environ.get('REQUESTS_CA_BUNDLE')

        # Curl compatibility.
        if not verify and verify is not False:
            verify = os.environ.get('CURL_CA_BUNDLE')

    # Merge all the kwargs.
    params = merge_kwargs(params, self.params)
    headers = merge_kwargs(headers, self.headers)
    auth = merge_kwargs(auth, self.auth)
    proxies = merge_kwargs(proxies, self.proxies)
    hooks = merge_kwargs(hooks, self.hooks)
    stream = merge_kwargs(stream, self.stream)
    verify = merge_kwargs(verify, self.verify)
    cert = merge_kwargs(cert, self.cert)

    # Create the Request.
    req = Request()
    req.method = method.upper()
    req.url = url
    req.headers = headers
    req.files = files
    req.data = data
    req.params = params
    req.auth = auth
    req.cookies = cookies
    req.hooks = hooks

    # Prepare the Request.
    prep = req.prepare()

    # Send the request.
    send_kwargs = {
        'stream': stream,
        'timeout': timeout,
        'verify': verify,
        'cert': cert,
        'proxies': proxies,
        'allow_redirects': allow_redirects,
    }
    resp = self.send(prep, **send_kwargs)

    return resp
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "cookies", "=", "None", ",", "files", "=", "None", ",", "auth", "=", "None", ",", "timeout", "=", "None", ",", "allow_redirects", "=", "True", ",", "proxies", "=", "None", ",", "hooks", "=", "None", ",", "stream", "=", "None", ",", "verify", "=", "None", ",", "cert", "=", "None", ")", ":", "cookies", "=", "cookies", "or", "{", "}", "proxies", "=", "proxies", "or", "{", "}", "# Bootstrap CookieJar.", "if", "not", "isinstance", "(", "cookies", ",", "cookielib", ".", "CookieJar", ")", ":", "cookies", "=", "cookiejar_from_dict", "(", "cookies", ")", "# Merge with session cookies", "merged_cookies", "=", "self", ".", "cookies", ".", "copy", "(", ")", "merged_cookies", ".", "update", "(", "cookies", ")", "cookies", "=", "merged_cookies", "# Gather clues from the surrounding environment.", "if", "self", ".", "trust_env", ":", "# Set environment's proxies.", "env_proxies", "=", "get_environ_proxies", "(", "url", ")", "or", "{", "}", "for", "(", "k", ",", "v", ")", "in", "env_proxies", ".", "items", "(", ")", ":", "proxies", ".", "setdefault", "(", "k", ",", "v", ")", "# Set environment's basic authentication.", "if", "not", "auth", ":", "auth", "=", "get_netrc_auth", "(", "url", ")", "# Look for configuration.", "if", "not", "verify", "and", "verify", "is", "not", "False", ":", "verify", "=", "os", ".", "environ", ".", "get", "(", "'REQUESTS_CA_BUNDLE'", ")", "# Curl compatibility.", "if", "not", "verify", "and", "verify", "is", "not", "False", ":", "verify", "=", "os", ".", "environ", ".", "get", "(", "'CURL_CA_BUNDLE'", ")", "# Merge all the kwargs.", "params", "=", "merge_kwargs", "(", "params", ",", "self", ".", "params", ")", "headers", "=", "merge_kwargs", "(", "headers", ",", "self", ".", "headers", ")", "auth", "=", "merge_kwargs", "(", "auth", ",", "self", ".", "auth", ")", "proxies", "=", "merge_kwargs", "(", "proxies", ",", "self", ".", "proxies", ")", "hooks", "=", "merge_kwargs", "(", "hooks", ",", "self", ".", "hooks", ")", "stream", "=", "merge_kwargs", "(", "stream", ",", "self", ".", "stream", ")", "verify", "=", "merge_kwargs", "(", "verify", ",", "self", ".", "verify", ")", "cert", "=", "merge_kwargs", "(", "cert", ",", "self", ".", "cert", ")", "# Create the Request.", "req", "=", "Request", "(", ")", "req", ".", "method", "=", "method", ".", "upper", "(", ")", "req", ".", "url", "=", "url", "req", ".", "headers", "=", "headers", "req", ".", "files", "=", "files", "req", ".", "data", "=", "data", "req", ".", "params", "=", "params", "req", ".", "auth", "=", "auth", "req", ".", "cookies", "=", "cookies", "req", ".", "hooks", "=", "hooks", "# Prepare the Request.", "prep", "=", "req", ".", "prepare", "(", ")", "# Send the request.", "send_kwargs", "=", "{", "'stream'", ":", "stream", ",", "'timeout'", ":", "timeout", ",", "'verify'", ":", "verify", ",", "'cert'", ":", "cert", ",", "'proxies'", ":", "proxies", ",", "'allow_redirects'", ":", "allow_redirects", ",", "}", "resp", "=", "self", ".", "send", "(", "prep", ",", "*", "*", "send_kwargs", ")", "return", "resp" ]
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/快递速查/requests/sessions.py#L235-L346
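For context, a minimal usage sketch of the Session API shown above (the URL is illustrative; the same call shape works against a modern requests install, not just this vendored copy):

import requests

s = requests.Session()
resp = s.request("GET", "https://httpbin.org/get",
                 params={"q": "test"}, timeout=5.0)
print(resp.status_code)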
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/pprint.py
python
pprint
(object, stream=None, indent=1, width=80, depth=None)
Pretty-print a Python object to a stream [default is sys.stdout].
Pretty-print a Python object to a stream [default is sys.stdout].
[ "Pretty", "-", "print", "a", "Python", "object", "to", "a", "stream", "[", "default", "is", "sys", ".", "stdout", "]", "." ]
def pprint(object, stream=None, indent=1, width=80, depth=None):
    """Pretty-print a Python object to a stream [default is sys.stdout]."""
    printer = PrettyPrinter(
        stream=stream, indent=indent, width=width, depth=depth)
    printer.pprint(object)
[ "def", "pprint", "(", "object", ",", "stream", "=", "None", ",", "indent", "=", "1", ",", "width", "=", "80", ",", "depth", "=", "None", ")", ":", "printer", "=", "PrettyPrinter", "(", "stream", "=", "stream", ",", "indent", "=", "indent", ",", "width", "=", "width", ",", "depth", "=", "depth", ")", "printer", ".", "pprint", "(", "object", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/pprint.py#L52-L56
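Since this is a vendored copy of the standard-library pprint module, the helper is used exactly like stdlib pprint:

from pprint import pprint

# Pretty-prints to sys.stdout, wrapping nested structures at width=40.
pprint({"a": [1, 2, 3], "b": {"nested": True}}, width=40)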
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/aggregate_ucrs/models.py
python
AggregateTableDefinition.get_secondary_tables_and_adapters
(self)
return [ (table, get_indicator_adapter(table.data_source).get_table()) for table in self.secondary_tables.all() ]
[]
def get_secondary_tables_and_adapters(self):
    return [
        (table, get_indicator_adapter(table.data_source).get_table())
        for table in self.secondary_tables.all()
    ]
[ "def", "get_secondary_tables_and_adapters", "(", "self", ")", ":", "return", "[", "(", "table", ",", "get_indicator_adapter", "(", "table", ".", "data_source", ")", ".", "get_table", "(", ")", ")", "for", "table", "in", "self", ".", "secondary_tables", ".", "all", "(", ")", "]" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/aggregate_ucrs/models.py#L141-L145
PaddlePaddle/PGL
e48545f2814523c777b8a9a9188bf5a7f00d6e52
legacy/pgl/graph_wrapper.py
python
StaticGraphWrapper.__create_graph_node_feat
(self, node_feat, collector)
Convert node features into paddlepaddle tensor.
Convert node features into paddlepaddle tensor.
[ "Convert", "node", "features", "into", "paddlepaddle", "tensor", "." ]
def __create_graph_node_feat(self, node_feat, collector):
    """Convert node features into paddlepaddle tensor.
    """
    for node_feat_name, node_feat_value in node_feat.items():
        node_feat_shape = node_feat_value.shape
        node_feat_dtype = node_feat_value.dtype
        self.node_feat_tensor_dict[
            node_feat_name], init = paddle_helper.constant(
                name=self._data_name_prefix + '/node_feat/' + node_feat_name,
                dtype=node_feat_dtype,
                value=node_feat_value)
        collector.append(init)
[ "def", "__create_graph_node_feat", "(", "self", ",", "node_feat", ",", "collector", ")", ":", "for", "node_feat_name", ",", "node_feat_value", "in", "node_feat", ".", "items", "(", ")", ":", "node_feat_shape", "=", "node_feat_value", ".", "shape", "node_feat_dtype", "=", "node_feat_value", ".", "dtype", "self", ".", "node_feat_tensor_dict", "[", "node_feat_name", "]", ",", "init", "=", "paddle_helper", ".", "constant", "(", "name", "=", "self", ".", "_data_name_prefix", "+", "'/node_feat/'", "+", "node_feat_name", ",", "dtype", "=", "node_feat_dtype", ",", "value", "=", "node_feat_value", ")", "collector", ".", "append", "(", "init", ")" ]
https://github.com/PaddlePaddle/PGL/blob/e48545f2814523c777b8a9a9188bf5a7f00d6e52/legacy/pgl/graph_wrapper.py#L438-L450
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/werkzeug/contrib/cache.py
python
RedisCache.clear
(self)
return status
[]
def clear(self):
    status = False
    if self.key_prefix:
        keys = self._client.keys(self.key_prefix + '*')
        if keys:
            status = self._client.delete(*keys)
    else:
        status = self._client.flushdb()
    return status
[ "def", "clear", "(", "self", ")", ":", "status", "=", "False", "if", "self", ".", "key_prefix", ":", "keys", "=", "self", ".", "_client", ".", "keys", "(", "self", ".", "key_prefix", "+", "'*'", ")", "if", "keys", ":", "status", "=", "self", ".", "_client", ".", "delete", "(", "*", "keys", ")", "else", ":", "status", "=", "self", ".", "_client", ".", "flushdb", "(", ")", "return", "status" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/werkzeug/contrib/cache.py#L670-L678
LinOTP/LinOTP
bb3940bbaccea99550e6c063ff824f258dd6d6d7
linotp/lib/resolver.py
python
_flush_user_resolver_cache
(resolver_spec)
flush the user realm cache in case of a change of the resolver, all realms which use this resolver must be flushed :param resolver_spec: the resolve which has been updated :return: - nothing -
flush the user realm cache in case of a change of the resolver, all realms which use this resolver must be flushed
[ "flush", "the", "user", "realm", "cache", "in", "case", "of", "a", "change", "of", "the", "resolver", "all", "realms", "which", "use", "this", "resolver", "must", "be", "flushed" ]
def _flush_user_resolver_cache(resolver_spec):
    """
    flush the user realm cache
        in case of a change of the resolver, all realms which use
        this resolver must be flushed

    :param resolver_spec: the resolve which has been updated
    :return: - nothing -
    """
    from linotp.lib.user import (
        delete_realm_resolver_cache,
        delete_resolver_user_cache,
    )

    delete_resolver_user_cache(resolver_spec)

    config = context["Config"]
    realms = config.getRealms()

    # if a resolver is redefined, we have to refresh the related realm cache
    for realm_name, realm_spec in list(realms.items()):
        resolvers = realm_spec.get("useridresolver", [])
        if resolver_spec in resolvers:
            delete_realm_resolver_cache(realm_name)
[ "def", "_flush_user_resolver_cache", "(", "resolver_spec", ")", ":", "from", "linotp", ".", "lib", ".", "user", "import", "(", "delete_realm_resolver_cache", ",", "delete_resolver_user_cache", ",", ")", "delete_resolver_user_cache", "(", "resolver_spec", ")", "config", "=", "context", "[", "\"Config\"", "]", "realms", "=", "config", ".", "getRealms", "(", ")", "# if a resolver is redefined, we have to refresh the related realm cache", "for", "realm_name", ",", "realm_spec", "in", "list", "(", "realms", ".", "items", "(", ")", ")", ":", "resolvers", "=", "realm_spec", ".", "get", "(", "\"useridresolver\"", ",", "[", "]", ")", "if", "resolver_spec", "in", "resolvers", ":", "delete_realm_resolver_cache", "(", "realm_name", ")" ]
https://github.com/LinOTP/LinOTP/blob/bb3940bbaccea99550e6c063ff824f258dd6d6d7/linotp/lib/resolver.py#L685-L709
eandersson/amqpstorm
7f57cf1291c8b3817527c10aae317aa1702654bc
amqpstorm/io.py
python
IO._process_incoming_data
(self)
Retrieve and process any incoming data. :return:
Retrieve and process any incoming data.
[ "Retrieve", "and", "process", "any", "incoming", "data", "." ]
def _process_incoming_data(self):
    """Retrieve and process any incoming data.

    :return:
    """
    while self._running.is_set():
        if self.poller.is_ready:
            self.data_in += self._receive()
        self.data_in = self._on_read_impl(self.data_in)
[ "def", "_process_incoming_data", "(", "self", ")", ":", "while", "self", ".", "_running", ".", "is_set", "(", ")", ":", "if", "self", ".", "poller", ".", "is_ready", ":", "self", ".", "data_in", "+=", "self", ".", "_receive", "(", ")", "self", ".", "data_in", "=", "self", ".", "_on_read_impl", "(", "self", ".", "data_in", ")" ]
https://github.com/eandersson/amqpstorm/blob/7f57cf1291c8b3817527c10aae317aa1702654bc/amqpstorm/io.py#L253-L261
msracver/Deformable-ConvNets
6aeda878a95bcb55eadffbe125804e730574de8d
deeplab/core/module.py
python
Module.load
(prefix, epoch, load_optimizer_states=False, **kwargs)
return mod
Create a model from previously saved checkpoint. Parameters ---------- prefix : str path prefix of saved model files. You should have "prefix-symbol.json", "prefix-xxxx.params", and optionally "prefix-xxxx.states", where xxxx is the epoch number. epoch : int epoch to load. load_optimizer_states : bool whether to load optimizer states. Checkpoint needs to have been made with save_optimizer_states=True. data_names : list of str Default is `('data')` for a typical model used in image classification. label_names : list of str Default is `('softmax_label')` for a typical model used in image classification. logger : Logger Default is `logging`. context : Context or list of Context Default is `cpu()`. work_load_list : list of number Default `None`, indicating uniform workload. fixed_param_names: list of str Default `None`, indicating no network parameters are fixed.
Create a model from previously saved checkpoint.
[ "Create", "a", "model", "from", "previously", "saved", "checkpoint", "." ]
def load(prefix, epoch, load_optimizer_states=False, **kwargs):
    """Create a model from previously saved checkpoint.

    Parameters
    ----------
    prefix : str
        path prefix of saved model files. You should have
        "prefix-symbol.json", "prefix-xxxx.params", and
        optionally "prefix-xxxx.states", where xxxx is the
        epoch number.
    epoch : int
        epoch to load.
    load_optimizer_states : bool
        whether to load optimizer states. Checkpoint needs
        to have been made with save_optimizer_states=True.
    data_names : list of str
        Default is `('data')` for a typical model used in image
        classification.
    label_names : list of str
        Default is `('softmax_label')` for a typical model used in image
        classification.
    logger : Logger
        Default is `logging`.
    context : Context or list of Context
        Default is `cpu()`.
    work_load_list : list of number
        Default `None`, indicating uniform workload.
    fixed_param_names: list of str
        Default `None`, indicating no network parameters are fixed.
    """
    sym, args, auxs = load_checkpoint(prefix, epoch)
    mod = Module(symbol=sym, **kwargs)
    mod._arg_params = args
    mod._aux_params = auxs
    mod.params_initialized = True
    if load_optimizer_states:
        mod._preload_opt_states = '%s-%04d.states'%(prefix, epoch)
    return mod
[ "def", "load", "(", "prefix", ",", "epoch", ",", "load_optimizer_states", "=", "False", ",", "*", "*", "kwargs", ")", ":", "sym", ",", "args", ",", "auxs", "=", "load_checkpoint", "(", "prefix", ",", "epoch", ")", "mod", "=", "Module", "(", "symbol", "=", "sym", ",", "*", "*", "kwargs", ")", "mod", ".", "_arg_params", "=", "args", "mod", ".", "_aux_params", "=", "auxs", "mod", ".", "params_initialized", "=", "True", "if", "load_optimizer_states", ":", "mod", ".", "_preload_opt_states", "=", "'%s-%04d.states'", "%", "(", "prefix", ",", "epoch", ")", "return", "mod" ]
https://github.com/msracver/Deformable-ConvNets/blob/6aeda878a95bcb55eadffbe125804e730574de8d/deeplab/core/module.py#L110-L146
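A hedged usage sketch based on the docstring above (the checkpoint files and the mxnet import are assumptions, not taken from the repo):

import mxnet as mx

# Expects model-symbol.json and model-0003.params on disk
# (plus model-0003.states if load_optimizer_states=True).
mod = Module.load('model', 3, load_optimizer_states=False,
                  context=mx.cpu())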
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/repr.py
python
Repr.repr_set
(self, x, level)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
[]
def repr_set(self, x, level):
    x = _possibly_sorted(x)
    return self._repr_iterable(x, level, 'set([', '])', self.maxset)
[ "def", "repr_set", "(", "self", ",", "x", ",", "level", ")", ":", "x", "=", "_possibly_sorted", "(", "x", ")", "return", "self", ".", "_repr_iterable", "(", "x", ",", "level", ",", "'set(['", ",", "'])'", ",", "self", ".", "maxset", ")" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/repr.py#L64-L66
mlcommons/ck
558a22c5970eb0d6708d0edc080e62a92566bab0
ck/repo/module/result/module.py
python
postprocess_html
(i)
return {'return':0, 'html':h}
Input: { html - html to post-process original_input (dict) - passing extra parameters from URL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 html - post-processed html }
Input: { html - html to post-process
[ "Input", ":", "{", "html", "-", "html", "to", "post", "-", "process" ]
def postprocess_html(i):
    """
    Input:  {
              html - html to post-process

              original_input (dict) - passing extra parameters from URL
            }

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0

              html    - post-processed html
            }
    """

    h=i['html']

    # Substitutions
    sub={
        'ck_html_title':'',
        'ck_html_title_main':'',
        'ck_html_title_note':'',
        'ck_html_end_note':'',
        'ck_html_form':''
    }

    # Check cfg to customize input
    oi=i.get('original_input',{})

    result_cfg=oi.get('cfg','')
    cfg_id=oi.get('cfg_id','')

    if result_cfg!='':
        r=load_cfg({'cfg':result_cfg, 'cfg_id':cfg_id})
        if r['return']>0: return r

        dcfg=r['dict']

        update_html=dcfg.get('update_html',{})
        if len(update_html)>0:
            sub.update(update_html)

        sub['ck_cfg_uoa']=result_cfg
        sub['ck_cfg_id']=r['cfg_id']
        sub['ck_html_form']=r['html_selector']

    # Check other params in original input and pass them to HTML
    for k in common_data_keys + list(common_data_keys2.keys()) + ['user']:
        sub['ck_'+k]=oi.get(k,'')

    # Update html
    for s in sub:
        h=h.replace('$#'+s+'#$', sub[s])

    return {'return':0, 'html':h}
[ "def", "postprocess_html", "(", "i", ")", ":", "h", "=", "i", "[", "'html'", "]", "# Substitutions", "sub", "=", "{", "'ck_html_title'", ":", "''", ",", "'ck_html_title_main'", ":", "''", ",", "'ck_html_title_note'", ":", "''", ",", "'ck_html_end_note'", ":", "''", ",", "'ck_html_form'", ":", "''", "}", "# Check cfg to customize input", "oi", "=", "i", ".", "get", "(", "'original_input'", ",", "{", "}", ")", "result_cfg", "=", "oi", ".", "get", "(", "'cfg'", ",", "''", ")", "cfg_id", "=", "oi", ".", "get", "(", "'cfg_id'", ",", "''", ")", "if", "result_cfg", "!=", "''", ":", "r", "=", "load_cfg", "(", "{", "'cfg'", ":", "result_cfg", ",", "'cfg_id'", ":", "cfg_id", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "dcfg", "=", "r", "[", "'dict'", "]", "update_html", "=", "dcfg", ".", "get", "(", "'update_html'", ",", "{", "}", ")", "if", "len", "(", "update_html", ")", ">", "0", ":", "sub", ".", "update", "(", "update_html", ")", "sub", "[", "'ck_cfg_uoa'", "]", "=", "result_cfg", "sub", "[", "'ck_cfg_id'", "]", "=", "r", "[", "'cfg_id'", "]", "sub", "[", "'ck_html_form'", "]", "=", "r", "[", "'html_selector'", "]", "# Check other params in original input and pass them to HTML", "for", "k", "in", "common_data_keys", "+", "list", "(", "common_data_keys2", ".", "keys", "(", ")", ")", "+", "[", "'user'", "]", ":", "sub", "[", "'ck_'", "+", "k", "]", "=", "oi", ".", "get", "(", "k", ",", "''", ")", "# Update html", "for", "s", "in", "sub", ":", "h", "=", "h", ".", "replace", "(", "'$#'", "+", "s", "+", "'#$'", ",", "sub", "[", "s", "]", ")", "return", "{", "'return'", ":", "0", ",", "'html'", ":", "h", "}" ]
https://github.com/mlcommons/ck/blob/558a22c5970eb0d6708d0edc080e62a92566bab0/ck/repo/module/result/module.py#L234-L291
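A toy illustration of the `$#key#$` placeholder substitution performed at the end of postprocess_html (the template string here is hypothetical):

h = "<title>$#ck_html_title#$</title>"
sub = {'ck_html_title': 'Results'}
for s in sub:
    h = h.replace('$#' + s + '#$', sub[s])
print(h)  # <title>Results</title>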
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/django/contrib/gis/db/backends/mysql/schema.py
python
MySQLGISSchemaEditor.remove_field
(self, model, field)
[]
def remove_field(self, model, field):
    if isinstance(field, GeometryField) and field.spatial_index:
        qn = self.connection.ops.quote_name
        sql = self.sql_drop_spatial_index % {
            'index': qn(self._create_spatial_index_name(model, field)),
            'table': qn(model._meta.db_table),
        }
        try:
            self.execute(sql)
        except OperationalError:
            logger.error(
                "Couldn't remove spatial index: %s (may be expected "
                "if your storage engine doesn't support them).", sql
            )
    super(MySQLGISSchemaEditor, self).remove_field(model, field)
[ "def", "remove_field", "(", "self", ",", "model", ",", "field", ")", ":", "if", "isinstance", "(", "field", ",", "GeometryField", ")", "and", "field", ".", "spatial_index", ":", "qn", "=", "self", ".", "connection", ".", "ops", ".", "quote_name", "sql", "=", "self", ".", "sql_drop_spatial_index", "%", "{", "'index'", ":", "qn", "(", "self", ".", "_create_spatial_index_name", "(", "model", ",", "field", ")", ")", ",", "'table'", ":", "qn", "(", "model", ".", "_meta", ".", "db_table", ")", ",", "}", "try", ":", "self", ".", "execute", "(", "sql", ")", "except", "OperationalError", ":", "logger", ".", "error", "(", "\"Couldn't remove spatial index: %s (may be expected \"", "\"if your storage engine doesn't support them).\"", ",", "sql", ")", "super", "(", "MySQLGISSchemaEditor", ",", "self", ")", ".", "remove_field", "(", "model", ",", "field", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/contrib/gis/db/backends/mysql/schema.py#L48-L63
Pylons/substanced
a897f4a0518c51b6e093cc5af39fa326f23752c2
substanced/event/__init__.py
python
add_content_subscriber
(config, subscriber, iface=None, **predicates)
Configurator directive that works like Pyramid's ``add_subscriber``, except it wraps the subscriber in something that first adds the ``registry`` attribute to the event being sent before the wrapped subscriber is called.
Configurator directive that works like Pyramid's ``add_subscriber``, except it wraps the subscriber in something that first adds the ``registry`` attribute to the event being sent before the wrapped subscriber is called.
[ "Configurator", "directive", "that", "works", "like", "Pyramid", "s", "add_subscriber", "except", "it", "wraps", "the", "subscriber", "in", "something", "that", "first", "adds", "the", "registry", "attribute", "to", "the", "event", "being", "sent", "before", "the", "wrapped", "subscriber", "is", "called", "." ]
def add_content_subscriber(config, subscriber, iface=None, **predicates):
    """ Configurator directive that works like Pyramid's ``add_subscriber``,
    except it wraps the subscriber in something that first adds the
    ``registry`` attribute to the event being sent before the wrapped
    subscriber is called."""
    registry = config.registry
    def wrapper(event, *arg): # *arg ignored, XXX it can go away pyr1.4b1+
        event.registry = registry
        return subscriber(event)
    if hasattr(subscriber, '__name__'):
        update_wrapper(wrapper, subscriber)
    wrapper.wrapped = subscriber
    config.add_subscriber(wrapper, iface, **predicates)
[ "def", "add_content_subscriber", "(", "config", ",", "subscriber", ",", "iface", "=", "None", ",", "*", "*", "predicates", ")", ":", "registry", "=", "config", ".", "registry", "def", "wrapper", "(", "event", ",", "*", "arg", ")", ":", "# *arg ignored, XXX it can go away pyr1.4b1+", "event", ".", "registry", "=", "registry", "return", "subscriber", "(", "event", ")", "if", "hasattr", "(", "subscriber", ",", "'__name__'", ")", ":", "update_wrapper", "(", "wrapper", ",", "subscriber", ")", "wrapper", ".", "wrapped", "=", "subscriber", "config", ".", "add_subscriber", "(", "wrapper", ",", "iface", ",", "*", "*", "predicates", ")" ]
https://github.com/Pylons/substanced/blob/a897f4a0518c51b6e093cc5af39fa326f23752c2/substanced/event/__init__.py#L271-L283
merkremont/LineVodka
c2fa74107cecf00dd17416b62e4eb579e2c7bbaf
LineAlpha/LineThrift/TalkService.py
python
Client.report
(self, syncOpRevision, category, report)
Parameters: - syncOpRevision - category - report
Parameters: - syncOpRevision - category - report
[ "Parameters", ":", "-", "syncOpRevision", "-", "category", "-", "report" ]
def report(self, syncOpRevision, category, report):
    """
    Parameters:
     - syncOpRevision
     - category
     - report
    """
    self.send_report(syncOpRevision, category, report)
    self.recv_report()
[ "def", "report", "(", "self", ",", "syncOpRevision", ",", "category", ",", "report", ")", ":", "self", ".", "send_report", "(", "syncOpRevision", ",", "category", ",", "report", ")", "self", ".", "recv_report", "(", ")" ]
https://github.com/merkremont/LineVodka/blob/c2fa74107cecf00dd17416b62e4eb579e2c7bbaf/LineAlpha/LineThrift/TalkService.py#L5920-L5928
Abjad/abjad
d0646dfbe83db3dc5ab268f76a0950712b87b7fd
abjad/parsers/parser.py
python
LilyPondSyntacticalDefinition.p_post_event_nofinger__Chr94__fingering
(self, p)
post_event_nofinger : '^' fingering
post_event_nofinger : '^' fingering
[ "post_event_nofinger", ":", "^", "fingering" ]
def p_post_event_nofinger__Chr94__fingering(self, p):
    "post_event_nofinger : '^' fingering"
    p[0] = None
[ "def", "p_post_event_nofinger__Chr94__fingering", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "None" ]
https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/parsers/parser.py#L6074-L6076
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/sandpiles/sandpile.py
python
SandpileDivisor.deg
(self)
return self._deg
r""" The degree of the divisor. OUTPUT: integer EXAMPLES:: sage: S = sandpiles.Cycle(3) sage: D = SandpileDivisor(S, [1,2,3]) sage: D.deg() 6
r""" The degree of the divisor.
[ "r", "The", "degree", "of", "the", "divisor", "." ]
def deg(self):
    r"""
    The degree of the divisor.

    OUTPUT:

    integer

    EXAMPLES::

        sage: S = sandpiles.Cycle(3)
        sage: D = SandpileDivisor(S, [1,2,3])
        sage: D.deg()
        6
    """
    return self._deg
[ "def", "deg", "(", "self", ")", ":", "return", "self", ".", "_deg" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/sandpiles/sandpile.py#L4433-L4448
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/ensemble/boosting/homo/homo_secureboosting_aggregator.py
python
DecisionTreeArbiterAggregator.__init__
(self, verbose=False)
[]
def __init__(self, verbose=False):
    self.aggregator = secure_sum_aggregator.Server(enable_secure_aggregate=True)
    self.scatter = loss_scatter.Server()
    self.verbose = verbose
[ "def", "__init__", "(", "self", ",", "verbose", "=", "False", ")", ":", "self", ".", "aggregator", "=", "secure_sum_aggregator", ".", "Server", "(", "enable_secure_aggregate", "=", "True", ")", "self", ".", "scatter", "=", "loss_scatter", ".", "Server", "(", ")", "self", ".", "verbose", "=", "verbose" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/ensemble/boosting/homo/homo_secureboosting_aggregator.py#L51-L54
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/billiard/_win.py
python
get_processtree_pids
(pid, include_parent=True)
return list(pids)
Return a list with all the pids of a process tree
Return a list with all the pids of a process tree
[ "Return", "a", "list", "with", "all", "the", "pids", "of", "a", "process", "tree" ]
def get_processtree_pids(pid, include_parent=True):
    """Return a list with all the pids of a process tree"""
    parents = get_all_processes_pids()
    all_pids = list(parents.keys())
    pids = {pid}
    while 1:
        pids_new = pids.copy()
        for _pid in all_pids:
            if parents[_pid] in pids:
                pids_new.add(_pid)
        if pids_new == pids:
            break
        pids = pids_new.copy()
    if not include_parent:
        pids.remove(pid)
    return list(pids)
[ "def", "get_processtree_pids", "(", "pid", ",", "include_parent", "=", "True", ")", ":", "parents", "=", "get_all_processes_pids", "(", ")", "all_pids", "=", "list", "(", "parents", ".", "keys", "(", ")", ")", "pids", "=", "{", "pid", "}", "while", "1", ":", "pids_new", "=", "pids", ".", "copy", "(", ")", "for", "_pid", "in", "all_pids", ":", "if", "parents", "[", "_pid", "]", "in", "pids", ":", "pids_new", ".", "add", "(", "_pid", ")", "if", "pids_new", "==", "pids", ":", "break", "pids", "=", "pids_new", ".", "copy", "(", ")", "if", "not", "include_parent", ":", "pids", ".", "remove", "(", "pid", ")", "return", "list", "(", "pids", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/billiard/_win.py#L88-L108
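The loop above is a fixed-point expansion over a child-pid -> parent-pid map; a self-contained illustration with a hypothetical map:

# parents maps child pid -> parent pid; the tree rooted at 1 is {1, 2, 3, 4}.
parents = {2: 1, 3: 2, 4: 2, 5: 9}
pids = {1}
while True:
    pids_new = pids | {p for p, parent in parents.items() if parent in pids}
    if pids_new == pids:
        break
    pids = pids_new
print(sorted(pids))  # [1, 2, 3, 4]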
iniqua/plecost
ef0d89bfdf1ef870bd11b1d8bdf93a8ce9ec6ca0
plecost_lib/libs/db.py
python
db_query
(data)
return _actions[data.action](data, db)
Query the database and return a text with the information. :param data: PlecostDatabaseQuery object :type data: PlecostDatabaseQuery :return: results of query :rtype: str
Query the database and return a text with the information.
[ "Query", "the", "database", "and", "return", "a", "text", "with", "the", "information", "." ]
def db_query(data):
    """Query the database and return a text with the information.

    :param data: PlecostDatabaseQuery object
    :type data: PlecostDatabaseQuery

    :return: results of query
    :rtype: str
    """
    if not isinstance(data, PlecostDatabaseQuery):
        raise TypeError("Expected PlecostDatabaseQuery, got '%s' instead"
                        % type(data))

    _actions = dict(plugin_list=__plugin_list,
                    cve=__cve_details,
                    plugin_cves=__plugin_cves)

    db = DB(join(get_data_folder(), "cve.db"))

    return _actions[data.action](data, db)
[ "def", "db_query", "(", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "PlecostDatabaseQuery", ")", ":", "raise", "TypeError", "(", "\"Expected PlecostDatabaseQuery, got '%s' instead\"", "%", "type", "(", "data", ")", ")", "_actions", "=", "dict", "(", "plugin_list", "=", "__plugin_list", ",", "cve", "=", "__cve_details", ",", "plugin_cves", "=", "__plugin_cves", ")", "db", "=", "DB", "(", "join", "(", "get_data_folder", "(", ")", ",", "\"cve.db\"", ")", ")", "return", "_actions", "[", "data", ".", "action", "]", "(", "data", ",", "db", ")" ]
https://github.com/iniqua/plecost/blob/ef0d89bfdf1ef870bd11b1d8bdf93a8ce9ec6ca0/plecost_lib/libs/db.py#L373-L393
Alexander-H-Liu/End-to-end-ASR-Pytorch
1103d144423e8e692f1d18cd9db27a96cb49fb9d
src/asr.py
python
Decoder.forward
(self, x)
return char, x
Decode and transform into vocab
Decode and transform into vocab
[ "Decode", "and", "transform", "into", "vocab" ]
def forward(self, x):
    ''' Decode and transform into vocab '''
    if not self.training:
        self.layers.flatten_parameters()
    x, self.hidden_state = self.layers(x.unsqueeze(1), self.hidden_state)
    x = x.squeeze(1)
    char = self.char_trans(self.final_dropout(x))
    return char, x
[ "def", "forward", "(", "self", ",", "x", ")", ":", "if", "not", "self", ".", "training", ":", "self", ".", "layers", ".", "flatten_parameters", "(", ")", "x", ",", "self", ".", "hidden_state", "=", "self", ".", "layers", "(", "x", ".", "unsqueeze", "(", "1", ")", ",", "self", ".", "hidden_state", ")", "x", "=", "x", ".", "squeeze", "(", "1", ")", "char", "=", "self", ".", "char_trans", "(", "self", ".", "final_dropout", "(", "x", ")", ")", "return", "char", ",", "x" ]
https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/src/asr.py#L214-L221
wannabeOG/Mask-RCNN
b6ce3d8795eeaccbbde6d91ec827a38df3a88a4c
utils.py
python
Dataset.prepare
(self, class_map=None)
Prepares the Dataset class for use. TODO: class map is not supported yet. When done, it should handle mapping classes from different datasets to the same class ID.
Prepares the Dataset class for use.
[ "Prepares", "the", "Dataset", "class", "for", "use", "." ]
def prepare(self, class_map=None):
    """Prepares the Dataset class for use.

    TODO: class map is not supported yet. When done, it should handle mapping
          classes from different datasets to the same class ID.
    """
    def clean_name(name):
        """Returns a shorter version of object names for cleaner display."""
        return ",".join(name.split(",")[:1])

    # Build (or rebuild) everything else from the info dicts.
    self.num_classes = len(self.class_info)
    self.class_ids = np.arange(self.num_classes)
    self.class_names = [clean_name(c["name"]) for c in self.class_info]
    self.num_images = len(self.image_info)
    self._image_ids = np.arange(self.num_images)
    self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
                                  for info, id in zip(self.class_info, self.class_ids)}

    # Map sources to class_ids they support
    self.sources = list(set([i['source'] for i in self.class_info]))
    self.source_class_ids = {}
    # Loop over datasets
    for source in self.sources:
        self.source_class_ids[source] = []
        # Find classes that belong to this dataset
        for i, info in enumerate(self.class_info):
            # Include BG class in all datasets
            if i == 0 or source == info['source']:
                self.source_class_ids[source].append(i)
[ "def", "prepare", "(", "self", ",", "class_map", "=", "None", ")", ":", "def", "clean_name", "(", "name", ")", ":", "\"\"\"Returns a shorter version of object names for cleaner display.\"\"\"", "return", "\",\"", ".", "join", "(", "name", ".", "split", "(", "\",\"", ")", "[", ":", "1", "]", ")", "# Build (or rebuild) everything else from the info dicts.", "self", ".", "num_classes", "=", "len", "(", "self", ".", "class_info", ")", "self", ".", "class_ids", "=", "np", ".", "arange", "(", "self", ".", "num_classes", ")", "self", ".", "class_names", "=", "[", "clean_name", "(", "c", "[", "\"name\"", "]", ")", "for", "c", "in", "self", ".", "class_info", "]", "self", ".", "num_images", "=", "len", "(", "self", ".", "image_info", ")", "self", ".", "_image_ids", "=", "np", ".", "arange", "(", "self", ".", "num_images", ")", "self", ".", "class_from_source_map", "=", "{", "\"{}.{}\"", ".", "format", "(", "info", "[", "'source'", "]", ",", "info", "[", "'id'", "]", ")", ":", "id", "for", "info", ",", "id", "in", "zip", "(", "self", ".", "class_info", ",", "self", ".", "class_ids", ")", "}", "# Map sources to class_ids they support", "self", ".", "sources", "=", "list", "(", "set", "(", "[", "i", "[", "'source'", "]", "for", "i", "in", "self", ".", "class_info", "]", ")", ")", "self", ".", "source_class_ids", "=", "{", "}", "# Loop over datasets", "for", "source", "in", "self", ".", "sources", ":", "self", ".", "source_class_ids", "[", "source", "]", "=", "[", "]", "# Find classes that belong to this dataset", "for", "i", ",", "info", "in", "enumerate", "(", "self", ".", "class_info", ")", ":", "# Include BG class in all datasets", "if", "i", "==", "0", "or", "source", "==", "info", "[", "'source'", "]", ":", "self", ".", "source_class_ids", "[", "source", "]", ".", "append", "(", "i", ")" ]
https://github.com/wannabeOG/Mask-RCNN/blob/b6ce3d8795eeaccbbde6d91ec827a38df3a88a4c/utils.py#L203-L233
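A minimal usage sketch of the mapping that prepare() builds (hedged: the concrete subclass and the add_class helper are assumptions for illustration; add_class exists upstream but is not shown in this record):

    ds = MyDataset()                    # hypothetical Dataset subclass instance
    ds.add_class("coco", 1, "person")   # assumed helper that appends to class_info
    ds.prepare()
    # Map a "source.source_id" pair back to the internal class id:
    internal_id = ds.class_from_source_map["coco.1"]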
NVlabs/stylegan2
bf0fe0baba9fc7039eae0cac575c1778be1ce3e3
dnnlib/tflib/optimizer.py
python
SimpleAdam.apply_gradients
(self, grads_and_vars)
[]
def apply_gradients(self, grads_and_vars): with tf.name_scope(self.name): state_vars = [] update_ops = [] # Adjust learning rate to deal with startup bias. with tf.control_dependencies(None): b1pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False) b2pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False) state_vars += [b1pow_var, b2pow_var] b1pow_new = b1pow_var * self.beta1 b2pow_new = b2pow_var * self.beta2 update_ops += [tf.assign(b1pow_var, b1pow_new), tf.assign(b2pow_var, b2pow_new)] lr_new = self.learning_rate * tf.sqrt(1 - b2pow_new) / (1 - b1pow_new) # Construct ops to update each variable. for grad, var in grads_and_vars: with tf.control_dependencies(None): m_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False) v_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False) state_vars += [m_var, v_var] m_new = self.beta1 * m_var + (1 - self.beta1) * grad v_new = self.beta2 * v_var + (1 - self.beta2) * tf.square(grad) var_delta = lr_new * m_new / (tf.sqrt(v_new) + self.epsilon) update_ops += [tf.assign(m_var, m_new), tf.assign(v_var, v_new), tf.assign_sub(var, var_delta)] # Group everything together. self.all_state_vars += state_vars return tf.group(*update_ops)
[ "def", "apply_gradients", "(", "self", ",", "grads_and_vars", ")", ":", "with", "tf", ".", "name_scope", "(", "self", ".", "name", ")", ":", "state_vars", "=", "[", "]", "update_ops", "=", "[", "]", "# Adjust learning rate to deal with startup bias.", "with", "tf", ".", "control_dependencies", "(", "None", ")", ":", "b1pow_var", "=", "tf", ".", "Variable", "(", "dtype", "=", "tf", ".", "float32", ",", "initial_value", "=", "1", ",", "trainable", "=", "False", ")", "b2pow_var", "=", "tf", ".", "Variable", "(", "dtype", "=", "tf", ".", "float32", ",", "initial_value", "=", "1", ",", "trainable", "=", "False", ")", "state_vars", "+=", "[", "b1pow_var", ",", "b2pow_var", "]", "b1pow_new", "=", "b1pow_var", "*", "self", ".", "beta1", "b2pow_new", "=", "b2pow_var", "*", "self", ".", "beta2", "update_ops", "+=", "[", "tf", ".", "assign", "(", "b1pow_var", ",", "b1pow_new", ")", ",", "tf", ".", "assign", "(", "b2pow_var", ",", "b2pow_new", ")", "]", "lr_new", "=", "self", ".", "learning_rate", "*", "tf", ".", "sqrt", "(", "1", "-", "b2pow_new", ")", "/", "(", "1", "-", "b1pow_new", ")", "# Construct ops to update each variable.", "for", "grad", ",", "var", "in", "grads_and_vars", ":", "with", "tf", ".", "control_dependencies", "(", "None", ")", ":", "m_var", "=", "tf", ".", "Variable", "(", "dtype", "=", "tf", ".", "float32", ",", "initial_value", "=", "tf", ".", "zeros_like", "(", "var", ")", ",", "trainable", "=", "False", ")", "v_var", "=", "tf", ".", "Variable", "(", "dtype", "=", "tf", ".", "float32", ",", "initial_value", "=", "tf", ".", "zeros_like", "(", "var", ")", ",", "trainable", "=", "False", ")", "state_vars", "+=", "[", "m_var", ",", "v_var", "]", "m_new", "=", "self", ".", "beta1", "*", "m_var", "+", "(", "1", "-", "self", ".", "beta1", ")", "*", "grad", "v_new", "=", "self", ".", "beta2", "*", "v_var", "+", "(", "1", "-", "self", ".", "beta2", ")", "*", "tf", ".", "square", "(", "grad", ")", "var_delta", "=", "lr_new", "*", "m_new", "/", "(", "tf", ".", "sqrt", "(", "v_new", ")", "+", "self", ".", "epsilon", ")", "update_ops", "+=", "[", "tf", ".", "assign", "(", "m_var", ",", "m_new", ")", ",", "tf", ".", "assign", "(", "v_var", ",", "v_new", ")", ",", "tf", ".", "assign_sub", "(", "var", ",", "var_delta", ")", "]", "# Group everything together.", "self", ".", "all_state_vars", "+=", "state_vars", "return", "tf", ".", "group", "(", "*", "update_ops", ")" ]
https://github.com/NVlabs/stylegan2/blob/bf0fe0baba9fc7039eae0cac575c1778be1ce3e3/dnnlib/tflib/optimizer.py#L342-L370
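The same per-variable update restated as a self-contained NumPy sketch, to make the bias-corrected Adam arithmetic explicit (function name and defaults are illustrative assumptions; the TF1 graph version above is authoritative):

    import numpy as np

    def adam_step(grad, m, v, b1pow, b2pow, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
        b1pow *= beta1                                  # running beta1**t
        b2pow *= beta2                                  # running beta2**t
        lr_t = lr * np.sqrt(1 - b2pow) / (1 - b1pow)    # startup-bias correction
        m = beta1 * m + (1 - beta1) * grad              # first-moment estimate
        v = beta2 * v + (1 - beta2) * np.square(grad)   # second-moment estimate
        delta = lr_t * m / (np.sqrt(v) + eps)           # step applied as var -= delta
        return delta, m, v, b1pow, b2pow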
ConvLab/ConvLab
a04582a77537c1a706fbf64715baa9ad0be1301a
convlab/modules/dst/state_tracker.py
python
Tracker.init_session
(self)
Init the Tracker to start a new session.
Init the Tracker to start a new session.
[ "Init", "the", "Tracker", "to", "start", "a", "new", "session", "." ]
def init_session(self): """Init the Tracker to start a new session.""" pass
[ "def", "init_session", "(", "self", ")", ":", "pass" ]
https://github.com/ConvLab/ConvLab/blob/a04582a77537c1a706fbf64715baa9ad0be1301a/convlab/modules/dst/state_tracker.py#L27-L29
pikpikcu/Pentest-Tools-Framework
cd6e6107764a809943dc4e073cde8149c1a2cd03
modules/xsser/build/bdist.linux-armv7l/egg/core/main.py
python
xsser.get_payloads
(self)
return payloads
Process payload options and make up the payload list for the attack.
Process payload options and make up the payload list for the attack.
[ "Process", "payload", "options", "and", "make", "up", "the", "payload", "list", "for", "the", "attack", "." ]
def get_payloads(self): """ Process payload options and make up the payload list for the attack. """ options = self.options # payloading sources payloads_fuzz = core.fuzzing.vectors.vectors payloads_dcp = core.fuzzing.DCP.DCPvectors payloads_dom = core.fuzzing.DOM.DOMvectors payloads_httpsr = core.fuzzing.HTTPsr.HTTPrs_vectors manual_payload = [{"payload":options.script, "browser":"[manual_injection]"}] # sustitute payload for hash to check false positives self.hashed_payload = self.generate_hash('url') checker_payload = [{"payload":self.hashed_payload, "browser":"[hashed_precheck_system]"}] # heuristic parameters heuristic_params = core.fuzzing.heuristic.heuristic_test def enable_options_heuristic(payloads): if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom return payloads if options.fuzz: payloads = payloads_fuzz if options.dcp: payloads = payloads + payloads_dcp if options.script: payloads = payloads + manual_payload if options.hash: payloads = checker_payload + payloads if options.inducedcode: payloads = payloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.inducedcode: payloads = payloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.hash: payloads = checker_payload + payloads if options.inducedcode: payloads = payloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.inducedcode: payloads = payloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.script: payloads = payloads + manual_payload if options.hash: payloads = checker_payload + payloads if options.inducedcode: payloads = payaloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.hash: payloads = checker_payload + payloads if options.inducedcode: payloads = payloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.inducedcode: payloads = payloads + payloads_httpsr if options.hash: payloads = checker_payload + payloads if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.dcp: payloads = payloads_dcp if options.script: payloads = payloads + manual_payload if options.hash: payloads = checker_payload + payloads if options.inducedcode: payloads = payloads + payloads_httpsr if 
options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.hash: payloads = checker_payload + payloads if options.inducedcode: payloads = payloads + inducedcode if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.inducedcode: payloads = payloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.script: payloads = manual_payload if options.hash: payloads = checker_payload + payloads if options.inducedcode: payloads = payloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.inducedcode: payloads = payloads + payloads_httpsr if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.heuristic: payloads = heuristic_params + payloads if options.dom: paylaods = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.inducedcode: payloads = payloads_httpsr if options.hash: payloads = checker_payload + payloads if options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.heuristic: payloads = heuristic_params + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.heuristic: payloads = heuristic_params if options.hash: payloads = checker_payload + payloads if options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads + payloads_dom elif options.dom: payloads = payloads_dom elif not options.fuzz and not options.dcp and not options.script and not options.hash and not options.inducedcode and not options.heuristic and not options.dom: payloads = [{"payload":'">PAYLOAD', "browser":"[IE7.0|IE6.0|NS8.1-IE] [NS8.1-G|FF2.0] [O9.02]" }] else: payloads = checker_payload return payloads
[ "def", "get_payloads", "(", "self", ")", ":", "options", "=", "self", ".", "options", "# payloading sources", "payloads_fuzz", "=", "core", ".", "fuzzing", ".", "vectors", ".", "vectors", "payloads_dcp", "=", "core", ".", "fuzzing", ".", "DCP", ".", "DCPvectors", "payloads_dom", "=", "core", ".", "fuzzing", ".", "DOM", ".", "DOMvectors", "payloads_httpsr", "=", "core", ".", "fuzzing", ".", "HTTPsr", ".", "HTTPrs_vectors", "manual_payload", "=", "[", "{", "\"payload\"", ":", "options", ".", "script", ",", "\"browser\"", ":", "\"[manual_injection]\"", "}", "]", "# sustitute payload for hash to check false positives", "self", ".", "hashed_payload", "=", "self", ".", "generate_hash", "(", "'url'", ")", "checker_payload", "=", "[", "{", "\"payload\"", ":", "self", ".", "hashed_payload", ",", "\"browser\"", ":", "\"[hashed_precheck_system]\"", "}", "]", "# heuristic parameters", "heuristic_params", "=", "core", ".", "fuzzing", ".", "heuristic", ".", "heuristic_test", "def", "enable_options_heuristic", "(", "payloads", ")", ":", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "return", "payloads", "if", "options", ".", "fuzz", ":", "payloads", "=", "payloads_fuzz", "if", "options", ".", "dcp", ":", "payloads", "=", "payloads", "+", "payloads_dcp", "if", "options", ".", "script", ":", "payloads", "=", "payloads", "+", "manual_payload", "if", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "script", ":", "payloads", "=", "payloads", "+", "manual_payload", "if", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "inducedcode", ":", "payloads", "=", "payaloads", "+", "payloads_httpsr", 
"if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dcp", ":", "payloads", "=", "payloads_dcp", "if", "options", ".", "script", ":", "payloads", "=", "payloads", "+", "manual_payload", "if", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "inducedcode", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "script", ":", "payloads", "=", "manual_payload", "if", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", 
"if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "inducedcode", ":", "payloads", "=", "payloads", "+", "payloads_httpsr", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "paylaods", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "inducedcode", ":", "payloads", "=", "payloads_httpsr", "if", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "heuristic", ":", "payloads", "=", "heuristic_params", "if", "options", ".", "hash", ":", "payloads", "=", "checker_payload", "+", "payloads", "if", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads", "+", "payloads_dom", "elif", "options", ".", "dom", ":", "payloads", "=", "payloads_dom", "elif", "not", "options", ".", "fuzz", "and", "not", "options", ".", "dcp", "and", "not", "options", ".", "script", "and", "not", "options", ".", "hash", "and", "not", "options", ".", "inducedcode", "and", "not", "options", ".", "heuristic", "and", "not", "options", ".", "dom", ":", "payloads", "=", "[", "{", "\"payload\"", ":", "'\">PAYLOAD'", ",", "\"browser\"", ":", "\"[IE7.0|IE6.0|NS8.1-IE] [NS8.1-G|FF2.0] [O9.02]\"", "}", "]", "else", ":", "payloads", "=", "checker_payload", "return", "payloads" ]
https://github.com/pikpikcu/Pentest-Tools-Framework/blob/cd6e6107764a809943dc4e073cde8149c1a2cd03/modules/xsser/build/bdist.linux-armv7l/egg/core/main.py#L271-L471
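Stripped of the duplicated option-order branches, the selection logic above amounts to composing one list from the enabled payload sources; a simplified sketch (assumed dict shape; this is not the upstream control flow, which special-cases the order options were given in):

    def get_payloads_simplified(options, sources):
        # sources: dict with 'fuzz', 'dcp', 'manual', 'hash', 'httpsr',
        # 'heuristic', 'dom' payload lists (assumed shape for illustration).
        payloads = []
        if options.fuzz: payloads += sources['fuzz']
        if options.dcp: payloads += sources['dcp']
        if options.script: payloads += sources['manual']
        if options.inducedcode: payloads += sources['httpsr']
        if options.heuristic: payloads = sources['heuristic'] + payloads
        if options.hash: payloads = sources['hash'] + payloads
        if options.dom: payloads += sources['dom']
        if not payloads:  # nothing selected: fall back to the default vector
            payloads = [{"payload": '">PAYLOAD',
                         "browser": "[IE7.0|IE6.0|NS8.1-IE] [NS8.1-G|FF2.0] [O9.02]"}]
        return payloads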
brendano/tweetmotif
1b0b1e3a941745cd5a26eba01f554688b7c4b27e
everything_else/djfrontend/django-1.0.2/db/models/sql/subqueries.py
python
UpdateQuery.pre_sql_setup
(self)
If the update depends on results from other tables, we need to do some munging of the "where" conditions to match the format required for (portable) SQL updates. That is done here. Further, if we are going to be running multiple updates, we pull out the id values to update at this point so that they don't change as a result of the progressive updates.
If the update depends on results from other tables, we need to do some munging of the "where" conditions to match the format required for (portable) SQL updates. That is done here.
[ "If", "the", "update", "depends", "on", "results", "from", "other", "tables", "we", "need", "to", "do", "some", "munging", "of", "the", "where", "conditions", "to", "match", "the", "format", "required", "for", "(", "portable", ")", "SQL", "updates", ".", "That", "is", "done", "here", "." ]
def pre_sql_setup(self): """ If the update depends on results from other tables, we need to do some munging of the "where" conditions to match the format required for (portable) SQL updates. That is done here. Further, if we are going to be running multiple updates, we pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ self.select_related = False self.clear_ordering(True) super(UpdateQuery, self).pre_sql_setup() count = self.count_active_tables() if not self.related_updates and count == 1: return # We need to use a sub-select in the where clause to filter on things # from other tables. query = self.clone(klass=Query) query.bump_prefix() query.extra_select = {} first_table = query.tables[0] if query.alias_refcount[first_table] == 1: # We can remove one table from the inner query. query.unref_alias(first_table) for i in xrange(1, len(query.tables)): table = query.tables[i] if query.alias_refcount[table]: break join_info = query.alias_map[table] query.select = [(join_info[RHS_ALIAS], join_info[RHS_JOIN_COL])] must_pre_select = False else: query.select = [] query.add_fields([query.model._meta.pk.name]) must_pre_select = not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.where = self.where_class() if self.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.execute_sql(MULTI): idents.extend([r[0] for r in rows]) self.add_filter(('pk__in', idents)) self.related_ids = idents else: # The fast path. Filters and updates in one query. self.add_filter(('pk__in', query)) for alias in self.tables[1:]: self.alias_refcount[alias] = 0
[ "def", "pre_sql_setup", "(", "self", ")", ":", "self", ".", "select_related", "=", "False", "self", ".", "clear_ordering", "(", "True", ")", "super", "(", "UpdateQuery", ",", "self", ")", ".", "pre_sql_setup", "(", ")", "count", "=", "self", ".", "count_active_tables", "(", ")", "if", "not", "self", ".", "related_updates", "and", "count", "==", "1", ":", "return", "# We need to use a sub-select in the where clause to filter on things", "# from other tables.", "query", "=", "self", ".", "clone", "(", "klass", "=", "Query", ")", "query", ".", "bump_prefix", "(", ")", "query", ".", "extra_select", "=", "{", "}", "first_table", "=", "query", ".", "tables", "[", "0", "]", "if", "query", ".", "alias_refcount", "[", "first_table", "]", "==", "1", ":", "# We can remove one table from the inner query.", "query", ".", "unref_alias", "(", "first_table", ")", "for", "i", "in", "xrange", "(", "1", ",", "len", "(", "query", ".", "tables", ")", ")", ":", "table", "=", "query", ".", "tables", "[", "i", "]", "if", "query", ".", "alias_refcount", "[", "table", "]", ":", "break", "join_info", "=", "query", ".", "alias_map", "[", "table", "]", "query", ".", "select", "=", "[", "(", "join_info", "[", "RHS_ALIAS", "]", ",", "join_info", "[", "RHS_JOIN_COL", "]", ")", "]", "must_pre_select", "=", "False", "else", ":", "query", ".", "select", "=", "[", "]", "query", ".", "add_fields", "(", "[", "query", ".", "model", ".", "_meta", ".", "pk", ".", "name", "]", ")", "must_pre_select", "=", "not", "self", ".", "connection", ".", "features", ".", "update_can_self_select", "# Now we adjust the current query: reset the where clause and get rid", "# of all the tables we don't need (since they're in the sub-select).", "self", ".", "where", "=", "self", ".", "where_class", "(", ")", "if", "self", ".", "related_updates", "or", "must_pre_select", ":", "# Either we're using the idents in multiple update queries (so", "# don't want them to change), or the db backend doesn't support", "# selecting from the updating table (e.g. MySQL).", "idents", "=", "[", "]", "for", "rows", "in", "query", ".", "execute_sql", "(", "MULTI", ")", ":", "idents", ".", "extend", "(", "[", "r", "[", "0", "]", "for", "r", "in", "rows", "]", ")", "self", ".", "add_filter", "(", "(", "'pk__in'", ",", "idents", ")", ")", "self", ".", "related_ids", "=", "idents", "else", ":", "# The fast path. Filters and updates in one query.", "self", ".", "add_filter", "(", "(", "'pk__in'", ",", "query", ")", ")", "for", "alias", "in", "self", ".", "tables", "[", "1", ":", "]", ":", "self", ".", "alias_refcount", "[", "alias", "]", "=", "0" ]
https://github.com/brendano/tweetmotif/blob/1b0b1e3a941745cd5a26eba01f554688b7c4b27e/everything_else/djfrontend/django-1.0.2/db/models/sql/subqueries.py#L149-L203
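This machinery is internal to Django's SQL compiler; the kind of user-level call that exercises it is an update whose filter spans a join (model and field names hypothetical):

    # An UPDATE whose WHERE clause references another table forces
    # pre_sql_setup() to rewrite the filter as pk__in + sub-select.
    from myapp.models import Entry   # assumed model with a FK named author
    Entry.objects.filter(author__name="Ada").update(rating=5)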
Cadene/tensorflow-model-zoo.torch
990b10ffc22d4c8eacb2a502f20415b4f70c74c2
models/research/swivel/prep.py
python
words
(line)
return line.strip().split()
Splits a line of text into tokens.
Splits a line of text into tokens.
[ "Splits", "a", "line", "of", "text", "into", "tokens", "." ]
def words(line): """Splits a line of text into tokens.""" return line.strip().split()
[ "def", "words", "(", "line", ")", ":", "return", "line", ".", "strip", "(", ")", ".", "split", "(", ")" ]
https://github.com/Cadene/tensorflow-model-zoo.torch/blob/990b10ffc22d4c8eacb2a502f20415b4f70c74c2/models/research/swivel/prep.py#L86-L88
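Behavior at the REPL (whitespace-delimited tokenization, with leading and trailing whitespace ignored):

    >>> words("  the quick  brown fox\n")
    ['the', 'quick', 'brown', 'fox']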
Teradata/stacki
a8085dce179dbe903f65f136f4b63bcc076cc057
common/src/stack/pylib/stack/file.py
python
RPMFile.installPackage
(self, root, flags="")
return retval
Installs the RPM at the given root directory. This is used for patching RPMs into the distribution and making bootable CDs
Installs the RPM at the given root directory. This is used for patching RPMs into the distribution and making bootable CDs
[ "Installs", "the", "RPM", "at", "the", "given", "root", "directory", ".", "This", "is", "used", "for", "patching", "RPMs", "into", "the", "distribution", "and", "making", "bootable", "CDs" ]
def installPackage(self, root, flags=""): """Installs the RPM at the given root directory. This is used for patching RPMs into the distribution and making bootable CDs""" pass dbdir = os.path.join(root, 'var', 'lib', 'rpm') if not os.path.isdir(dbdir): os.makedirs(dbdir) cmd = 'rpm -i --nomd5 --force --nodeps --ignorearch ' + \ '--dbpath %s %s ' % (dbdir, flags) cmd += '--badreloc --relocate /=%s %s' \ % (root, self.getFullName()) retval = os.system(cmd) # Crawl up from the end of the dbdir path and prune off # all the empty directories. while dbdir: if not os.listdir(dbdir): shutil.rmtree(dbdir) list = dbdir.split(os.sep) dbdir = os.sep.join(list[:-1]) return retval
[ "def", "installPackage", "(", "self", ",", "root", ",", "flags", "=", "\"\"", ")", ":", "pass", "dbdir", "=", "os", ".", "path", ".", "join", "(", "root", ",", "'var'", ",", "'lib'", ",", "'rpm'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "dbdir", ")", ":", "os", ".", "makedirs", "(", "dbdir", ")", "cmd", "=", "'rpm -i --nomd5 --force --nodeps --ignorearch '", "+", "'--dbpath %s %s '", "%", "(", "dbdir", ",", "flags", ")", "cmd", "+=", "'--badreloc --relocate /=%s %s'", "%", "(", "root", ",", "self", ".", "getFullName", "(", ")", ")", "retval", "=", "os", ".", "system", "(", "cmd", ")", "# Crawl up from the end of the dbdir path and prune off", "# all the empty directories.\t\t", "while", "dbdir", ":", "if", "not", "os", ".", "listdir", "(", "dbdir", ")", ":", "shutil", ".", "rmtree", "(", "dbdir", ")", "list", "=", "dbdir", ".", "split", "(", "os", ".", "sep", ")", "dbdir", "=", "os", ".", "sep", ".", "join", "(", "list", "[", ":", "-", "1", "]", ")", "return", "retval" ]
https://github.com/Teradata/stacki/blob/a8085dce179dbe903f65f136f4b63bcc076cc057/common/src/stack/pylib/stack/file.py#L218-L243
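For concreteness, the command string assembled above expands to roughly the following (root, flags, and package name are illustrative values):

    # With root="/mnt/chroot", flags="" and getFullName() == "foo-1.0.rpm",
    # the assembled command is equivalent to (whitespace normalized):
    cmd = ('rpm -i --nomd5 --force --nodeps --ignorearch '
           '--dbpath /mnt/chroot/var/lib/rpm '
           '--badreloc --relocate /=/mnt/chroot foo-1.0.rpm')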
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/integrals/risch.py
python
integrate_hyperexponential
(a, d, DE, z=None, conds='piecewise')
return (ret, i, b)
Integration of hyperexponential functions. Given a hyperexponential monomial t over k and f in k(t), return g elementary over k(t), i in k(t), and a bool b in {True, False} such that i = f - Dg is in k if b is True or i = f - Dg does not have an elementary integral over k(t) if b is False. This function returns a Basic expression for the first argument. If b is True, the second argument is Basic expression in k to recursively integrate. If b is False, the second argument is an unevaluated Integral, which has been proven to be nonelementary.
Integration of hyperexponential functions.
[ "Integration", "of", "hyperexponential", "functions", "." ]
def integrate_hyperexponential(a, d, DE, z=None, conds='piecewise'): """ Integration of hyperexponential functions. Given a hyperexponential monomial t over k and f in k(t), return g elementary over k(t), i in k(t), and a bool b in {True, False} such that i = f - Dg is in k if b is True or i = f - Dg does not have an elementary integral over k(t) if b is False. This function returns a Basic expression for the first argument. If b is True, the second argument is Basic expression in k to recursively integrate. If b is False, the second argument is an unevaluated Integral, which has been proven to be nonelementary. """ # XXX: a and d must be canceled, or this might return incorrect results z = z or Dummy("z") s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) g1, h, r = hermite_reduce(a, d, DE) g2, b = residue_reduce(h[0], h[1], DE, z=z) if not b: i = cancel(a.as_expr()/d.as_expr() - (g1[1]*derivation(g1[0], DE) - g1[0]*derivation(g1[1], DE)).as_expr()/(g1[1]**2).as_expr() - residue_reduce_derivation(g2, DE, z)) i = NonElementaryIntegral(cancel(i.subs(s)), DE.x) return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) + residue_reduce_to_basic(g2, DE, z), i, b) # p should be a polynomial in t and 1/t, because Sirr == k[t, 1/t] # h - Dg2 + r p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2, DE, z) + r[0].as_expr()/r[1].as_expr()) pp = as_poly_1t(p, DE.t, z) qa, qd, b = integrate_hyperexponential_polynomial(pp, DE, z) i = pp.nth(0, 0) ret = ((g1[0].as_expr()/g1[1].as_expr()).subs(s) \ + residue_reduce_to_basic(g2, DE, z)) qas = qa.as_expr().subs(s) qds = qd.as_expr().subs(s) if conds == 'piecewise' and DE.x not in qds.free_symbols: # We have to be careful if the exponent is S.Zero! # XXX: Does qd = 0 always necessarily correspond to the exponential # equaling 1? ret += Piecewise( (qas/qds, Ne(qds, 0)), (integrate((p - i).subs(DE.t, 1).subs(s), DE.x), True) ) else: ret += qas/qds if not b: i = p - (qd*derivation(qa, DE) - qa*derivation(qd, DE)).as_expr()/\ (qd**2).as_expr() i = NonElementaryIntegral(cancel(i).subs(s), DE.x) return (ret, i, b)
[ "def", "integrate_hyperexponential", "(", "a", ",", "d", ",", "DE", ",", "z", "=", "None", ",", "conds", "=", "'piecewise'", ")", ":", "# XXX: a and d must be canceled, or this might return incorrect results", "z", "=", "z", "or", "Dummy", "(", "\"z\"", ")", "s", "=", "list", "(", "zip", "(", "reversed", "(", "DE", ".", "T", ")", ",", "reversed", "(", "[", "f", "(", "DE", ".", "x", ")", "for", "f", "in", "DE", ".", "Tfuncs", "]", ")", ")", ")", "g1", ",", "h", ",", "r", "=", "hermite_reduce", "(", "a", ",", "d", ",", "DE", ")", "g2", ",", "b", "=", "residue_reduce", "(", "h", "[", "0", "]", ",", "h", "[", "1", "]", ",", "DE", ",", "z", "=", "z", ")", "if", "not", "b", ":", "i", "=", "cancel", "(", "a", ".", "as_expr", "(", ")", "/", "d", ".", "as_expr", "(", ")", "-", "(", "g1", "[", "1", "]", "*", "derivation", "(", "g1", "[", "0", "]", ",", "DE", ")", "-", "g1", "[", "0", "]", "*", "derivation", "(", "g1", "[", "1", "]", ",", "DE", ")", ")", ".", "as_expr", "(", ")", "/", "(", "g1", "[", "1", "]", "**", "2", ")", ".", "as_expr", "(", ")", "-", "residue_reduce_derivation", "(", "g2", ",", "DE", ",", "z", ")", ")", "i", "=", "NonElementaryIntegral", "(", "cancel", "(", "i", ".", "subs", "(", "s", ")", ")", ",", "DE", ".", "x", ")", "return", "(", "(", "g1", "[", "0", "]", ".", "as_expr", "(", ")", "/", "g1", "[", "1", "]", ".", "as_expr", "(", ")", ")", ".", "subs", "(", "s", ")", "+", "residue_reduce_to_basic", "(", "g2", ",", "DE", ",", "z", ")", ",", "i", ",", "b", ")", "# p should be a polynomial in t and 1/t, because Sirr == k[t, 1/t]", "# h - Dg2 + r", "p", "=", "cancel", "(", "h", "[", "0", "]", ".", "as_expr", "(", ")", "/", "h", "[", "1", "]", ".", "as_expr", "(", ")", "-", "residue_reduce_derivation", "(", "g2", ",", "DE", ",", "z", ")", "+", "r", "[", "0", "]", ".", "as_expr", "(", ")", "/", "r", "[", "1", "]", ".", "as_expr", "(", ")", ")", "pp", "=", "as_poly_1t", "(", "p", ",", "DE", ".", "t", ",", "z", ")", "qa", ",", "qd", ",", "b", "=", "integrate_hyperexponential_polynomial", "(", "pp", ",", "DE", ",", "z", ")", "i", "=", "pp", ".", "nth", "(", "0", ",", "0", ")", "ret", "=", "(", "(", "g1", "[", "0", "]", ".", "as_expr", "(", ")", "/", "g1", "[", "1", "]", ".", "as_expr", "(", ")", ")", ".", "subs", "(", "s", ")", "+", "residue_reduce_to_basic", "(", "g2", ",", "DE", ",", "z", ")", ")", "qas", "=", "qa", ".", "as_expr", "(", ")", ".", "subs", "(", "s", ")", "qds", "=", "qd", ".", "as_expr", "(", ")", ".", "subs", "(", "s", ")", "if", "conds", "==", "'piecewise'", "and", "DE", ".", "x", "not", "in", "qds", ".", "free_symbols", ":", "# We have to be careful if the exponent is S.Zero!", "# XXX: Does qd = 0 always necessarily correspond to the exponential", "# equaling 1?", "ret", "+=", "Piecewise", "(", "(", "qas", "/", "qds", ",", "Ne", "(", "qds", ",", "0", ")", ")", ",", "(", "integrate", "(", "(", "p", "-", "i", ")", ".", "subs", "(", "DE", ".", "t", ",", "1", ")", ".", "subs", "(", "s", ")", ",", "DE", ".", "x", ")", ",", "True", ")", ")", "else", ":", "ret", "+=", "qas", "/", "qds", "if", "not", "b", ":", "i", "=", "p", "-", "(", "qd", "*", "derivation", "(", "qa", ",", "DE", ")", "-", "qa", "*", "derivation", "(", "qd", ",", "DE", ")", ")", ".", "as_expr", "(", ")", "/", "(", "qd", "**", "2", ")", ".", "as_expr", "(", ")", "i", "=", "NonElementaryIntegral", "(", "cancel", "(", "i", ")", ".", "subs", "(", "s", ")", ",", "DE", ".", "x", ")", "return", "(", "ret", ",", "i", ",", "b", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/integrals/risch.py#L1465-L1524
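The documented front end in the same module can be exercised directly; exp(x**2) is the classic input whose antiderivative is provably nonelementary:

    from sympy import exp, symbols
    from sympy.integrals.risch import risch_integrate, NonElementaryIntegral

    x = symbols('x')
    result = risch_integrate(exp(x**2), x)
    # Rather than an elementary closed form, the result is wrapped
    # in NonElementaryIntegral, proving that no such form exists:
    assert isinstance(result, NonElementaryIntegral)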
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/pep425tags.py
python
is_manylinux1_compatible
()
return pip.utils.glibc.have_compatible_glibc(2, 5)
[]
def is_manylinux1_compatible(): # Only Linux, and only x86-64 / i686 if get_platform() not in ("linux_x86_64", "linux_i686"): return False # Check for presence of _manylinux module try: import _manylinux return bool(_manylinux.manylinux1_compatible) except (ImportError, AttributeError): # Fall through to heuristic check below pass # Check glibc version. CentOS 5 uses glibc 2.5. return pip.utils.glibc.have_compatible_glibc(2, 5)
[ "def", "is_manylinux1_compatible", "(", ")", ":", "# Only Linux, and only x86-64 / i686", "if", "get_platform", "(", ")", "not", "in", "(", "\"linux_x86_64\"", ",", "\"linux_i686\"", ")", ":", "return", "False", "# Check for presence of _manylinux module", "try", ":", "import", "_manylinux", "return", "bool", "(", "_manylinux", ".", "manylinux1_compatible", ")", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "# Fall through to heuristic check below", "pass", "# Check glibc version. CentOS 5 uses glibc 2.5.", "return", "pip", ".", "utils", ".", "glibc", ".", "have_compatible_glibc", "(", "2", ",", "5", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/pep425tags.py#L148-L162
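The _manylinux import checked above is the PEP 513 escape hatch: a distributor can override the glibc heuristic by shipping a module on the path. A minimal sketch of such an override:

    # Contents of a site-wide _manylinux.py (module name per PEP 513).
    # pip's check imports this and trusts the attribute over the
    # glibc-version heuristic.
    manylinux1_compatible = False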
tecladocode/rest-apis-flask-python
d2d40872012dcf1b63ffde4bbd2dd447fec98eca
section2/10_args_and_kwargs.py
python
what_are_args
(*args)
[]
def what_are_args(*args): print(args)
[ "def", "what_are_args", "(", "*", "args", ")", ":", "print", "(", "args", ")" ]
https://github.com/tecladocode/rest-apis-flask-python/blob/d2d40872012dcf1b63ffde4bbd2dd447fec98eca/section2/10_args_and_kwargs.py#L16-L17
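Calling it shows that *args packs all positional arguments into a tuple:

    >>> what_are_args(1, 2, 'three')
    (1, 2, 'three')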
keiffster/program-y
8c99b56f8c32f01a7b9887b5daae9465619d0385
src/programy/utils/email/config.py
python
EmailConfiguration.password
(self)
return self._password
[]
def password(self): return self._password
[ "def", "password", "(", "self", ")", ":", "return", "self", ".", "_password" ]
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/utils/email/config.py#L47-L48
MDAnalysis/mdanalysis
3488df3cdb0c29ed41c4fb94efe334b541e31b21
package/MDAnalysis/coordinates/TRZ.py
python
TRZReader._get_dt
(self)
The amount of time between frames in ps Assumes that this step is constant (i.e. 2 trajectories with different steps haven't been stitched together). Raises ``AttributeError`` in case of ``StopIteration`` (which makes :attr:`dt` return 1.0). .. versionchanged:: 2.1.0 Now raises an ``AttributeError`` if dt can't be obtained from the time difference between two frames.
The amount of time between frames in ps
[ "The", "amount", "of", "time", "between", "frames", "in", "ps" ]
def _get_dt(self): """The amount of time between frames in ps Assumes that this step is constant (ie. 2 trajectories with different steps haven't been stitched together). Returns ``AttributeError`` in case of ``StopIteration`` (which makes :attr:`dt` return 1.0). .. versionchanged:: 2.1.0 Now returns an ``AttributeError`` if dt can't be obtained from the time difference between two frames. """ curr_frame = self.ts.frame try: t0 = self.ts.time self.next() t1 = self.ts.time dt = t1 - t0 except StopIteration: raise AttributeError else: return dt finally: self._read_frame(curr_frame)
[ "def", "_get_dt", "(", "self", ")", ":", "curr_frame", "=", "self", ".", "ts", ".", "frame", "try", ":", "t0", "=", "self", ".", "ts", ".", "time", "self", ".", "next", "(", ")", "t1", "=", "self", ".", "ts", ".", "time", "dt", "=", "t1", "-", "t0", "except", "StopIteration", ":", "raise", "AttributeError", "else", ":", "return", "dt", "finally", ":", "self", ".", "_read_frame", "(", "curr_frame", ")" ]
https://github.com/MDAnalysis/mdanalysis/blob/3488df3cdb0c29ed41c4fb94efe334b541e31b21/package/MDAnalysis/coordinates/TRZ.py#L260-L283
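From user code the value surfaces through the reader's dt attribute; a hedged usage sketch (file names are placeholders):

    import MDAnalysis as mda

    u = mda.Universe("topol.psf", "traj.trz")   # placeholder file names
    print(u.trajectory.dt)   # dt falls back to 1.0 when _get_dt raises AttributeError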
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/combinat/sloane_functions.py
python
A083105.__init__
(self)
r""" Second-order linear recurrence sequence with `a(n) = a(n-1) + a(n-2)`. `a(0) = 62638280004239857`, `a(1) = 49463435743205655`. This is the second-order linear recurrence sequence with `a(0)` and `a(1)` co-prime. It was found by Donald Knuth in 1990. INPUT: - ``n`` -- non negative integer OUTPUT: - ``integer`` -- function value EXAMPLES:: sage: a = sloane.A083105;a Second-order linear recurrence sequence with a(n) = a(n-1) + a(n-2). sage: a(1) 49463435743205655 sage: a(2) 112101715747445512 sage: a(3) 161565151490651167 sage: a.offset 0 sage: a(8) 1853029790662436896 sage: a(20) 596510791500513098192 sage: a.list(4) [62638280004239857, 49463435743205655, 112101715747445512, 161565151490651167] AUTHORS: - Jaap Spies (2007-01-23)
r""" Second-order linear recurrence sequence with `a(n) = a(n-1) + a(n-2)`.
[ "r", "Second", "-", "order", "linear", "recurrence", "sequence", "with", "a", "(", "n", ")", "=", "a", "(", "n", "-", "1", ")", "+", "a", "(", "n", "-", "2", ")", "." ]
def __init__(self): r""" Second-order linear recurrence sequence with `a(n) = a(n-1) + a(n-2)`. `a(0) = 62638280004239857`, `a(1) = 49463435743205655`. This is the second-order linear recurrence sequence with `a(0)` and `a(1)` co-prime. It was found by Donald Knuth in 1990. INPUT: - ``n`` -- non negative integer OUTPUT: - ``integer`` -- function value EXAMPLES:: sage: a = sloane.A083105;a Second-order linear recurrence sequence with a(n) = a(n-1) + a(n-2). sage: a(1) 49463435743205655 sage: a(2) 112101715747445512 sage: a(3) 161565151490651167 sage: a.offset 0 sage: a(8) 1853029790662436896 sage: a(20) 596510791500513098192 sage: a.list(4) [62638280004239857, 49463435743205655, 112101715747445512, 161565151490651167] AUTHORS: - Jaap Spies (2007-01-23) """ SloaneSequence.__init__(self, offset=0) self._b = [] self._params = (62638280004239857,49463435743205655,1,1) self._precompute(2)
[ "def", "__init__", "(", "self", ")", ":", "SloaneSequence", ".", "__init__", "(", "self", ",", "offset", "=", "0", ")", "self", ".", "_b", "=", "[", "]", "self", ".", "_params", "=", "(", "62638280004239857", ",", "49463435743205655", ",", "1", ",", "1", ")", "self", ".", "_precompute", "(", "2", ")" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/sloane_functions.py#L7959-L8003
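The seed values and recurrence from the docstring can be checked with plain Python integers:

    a, b = 62638280004239857, 49463435743205655    # a(0), a(1)
    seq = [a, b]
    for _ in range(19):
        a, b = b, a + b                            # a(n) = a(n-1) + a(n-2)
        seq.append(b)
    assert seq[2] == 112101715747445512            # matches the doctest values
    assert seq[8] == 1853029790662436896
    assert seq[20] == 596510791500513098192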
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/renderers/stateconf.py
python
state_name
(sname)
return sname.split(".", 1)[0]
Return the name of the state regardless of whether sname is just the state name or a state.func name.
Return the name of the state regardless of whether sname is just the state name or a state.func name.
[ "Return", "the", "name", "of", "the", "state", "regardless", "of", "whether", "sname", "is", "just", "the", "state", "name", "or", "a", "state", ".", "func", "name", "." ]
def state_name(sname): """ Return the name of the state regardless of whether sname is just the state name or a state.func name. """ return sname.split(".", 1)[0]
[ "def", "state_name", "(", "sname", ")", ":", "return", "sname", ".", "split", "(", "\".\"", ",", "1", ")", "[", "0", "]" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/renderers/stateconf.py#L535-L540
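For example:

    >>> state_name('pkg.installed')
    'pkg'
    >>> state_name('cmd')
    'cmd'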
JimmXinu/FanFicFare
bc149a2deb2636320fe50a3e374af6eef8f61889
included_dependencies/html2text/utils.py
python
escape_md
(text)
return config.RE_MD_CHARS_MATCHER.sub(r"\\\1", text)
Escapes markdown-sensitive characters within other markdown constructs.
Escapes markdown-sensitive characters within other markdown constructs.
[ "Escapes", "markdown", "-", "sensitive", "characters", "within", "other", "markdown", "constructs", "." ]
def escape_md(text): """ Escapes markdown-sensitive characters within other markdown constructs. """ return config.RE_MD_CHARS_MATCHER.sub(r"\\\1", text)
[ "def", "escape_md", "(", "text", ")", ":", "return", "config", ".", "RE_MD_CHARS_MATCHER", ".", "sub", "(", "r\"\\\\\\1\"", ",", "text", ")" ]
https://github.com/JimmXinu/FanFicFare/blob/bc149a2deb2636320fe50a3e374af6eef8f61889/included_dependencies/html2text/utils.py#L224-L229
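The behavior hinges on config.RE_MD_CHARS_MATCHER; a self-contained sketch with an assumed pattern (backslash, brackets, and parentheses; check the real config module before relying on this):

    import re

    RE_MD_CHARS_MATCHER = re.compile(r"([\\\[\]\(\)])")   # assumed pattern

    def escape_md(text):
        return RE_MD_CHARS_MATCHER.sub(r"\\\1", text)

    print(escape_md("link [text](url)"))   # -> link \[text\]\(url\)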
WeblateOrg/weblate
8126f3dda9d24f2846b755955132a8b8410866c8
weblate/utils/ratelimit.py
python
get_cache_key
(scope, request=None, address=None, user=None)
return f"ratelimit-{origin}-{scope}-{key}"
Generate cache key for request.
Generate cache key for request.
[ "Generate", "cache", "key", "for", "request", "." ]
def get_cache_key(scope, request=None, address=None, user=None): """Generate cache key for request.""" if (request and request.user.is_authenticated) or user: if user: key = user.id else: key = request.user.id origin = "user" else: if address is None: address = get_ip_address(request) origin = "ip" key = calculate_checksum(address) return f"ratelimit-{origin}-{scope}-{key}"
[ "def", "get_cache_key", "(", "scope", ",", "request", "=", "None", ",", "address", "=", "None", ",", "user", "=", "None", ")", ":", "if", "(", "request", "and", "request", ".", "user", ".", "is_authenticated", ")", "or", "user", ":", "if", "user", ":", "key", "=", "user", ".", "id", "else", ":", "key", "=", "request", ".", "user", ".", "id", "origin", "=", "\"user\"", "else", ":", "if", "address", "is", "None", ":", "address", "=", "get_ip_address", "(", "request", ")", "origin", "=", "\"ip\"", "key", "=", "calculate_checksum", "(", "address", ")", "return", "f\"ratelimit-{origin}-{scope}-{key}\"" ]
https://github.com/WeblateOrg/weblate/blob/8126f3dda9d24f2846b755955132a8b8410866c8/weblate/utils/ratelimit.py#L33-L46
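The resulting keys have a fixed shape, ratelimit-<origin>-<scope>-<key>; for example, rate limiting an anonymous request by address:

    key = get_cache_key("login", address="1.2.3.4")
    # -> "ratelimit-ip-login-<checksum of the address>"
    # whereas an authenticated user with id 42 yields "ratelimit-user-login-42"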
wwqgtxx/wwqLyParse
33136508e52821babd9294fdecffbdf02d73a6fc
wwqLyParse/lib/python-3.7.2-embed-amd64/Crypto/Util/RFC1751.py
python
key_to_english
(key)
return english[:-1]
Transform an arbitrary key into a string containing English words. Example:: >>> from Crypto.Util.RFC1751 import key_to_english >>> key_to_english(b'66666666') 'RAM LOIS GOAD CREW CARE HIT' Args: key (byte string): The key to convert. Its length must be a multiple of 8. Return: A string of English words.
Transform an arbitrary key into a string containing English words.
[ "Transform", "an", "arbitrary", "key", "into", "a", "string", "containing", "English", "words", "." ]
def key_to_english (key): """Transform an arbitrary key into a string containing English words. Example:: >>> from Crypto.Util.RFC1751 import key_to_english >>> key_to_english(b'66666666') 'RAM LOIS GOAD CREW CARE HIT' Args: key (byte string): The key to convert. Its length must be a multiple of 8. Return: A string of English words. """ english='' for index in range(0, len(key), 8): # Loop over 8-byte subkeys subkey=key[index:index+8] # Compute the parity of the key skbin=_key2bin(subkey) ; p=0 for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) # Append parity bits to the subkey skbin=_key2bin(subkey+bchr((p<<6) & 255)) for i in range(0, 64, 11): english=english+wordlist[_extract(skbin, i, 11)]+' ' return english[:-1]
[ "def", "key_to_english", "(", "key", ")", ":", "english", "=", "''", "for", "index", "in", "range", "(", "0", ",", "len", "(", "key", ")", ",", "8", ")", ":", "# Loop over 8-byte subkeys", "subkey", "=", "key", "[", "index", ":", "index", "+", "8", "]", "# Compute the parity of the key", "skbin", "=", "_key2bin", "(", "subkey", ")", "p", "=", "0", "for", "i", "in", "range", "(", "0", ",", "64", ",", "2", ")", ":", "p", "=", "p", "+", "_extract", "(", "skbin", ",", "i", ",", "2", ")", "# Append parity bits to the subkey", "skbin", "=", "_key2bin", "(", "subkey", "+", "bchr", "(", "(", "p", "<<", "6", ")", "&", "255", ")", ")", "for", "i", "in", "range", "(", "0", ",", "64", ",", "11", ")", ":", "english", "=", "english", "+", "wordlist", "[", "_extract", "(", "skbin", ",", "i", ",", "11", ")", "]", "+", "' '", "return", "english", "[", ":", "-", "1", "]" ]
https://github.com/wwqgtxx/wwqLyParse/blob/33136508e52821babd9294fdecffbdf02d73a6fc/wwqLyParse/lib/python-3.7.2-embed-amd64/Crypto/Util/RFC1751.py#L47-L74
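The module also ships the inverse transform, so keys round-trip; a sketch using the values from the docstring example:

    from Crypto.Util.RFC1751 import english_to_key, key_to_english

    words_out = key_to_english(b'66666666')
    assert words_out == 'RAM LOIS GOAD CREW CARE HIT'
    assert english_to_key(words_out) == b'66666666'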
aceisace/Inkycal
552744bc5d80769c1015d48fd8b13201683ee679
inkycal/display/drivers/epd_7_in_5_colour.py
python
EPD.__init__
(self)
[]
def __init__(self): self.reset_pin = epdconfig.RST_PIN self.dc_pin = epdconfig.DC_PIN self.busy_pin = epdconfig.BUSY_PIN self.cs_pin = epdconfig.CS_PIN self.width = EPD_WIDTH self.height = EPD_HEIGHT
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "reset_pin", "=", "epdconfig", ".", "RST_PIN", "self", ".", "dc_pin", "=", "epdconfig", ".", "DC_PIN", "self", ".", "busy_pin", "=", "epdconfig", ".", "BUSY_PIN", "self", ".", "cs_pin", "=", "epdconfig", ".", "CS_PIN", "self", ".", "width", "=", "EPD_WIDTH", "self", ".", "height", "=", "EPD_HEIGHT" ]
https://github.com/aceisace/Inkycal/blob/552744bc5d80769c1015d48fd8b13201683ee679/inkycal/display/drivers/epd_7_in_5_colour.py#L39-L45
scikit-image/scikit-image
ed642e2bc822f362504d24379dee94978d6fa9de
skimage/morphology/_util.py
python
_resolve_neighborhood
(footprint, connectivity, ndim)
return footprint
Validate or create a footprint (structuring element). Depending on the values of `connectivity` and `footprint` this function either creates a new footprint (`footprint` is None) using `connectivity` or validates the given footprint (`footprint` is not None). Parameters ---------- footprint : ndarray The footprint (structuring) element used to determine the neighborhood of each evaluated pixel (``True`` denotes a connected pixel). It must be a boolean array and have the same number of dimensions as `image`. If neither `footprint` nor `connectivity` are given, all adjacent pixels are considered as part of the neighborhood. connectivity : int A number used to determine the neighborhood of each evaluated pixel. Adjacent pixels whose squared distance from the center is less than or equal to `connectivity` are considered neighbors. Ignored if `footprint` is not None. ndim : int Number of dimensions `footprint` ought to have. Returns ------- footprint : ndarray Validated or new footprint specifying the neighborhood. Examples -------- >>> _resolve_neighborhood(None, 1, 2) array([[False, True, False], [ True, True, True], [False, True, False]]) >>> _resolve_neighborhood(None, None, 3).shape (3, 3, 3)
Validate or create a footprint (structuring element).
[ "Validate", "or", "create", "a", "footprint", "(", "structuring", "element", ")", "." ]
def _resolve_neighborhood(footprint, connectivity, ndim): """Validate or create a footprint (structuring element). Depending on the values of `connectivity` and `footprint` this function either creates a new footprint (`footprint` is None) using `connectivity` or validates the given footprint (`footprint` is not None). Parameters ---------- footprint : ndarray The footprint (structuring) element used to determine the neighborhood of each evaluated pixel (``True`` denotes a connected pixel). It must be a boolean array and have the same number of dimensions as `image`. If neither `footprint` nor `connectivity` are given, all adjacent pixels are considered as part of the neighborhood. connectivity : int A number used to determine the neighborhood of each evaluated pixel. Adjacent pixels whose squared distance from the center is less than or equal to `connectivity` are considered neighbors. Ignored if `footprint` is not None. ndim : int Number of dimensions `footprint` ought to have. Returns ------- footprint : ndarray Validated or new footprint specifying the neighborhood. Examples -------- >>> _resolve_neighborhood(None, 1, 2) array([[False, True, False], [ True, True, True], [False, True, False]]) >>> _resolve_neighborhood(None, None, 3).shape (3, 3, 3) """ if footprint is None: if connectivity is None: connectivity = ndim footprint = ndi.generate_binary_structure(ndim, connectivity) else: # Validate custom structured element footprint = np.asarray(footprint, dtype=bool) # Must specify neighbors for all dimensions if footprint.ndim != ndim: raise ValueError( "number of dimensions in image and footprint do not match" ) # Must only specify direct neighbors if any(s != 3 for s in footprint.shape): raise ValueError("dimension size in footprint is not 3") return footprint
[ "def", "_resolve_neighborhood", "(", "footprint", ",", "connectivity", ",", "ndim", ")", ":", "if", "footprint", "is", "None", ":", "if", "connectivity", "is", "None", ":", "connectivity", "=", "ndim", "footprint", "=", "ndi", ".", "generate_binary_structure", "(", "ndim", ",", "connectivity", ")", "else", ":", "# Validate custom structured element", "footprint", "=", "np", ".", "asarray", "(", "footprint", ",", "dtype", "=", "bool", ")", "# Must specify neighbors for all dimensions", "if", "footprint", ".", "ndim", "!=", "ndim", ":", "raise", "ValueError", "(", "\"number of dimensions in image and footprint do not match\"", ")", "# Must only specify direct neighbors", "if", "any", "(", "s", "!=", "3", "for", "s", "in", "footprint", ".", "shape", ")", ":", "raise", "ValueError", "(", "\"dimension size in footprint is not 3\"", ")", "return", "footprint" ]
https://github.com/scikit-image/scikit-image/blob/ed642e2bc822f362504d24379dee94978d6fa9de/skimage/morphology/_util.py#L210-L264
LinkedInAttic/indextank-service
880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e
storefront/boto/vpc/__init__.py
python
VPCConnection.get_all_customer_gateways
(self, customer_gateway_ids=None, filters=None)
return self.get_list('DescribeCustomerGateways', params, [('item', CustomerGateway)])
Retrieve information about your CustomerGateways. You can filter results to return information only about those CustomerGateways that match your search parameters. Otherwise, all CustomerGateways associated with your account are returned. :type customer_gateway_ids: list :param customer_gateway_ids: A list of strings with the desired CustomerGateway ID's :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the CustomerGateway (pending,available,deleting,deleted) - *type*, the type of customer gateway (ipsec.1) - *ipAddress* the IP address of customer gateway's internet-routable external inteface :rtype: list :return: A list of :class:`boto.vpc.customergateway.CustomerGateway`
Retrieve information about your CustomerGateways. You can filter results to return information only about those CustomerGateways that match your search parameters. Otherwise, all CustomerGateways associated with your account are returned. :type customer_gateway_ids: list :param customer_gateway_ids: A list of strings with the desired CustomerGateway ID's :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the CustomerGateway (pending,available,deleting,deleted) - *type*, the type of customer gateway (ipsec.1) - *ipAddress* the IP address of customer gateway's internet-routable external inteface
[ "Retrieve", "information", "about", "your", "CustomerGateways", ".", "You", "can", "filter", "results", "to", "return", "information", "only", "about", "those", "CustomerGateways", "that", "match", "your", "search", "parameters", ".", "Otherwise", "all", "CustomerGateways", "associated", "with", "your", "account", "are", "returned", ".", ":", "type", "customer_gateway_ids", ":", "list", ":", "param", "customer_gateway_ids", ":", "A", "list", "of", "strings", "with", "the", "desired", "CustomerGateway", "ID", "s", ":", "type", "filters", ":", "list", "of", "tuples", ":", "param", "filters", ":", "A", "list", "of", "tuples", "containing", "filters", ".", "Each", "tuple", "consists", "of", "a", "filter", "key", "and", "a", "filter", "value", ".", "Possible", "filter", "keys", "are", ":", "-", "*", "state", "*", "the", "state", "of", "the", "CustomerGateway", "(", "pending", "available", "deleting", "deleted", ")", "-", "*", "type", "*", "the", "type", "of", "customer", "gateway", "(", "ipsec", ".", "1", ")", "-", "*", "ipAddress", "*", "the", "IP", "address", "of", "customer", "gateway", "s", "internet", "-", "routable", "external", "inteface" ]
def get_all_customer_gateways(self, customer_gateway_ids=None, filters=None): """ Retrieve information about your CustomerGateways. You can filter results to return information only about those CustomerGateways that match your search parameters. Otherwise, all CustomerGateways associated with your account are returned. :type customer_gateway_ids: list :param customer_gateway_ids: A list of strings with the desired CustomerGateway ID's :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the CustomerGateway (pending,available,deleting,deleted) - *type*, the type of customer gateway (ipsec.1) - *ipAddress* the IP address of customer gateway's internet-routable external inteface :rtype: list :return: A list of :class:`boto.vpc.customergateway.CustomerGateway` """ params = {} if customer_gateway_ids: self.build_list_params(params, customer_gateway_ids, 'CustomerGatewayId') if filters: i = 1 for filter in filters: params[('Filter.%d.Key' % i)] = filter[0] params[('Filter.%d.Value.1')] = filter[1] i += 1 return self.get_list('DescribeCustomerGateways', params, [('item', CustomerGateway)])
[ "def", "get_all_customer_gateways", "(", "self", ",", "customer_gateway_ids", "=", "None", ",", "filters", "=", "None", ")", ":", "params", "=", "{", "}", "if", "customer_gateway_ids", ":", "self", ".", "build_list_params", "(", "params", ",", "customer_gateway_ids", ",", "'CustomerGatewayId'", ")", "if", "filters", ":", "i", "=", "1", "for", "filter", "in", "filters", ":", "params", "[", "(", "'Filter.%d.Key'", "%", "i", ")", "]", "=", "filter", "[", "0", "]", "params", "[", "(", "'Filter.%d.Value.1'", ")", "]", "=", "filter", "[", "1", "]", "i", "+=", "1", "return", "self", ".", "get_list", "(", "'DescribeCustomerGateways'", ",", "params", ",", "[", "(", "'item'", ",", "CustomerGateway", ")", "]", ")" ]
https://github.com/LinkedInAttic/indextank-service/blob/880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e/storefront/boto/vpc/__init__.py#L104-L137
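The helper above builds EC2-style query parameters from (key, value) filter tuples; note the `% i` on the value key, without which every filter would overwrite the literal key 'Filter.%d.Value.1'. A minimal, self-contained sketch of that encoding, with made-up filter values:

def build_filter_params(filters):
    # Encode filters as Filter.1.Key, Filter.1.Value.1, Filter.2.Key, ...
    params = {}
    for i, (key, value) in enumerate(filters, start=1):
        params['Filter.%d.Key' % i] = key
        params['Filter.%d.Value.1' % i] = value
    return params

print(build_filter_params([('state', 'available'), ('type', 'ipsec.1')]))
# {'Filter.1.Key': 'state', 'Filter.1.Value.1': 'available',
#  'Filter.2.Key': 'type', 'Filter.2.Value.1': 'ipsec.1'}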
thatbrguy/Pedestrian-Detection
b11c7d6bed0ff320811726fe1c429be26a87da9e
object_detection/anchor_generators/multiple_grid_anchor_generator.py
python
MultipleGridAnchorGenerator.num_anchors_per_location
(self)
return [len(box_specs) for box_specs in self._box_specs]
Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the Generate function.
Returns the number of anchors per spatial location.
[ "Returns", "the", "number", "of", "anchors", "per", "spatial", "location", "." ]
def num_anchors_per_location(self): """Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the Generate function. """ return [len(box_specs) for box_specs in self._box_specs]
[ "def", "num_anchors_per_location", "(", "self", ")", ":", "return", "[", "len", "(", "box_specs", ")", "for", "box_specs", "in", "self", ".", "_box_specs", "]" ]
https://github.com/thatbrguy/Pedestrian-Detection/blob/b11c7d6bed0ff320811726fe1c429be26a87da9e/object_detection/anchor_generators/multiple_grid_anchor_generator.py#L131-L138
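Each entry of `_box_specs` is one list of (scale, aspect_ratio) pairs per feature map, so the per-location anchor count is just each list's length. A tiny sketch with invented specs:

# Hypothetical box specs: one list of (scale, aspect_ratio) pairs per feature map.
box_specs = [
    [(0.1, 1.0), (0.1, 2.0), (0.1, 0.5)],  # feature map 1: 3 anchors per location
    [(0.2, 1.0), (0.2, 2.0)],              # feature map 2: 2 anchors per location
]
print([len(specs) for specs in box_specs])  # [3, 2]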
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
src/python-sitelib/pyxpcomProfiler.py
python
getXPCOMRecorder
(xpcomObject)
return recorder
Return the base xpcom recorder object for this python xpcom object. Tries to record all the same xpcom instances for one interface in the same recorder object.
Return the base xpcom recorder object for this python xpcom object.
[ "Return", "the", "base", "xpcom", "recorder", "object", "for", "this", "python", "xpcom", "object", "." ]
def getXPCOMRecorder(xpcomObject): """Return the base xpcom recorder object for this python xpcom object. Tries to record all the same xpcom instances for one interface in the same recorder object. """ names = None if hasattr(xpcomObject, "_interface_names_"): names = [x.name for x in xpcomObject._interface_names_] if not names: com_interfaces = getattr(xpcomObject, "_com_interfaces_", None) if com_interfaces: if not isinstance(com_interfaces, (tuple, list)): names = [com_interfaces.name] else: names = [x.name for x in com_interfaces] if names is not None: name = "_".join(names) else: name = repr(xpcomObject) recorder = xpcom_recordings.get(name) if recorder is None: recorder = XPCOMRecorder(name) xpcom_recordings[name] = recorder return recorder
[ "def", "getXPCOMRecorder", "(", "xpcomObject", ")", ":", "names", "=", "None", "if", "hasattr", "(", "xpcomObject", ",", "\"_interface_names_\"", ")", ":", "names", "=", "[", "x", ".", "name", "for", "x", "in", "xpcomObject", ".", "_interface_names_", "]", "if", "not", "names", ":", "com_interfaces", "=", "getattr", "(", "xpcomObject", ",", "\"_com_interfaces_\"", ",", "None", ")", "if", "com_interfaces", ":", "if", "not", "isinstance", "(", "com_interfaces", ",", "(", "tuple", ",", "list", ")", ")", ":", "names", "=", "[", "com_interfaces", ".", "name", "]", "else", ":", "names", "=", "[", "x", ".", "name", "for", "x", "in", "com_interfaces", "]", "if", "names", "is", "not", "None", ":", "name", "=", "\"_\"", ".", "join", "(", "names", ")", "else", ":", "name", "=", "repr", "(", "xpcomObject", ")", "recorder", "=", "xpcom_recordings", ".", "get", "(", "name", ")", "if", "recorder", "is", "None", ":", "recorder", "=", "XPCOMRecorder", "(", "name", ")", "xpcom_recordings", "[", "name", "]", "=", "recorder", "return", "recorder" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/python-sitelib/pyxpcomProfiler.py#L122-L146
quantumlib/Cirq
89f88b01d69222d3f1ec14d649b7b3a85ed9211f
dev_tools/pr_monitor.py
python
get_repo_ref
(repo: GithubRepository, ref: str)
return payload
Get a given github reference. References: https://developer.github.com/v3/git/refs/#get-a-reference Args: repo: The github repo to get the reference from. ref: The id of the reference. Returns: The raw response of the request for the reference. Raises: RuntimeError: If the request does not return status 200 (success).
Get a given github reference.
[ "Get", "a", "given", "github", "reference", "." ]
def get_repo_ref(repo: GithubRepository, ref: str) -> Dict[str, Any]: """Get a given github reference. References: https://developer.github.com/v3/git/refs/#get-a-reference Args: repo: The github repo to get the reference from. ref: The id of the reference. Returns: The raw response of the request for the reference. Raises: RuntimeError: If the request does not return status 200 (success). """ url = f"https://api.github.com/repos/{repo.organization}/{repo.name}/git/refs/{ref}" response = repo.get(url) if response.status_code != 200: raise RuntimeError( 'Refs get failed. Code: {}. Content: {!r}.'.format( response.status_code, response.content ) ) payload = json.JSONDecoder().decode(response.content.decode()) return payload
[ "def", "get_repo_ref", "(", "repo", ":", "GithubRepository", ",", "ref", ":", "str", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "url", "=", "f\"https://api.github.com/repos/{repo.organization}/{repo.name}/git/refs/{ref}\"", "response", "=", "repo", ".", "get", "(", "url", ")", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "RuntimeError", "(", "'Refs get failed. Code: {}. Content: {!r}.'", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "content", ")", ")", "payload", "=", "json", ".", "JSONDecoder", "(", ")", ".", "decode", "(", "response", ".", "content", ".", "decode", "(", ")", ")", "return", "payload" ]
https://github.com/quantumlib/Cirq/blob/89f88b01d69222d3f1ec14d649b7b3a85ed9211f/dev_tools/pr_monitor.py#L574-L600
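The same refs endpoint can be hit directly; a rough sketch using the widely available `requests` package (the `repo.get` helper above presumably handles authentication, which this sketch only approximates with an optional token; the owner/name/ref arguments shown are illustrative):

import json
import requests  # assumed installed

def get_ref(owner, name, ref, token=None):
    # GET /repos/{owner}/{name}/git/refs/{ref}, mirroring get_repo_ref above.
    url = 'https://api.github.com/repos/%s/%s/git/refs/%s' % (owner, name, ref)
    headers = {'Authorization': 'token %s' % token} if token else {}
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        raise RuntimeError('Refs get failed. Code: %d. Content: %r.'
                           % (response.status_code, response.content))
    return json.loads(response.content.decode())

# get_ref('quantumlib', 'Cirq', 'heads/master')  # network call; arguments are examples only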
menpo/menpo
a61500656c4fc2eea82497684f13cc31a605550b
menpo/model/linear.py
python
MeanLinearVectorModel.mean
(self)
return self._mean
r""" Return the mean of the model. :type: `ndarray`
r""" Return the mean of the model.
[ "r", "Return", "the", "mean", "of", "the", "model", "." ]
def mean(self): r""" Return the mean of the model. :type: `ndarray` """ return self._mean
[ "def", "mean", "(", "self", ")", ":", "return", "self", ".", "_mean" ]
https://github.com/menpo/menpo/blob/a61500656c4fc2eea82497684f13cc31a605550b/menpo/model/linear.py#L315-L321
Cisco-Talos/GhIDA
a396916ae53e46adf3dca918b810f5db046af015
ghida_plugin/idaxml.py
python
XmlExporter.export_typeinfo_cmt
(self, cmt)
Exports comment containing type information for data and functions. Args: cmt: String containing type info.
Exports comment containing type information for data and functions.
[ "Exports", "comment", "containing", "type", "information", "for", "data", "and", "functions", "." ]
def export_typeinfo_cmt(self, cmt): """ Exports comment containing type information for data and functions. Args: cmt: String containing type info. """ # older versions of IDAPython returned a '\n' at end of cmt if(len(cmt) > 0): while cmt[-1] == '\n': cmt = cmt[:-1] self.write_comment_element(TYPEINFO_CMT, cmt)
[ "def", "export_typeinfo_cmt", "(", "self", ",", "cmt", ")", ":", "# older versions of IDAPython returned a '\\n' at end of cmt", "if", "(", "len", "(", "cmt", ")", ">", "0", ")", ":", "while", "cmt", "[", "-", "1", "]", "==", "'\\n'", ":", "cmt", "=", "cmt", "[", ":", "-", "1", "]", "self", ".", "write_comment_element", "(", "TYPEINFO_CMT", ",", "cmt", ")" ]
https://github.com/Cisco-Talos/GhIDA/blob/a396916ae53e46adf3dca918b810f5db046af015/ghida_plugin/idaxml.py#L1676-L1687
cloudera/impyla
0c736af4cad2bade9b8e313badc08ec50e81c948
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
python
Iface.alter_table_with_environment_context
(self, dbname, tbl_name, new_tbl, environment_context)
Parameters: - dbname - tbl_name - new_tbl - environment_context
Parameters: - dbname - tbl_name - new_tbl - environment_context
[ "Parameters", ":", "-", "dbname", "-", "tbl_name", "-", "new_tbl", "-", "environment_context" ]
def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context): """ Parameters: - dbname - tbl_name - new_tbl - environment_context """ pass
[ "def", "alter_table_with_environment_context", "(", "self", ",", "dbname", ",", "tbl_name", ",", "new_tbl", ",", "environment_context", ")", ":", "pass" ]
https://github.com/cloudera/impyla/blob/0c736af4cad2bade9b8e313badc08ec50e81c948/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L265-L273
keras-team/keras
5caa668b6a415675064a730f5eb46ecc08e40f65
keras/saving/saved_model/base_serialization.py
python
SavedModelSaver.list_extra_dependencies_for_serialization
(self, serialization_cache)
return self.objects_to_serialize(serialization_cache)
Lists extra dependencies to serialize to SavedModel. By overriding this method, extra dependencies can be attached to the serialized Layer. For example, this is used to save the list of `variables` and `trainable_variables`, which are python properties in a Layer object, but are represented as a static list in the SavedModel. Args: serialization_cache: A dictionary shared between all objects in the same object graph. This object is passed to both `_list_extra_dependencies_for_serialization` and `_list_functions_for_serialization`. Returns: A dictionary mapping attribute names to trackable objects. The entire list of attributes is listed in the `saved_model._LayerAttributes` class.
Lists extra dependencies to serialize to SavedModel.
[ "Lists", "extra", "dependencies", "to", "serialize", "to", "SavedModel", "." ]
def list_extra_dependencies_for_serialization(self, serialization_cache): """Lists extra dependencies to serialize to SavedModel. By overriding this method, extra dependencies can be attached to the serialized Layer. For example, this is used to save the list of `variables` and `trainable_variables`, which are python properties in a Layer object, but are represented as a static list in the SavedModel. Args: serialization_cache: A dictionary shared between all objects in the same object graph. This object is passed to both `_list_extra_dependencies_for_serialization` and `_list_functions_for_serialization`. Returns: A dictionary mapping attribute names to trackable objects. The entire list of attributes is listed in the `saved_model._LayerAttributes` class. """ if not utils.should_save_traces(): return {} return self.objects_to_serialize(serialization_cache)
[ "def", "list_extra_dependencies_for_serialization", "(", "self", ",", "serialization_cache", ")", ":", "if", "not", "utils", ".", "should_save_traces", "(", ")", ":", "return", "{", "}", "return", "self", ".", "objects_to_serialize", "(", "serialization_cache", ")" ]
https://github.com/keras-team/keras/blob/5caa668b6a415675064a730f5eb46ecc08e40f65/keras/saving/saved_model/base_serialization.py#L56-L77
jobovy/galpy
8e6a230bbe24ce16938db10053f92eb17fe4bb52
galpy/df/surfaceSigmaProfile.py
python
expSurfaceSigmaProfile.sigma2Derivative
(self,R,log=False)
NAME: sigma2Derivative PURPOSE: return the derivative wrt R of the sigma_R^2 profile at this R INPUT: R - Galactocentric radius (/ro) log - if True, return the derivative of the log (default: False) OUTPUT: Sigma_R^2'(R) or (log Sigma_R^2(R))' HISTORY: 2011-03-24 - Written - Bovy (NYU)
NAME: sigma2Derivative PURPOSE: return the derivative wrt R of the sigma_R^2 profile at this R INPUT: R - Galactocentric radius (/ro) log - if True, return the derivative of the log (default: False) OUTPUT: Sigma_R^2'(R) or (log Sigma_R^2(R))' HISTORY: 2011-03-24 - Written - Bovy (NYU)
[ "NAME", ":", "sigmaDerivative", "PURPOSE", ":", "return", "the", "derivative", "wrt", "R", "of", "the", "sigma_R^2", "profile", "at", "this", "R", "INPUT", ":", "R", "-", "Galactocentric", "radius", "(", "/", "ro", ")", "log", "-", "if", "True", "return", "the", "derivative", "of", "the", "log", "(", "default", ":", "False", ")", "OUTPUT", ":", "Sigma_R^2", "(", "R", ")", "or", "(", "log", "Sigma_R^2", "(", "r", ")", ")", "HISTORY", ":", "2011", "-", "03", "-", "24", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
def sigma2Derivative(self,R,log=False): """ NAME: sigma2Derivative PURPOSE: return the derivative wrt R of the sigma_R^2 profile at this R INPUT: R - Galactocentric radius (/ro) log - if True, return the derivative of the log (default: False) OUTPUT: Sigma_R^2'(R) or (log Sigma_R^2(R))' HISTORY: 2011-03-24 - Written - Bovy (NYU) """ if log: return -2./self._params[1] else: return self._params[2]**2.*numpy.exp(-2.*(R-1.)/self._params[1])\ *(-2./self._params[1])
[ "def", "sigma2Derivative", "(", "self", ",", "R", ",", "log", "=", "False", ")", ":", "if", "log", ":", "return", "-", "2.", "/", "self", ".", "_params", "[", "1", "]", "else", ":", "return", "self", ".", "_params", "[", "2", "]", "**", "2.", "*", "numpy", ".", "exp", "(", "-", "2.", "*", "(", "R", "-", "1.", ")", "/", "self", ".", "_params", "[", "1", "]", ")", "*", "(", "-", "2.", "/", "self", ".", "_params", "[", "1", "]", ")" ]
https://github.com/jobovy/galpy/blob/8e6a230bbe24ce16938db10053f92eb17fe4bb52/galpy/df/surfaceSigmaProfile.py#L159-L177
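Reading the profile off the code above, with h standing for self._params[1] (the radial scale length) and sigma_0 for self._params[2] (both labels mine, not the source's), the two branches follow from one line of calculus:

\sigma_R^2(R) = \sigma_0^2 \, e^{-2(R-1)/h}, \qquad \frac{d\sigma_R^2}{dR} = -\frac{2}{h}\,\sigma_R^2(R), \qquad \frac{d\ln\sigma_R^2}{dR} = -\frac{2}{h}

so log=True returns the constant -2/h while log=False returns the full derivative, exactly as implemented.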
s-leger/archipack
5a6243bf1edf08a6b429661ce291dacb551e5f8a
pygeos/op_relate.py
python
RelateComputer.labelIsolatedNode
(self, node, targetIndex: int)
* Label an isolated node with its relationship to the target geometry.
* Label an isolated node with its relationship to the target geometry.
[ "*", "Label", "an", "isolated", "node", "with", "its", "relationship", "to", "the", "target", "geometry", "." ]
def labelIsolatedNode(self, node, targetIndex: int) -> None: """ * Label an isolated node with its relationship to the target geometry. """ loc = self.ptLocator.locate(node.coord, self.arg[targetIndex].geom) node.label.setAllLocations(targetIndex, loc)
[ "def", "labelIsolatedNode", "(", "self", ",", "node", ",", "targetIndex", ":", "int", ")", "->", "None", ":", "loc", "=", "self", ".", "ptLocator", ".", "locate", "(", "node", ".", "coord", ",", "self", ".", "arg", "[", "targetIndex", "]", ".", "geom", ")", "node", ".", "label", ".", "setAllLocations", "(", "targetIndex", ",", "loc", ")" ]
https://github.com/s-leger/archipack/blob/5a6243bf1edf08a6b429661ce291dacb551e5f8a/pygeos/op_relate.py#L776-L781
redhat-imaging/imagefactory
176f6e045e1df049d50f33a924653128d5ab8b27
imgfac/rest/bottle.py
python
BaseTemplate.search
(cls, name, lookup=[])
Search name in all directories specified in lookup. First without, then with common extensions. Return first hit.
Search name in all directories specified in lookup. First without, then with common extensions. Return first hit.
[ "Search", "name", "in", "all", "directories", "specified", "in", "lookup", ".", "First", "without", "then", "with", "common", "extensions", ".", "Return", "first", "hit", "." ]
def search(cls, name, lookup=[]): """ Search name in all directories specified in lookup. First without, then with common extensions. Return first hit. """ if not lookup: depr('The template lookup path list should not be empty.') #0.12 lookup = ['.'] if os.path.isabs(name) and os.path.isfile(name): depr('Absolute template path names are deprecated.') #0.12 return os.path.abspath(name) for spath in lookup: spath = os.path.abspath(spath) + os.sep fname = os.path.abspath(os.path.join(spath, name)) if not fname.startswith(spath): continue if os.path.isfile(fname): return fname for ext in cls.extensions: if os.path.isfile('%s.%s' % (fname, ext)): return '%s.%s' % (fname, ext)
[ "def", "search", "(", "cls", ",", "name", ",", "lookup", "=", "[", "]", ")", ":", "if", "not", "lookup", ":", "depr", "(", "'The template lookup path list should not be empty.'", ")", "#0.12", "lookup", "=", "[", "'.'", "]", "if", "os", ".", "path", ".", "isabs", "(", "name", ")", "and", "os", ".", "path", ".", "isfile", "(", "name", ")", ":", "depr", "(", "'Absolute template path names are deprecated.'", ")", "#0.12", "return", "os", ".", "path", ".", "abspath", "(", "name", ")", "for", "spath", "in", "lookup", ":", "spath", "=", "os", ".", "path", ".", "abspath", "(", "spath", ")", "+", "os", ".", "sep", "fname", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "spath", ",", "name", ")", ")", "if", "not", "fname", ".", "startswith", "(", "spath", ")", ":", "continue", "if", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "return", "fname", "for", "ext", "in", "cls", ".", "extensions", ":", "if", "os", ".", "path", ".", "isfile", "(", "'%s.%s'", "%", "(", "fname", ",", "ext", ")", ")", ":", "return", "'%s.%s'", "%", "(", "fname", ",", "ext", ")" ]
https://github.com/redhat-imaging/imagefactory/blob/176f6e045e1df049d50f33a924653128d5ab8b27/imgfac/rest/bottle.py#L3230-L3248
joerick/pyinstrument
d3c45164a385021f366c1081baec18a1a226a573
pyinstrument/renderers/speedscope.py
python
SpeedscopeRenderer.render_frame
(self, frame: BaseFrame | None)
return events_array
Builds up a list of speedscope events that are used to populate the "events" array in speedscope-formatted JSON. This method has two notable side effects: * it populates the self._frame_to_index dictionary that matches speedscope frames with their positions in the "shared" array of speedscope output; this dictionary will be used to write this "shared" array in the render method * it accumulates a running total of time elapsed by accumulating the self_time spent in each pyinstrument frame; this running total is used by speedscope events to construct a flame chart.
Builds up a list of speedscope events that are used to populate the "events" array in speedscope-formatted JSON.
[ "Builds", "up", "a", "list", "of", "speedscope", "events", "that", "are", "used", "to", "populate", "the", "events", "array", "in", "speedscope", "-", "formatted", "JSON", "." ]
def render_frame(self, frame: BaseFrame | None) -> list[SpeedscopeEvent]: """ Builds up a list of speedscope events that are used to populate the "events" array in speedscope-formatted JSON. This method has two notable side effects: * it populates the self._frame_to_index dictionary that matches speedscope frames with their positions in the "shared" array of speedscope output; this dictionary will be used to write this "shared" array in the render method * it accumulates a running total of time elapsed by accumulating the self_time spent in each pyinstrument frame; this running total is used by speedscope events to construct a flame chart. """ # if frame is None, recursion bottoms out; no event frames # need to be added if frame is None: return [] # Otherwise, form a speedscope frame and add it to the frame # to index map if the frame is not already a key in that map. sframe = SpeedscopeFrame(frame.function, frame.file_path, frame.line_no) if sframe not in self._frame_to_index: self._frame_to_index[sframe] = len(self._frame_to_index) # Get the frame index and add a speedscope event corresponding # to opening a stack frame. sframe_index = self._frame_to_index[sframe] open_event = SpeedscopeEvent(SpeedscopeEventType.OPEN, self._event_time, sframe_index) events_array: list[SpeedscopeEvent] = [open_event] # Add stack frame open and close events for all child frames # of this frame. for child in frame.children: events_array.extend(self.render_frame(child)) # Update event time for closing this stack frame. # # If number of frames approaches 1e16 * desired accuracy # level, consider using Neumaier-Kahan summation; improves # worst-case relative accuracy of sum from O(num_summands * # eps) to (2 * eps + O(num_summands * eps * eps)), where eps # is IEEE-754 double precision unit roundoff, approximately # 1e-16. Average case relative accuracy expressions replace # num_summands with sqrt(num_summands). However, Kahan # summation quadruples operation count of sum, and Neumaier # variant also adds a branch & swap for each summand. Pairwise # summation isn't an option here because a running total is # needed. self._event_time += frame.self_time # Add event closing this stack frame. close_event = SpeedscopeEvent(SpeedscopeEventType.CLOSE, self._event_time, sframe_index) events_array.append(close_event) return events_array
[ "def", "render_frame", "(", "self", ",", "frame", ":", "BaseFrame", "|", "None", ")", "->", "list", "[", "SpeedscopeEvent", "]", ":", "# if frame is None, recursion bottoms out; no event frames", "# need to be added", "if", "frame", "is", "None", ":", "return", "[", "]", "# Otherwise, form a speedscope frame and add it to the frame", "# to index map if the frame is not already a key in that map.", "sframe", "=", "SpeedscopeFrame", "(", "frame", ".", "function", ",", "frame", ".", "file_path", ",", "frame", ".", "line_no", ")", "if", "sframe", "not", "in", "self", ".", "_frame_to_index", ":", "self", ".", "_frame_to_index", "[", "sframe", "]", "=", "len", "(", "self", ".", "_frame_to_index", ")", "# Get the frame index and add a speedscope event corresponding", "# to opening a stack frame.", "sframe_index", "=", "self", ".", "_frame_to_index", "[", "sframe", "]", "open_event", "=", "SpeedscopeEvent", "(", "SpeedscopeEventType", ".", "OPEN", ",", "self", ".", "_event_time", ",", "sframe_index", ")", "events_array", ":", "list", "[", "SpeedscopeEvent", "]", "=", "[", "open_event", "]", "# Add stack frame open and close events for all child frames", "# of this frame.", "for", "child", "in", "frame", ".", "children", ":", "events_array", ".", "extend", "(", "self", ".", "render_frame", "(", "child", ")", ")", "# Update event time for closing this stack frame.", "#", "# If number of frames approaches 1e16 * desired accuracy", "# level, consider using Neumaier-Kahan summation; improves", "# worst-case relative accuracy of sum from O(num_summands *", "# eps) to (2 * eps + O(num_summands * eps * eps)), where eps", "# is IEEE-754 double precision unit roundoff, approximately", "# 1e-16. Average case relative accuracy expressions replace", "# num_summands with sqrt(num_summands). However, Kahan", "# summation quadruples operation count of sum, and Neumaier", "# variant also adds a branch & swap for each summand. Pairwise", "# summation isn't an option here because a running total is", "# needed.", "self", ".", "_event_time", "+=", "frame", ".", "self_time", "# Add event closing this stack frame.", "close_event", "=", "SpeedscopeEvent", "(", "SpeedscopeEventType", ".", "CLOSE", ",", "self", ".", "_event_time", ",", "sframe_index", ")", "events_array", ".", "append", "(", "close_event", ")", "return", "events_array" ]
https://github.com/joerick/pyinstrument/blob/d3c45164a385021f366c1081baec18a1a226a573/pyinstrument/renderers/speedscope.py#L144-L203
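The depth-first open/close pattern is easier to see on a toy tree; this sketch uses an invented Frame stand-in and fabricated self_time values, not pyinstrument's classes:

from dataclasses import dataclass, field

@dataclass
class Frame:  # toy stand-in for a pyinstrument frame
    function: str
    self_time: float
    children: list = field(default_factory=list)

def events(frame, t=0.0):
    # Open before descending, close after; the clock advances by self_time.
    out = [('O', frame.function, t)]
    for child in frame.children:
        child_events = events(child, t)
        out.extend(child_events)
        t = child_events[-1][2]  # resume at the child's close time
    t += frame.self_time
    out.append(('C', frame.function, t))
    return out

root = Frame('main', 0.1, [Frame('work', 0.5)])
print(events(root))
# [('O', 'main', 0.0), ('O', 'work', 0.0), ('C', 'work', 0.5), ('C', 'main', 0.6)]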
NVIDIA/DeepLearningExamples
589604d49e016cd9ef4525f7abcc9c7b826cfc5e
PyTorch/Translation/Transformer/fairseq/data/dictionary.py
python
Dictionary.update
(self, new_dict)
Updates counts from new dictionary.
Updates counts from new dictionary.
[ "Updates", "counts", "from", "new", "dictionary", "." ]
def update(self, new_dict): """Updates counts from new dictionary.""" for word in new_dict.symbols: idx2 = new_dict.indices[word] if word in self.indices: idx = self.indices[word] self.count[idx] = self.count[idx] + new_dict.count[idx2] else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(new_dict.count[idx2])
[ "def", "update", "(", "self", ",", "new_dict", ")", ":", "for", "word", "in", "new_dict", ".", "symbols", ":", "idx2", "=", "new_dict", ".", "indices", "[", "word", "]", "if", "word", "in", "self", ".", "indices", ":", "idx", "=", "self", ".", "indices", "[", "word", "]", "self", ".", "count", "[", "idx", "]", "=", "self", ".", "count", "[", "idx", "]", "+", "new_dict", ".", "count", "[", "idx2", "]", "else", ":", "idx", "=", "len", "(", "self", ".", "symbols", ")", "self", ".", "indices", "[", "word", "]", "=", "idx", "self", ".", "symbols", ".", "append", "(", "word", ")", "self", ".", "count", ".", "append", "(", "new_dict", ".", "count", "[", "idx2", "]", ")" ]
https://github.com/NVIDIA/DeepLearningExamples/blob/589604d49e016cd9ef4525f7abcc9c7b826cfc5e/PyTorch/Translation/Transformer/fairseq/data/dictionary.py#L85-L96
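The merge rule (sum counts for shared symbols, append unseen ones) can be shown with plain lists; the vocabulary below is made up:

def merge_counts(symbols, counts, new_symbols, new_counts):
    # Mirrors Dictionary.update: shared words add counts, new words are appended.
    indices = {w: i for i, w in enumerate(symbols)}
    for word, count in zip(new_symbols, new_counts):
        if word in indices:
            counts[indices[word]] += count
        else:
            indices[word] = len(symbols)
            symbols.append(word)
            counts.append(count)
    return symbols, counts

print(merge_counts(['the', 'cat'], [5, 2], ['cat', 'dog'], [3, 1]))
# (['the', 'cat', 'dog'], [5, 5, 1])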
feisuzhu/thbattle
ac0dee1b2d86de7664289cf432b157ef25427ba1
src/pyglet/font/base.py
python
Glyph.draw_quad_vertices
(self)
Debug method. Use the higher level APIs for performance and kerning.
Debug method.
[ "Debug", "method", "." ]
def draw_quad_vertices(self): '''Debug method. Use the higher level APIs for performance and kerning. ''' glTexCoord3f(*self.tex_coords[:3]) glVertex2f(self.vertices[0], self.vertices[1]) glTexCoord3f(*self.tex_coords[3:6]) glVertex2f(self.vertices[2], self.vertices[1]) glTexCoord3f(*self.tex_coords[6:9]) glVertex2f(self.vertices[2], self.vertices[3]) glTexCoord3f(*self.tex_coords[9:12]) glVertex2f(self.vertices[0], self.vertices[3])
[ "def", "draw_quad_vertices", "(", "self", ")", ":", "glTexCoord3f", "(", "*", "self", ".", "tex_coords", "[", ":", "3", "]", ")", "glVertex2f", "(", "self", ".", "vertices", "[", "0", "]", ",", "self", ".", "vertices", "[", "1", "]", ")", "glTexCoord3f", "(", "*", "self", ".", "tex_coords", "[", "3", ":", "6", "]", ")", "glVertex2f", "(", "self", ".", "vertices", "[", "2", "]", ",", "self", ".", "vertices", "[", "1", "]", ")", "glTexCoord3f", "(", "*", "self", ".", "tex_coords", "[", "6", ":", "9", "]", ")", "glVertex2f", "(", "self", ".", "vertices", "[", "2", "]", ",", "self", ".", "vertices", "[", "3", "]", ")", "glTexCoord3f", "(", "*", "self", ".", "tex_coords", "[", "9", ":", "12", "]", ")", "glVertex2f", "(", "self", ".", "vertices", "[", "0", "]", ",", "self", ".", "vertices", "[", "3", "]", ")" ]
https://github.com/feisuzhu/thbattle/blob/ac0dee1b2d86de7664289cf432b157ef25427ba1/src/pyglet/font/base.py#L189-L201
IdentityPython/pysaml2
6badb32d212257bd83ffcc816f9b625f68281b47
src/saml2/authn_context/ppt.py
python
identification_type__from_string
(xml_string)
return saml2.create_class_from_xml_string(IdentificationType_, xml_string)
[]
def identification_type__from_string(xml_string): return saml2.create_class_from_xml_string(IdentificationType_, xml_string)
[ "def", "identification_type__from_string", "(", "xml_string", ")", ":", "return", "saml2", ".", "create_class_from_xml_string", "(", "IdentificationType_", ",", "xml_string", ")" ]
https://github.com/IdentityPython/pysaml2/blob/6badb32d212257bd83ffcc816f9b625f68281b47/src/saml2/authn_context/ppt.py#L1447-L1448
aws/sagemaker-tensorflow-training-toolkit
38db16c9d3fcbda6f9d8bc31261398fb4617bfc8
benchmarks/horovod-resnet/train_imagenet_resnet_hvd.py
python
LayerBuilder.dense_linear
(self, inputs, units, **kwargs)
return tf.layers.dense(inputs, units, activation=None)
[]
def dense_linear(self, inputs, units, **kwargs): return tf.layers.dense(inputs, units, activation=None)
[ "def", "dense_linear", "(", "self", ",", "inputs", ",", "units", ",", "*", "*", "kwargs", ")", ":", "return", "tf", ".", "layers", ".", "dense", "(", "inputs", ",", "units", ",", "activation", "=", "None", ")" ]
https://github.com/aws/sagemaker-tensorflow-training-toolkit/blob/38db16c9d3fcbda6f9d8bc31261398fb4617bfc8/benchmarks/horovod-resnet/train_imagenet_resnet_hvd.py#L155-L156
openembedded/bitbake
98407efc8c670abd71d3fa88ec3776ee9b5c38f3
lib/bb/pysh/pyshyacc.py
python
parse
(input, eof=False, debug=False)
return yacc.parse(lexer=lexer, debug=debug), remaining
Parse a whole script at once and return the generated AST and unconsumed data in a tuple. NOTE: eof is probably meaningless for now, the parser being unable to work in pull mode. It should be set to True.
Parse a whole script at once and return the generated AST and unconsumed data in a tuple. NOTE: eof is probably meaningless for now, the parser being unable to work in pull mode. It should be set to True.
[ "Parse", "a", "whole", "script", "at", "once", "and", "return", "the", "generated", "AST", "and", "unconsumed", "data", "in", "a", "tuple", ".", "NOTE", ":", "eof", "is", "probably", "meaningless", "for", "now", "the", "parser", "being", "unable", "to", "work", "in", "pull", "mode", ".", "It", "should", "be", "set", "to", "True", "." ]
def parse(input, eof=False, debug=False): """Parse a whole script at once and return the generated AST and unconsumed data in a tuple. NOTE: eof is probably meaningless for now, the parser being unable to work in pull mode. It should be set to True. """ lexer = pyshlex.PLYLexer() remaining = lexer.add(input, eof) if lexer.is_empty(): return [], remaining if debug: debug = 2 return yacc.parse(lexer=lexer, debug=debug), remaining
[ "def", "parse", "(", "input", ",", "eof", "=", "False", ",", "debug", "=", "False", ")", ":", "lexer", "=", "pyshlex", ".", "PLYLexer", "(", ")", "remaining", "=", "lexer", ".", "add", "(", "input", ",", "eof", ")", "if", "lexer", ".", "is_empty", "(", ")", ":", "return", "[", "]", ",", "remaining", "if", "debug", ":", "debug", "=", "2", "return", "yacc", ".", "parse", "(", "lexer", "=", "lexer", ",", "debug", "=", "debug", ")", ",", "remaining" ]
https://github.com/openembedded/bitbake/blob/98407efc8c670abd71d3fa88ec3776ee9b5c38f3/lib/bb/pysh/pyshyacc.py#L664-L677
rndusr/stig
334f03e2e3eda7c1856dd5489f0265a47b9861b6
stig/client/filters/utils.py
python
_either_past_or_future
(item_value, user_value)
return item_value_in_future == user_value_in_future
Return True if `item_value` and `user_value` are either both in the past or both in the future, False otherwise
Return True if `item_value` and `user_value` are either both in the past or both in the future, False otherwise
[ "Return", "True", "if", "item_value", "and", "user_value", "are", "equal", "both", "in", "the", "past", "or", "both", "in", "the", "future", "False", "otherwise" ]
def _either_past_or_future(item_value, user_value): """ Return True if `item_value` and `user_value` are either both in the past or both in the future, False otherwise """ type_item_value = type(item_value) type_user_value = type(user_value) if type_user_value is Timestamp: user_value_in_future = user_value.in_future elif type_user_value is Timedelta: user_value_in_future = user_value > 0 if type_item_value is Timestamp: item_value_in_future = item_value.in_future elif type_item_value is Timedelta: item_value_in_future = item_value > 0 return item_value_in_future == user_value_in_future
[ "def", "_either_past_or_future", "(", "item_value", ",", "user_value", ")", ":", "type_item_value", "=", "type", "(", "item_value", ")", "type_user_value", "=", "type", "(", "user_value", ")", "if", "type_user_value", "is", "Timestamp", ":", "user_value_in_future", "=", "user_value", ".", "in_future", "elif", "type_user_value", "is", "Timedelta", ":", "user_value_in_future", "=", "user_value", ">", "0", "if", "type_item_value", "is", "Timestamp", ":", "item_value_in_future", "=", "item_value", ".", "in_future", "elif", "type_item_value", "is", "Timedelta", ":", "item_value_in_future", "=", "item_value", ">", "0", "return", "item_value_in_future", "==", "user_value_in_future" ]
https://github.com/rndusr/stig/blob/334f03e2e3eda7c1856dd5489f0265a47b9861b6/stig/client/filters/utils.py#L98-L116
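The check reduces each value to a boolean "is it in the future?" and compares the two booleans. A sketch with simplified stand-ins (a stub Timestamp carrying only the flag, and a bare number playing Timedelta, positive meaning future), not stig's real classes:

class Timestamp:  # stub: only the in_future flag matters here
    def __init__(self, in_future):
        self.in_future = in_future

def same_side_of_now(a, b):  # hypothetical name for the comparison
    a_future = a.in_future if isinstance(a, Timestamp) else a > 0
    b_future = b.in_future if isinstance(b, Timestamp) else b > 0
    return a_future == b_future

print(same_side_of_now(Timestamp(True), 3600))   # True: both in the future
print(same_side_of_now(Timestamp(False), 3600))  # False: past vs. future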
python/cpython
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
Lib/idlelib/config.py
python
IdleConf.__GetRawExtensionKeys
(self,extensionName)
return extKeys
Return dict {configurable extensionName event : keybinding list}. Events come from default config extension_cfgBindings section. Keybinding lists come from the splitting of GetOption, which tries user config before default config.
Return dict {configurable extensionName event : keybinding list}.
[ "Return", "dict", "{", "configurable", "extensionName", "event", ":", "keybinding", "list", "}", "." ]
def __GetRawExtensionKeys(self,extensionName): """Return dict {configurable extensionName event : keybinding list}. Events come from default config extension_cfgBindings section. Keybinding lists come from the splitting of GetOption, which tries user config before default config. """ keysName = extensionName+'_cfgBindings' extKeys = {} if self.defaultCfg['extensions'].has_section(keysName): eventNames = self.defaultCfg['extensions'].GetOptionList(keysName) for eventName in eventNames: binding = self.GetOption( 'extensions', keysName, eventName, default='').split() event = '<<' + eventName + '>>' extKeys[event] = binding return extKeys
[ "def", "__GetRawExtensionKeys", "(", "self", ",", "extensionName", ")", ":", "keysName", "=", "extensionName", "+", "'_cfgBindings'", "extKeys", "=", "{", "}", "if", "self", ".", "defaultCfg", "[", "'extensions'", "]", ".", "has_section", "(", "keysName", ")", ":", "eventNames", "=", "self", ".", "defaultCfg", "[", "'extensions'", "]", ".", "GetOptionList", "(", "keysName", ")", "for", "eventName", "in", "eventNames", ":", "binding", "=", "self", ".", "GetOption", "(", "'extensions'", ",", "keysName", ",", "eventName", ",", "default", "=", "''", ")", ".", "split", "(", ")", "event", "=", "'<<'", "+", "eventName", "+", "'>>'", "extKeys", "[", "event", "]", "=", "binding", "return", "extKeys" ]
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/idlelib/config.py#L489-L505
IBM/lale
b4d6829c143a4735b06083a0e6c70d2cca244162
lale/lib/lale/observing.py
python
observe
(f)
return wrapper
[]
def observe(f): @wraps(f) def wrapper(self, *args, **kwds): name = f.__name__ self.startObserving(name, *args, **kwds) try: ret = f(self, *args, **kwds) self.endObserving(name, ret) except BaseException as e: self.failObserving(name, e) raise return ret return wrapper
[ "def", "observe", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "name", "=", "f", ".", "__name__", "self", ".", "startObserving", "(", "name", ",", "*", "args", ",", "*", "*", "kwds", ")", "try", ":", "ret", "=", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", "self", ".", "endObserving", "(", "name", ",", "ret", ")", "except", "BaseException", "as", "e", ":", "self", ".", "failObserving", "(", "name", ",", "e", ")", "raise", "return", "ret", "return", "wrapper" ]
https://github.com/IBM/lale/blob/b4d6829c143a4735b06083a0e6c70d2cca244162/lale/lib/lale/observing.py#L27-L40
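The decorator expects the wrapped method's instance to expose startObserving/endObserving/failObserving hooks. A runnable sketch reusing the decorator above verbatim on an invented host class:

from functools import wraps

def observe(f):  # copied from the function above
    @wraps(f)
    def wrapper(self, *args, **kwds):
        name = f.__name__
        self.startObserving(name, *args, **kwds)
        try:
            ret = f(self, *args, **kwds)
            self.endObserving(name, ret)
        except BaseException as e:
            self.failObserving(name, e)
            raise
        return ret
    return wrapper

class Trainer:  # toy host providing the three hooks
    def startObserving(self, name, *a, **k): print('start', name, a)
    def endObserving(self, name, ret): print('end', name, '->', ret)
    def failObserving(self, name, e): print('fail', name, e)

    @observe
    def fit(self, x):
        return x * 2

Trainer().fit(21)  # prints: start fit (21,) / end fit -> 42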
emmetio/livestyle-sublime-old
c42833c046e9b2f53ebce3df3aa926528f5a33b5
tornado/web.py
python
TemplateModule._get_resources
(self, key)
return (r[key] for r in self._resource_list if key in r)
[]
def _get_resources(self, key): return (r[key] for r in self._resource_list if key in r)
[ "def", "_get_resources", "(", "self", ",", "key", ")", ":", "return", "(", "r", "[", "key", "]", "for", "r", "in", "self", ".", "_resource_list", "if", "key", "in", "r", ")" ]
https://github.com/emmetio/livestyle-sublime-old/blob/c42833c046e9b2f53ebce3df3aa926528f5a33b5/tornado/web.py#L2388-L2389
pcyin/NL2code
f9732f1f5caafa73a0f767cc4f5ce9f5961c46d6
lang/py/parse.py
python
parse
(code)
return tree
parse a python code into a tree structure code -> AST tree -> AST tree to internal tree structure
parse a python code into a tree structure code -> AST tree -> AST tree to internal tree structure
[ "parse", "a", "python", "code", "into", "a", "tree", "structure", "code", "-", ">", "AST", "tree", "-", ">", "AST", "tree", "to", "internal", "tree", "structure" ]
def parse(code): """ parse a python code into a tree structure code -> AST tree -> AST tree to internal tree structure """ code = canonicalize_code(code) py_ast = ast.parse(code) tree = python_ast_to_parse_tree(py_ast.body[0]) tree = add_root(tree) return tree
[ "def", "parse", "(", "code", ")", ":", "code", "=", "canonicalize_code", "(", "code", ")", "py_ast", "=", "ast", ".", "parse", "(", "code", ")", "tree", "=", "python_ast_to_parse_tree", "(", "py_ast", ".", "body", "[", "0", "]", ")", "tree", "=", "add_root", "(", "tree", ")", "return", "tree" ]
https://github.com/pcyin/NL2code/blob/f9732f1f5caafa73a0f767cc4f5ce9f5961c46d6/lang/py/parse.py#L254-L267
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/lockfile/symlinklockfile.py
python
SymlinkLockFile.release
(self)
[]
def release(self): if not self.is_locked(): raise NotLocked("%s is not locked" % self.path) elif not self.i_am_locking(): raise NotMyLock("%s is locked, but not by me" % self.path) os.unlink(self.lock_file)
[ "def", "release", "(", "self", ")", ":", "if", "not", "self", ".", "is_locked", "(", ")", ":", "raise", "NotLocked", "(", "\"%s is not locked\"", "%", "self", ".", "path", ")", "elif", "not", "self", ".", "i_am_locking", "(", ")", ":", "raise", "NotMyLock", "(", "\"%s is locked, but not by me\"", "%", "self", ".", "path", ")", "os", ".", "unlink", "(", "self", ".", "lock_file", ")" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/lockfile/symlinklockfile.py#L54-L59
wbond/asn1crypto
9ae350f212532dfee7f185f6b3eda24753249cf3
asn1crypto/core.py
python
OctetString.set
(self, value)
Sets the value of the object :param value: A byte string
Sets the value of the object
[ "Sets", "the", "value", "of", "the", "object" ]
def set(self, value): """ Sets the value of the object :param value: A byte string """ if not isinstance(value, byte_cls): raise TypeError(unwrap( ''' %s value must be a byte string, not %s ''', type_name(self), type_name(value) )) self._bytes = value self.contents = value self._header = None if self._indefinite: self._indefinite = False self.method = 0 if self._trailer != b'': self._trailer = b''
[ "def", "set", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n %s value must be a byte string, not %s\n '''", ",", "type_name", "(", "self", ")", ",", "type_name", "(", "value", ")", ")", ")", "self", ".", "_bytes", "=", "value", "self", ".", "contents", "=", "value", "self", ".", "_header", "=", "None", "if", "self", ".", "_indefinite", ":", "self", ".", "_indefinite", "=", "False", "self", ".", "method", "=", "0", "if", "self", ".", "_trailer", "!=", "b''", ":", "self", ".", "_trailer", "=", "b''" ]
https://github.com/wbond/asn1crypto/blob/9ae350f212532dfee7f185f6b3eda24753249cf3/asn1crypto/core.py#L2565-L2589
eriklindernoren/PyTorch-GAN
36d3c77e5ff20ebe0aeefd322326a134a279b93e
implementations/munit/models.py
python
MultiDiscriminator.compute_loss
(self, x, gt)
return loss
Computes the MSE between model output and scalar gt
Computes the MSE between model output and scalar gt
[ "Computes", "the", "MSE", "between", "model", "output", "and", "scalar", "gt" ]
def compute_loss(self, x, gt): """Computes the MSE between model output and scalar gt""" loss = sum([torch.mean((out - gt) ** 2) for out in self.forward(x)]) return loss
[ "def", "compute_loss", "(", "self", ",", "x", ",", "gt", ")", ":", "loss", "=", "sum", "(", "[", "torch", ".", "mean", "(", "(", "out", "-", "gt", ")", "**", "2", ")", "for", "out", "in", "self", ".", "forward", "(", "x", ")", "]", ")", "return", "loss" ]
https://github.com/eriklindernoren/PyTorch-GAN/blob/36d3c77e5ff20ebe0aeefd322326a134a279b93e/implementations/munit/models.py#L225-L228
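A rough torch sketch of the multi-scale least-squares loss with dummy discriminator outputs (the shapes and the scalar target are illustrative):

import torch

def multi_scale_mse_loss(outputs, gt):
    # Sum of per-scale mean squared errors against a scalar target.
    return sum(torch.mean((out - gt) ** 2) for out in outputs)

# Two fake discriminator output maps at different resolutions.
outputs = [torch.zeros(1, 1, 8, 8), torch.zeros(1, 1, 4, 4)]
print(multi_scale_mse_loss(outputs, gt=1.0))  # tensor(2.) -- each scale contributes 1.0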
GraylinKim/sc2reader
d69feb4e0be597581040588193579d29e8241431
sc2reader/decoders.py
python
ByteDecoder.__init__
(self, contents, endian)
Accepts both strings and files implementing ``read()`` and decodes them in the specified endian format.
Accepts both strings and files implementing ``read()`` and decodes them in the specified endian format.
[ "Accepts", "both", "strings", "and", "files", "implementing", "read", "()", "and", "decodes", "them", "in", "the", "specified", "endian", "format", "." ]
def __init__(self, contents, endian): """ Accepts both strings and files implementing ``read()`` and decodes them in the specified endian format. """ if hasattr(contents, 'read'): self._contents = contents.read() else: self._contents = contents self._buffer = BytesIO(self._contents) self.length = len(self._contents) # Expose the basic BytesIO interface self.read = self._buffer.read self.seek = self._buffer.seek self.tell = self._buffer.tell # decode the endian value if necessary self.endian = endian.lower() if self.endian.lower() == 'little': self.endian = "<" elif self.endian.lower() == 'big': self.endian = ">" elif self.endian not in ('<', '>'): raise ValueError("Endian must be one of 'little', '<', 'big', or '>' but was: "+self.endian) # Pre-compiling self._unpack_int = struct.Struct(str(self.endian+'I')).unpack self._unpack_short = struct.Struct(str(self.endian+'H')).unpack self._unpack_longlong = struct.Struct(str(self.endian+'Q')).unpack self._unpack_bytes = lambda bytes: bytes if self.endian == '>' else bytes[::-1]
[ "def", "__init__", "(", "self", ",", "contents", ",", "endian", ")", ":", "if", "hasattr", "(", "contents", ",", "'read'", ")", ":", "self", ".", "_contents", "=", "contents", ".", "read", "(", ")", "else", ":", "self", ".", "_contents", "=", "contents", "self", ".", "_buffer", "=", "BytesIO", "(", "self", ".", "_contents", ")", "self", ".", "length", "=", "len", "(", "self", ".", "_contents", ")", "# Expose the basic BytesIO interface", "self", ".", "read", "=", "self", ".", "_buffer", ".", "read", "self", ".", "seek", "=", "self", ".", "_buffer", ".", "seek", "self", ".", "tell", "=", "self", ".", "_buffer", ".", "tell", "# decode the endian value if necessary", "self", ".", "endian", "=", "endian", ".", "lower", "(", ")", "if", "self", ".", "endian", ".", "lower", "(", ")", "==", "'little'", ":", "self", ".", "endian", "=", "\"<\"", "elif", "self", ".", "endian", ".", "lower", "(", ")", "==", "'big'", ":", "self", ".", "endian", "=", "\">\"", "elif", "self", ".", "endian", "not", "in", "(", "'<'", ",", "'>'", ")", ":", "raise", "ValueError", "(", "\"Endian must be one of 'little', '<', 'big', or '>' but was: \"", "+", "self", ".", "endian", ")", "# Pre-compiling", "self", ".", "_unpack_int", "=", "struct", ".", "Struct", "(", "str", "(", "self", ".", "endian", "+", "'I'", ")", ")", ".", "unpack", "self", ".", "_unpack_short", "=", "struct", ".", "Struct", "(", "str", "(", "self", ".", "endian", "+", "'H'", ")", ")", ".", "unpack", "self", ".", "_unpack_longlong", "=", "struct", ".", "Struct", "(", "str", "(", "self", ".", "endian", "+", "'Q'", ")", ")", ".", "unpack", "self", ".", "_unpack_bytes", "=", "lambda", "bytes", ":", "bytes", "if", "self", ".", "endian", "==", "'>'", "else", "bytes", "[", ":", ":", "-", "1", "]" ]
https://github.com/GraylinKim/sc2reader/blob/d69feb4e0be597581040588193579d29e8241431/sc2reader/decoders.py#L33-L63
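The pre-compiled unpackers reduce to struct format strings whose first character is the endian marker; the sample bytes below are arbitrary:

import struct

data = b'\x01\x00\x00\x00'
print(struct.Struct('<I').unpack(data))  # (1,)        little-endian uint32
print(struct.Struct('>I').unpack(data))  # (16777216,) big-endian uint32

# _unpack_bytes reverses byte order for little-endian sources:
endian = '<'
raw = b'\xde\xad\xbe\xef'
print(raw if endian == '>' else raw[::-1])  # b'\xef\xbe\xad\xde'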
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/apis/authorization_v1beta1_api.py
python
AuthorizationV1beta1Api.create_subject_access_review_with_http_info
(self, body, **kwargs)
return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1SubjectAccessReview', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
create a SubjectAccessReview This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_subject_access_review_with_http_info(body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param V1beta1SubjectAccessReview body: (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1beta1SubjectAccessReview If the method is called asynchronously, returns the request thread.
create a SubjectAccessReview This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_subject_access_review_with_http_info(body, callback=callback_function)
[ "create", "a", "SubjectAccessReview", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "create_subject_access_review_with_http_info", "(", "body", "callback", "=", "callback_function", ")" ]
def create_subject_access_review_with_http_info(self, body, **kwargs): """ create a SubjectAccessReview This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_subject_access_review_with_http_info(body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param V1beta1SubjectAccessReview body: (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1beta1SubjectAccessReview If the method is called asynchronously, returns the request thread. """ all_params = ['body', 'pretty'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_subject_access_review" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_subject_access_review`") collection_formats = {} resource_path = '/apis/authorization.k8s.io/v1beta1/subjectaccessreviews'.replace('{format}', 'json') path_params = {} query_params = {} if 'pretty' in params: query_params['pretty'] = params['pretty'] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1SubjectAccessReview', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
[ "def", "create_subject_access_review_with_http_info", "(", "self", ",", "body", ",", "*", "*", "kwargs", ")", ":", "all_params", "=", "[", "'body'", ",", "'pretty'", "]", "all_params", ".", "append", "(", "'callback'", ")", "all_params", ".", "append", "(", "'_return_http_data_only'", ")", "all_params", ".", "append", "(", "'_preload_content'", ")", "all_params", ".", "append", "(", "'_request_timeout'", ")", "params", "=", "locals", "(", ")", "for", "key", ",", "val", "in", "iteritems", "(", "params", "[", "'kwargs'", "]", ")", ":", "if", "key", "not", "in", "all_params", ":", "raise", "TypeError", "(", "\"Got an unexpected keyword argument '%s'\"", "\" to method create_subject_access_review\"", "%", "key", ")", "params", "[", "key", "]", "=", "val", "del", "params", "[", "'kwargs'", "]", "# verify the required parameter 'body' is set", "if", "(", "'body'", "not", "in", "params", ")", "or", "(", "params", "[", "'body'", "]", "is", "None", ")", ":", "raise", "ValueError", "(", "\"Missing the required parameter `body` when calling `create_subject_access_review`\"", ")", "collection_formats", "=", "{", "}", "resource_path", "=", "'/apis/authorization.k8s.io/v1beta1/subjectaccessreviews'", ".", "replace", "(", "'{format}'", ",", "'json'", ")", "path_params", "=", "{", "}", "query_params", "=", "{", "}", "if", "'pretty'", "in", "params", ":", "query_params", "[", "'pretty'", "]", "=", "params", "[", "'pretty'", "]", "header_params", "=", "{", "}", "form_params", "=", "[", "]", "local_var_files", "=", "{", "}", "body_params", "=", "None", "if", "'body'", "in", "params", ":", "body_params", "=", "params", "[", "'body'", "]", "# HTTP header `Accept`", "header_params", "[", "'Accept'", "]", "=", "self", ".", "api_client", ".", "select_header_accept", "(", "[", "'application/json'", ",", "'application/yaml'", ",", "'application/vnd.kubernetes.protobuf'", "]", ")", "# HTTP header `Content-Type`", "header_params", "[", "'Content-Type'", "]", "=", "self", ".", "api_client", ".", "select_header_content_type", "(", "[", "'*/*'", "]", ")", "# Authentication setting", "auth_settings", "=", "[", "'BearerToken'", "]", "return", "self", ".", "api_client", ".", "call_api", "(", "resource_path", ",", "'POST'", ",", "path_params", ",", "query_params", ",", "header_params", ",", "body", "=", "body_params", ",", "post_params", "=", "form_params", ",", "files", "=", "local_var_files", ",", "response_type", "=", "'V1beta1SubjectAccessReview'", ",", "auth_settings", "=", "auth_settings", ",", "callback", "=", "params", ".", "get", "(", "'callback'", ")", ",", "_return_http_data_only", "=", "params", ".", "get", "(", "'_return_http_data_only'", ")", ",", "_preload_content", "=", "params", ".", "get", "(", "'_preload_content'", ",", "True", ")", ",", "_request_timeout", "=", "params", ".", "get", "(", "'_request_timeout'", ")", ",", "collection_formats", "=", "collection_formats", ")" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/apis/authorization_v1beta1_api.py#L294-L375
hyperspy/hyperspy
1ffb3fab33e607045a37f30c1463350b72617e10
hyperspy/events.py
python
Events.__delattr__
(self, name)
Magic to enable having `Event`s as attributes, and keeping them separate from other attributes. Deletes attribute from self._events if present, otherwise delete attribute in normal way.
Magic to enable having `Event`s as attributes, and keeping them separate from other attributes.
[ "Magic", "to", "enable", "having", "Event", "s", "as", "attributes", "and", "keeping", "them", "separate", "from", "other", "attributes", "." ]
def __delattr__(self, name): """ Magic to enable having `Event`s as attributes, and keeping them separate from other attributes. Deletes attribute from self._events if present, otherwise delete attribute in normal way. """ if name in self._events: del self._events[name] self._update_doc() else: super(Events, self).__delattr__(name)
[ "def", "__delattr__", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_events", ":", "del", "self", ".", "_events", "[", "name", "]", "self", ".", "_update_doc", "(", ")", "else", ":", "super", "(", "Events", ",", "self", ")", ".", "__delattr__", "(", "name", ")" ]
https://github.com/hyperspy/hyperspy/blob/1ffb3fab33e607045a37f30c1463350b72617e10/hyperspy/events.py#L109-L121
google/turbinia
4559d4af9267762a8995cfc415099a80a22ee4d3
turbinia/workers/analysis/wordpress_creds.py
python
WordpressCredsAnalysisTask._analyse_wordpress_creds
(self, creds, hashnames, timeout=300)
return (report, priority, summary)
Attempt to brute force extracted Wordpress credentials. Args: creds (list): List of strings containing raw extracted credentials hashnames (dict): Dict mapping hash back to username for convenience. timeout (int): How long to spend cracking. Returns: Tuple( report_text(str): The report data report_priority(int): The priority of the report (0 - 100) summary(str): A summary of the report (used for task status) )
Attempt to brute force extracted Wordpress credentials.
[ "Attempt", "to", "brute", "force", "extracted", "Wordpress", "credentials", "." ]
def _analyse_wordpress_creds(self, creds, hashnames, timeout=300): """Attempt to brute force extracted Wordpress credentials. Args: creds (list): List of strings containing raw extracted credentials hashnames (dict): Dict mapping hash back to username for convenience. timeout (int): How long to spend cracking. Returns: Tuple( report_text(str): The report data report_priority(int): The priority of the report (0 - 100) summary(str): A summary of the report (used for task status) ) """ report = [] summary = 'No weak passwords found' priority = Priority.LOW # hashcat mode 400 is "phpass" weak_passwords = bruteforce_password_hashes( creds, tmp_dir=self.tmp_dir, timeout=timeout, extra_args='--username -m 400') if weak_passwords: priority = Priority.CRITICAL summary = 'Wordpress analysis found {0:d} weak password(s)'.format( len(weak_passwords)) report.insert(0, fmt.heading4(fmt.bold(summary))) line = '{0:n} weak password(s) found:'.format(len(weak_passwords)) report.append(fmt.bullet(fmt.bold(line))) for password_hash, plaintext in weak_passwords: if password_hash in hashnames: line = """User '{0:s}' with password '{1:s}'""".format( hashnames[password_hash], plaintext) report.append(fmt.bullet(line, level=2)) report = '\n'.join(report) return (report, priority, summary)
[ "def", "_analyse_wordpress_creds", "(", "self", ",", "creds", ",", "hashnames", ",", "timeout", "=", "300", ")", ":", "report", "=", "[", "]", "summary", "=", "'No weak passwords found'", "priority", "=", "Priority", ".", "LOW", "# 1000 is \"phpass\"", "weak_passwords", "=", "bruteforce_password_hashes", "(", "creds", ",", "tmp_dir", "=", "self", ".", "tmp_dir", ",", "timeout", "=", "timeout", ",", "extra_args", "=", "'--username -m 400'", ")", "if", "weak_passwords", ":", "priority", "=", "Priority", ".", "CRITICAL", "summary", "=", "'Wordpress analysis found {0:d} weak password(s)'", ".", "format", "(", "len", "(", "weak_passwords", ")", ")", "report", ".", "insert", "(", "0", ",", "fmt", ".", "heading4", "(", "fmt", ".", "bold", "(", "summary", ")", ")", ")", "line", "=", "'{0:n} weak password(s) found:'", ".", "format", "(", "len", "(", "weak_passwords", ")", ")", "report", ".", "append", "(", "fmt", ".", "bullet", "(", "fmt", ".", "bold", "(", "line", ")", ")", ")", "for", "password_hash", ",", "plaintext", "in", "weak_passwords", ":", "if", "password_hash", "in", "hashnames", ":", "line", "=", "\"\"\"User '{0:s}' with password '{1:s}'\"\"\"", ".", "format", "(", "hashnames", "[", "password_hash", "]", ",", "plaintext", ")", "report", ".", "append", "(", "fmt", ".", "bullet", "(", "line", ",", "level", "=", "2", ")", ")", "report", "=", "'\\n'", ".", "join", "(", "report", ")", "return", "(", "report", ",", "priority", ",", "summary", ")" ]
https://github.com/google/turbinia/blob/4559d4af9267762a8995cfc415099a80a22ee4d3/turbinia/workers/analysis/wordpress_creds.py#L164-L201
isce-framework/isce2
0e5114a8bede3caf1d533d98e44dfe4b983e3f48
contrib/stack/topsStack/VRTManager.py
python
VRTConstructor.addBurst
(self, burst, infile, yoff, xoff, band=1, validOnly=True)
Add one burst to the VRT.
Add one burst to the VRT.
[ "Add", "one", "burst", "to", "the", "VRT", "." ]
def addBurst(self, burst, infile, yoff, xoff, band=1, validOnly=True): ''' Add one burst to the VRT. ''' tysize = burst.numberOfLines txsize = burst.numberOfSamples if validOnly: tyoff = int(burst.firstValidLine) txoff = int(burst.firstValidSample) wysize = int(burst.numValidLines) wxsize = int(burst.numValidSamples) fyoff = int(yoff + burst.firstValidLine) fxoff = int(xoff + burst.firstValidSample) else: tyoff = 0 txoff = 0 wysize = tysize wxsize = txsize fyoff = int(yoff) fxoff = int(xoff) tmpl = ''' <SimpleSource> <SourceFilename relativeToVRT="1">{tiff}</SourceFilename> <SourceBand>{band}</SourceBand> <SourceProperties RasterXSize="{txsize}" RasterYSize="{tysize}" DataType="{dtype}"/> <SrcRect xOff="{txoff}" yOff="{tyoff}" xSize="{wxsize}" ySize="{wysize}"/> <DstRect xOff="{fxoff}" yOff="{fyoff}" xSize="{wxsize}" ySize="{wysize}"/> </SimpleSource> ''' self.vrt += tmpl.format( tyoff=tyoff, txoff=txoff, fyoff=fyoff, fxoff=fxoff, wxsize=wxsize, wysize=wysize, tiff=infile+'.vrt', dtype=self.dtype, tysize=tysize, txsize=txsize, band=band)
[ "def", "addBurst", "(", "self", ",", "burst", ",", "infile", ",", "yoff", ",", "xoff", ",", "band", "=", "1", ",", "validOnly", "=", "True", ")", ":", "tysize", "=", "burst", ".", "numberOfLines", "txsize", "=", "burst", ".", "numberOfSamples", "if", "validOnly", ":", "tyoff", "=", "int", "(", "burst", ".", "firstValidLine", ")", "txoff", "=", "int", "(", "burst", ".", "firstValidSample", ")", "wysize", "=", "int", "(", "burst", ".", "numValidLines", ")", "wxsize", "=", "int", "(", "burst", ".", "numValidSamples", ")", "fyoff", "=", "int", "(", "yoff", "+", "burst", ".", "firstValidLine", ")", "fxoff", "=", "int", "(", "xoff", "+", "burst", ".", "firstValidSample", ")", "else", ":", "tyoff", "=", "0", "txoff", "=", "0", "wysize", "=", "tysize", "wxsize", "=", "txsize", "fyoff", "=", "int", "(", "yoff", ")", "fxoff", "=", "int", "(", "xoff", ")", "tmpl", "=", "''' <SimpleSource>\n <SourceFilename relativeToVRT=\"1\">{tiff}</SourceFilename>\n <SourceBand>{band}</SourceBand>\n <SourceProperties RasterXSize=\"{txsize}\" RasterYSize=\"{tysize}\" DataType=\"{dtype}\"/>\n <SrcRect xOff=\"{txoff}\" yOff=\"{tyoff}\" xSize=\"{wxsize}\" ySize=\"{wysize}\"/>\n <DstRect xOff=\"{fxoff}\" yOff=\"{fyoff}\" xSize=\"{wxsize}\" ySize=\"{wysize}\"/>\n </SimpleSource>\n'''", "self", ".", "vrt", "+=", "tmpl", ".", "format", "(", "tyoff", "=", "tyoff", ",", "txoff", "=", "txoff", ",", "fyoff", "=", "fyoff", ",", "fxoff", "=", "fxoff", ",", "wxsize", "=", "wxsize", ",", "wysize", "=", "wysize", ",", "tiff", "=", "infile", "+", "'.vrt'", ",", "dtype", "=", "self", ".", "dtype", ",", "tysize", "=", "tysize", ",", "txsize", "=", "txsize", ",", "band", "=", "band", ")" ]
https://github.com/isce-framework/isce2/blob/0e5114a8bede3caf1d533d98e44dfe4b983e3f48/contrib/stack/topsStack/VRTManager.py#L165-L204
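For illustration, a minimal sketch of how the SimpleSource template above is filled in; the burst geometry numbers and the file name below are invented, not taken from a real Sentinel-1 product.

tmpl = ''' <SimpleSource>
      <SourceFilename relativeToVRT="1">{tiff}</SourceFilename>
      <SourceBand>{band}</SourceBand>
      <SourceProperties RasterXSize="{txsize}" RasterYSize="{tysize}" DataType="{dtype}"/>
      <SrcRect xOff="{txoff}" yOff="{tyoff}" xSize="{wxsize}" ySize="{wysize}"/>
      <DstRect xOff="{fxoff}" yOff="{fyoff}" xSize="{wxsize}" ySize="{wysize}"/>
    </SimpleSource>
'''
# Hypothetical burst: 1500 x 21000 pixels with the valid region starting at
# (20, 90); the burst is pasted into the mosaic at row offset 1350.
print(tmpl.format(tiff='burst_01.slc.vrt', band=1, dtype='CFloat32',
                  tysize=1500, txsize=21000, tyoff=20, txoff=90,
                  wysize=1460, wxsize=20800, fyoff=1370, fxoff=90))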
rwth-i6/returnn
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
returnn/util/basic.py
python
hdf5_dimension
(filename, dimension)
return res
:param str filename: :param str dimension: :rtype: numpy.ndarray|int
:param str filename: :param str dimension: :rtype: numpy.ndarray|int
[ ":", "param", "str", "filename", ":", ":", "param", "str", "dimension", ":", ":", "rtype", ":", "numpy", ".", "ndarray|int" ]
def hdf5_dimension(filename, dimension): """ :param str filename: :param str dimension: :rtype: numpy.ndarray|int """ fin = h5py.File(filename, "r") if '/' in dimension: res = fin['/'.join(dimension.split('/')[:-1])].attrs[dimension.split('/')[-1]] else: res = fin.attrs[dimension] fin.close() return res
[ "def", "hdf5_dimension", "(", "filename", ",", "dimension", ")", ":", "fin", "=", "h5py", ".", "File", "(", "filename", ",", "\"r\"", ")", "if", "'/'", "in", "dimension", ":", "res", "=", "fin", "[", "'/'", ".", "join", "(", "dimension", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", ")", "]", ".", "attrs", "[", "dimension", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "]", "else", ":", "res", "=", "fin", ".", "attrs", "[", "dimension", "]", "fin", ".", "close", "(", ")", "return", "res" ]
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/util/basic.py#L526-L538
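A small usage sketch; the file and attribute names here are invented. The helper treats everything before the last '/' as a group path and the final component as an HDF5 attribute name.

import h5py

with h5py.File('toy.hdf', 'w') as fout:
    fout.attrs['numSeqs'] = 1000                             # root-level attribute
    fout.create_group('inputs').attrs['inputPattSize'] = 40  # nested attribute

print(hdf5_dimension('toy.hdf', 'numSeqs'))               # -> 1000
print(hdf5_dimension('toy.hdf', 'inputs/inputPattSize'))  # -> 40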
spyder-ide/spyder
55da47c032dfcf519600f67f8b30eab467f965e7
spyder/widgets/reporterror.py
python
DescriptionWidget.keyPressEvent
(self, event)
Reimplemented Qt Method to avoid removing the header.
Reimplemented Qt Method to avoid removing the header.
[ "Reimplemented", "Qt", "Method", "to", "avoid", "removing", "the", "header", "." ]
def keyPressEvent(self, event): """Reimplemented Qt Method to avoid removing the header.""" event, text, key, ctrl, shift = restore_keyevent(event) cursor_position = self.get_position('cursor') if cursor_position < self.header_end_pos: self.restrict_cursor_position(self.header_end_pos, 'eof') elif key == Qt.Key_Backspace: if self.has_selected_text(): self.remove_text() elif self.header_end_pos == cursor_position: return else: self.stdkey_backspace() elif key == Qt.Key_X and ctrl: self.cut() else: super().keyPressEvent(event)
[ "def", "keyPressEvent", "(", "self", ",", "event", ")", ":", "event", ",", "text", ",", "key", ",", "ctrl", ",", "shift", "=", "restore_keyevent", "(", "event", ")", "cursor_position", "=", "self", ".", "get_position", "(", "'cursor'", ")", "if", "cursor_position", "<", "self", ".", "header_end_pos", ":", "self", ".", "restrict_cursor_position", "(", "self", ".", "header_end_pos", ",", "'eof'", ")", "elif", "key", "==", "Qt", ".", "Key_Backspace", ":", "if", "self", ".", "has_selected_text", "(", ")", ":", "self", ".", "remove_text", "(", ")", "elif", "self", ".", "header_end_pos", "==", "cursor_position", ":", "return", "else", ":", "self", ".", "stdkey_backspace", "(", ")", "elif", "key", "==", "Qt", ".", "Key_X", "and", "ctrl", ":", "self", ".", "cut", "(", ")", "else", ":", "super", "(", ")", ".", "keyPressEvent", "(", "event", ")" ]
https://github.com/spyder-ide/spyder/blob/55da47c032dfcf519600f67f8b30eab467f965e7/spyder/widgets/reporterror.py#L76-L93
pandas-dev/pandas
5ba7d714014ae8feaccc0dd4a98890828cf2832d
pandas/core/indexes/datetimes.py
python
bdate_range
( start=None, end=None, periods: int | None = None, freq="B", tz=None, normalize: bool = True, name: Hashable = None, weekmask=None, holidays=None, closed: lib.NoDefault = lib.no_default, inclusive: str | None = None, **kwargs, )
return date_range( start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, closed=closed, inclusive=inclusive, **kwargs, )
Return a fixed frequency DatetimeIndex, with business day as the default frequency. Parameters ---------- start : str or datetime-like, default None Left bound for generating dates. end : str or datetime-like, default None Right bound for generating dates. periods : int, default None Number of periods to generate. freq : str or DateOffset, default 'B' (business daily) Frequency strings can have multiples, e.g. '5H'. tz : str or None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. weekmask : str or None, default None Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. holidays : list-like or None, default None Dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. closed : str, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None). .. deprecated:: 1.4.0 Argument `closed` has been deprecated to standardize boundary inputs. Use `inclusive` instead, to set each bound as closed or open. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex Notes ----- Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. Specifying ``freq`` is a requirement for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not desired. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Note how the two weekend days are skipped in the result. >>> pd.bdate_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-08'], dtype='datetime64[ns]', freq='B')
Return a fixed frequency DatetimeIndex, with business day as the default frequency.
[ "Return", "a", "fixed", "frequency", "DatetimeIndex", "with", "business", "day", "as", "the", "default", "frequency", "." ]
def bdate_range( start=None, end=None, periods: int | None = None, freq="B", tz=None, normalize: bool = True, name: Hashable = None, weekmask=None, holidays=None, closed: lib.NoDefault = lib.no_default, inclusive: str | None = None, **kwargs, ) -> DatetimeIndex: """ Return a fixed frequency DatetimeIndex, with business day as the default frequency. Parameters ---------- start : str or datetime-like, default None Left bound for generating dates. end : str or datetime-like, default None Right bound for generating dates. periods : int, default None Number of periods to generate. freq : str or DateOffset, default 'B' (business daily) Frequency strings can have multiples, e.g. '5H'. tz : str or None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. weekmask : str or None, default None Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. holidays : list-like or None, default None Dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. closed : str, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None). .. deprecated:: 1.4.0 Argument `closed` has been deprecated to standardize boundary inputs. Use `inclusive` instead, to set each bound as closed or open. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex Notes ----- Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. Specifying ``freq`` is a requirement for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not desired. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Note how the two weekend days are skipped in the result. >>> pd.bdate_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-08'], dtype='datetime64[ns]', freq='B') """ if freq is None: msg = "freq must be specified for bdate_range; use date_range instead" raise TypeError(msg) if isinstance(freq, str) and freq.startswith("C"): try: weekmask = weekmask or "Mon Tue Wed Thu Fri" freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask) except (KeyError, TypeError) as err: msg = f"invalid custom frequency string: {freq}" raise ValueError(msg) from err elif holidays or weekmask: msg = ( "a custom frequency string is required when holidays or " f"weekmask are passed, got frequency {freq}" ) raise ValueError(msg) return date_range( start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, closed=closed, inclusive=inclusive, **kwargs, )
[ "def", "bdate_range", "(", "start", "=", "None", ",", "end", "=", "None", ",", "periods", ":", "int", "|", "None", "=", "None", ",", "freq", "=", "\"B\"", ",", "tz", "=", "None", ",", "normalize", ":", "bool", "=", "True", ",", "name", ":", "Hashable", "=", "None", ",", "weekmask", "=", "None", ",", "holidays", "=", "None", ",", "closed", ":", "lib", ".", "NoDefault", "=", "lib", ".", "no_default", ",", "inclusive", ":", "str", "|", "None", "=", "None", ",", "*", "*", "kwargs", ",", ")", "->", "DatetimeIndex", ":", "if", "freq", "is", "None", ":", "msg", "=", "\"freq must be specified for bdate_range; use date_range instead\"", "raise", "TypeError", "(", "msg", ")", "if", "isinstance", "(", "freq", ",", "str", ")", "and", "freq", ".", "startswith", "(", "\"C\"", ")", ":", "try", ":", "weekmask", "=", "weekmask", "or", "\"Mon Tue Wed Thu Fri\"", "freq", "=", "prefix_mapping", "[", "freq", "]", "(", "holidays", "=", "holidays", ",", "weekmask", "=", "weekmask", ")", "except", "(", "KeyError", ",", "TypeError", ")", "as", "err", ":", "msg", "=", "f\"invalid custom frequency string: {freq}\"", "raise", "ValueError", "(", "msg", ")", "from", "err", "elif", "holidays", "or", "weekmask", ":", "msg", "=", "(", "\"a custom frequency string is required when holidays or \"", "f\"weekmask are passed, got frequency {freq}\"", ")", "raise", "ValueError", "(", "msg", ")", "return", "date_range", "(", "start", "=", "start", ",", "end", "=", "end", ",", "periods", "=", "periods", ",", "freq", "=", "freq", ",", "tz", "=", "tz", ",", "normalize", "=", "normalize", ",", "name", "=", "name", ",", "closed", "=", "closed", ",", "inclusive", "=", "inclusive", ",", "*", "*", "kwargs", ",", ")" ]
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/core/indexes/datetimes.py#L1083-L1192
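Since the weekmask/holidays branch only activates for custom frequency strings (those starting with 'C'), a typical call looks like the sketch below; the dates are arbitrary.

import pandas as pd

# 'C' selects CustomBusinessDay, which makes weekmask/holidays legal arguments.
idx = pd.bdate_range(start='2018-01-01', end='2018-01-12', freq='C',
                     weekmask='Mon Wed Fri', holidays=['2018-01-05'])
# Mondays, Wednesdays and Fridays in the range, minus the Jan 5 holiday.
# With the default freq='B', passing holidays or weekmask raises ValueError.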
learnables/learn2learn
c664e3c8643efb1cc1cf7bb11668843e2ee8ebbe
learn2learn/vision/benchmarks/fc100_benchmark.py
python
fc100_tasksets
( train_ways=5, train_samples=10, test_ways=5, test_samples=10, root='~/data', device=None, **kwargs, )
return _datasets, _transforms
Tasksets for FC100 benchmarks.
Tasksets for FC100 benchmarks.
[ "Tasksets", "for", "FC100", "benchmarks", "." ]
def fc100_tasksets( train_ways=5, train_samples=10, test_ways=5, test_samples=10, root='~/data', device=None, **kwargs, ): """Tasksets for FC100 benchmarks.""" data_transform = tv.transforms.ToTensor() train_dataset = l2l.vision.datasets.FC100(root=root, transform=data_transform, mode='train', download=True) valid_dataset = l2l.vision.datasets.FC100(root=root, transform=data_transform, mode='validation', download=True) test_dataset = l2l.vision.datasets.FC100(root=root, transform=data_transform, mode='test', download=True) if device is not None: train_dataset = l2l.data.OnDeviceDataset( dataset=train_dataset, device=device, ) valid_dataset = l2l.data.OnDeviceDataset( dataset=valid_dataset, device=device, ) test_dataset = l2l.data.OnDeviceDataset( dataset=test_dataset, device=device, ) train_dataset = l2l.data.MetaDataset(train_dataset) valid_dataset = l2l.data.MetaDataset(valid_dataset) test_dataset = l2l.data.MetaDataset(test_dataset) train_transforms = [ NWays(train_dataset, train_ways), KShots(train_dataset, train_samples), LoadData(train_dataset), RemapLabels(train_dataset), ConsecutiveLabels(train_dataset), ] valid_transforms = [ NWays(valid_dataset, test_ways), KShots(valid_dataset, test_samples), LoadData(valid_dataset), ConsecutiveLabels(valid_dataset), RemapLabels(valid_dataset), ] test_transforms = [ NWays(test_dataset, test_ways), KShots(test_dataset, test_samples), LoadData(test_dataset), RemapLabels(test_dataset), ConsecutiveLabels(test_dataset), ] _datasets = (train_dataset, valid_dataset, test_dataset) _transforms = (train_transforms, valid_transforms, test_transforms) return _datasets, _transforms
[ "def", "fc100_tasksets", "(", "train_ways", "=", "5", ",", "train_samples", "=", "10", ",", "test_ways", "=", "5", ",", "test_samples", "=", "10", ",", "root", "=", "'~/data'", ",", "device", "=", "None", ",", "*", "*", "kwargs", ",", ")", ":", "data_transform", "=", "tv", ".", "transforms", ".", "ToTensor", "(", ")", "train_dataset", "=", "l2l", ".", "vision", ".", "datasets", ".", "FC100", "(", "root", "=", "root", ",", "transform", "=", "data_transform", ",", "mode", "=", "'train'", ",", "download", "=", "True", ")", "valid_dataset", "=", "l2l", ".", "vision", ".", "datasets", ".", "FC100", "(", "root", "=", "root", ",", "transform", "=", "data_transform", ",", "mode", "=", "'validation'", ",", "download", "=", "True", ")", "test_dataset", "=", "l2l", ".", "vision", ".", "datasets", ".", "FC100", "(", "root", "=", "root", ",", "transform", "=", "data_transform", ",", "mode", "=", "'test'", ",", "download", "=", "True", ")", "if", "device", "is", "not", "None", ":", "train_dataset", "=", "l2l", ".", "data", ".", "OnDeviceDataset", "(", "dataset", "=", "train_dataset", ",", "device", "=", "device", ",", ")", "valid_dataset", "=", "l2l", ".", "data", ".", "OnDeviceDataset", "(", "dataset", "=", "valid_dataset", ",", "device", "=", "device", ",", ")", "test_dataset", "=", "l2l", ".", "data", ".", "OnDeviceDataset", "(", "dataset", "=", "test_dataset", ",", "device", "=", "device", ",", ")", "train_dataset", "=", "l2l", ".", "data", ".", "MetaDataset", "(", "train_dataset", ")", "valid_dataset", "=", "l2l", ".", "data", ".", "MetaDataset", "(", "valid_dataset", ")", "test_dataset", "=", "l2l", ".", "data", ".", "MetaDataset", "(", "test_dataset", ")", "train_transforms", "=", "[", "NWays", "(", "train_dataset", ",", "train_ways", ")", ",", "KShots", "(", "train_dataset", ",", "train_samples", ")", ",", "LoadData", "(", "train_dataset", ")", ",", "RemapLabels", "(", "train_dataset", ")", ",", "ConsecutiveLabels", "(", "train_dataset", ")", ",", "]", "valid_transforms", "=", "[", "NWays", "(", "valid_dataset", ",", "test_ways", ")", ",", "KShots", "(", "valid_dataset", ",", "test_samples", ")", ",", "LoadData", "(", "valid_dataset", ")", ",", "ConsecutiveLabels", "(", "valid_dataset", ")", ",", "RemapLabels", "(", "valid_dataset", ")", ",", "]", "test_transforms", "=", "[", "NWays", "(", "test_dataset", ",", "test_ways", ")", ",", "KShots", "(", "test_dataset", ",", "test_samples", ")", ",", "LoadData", "(", "test_dataset", ")", ",", "RemapLabels", "(", "test_dataset", ")", ",", "ConsecutiveLabels", "(", "test_dataset", ")", ",", "]", "_datasets", "=", "(", "train_dataset", ",", "valid_dataset", ",", "test_dataset", ")", "_transforms", "=", "(", "train_transforms", ",", "valid_transforms", ",", "test_transforms", ")", "return", "_datasets", ",", "_transforms" ]
https://github.com/learnables/learn2learn/blob/c664e3c8643efb1cc1cf7bb11668843e2ee8ebbe/learn2learn/vision/benchmarks/fc100_benchmark.py#L9-L73
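The returned (datasets, transforms) pairs are meant to be wrapped in a TaskDataset; a minimal sketch, with an arbitrary num_tasks value.

import learn2learn as l2l

datasets, transforms = fc100_tasksets(train_ways=5, train_samples=10)
train_dataset, valid_dataset, test_dataset = datasets
train_transforms, valid_transforms, test_transforms = transforms

train_tasks = l2l.data.TaskDataset(train_dataset,
                                   task_transforms=train_transforms,
                                   num_tasks=20000)
X, y = train_tasks.sample()  # one 5-way, 10-shot classification task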
kupferlauncher/kupfer
1c1e9bcbce05a82f503f68f8b3955c20b02639b3
oldplugins/vim/plugin.py
python
stop_plugin_service
(plugin_id)
Return True if it was running and was stopped
Return True if it was running and was stopped
[ "Return", "True", "if", "it", "was", "running", "and", "was", "stopped" ]
def stop_plugin_service(plugin_id): """ Return True if it was running and was stopped """ plug_iface = get_plugin_service_obj(plugin_id, activate=False) if plug_iface: plug_iface.Exit(reply_handler=_dummy_handler, error_handler=_dummy_handler)
[ "def", "stop_plugin_service", "(", "plugin_id", ")", ":", "plug_iface", "=", "get_plugin_service_obj", "(", "plugin_id", ",", "activate", "=", "False", ")", "if", "plug_iface", ":", "plug_iface", ".", "Exit", "(", "reply_handler", "=", "_dummy_handler", ",", "error_handler", "=", "_dummy_handler", ")" ]
https://github.com/kupferlauncher/kupfer/blob/1c1e9bcbce05a82f503f68f8b3955c20b02639b3/oldplugins/vim/plugin.py#L132-L139
HuangYG123/CurricularFace
68c8727fb7cd2243ecbfd7e09c35efc87c6e2de4
backbone/model_irse.py
python
IR_101
(input_size)
return model
Constructs a ir-101 model.
Constructs a ir-101 model.
[ "Constructs", "a", "ir", "-", "101", "model", "." ]
def IR_101(input_size): """Constructs a ir-101 model. """ model = Backbone(input_size, 100, 'ir') return model
[ "def", "IR_101", "(", "input_size", ")", ":", "model", "=", "Backbone", "(", "input_size", ",", "100", ",", "'ir'", ")", "return", "model" ]
https://github.com/HuangYG123/CurricularFace/blob/68c8727fb7cd2243ecbfd7e09c35efc87c6e2de4/backbone/model_irse.py#L199-L204
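A quick smoke test of the factory, assuming (as is usual for the IR/IR-SE backbones) that input_size is the spatial size of an aligned 112x112 face crop:

import torch

model = IR_101([112, 112])
model.eval()
with torch.no_grad():
    embedding = model(torch.randn(1, 3, 112, 112))  # dummy RGB face crop
print(embedding.shape)  # typically a 512-d embedding per image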
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/sqlalchemy/ext/declarative/api.py
python
declarative_base
(bind=None, metadata=None, mapper=None, cls=object, name='Base', constructor=_declarative_constructor, class_registry=None, metaclass=DeclarativeMeta)
return metaclass(name, bases, class_dict)
Construct a base class for declarative class definitions. The new base class will be given a metaclass that produces appropriate :class:`~sqlalchemy.schema.Table` objects and makes the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the information provided declaratively in the class and any subclasses of the class. :param bind: An optional :class:`~sqlalchemy.engine.Connectable`, will be assigned the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData` instance. :param metadata: An optional :class:`~sqlalchemy.schema.MetaData` instance. All :class:`~sqlalchemy.schema.Table` objects implicitly declared by subclasses of the base will share this MetaData. A MetaData instance will be created if none is provided. The :class:`~sqlalchemy.schema.MetaData` instance will be available via the `metadata` attribute of the generated declarative base class. :param mapper: An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will be used to map subclasses to their Tables. :param cls: Defaults to :class:`object`. A type to use as the base for the generated declarative base class. May be a class or tuple of classes. :param name: Defaults to ``Base``. The display name for the generated class. Customizing this is not required, but can improve clarity in tracebacks and debugging. :param constructor: Defaults to :func:`~sqlalchemy.ext.declarative._declarative_constructor`, an __init__ implementation that assigns \**kwargs for declared fields and relationships to an instance. If ``None`` is supplied, no __init__ will be provided and construction will fall back to cls.__init__ by way of the normal Python semantics. :param class_registry: optional dictionary that will serve as the registry of class names-> mapped classes when string names are used to identify classes inside of :func:`.relationship` and others. Allows two or more declarative base classes to share the same registry of class names for simplified inter-base relationships. :param metaclass: Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ compatible callable to use as the meta type of the generated declarative base class. .. seealso:: :func:`.as_declarative`
Construct a base class for declarative class definitions.
[ "Construct", "a", "base", "class", "for", "declarative", "class", "definitions", "." ]
def declarative_base(bind=None, metadata=None, mapper=None, cls=object, name='Base', constructor=_declarative_constructor, class_registry=None, metaclass=DeclarativeMeta): """Construct a base class for declarative class definitions. The new base class will be given a metaclass that produces appropriate :class:`~sqlalchemy.schema.Table` objects and makes the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the information provided declaratively in the class and any subclasses of the class. :param bind: An optional :class:`~sqlalchemy.engine.Connectable`, will be assigned the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData` instance. :param metadata: An optional :class:`~sqlalchemy.schema.MetaData` instance. All :class:`~sqlalchemy.schema.Table` objects implicitly declared by subclasses of the base will share this MetaData. A MetaData instance will be created if none is provided. The :class:`~sqlalchemy.schema.MetaData` instance will be available via the `metadata` attribute of the generated declarative base class. :param mapper: An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will be used to map subclasses to their Tables. :param cls: Defaults to :class:`object`. A type to use as the base for the generated declarative base class. May be a class or tuple of classes. :param name: Defaults to ``Base``. The display name for the generated class. Customizing this is not required, but can improve clarity in tracebacks and debugging. :param constructor: Defaults to :func:`~sqlalchemy.ext.declarative._declarative_constructor`, an __init__ implementation that assigns \**kwargs for declared fields and relationships to an instance. If ``None`` is supplied, no __init__ will be provided and construction will fall back to cls.__init__ by way of the normal Python semantics. :param class_registry: optional dictionary that will serve as the registry of class names-> mapped classes when string names are used to identify classes inside of :func:`.relationship` and others. Allows two or more declarative base classes to share the same registry of class names for simplified inter-base relationships. :param metaclass: Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ compatible callable to use as the meta type of the generated declarative base class. .. seealso:: :func:`.as_declarative` """ lcl_metadata = metadata or MetaData() if bind: lcl_metadata.bind = bind if class_registry is None: class_registry = weakref.WeakValueDictionary() bases = not isinstance(cls, tuple) and (cls,) or cls class_dict = dict(_decl_class_registry=class_registry, metadata=lcl_metadata) if constructor: class_dict['__init__'] = constructor if mapper: class_dict['__mapper_cls__'] = mapper return metaclass(name, bases, class_dict)
[ "def", "declarative_base", "(", "bind", "=", "None", ",", "metadata", "=", "None", ",", "mapper", "=", "None", ",", "cls", "=", "object", ",", "name", "=", "'Base'", ",", "constructor", "=", "_declarative_constructor", ",", "class_registry", "=", "None", ",", "metaclass", "=", "DeclarativeMeta", ")", ":", "lcl_metadata", "=", "metadata", "or", "MetaData", "(", ")", "if", "bind", ":", "lcl_metadata", ".", "bind", "=", "bind", "if", "class_registry", "is", "None", ":", "class_registry", "=", "weakref", ".", "WeakValueDictionary", "(", ")", "bases", "=", "not", "isinstance", "(", "cls", ",", "tuple", ")", "and", "(", "cls", ",", ")", "or", "cls", "class_dict", "=", "dict", "(", "_decl_class_registry", "=", "class_registry", ",", "metadata", "=", "lcl_metadata", ")", "if", "constructor", ":", "class_dict", "[", "'__init__'", "]", "=", "constructor", "if", "mapper", ":", "class_dict", "[", "'__mapper_cls__'", "]", "=", "mapper", "return", "metaclass", "(", "name", ",", "bases", ",", "class_dict", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/sqlalchemy/ext/declarative/api.py#L168-L247
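The canonical usage pattern, for reference; the table and column names are arbitrary.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)  # emits CREATE TABLE for all Base subclasses

session = sessionmaker(bind=engine)()
session.add(User(name='alice'))   # __init__ comes from _declarative_constructor
session.commit()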
ghostop14/sparrow-wifi
4b8289773ea4304872062f65a6ffc9352612b08e
plugins/falconwifi.py
python
FalconWirelessEngine.testWEPCapture
(apMacAddr, capFile)
[]
def testWEPCapture(apMacAddr, capFile): # aircrack-ng -a2 -b D8:EB:97:2F:DD:CE -w /opt/wordlists/TopPasswords3-2.txt falconcap-01.cap params = ['aircrack-ng','-f','4','-1','-b', apMacAddr, capFile] result = subprocess.run(params, stdout=subprocess.PIPE,stderr=subprocess.DEVNULL) testResult = result.stdout.decode('ASCII') iv=re.compile(' +([0-9]+) IVs') regexresult=iv.search(testResult) if regexresult: try: ivstr = regexresult.group(1) ivstr = ivstr.replace('IVs', '') ivstr = ivstr.replace(' ', '') ivcount = int(ivstr) except: ivcount = 0 else: ivcount = 0 # Please specify a dictionary comes back when aircrack-ng recognizes it as WPA, not wep, # No matching network found means you have the wrong bssid or a packet hasn't been seen yet if result.returncode == 0 and 'KEY FOUND' in testResult: passwords = [] p = re.compile('KEY FOUND\! \[(.*?)\].*') lines = testResult.split('\n') for curLine in lines: try: if 'KEY FOUND' in curLine: fieldValue = p.search(curLine).group(1) else: fieldValue = "" except: fieldValue = "" if len(fieldValue) > 0: fieldValue = fieldValue.strip() if fieldValue not in passwords: passwords.append(fieldValue) return True, passwords, ivcount else: return False, [], ivcount
[ "def", "testWEPCapture", "(", "apMacAddr", ",", "capFile", ")", ":", "# aircrack-ng -a2 -b D8:EB:97:2F:DD:CE -w /opt/wordlists/TopPasswords3-2.txt falconcap-01.cap", "params", "=", "[", "'aircrack-ng'", ",", "'-f'", ",", "'4'", ",", "'-1'", ",", "'-b'", ",", "apMacAddr", ",", "capFile", "]", "result", "=", "subprocess", ".", "run", "(", "params", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")", "testResult", "=", "result", ".", "stdout", ".", "decode", "(", "'ASCII'", ")", "iv", "=", "re", ".", "compile", "(", "' +([0-9]+) IVs'", ")", "regexresult", "=", "iv", ".", "search", "(", "testResult", ")", "if", "regexresult", ":", "try", ":", "ivstr", "=", "regexresult", ".", "group", "(", "1", ")", "ivstr", "=", "ivstr", ".", "replace", "(", "'IVs'", ",", "''", ")", "ivstr", "=", "ivstr", ".", "replace", "(", "' '", ",", "''", ")", "ivcount", "=", "int", "(", "ivstr", ")", "except", ":", "ivcount", "=", "0", "else", ":", "ivcount", "=", "0", "# Please specify a dictionary comes back when aircrack-ng recognizes it as WPA, not wep,", "# No matching network found means you have the wrong bssid or a packet hasn't been seen yet", "if", "result", ".", "returncode", "==", "0", "and", "'KEY FOUND'", "in", "testResult", ":", "passwords", "=", "[", "]", "p", "=", "re", ".", "compile", "(", "'KEY FOUND\\! \\[(.*?)\\].*'", ")", "lines", "=", "testResult", ".", "split", "(", "'\\n'", ")", "for", "curLine", "in", "lines", ":", "try", ":", "if", "'KEY FOUND'", "in", "curLine", ":", "fieldValue", "=", "p", ".", "search", "(", "curLine", ")", ".", "group", "(", "1", ")", "else", ":", "fieldValue", "=", "\"\"", "except", ":", "fieldValue", "=", "\"\"", "if", "len", "(", "fieldValue", ")", ">", "0", ":", "fieldValue", "=", "fieldValue", ".", "strip", "(", ")", "if", "fieldValue", "not", "in", "passwords", ":", "passwords", ".", "append", "(", "fieldValue", ")", "return", "True", ",", "passwords", ",", "ivcount", "else", ":", "return", "False", ",", "[", "]", ",", "ivcount" ]
https://github.com/ghostop14/sparrow-wifi/blob/4b8289773ea4304872062f65a6ffc9352612b08e/plugins/falconwifi.py#L675-L722
EtienneCmb/visbrain
b599038e095919dc193b12d5e502d127de7d03c9
visbrain/gui/brain/interface/ui_elements/ui_objects.py
python
UiObjects._fcn_obj_name
(self)
Change object name.
Change object name.
[ "Change", "object", "name", "." ]
def _fcn_obj_name(self): """Change object name.""" idx_type = self._obj_type_lst.currentIndex() if idx_type == 4: # Sources fcn = self._sources_to_gui elif idx_type == 5: # Connectivity fcn = self._connect_to_gui elif idx_type == 6: # time-series fcn = self._ts_to_gui elif idx_type == 7: # pictures fcn = self._pic_to_gui elif idx_type == 8: # vectors fcn = self._vec_to_gui # if idx_type > 4: self._obj_run_method = False fcn() self._obj_run_method = True
[ "def", "_fcn_obj_name", "(", "self", ")", ":", "idx_type", "=", "self", ".", "_obj_type_lst", ".", "currentIndex", "(", ")", "if", "idx_type", "==", "4", ":", "# Sources", "fcn", "=", "self", ".", "_sources_to_gui", "elif", "idx_type", "==", "5", ":", "# Connectivity", "fcn", "=", "self", ".", "_connect_to_gui", "elif", "idx_type", "==", "6", ":", "# time-series", "fcn", "=", "self", ".", "_ts_to_gui", "elif", "idx_type", "==", "7", ":", "# pictures", "fcn", "=", "self", ".", "_pic_to_gui", "elif", "idx_type", "==", "8", ":", "# vectors", "fcn", "=", "self", ".", "_vec_to_gui", "# if idx_type > 4:", "self", ".", "_obj_run_method", "=", "False", "fcn", "(", ")", "self", ".", "_obj_run_method", "=", "True" ]
https://github.com/EtienneCmb/visbrain/blob/b599038e095919dc193b12d5e502d127de7d03c9/visbrain/gui/brain/interface/ui_elements/ui_objects.py#L58-L74
aws-cloudformation/cfn-lint
16df5d0ca0d8ebcf9330ebea701e83d883b47217
src/cfnlint/rules/resources/elb/Elb.py
python
Elb.__init__
(self)
Init
Init
[ "Init" ]
def __init__(self): """ Init """ super(Elb, self).__init__() self.resource_property_types = ['AWS::ElasticLoadBalancingV2::LoadBalancer']
[ "def", "__init__", "(", "self", ")", ":", "super", "(", "Elb", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "resource_property_types", "=", "[", "'AWS::ElasticLoadBalancingV2::LoadBalancer'", "]" ]
https://github.com/aws-cloudformation/cfn-lint/blob/16df5d0ca0d8ebcf9330ebea701e83d883b47217/src/cfnlint/rules/resources/elb/Elb.py#L19-L22
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py
python
SQLiteFileLock
(*args, **kwds)
return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile", *args, **kwds)
Factory function provided for backwards compatibility. Do not use in new code. Instead, import SQLiteLockFile from the lockfile.sqlitelockfile module.
Factory function provided for backwards compatibility.
[ "Factory", "function", "provided", "for", "backwards", "compatibility", "." ]
def SQLiteFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import SQLiteLockFile from the
    lockfile.sqlitelockfile module.
    """
    from . import sqlitelockfile
    return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
                      *args, **kwds)
[ "def", "SQLiteFileLock", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "from", ".", "import", "sqlitelockfile", "return", "_fl_helper", "(", "sqlitelockfile", ".", "SQLiteLockFile", ",", "\"lockfile.sqlitelockfile\"", ",", "*", "args", ",", "*", "*", "kwds", ")" ]
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py#L304-L312
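For completeness, a minimal sketch of the context-manager pattern the lockfile package is built around; the lock path is a placeholder.

from lockfile import LockFile

lock = LockFile('/tmp/demo')  # lock metadata is kept alongside this path
with lock:                    # acquires on enter, releases on exit
    pass                      # ... exclusive section ...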
donnemartin/viz
dc1ed0690ed3d8cb515c9dbac43bafc59a704789
site/fabfile.py
python
rebuild
()
`clean` then `build`
`clean` then `build`
[ "clean", "then", "build" ]
def rebuild(): """`clean` then `build`""" clean() build()
[ "def", "rebuild", "(", ")", ":", "clean", "(", ")", "build", "(", ")" ]
https://github.com/donnemartin/viz/blob/dc1ed0690ed3d8cb515c9dbac43bafc59a704789/site/fabfile.py#L39-L42
jimmysong/pb-exercises
c5e64075c47503a40063aa836c06a452af14246d
session3/answers.py
python
parse_txin
(cls, s)
return cls(prev_tx, prev_index, script_sig, sequence)
Takes a byte stream and parses the tx_input at the start. Returns a TxIn object.
Takes a byte stream and parses the tx_input at the start. Returns a TxIn object.
[ "Takes", "a", "byte", "stream", "and", "parses", "the", "tx_input", "at", "the", "start", ".", "Returns", "a", "TxIn", "object", "." ]
def parse_txin(cls, s):
    '''Takes a byte stream and parses the tx_input at the start.
    Returns a TxIn object.
    '''
    prev_tx = s.read(32)[::-1]
    prev_index = little_endian_to_int(s.read(4))
    script_sig = Script.parse(s)
    sequence = little_endian_to_int(s.read(4))
    return cls(prev_tx, prev_index, script_sig, sequence)
[ "def", "parse_txin", "(", "cls", ",", "s", ")", ":", "prev_tx", "=", "s", ".", "read", "(", "32", ")", "[", ":", ":", "-", "1", "]", "prev_index", "=", "little_endian_to_int", "(", "s", ".", "read", "(", "4", ")", ")", "script_sig", "=", "Script", ".", "parse", "(", "s", ")", "sequence", "=", "little_endian_to_int", "(", "s", ".", "read", "(", "4", ")", ")", "return", "cls", "(", "prev_tx", ",", "prev_index", ",", "script_sig", ",", "sequence", ")" ]
https://github.com/jimmysong/pb-exercises/blob/c5e64075c47503a40063aa836c06a452af14246d/session3/answers.py#L204-L212
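Given the fixed layout (32-byte prev_tx stored reversed, 4-byte little-endian index, a varint-prefixed ScriptSig, 4-byte sequence), a hand-built stream parses cleanly. This sketch assumes parse_txin is bound to the TxIn class, as it is in the exercises.

from io import BytesIO

raw = (bytes(32)                     # prev_tx: 32 zero bytes
       + bytes.fromhex('00000000')   # prev_index = 0, little-endian
       + bytes.fromhex('00')         # empty ScriptSig (varint length 0)
       + bytes.fromhex('ffffffff'))  # default sequence
tx_in = TxIn.parse_txin(BytesIO(raw))
print(tx_in.prev_index, tx_in.sequence)  # 0 4294967295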
KoreLogicSecurity/mastiff
04d569e4fa59513572e77c74b049cad82f9b0310
mastiff/core.py
python
Mastiff.__del__
(self)
Class destructor.
Class destructor.
[ "Class", "destructor", "." ]
def __del__(self): """ Class destructor. """ # Close down all logging file handles so we don't have any open file descriptors log = logging.getLogger("Mastiff") handles = list(log.handlers) for file_handle in handles: log.removeHandler(file_handle) file_handle.close()
[ "def", "__del__", "(", "self", ")", ":", "# Close down all logging file handles so we don't have any open file descriptors", "log", "=", "logging", ".", "getLogger", "(", "\"Mastiff\"", ")", "handles", "=", "list", "(", "log", ".", "handlers", ")", "for", "file_handle", "in", "handles", ":", "log", ".", "removeHandler", "(", "file_handle", ")", "file_handle", ".", "close", "(", ")" ]
https://github.com/KoreLogicSecurity/mastiff/blob/04d569e4fa59513572e77c74b049cad82f9b0310/mastiff/core.py#L231-L240
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/tiia/v20190529/models.py
python
RecognizeCarRequest.__init__
(self)
r""" :param ImageUrl: 图片URL地址。 图片限制: • 图片格式:PNG、JPG、JPEG。 • 图片大小:所下载图片经Base64编码后不超过4M。图片下载时间不超过3秒。 建议: • 图片像素:大于50*50像素,否则影响识别效果; • 长宽比:长边:短边<5; 接口响应时间会受到图片下载时间的影响,建议使用更可靠的存储服务,推荐将图片存储在腾讯云COS。 :type ImageUrl: str :param ImageBase64: 图片经过base64编码的内容。最大不超过4M。与ImageUrl同时存在时优先使用ImageUrl字段。 **注意:图片需要base64编码,并且要去掉编码头部。** 支持的图片格式:PNG、JPG、JPEG、BMP,暂不支持GIF格式。支持的图片大小:所下载图片经Base64编码后不超过4M。图片下载时间不超过3秒。 :type ImageBase64: str
r""" :param ImageUrl: 图片URL地址。 图片限制: • 图片格式:PNG、JPG、JPEG。 • 图片大小:所下载图片经Base64编码后不超过4M。图片下载时间不超过3秒。 建议: • 图片像素:大于50*50像素,否则影响识别效果; • 长宽比:长边:短边<5; 接口响应时间会受到图片下载时间的影响,建议使用更可靠的存储服务,推荐将图片存储在腾讯云COS。 :type ImageUrl: str :param ImageBase64: 图片经过base64编码的内容。最大不超过4M。与ImageUrl同时存在时优先使用ImageUrl字段。 **注意:图片需要base64编码,并且要去掉编码头部。** 支持的图片格式:PNG、JPG、JPEG、BMP,暂不支持GIF格式。支持的图片大小:所下载图片经Base64编码后不超过4M。图片下载时间不超过3秒。 :type ImageBase64: str
[ "r", ":", "param", "ImageUrl", ":", "图片URL地址。", "图片限制:", "•", "图片格式:PNG、JPG、JPEG。", "•", "图片大小:所下载图片经Base64编码后不超过4M。图片下载时间不超过3秒。", "建议:", "•", "图片像素:大于50", "*", "50像素,否则影响识别效果;", "•", "长宽比:长边:短边<5;", "接口响应时间会受到图片下载时间的影响,建议使用更可靠的存储服务,推荐将图片存储在腾讯云COS。", ":", "type", "ImageUrl", ":", "str", ":", "param", "ImageBase64", ":", "图片经过base64编码的内容。最大不超过4M。与ImageUrl同时存在时优先使用ImageUrl字段。", "**", "注意:图片需要base64编码,并且要去掉编码头部。", "**", "支持的图片格式:PNG、JPG、JPEG、BMP,暂不支持GIF格式。支持的图片大小:所下载图片经Base64编码后不超过4M。图片下载时间不超过3秒。", ":", "type", "ImageBase64", ":", "str" ]
def __init__(self): r""" :param ImageUrl: 图片URL地址。 图片限制: • 图片格式:PNG、JPG、JPEG。 • 图片大小:所下载图片经Base64编码后不超过4M。图片下载时间不超过3秒。 建议: • 图片像素:大于50*50像素,否则影响识别效果; • 长宽比:长边:短边<5; 接口响应时间会受到图片下载时间的影响,建议使用更可靠的存储服务,推荐将图片存储在腾讯云COS。 :type ImageUrl: str :param ImageBase64: 图片经过base64编码的内容。最大不超过4M。与ImageUrl同时存在时优先使用ImageUrl字段。 **注意:图片需要base64编码,并且要去掉编码头部。** 支持的图片格式:PNG、JPG、JPEG、BMP,暂不支持GIF格式。支持的图片大小:所下载图片经Base64编码后不超过4M。图片下载时间不超过3秒。 :type ImageBase64: str """ self.ImageUrl = None self.ImageBase64 = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "ImageUrl", "=", "None", "self", ".", "ImageBase64", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/tiia/v20190529/models.py#L1728-L1745
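A hedged sketch of how this request model is typically used with the SDK's client classes; the credentials, region and image URL are placeholders.

from tencentcloud.common import credential
from tencentcloud.tiia.v20190529 import tiia_client, models

cred = credential.Credential('SECRET_ID', 'SECRET_KEY')
client = tiia_client.TiiaClient(cred, 'ap-guangzhou')

req = models.RecognizeCarRequest()
req.ImageUrl = 'https://example.com/car.jpg'
resp = client.RecognizeCar(req)
print(resp.to_json_string())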
qutebrowser/qutebrowser
3a2aaaacbf97f4bf0c72463f3da94ed2822a5442
qutebrowser/browser/history.py
python
debug_dump_history
(dest)
Dump the history to a file in the old pre-SQL format. Args: dest: Where to write the file to.
Dump the history to a file in the old pre-SQL format.
[ "Dump", "the", "history", "to", "a", "file", "in", "the", "old", "pre", "-", "SQL", "format", "." ]
def debug_dump_history(dest): """Dump the history to a file in the old pre-SQL format. Args: dest: Where to write the file to. """ dest = os.path.expanduser(dest) lines = (f'{int(x.atime)}{"-r" * x.redirect} {x.url} {x.title}' for x in web_history.select(sort_by='atime', sort_order='asc')) try: with open(dest, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) message.info(f"Dumped history to {dest}") except OSError as e: raise cmdutils.CommandError(f'Could not write history: {e}')
[ "def", "debug_dump_history", "(", "dest", ")", ":", "dest", "=", "os", ".", "path", ".", "expanduser", "(", "dest", ")", "lines", "=", "(", "f'{int(x.atime)}{\"-r\" * x.redirect} {x.url} {x.title}'", "for", "x", "in", "web_history", ".", "select", "(", "sort_by", "=", "'atime'", ",", "sort_order", "=", "'asc'", ")", ")", "try", ":", "with", "open", "(", "dest", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "'\\n'", ".", "join", "(", "lines", ")", ")", "message", ".", "info", "(", "f\"Dumped history to {dest}\"", ")", "except", "OSError", "as", "e", ":", "raise", "cmdutils", ".", "CommandError", "(", "f'Could not write history: {e}'", ")" ]
https://github.com/qutebrowser/qutebrowser/blob/3a2aaaacbf97f4bf0c72463f3da94ed2822a5442/qutebrowser/browser/history.py#L463-L479
1012598167/flask_mongodb_game
60c7e0351586656ec38f851592886338e50b4110
python_flask/venv/Lib/site-packages/itsdangerous/_compat.py
python
_constant_time_compare
(val1, val2)
return result == 0
Return ``True`` if the two strings are equal, ``False`` otherwise.

The time taken is independent of the number of characters that match. Do not use this function for anything other than comparison with known length targets.

This should be implemented in C in order to get it completely right.

This is an alias of :func:`hmac.compare_digest` on Python>=2.7,3.3.
Return ``True`` if the two strings are equal, ``False`` otherwise.
[ "Return", "True", "if", "the", "two", "strings", "are", "equal", "False", "otherwise", "." ]
def _constant_time_compare(val1, val2):
    """Return ``True`` if the two strings are equal, ``False`` otherwise.

    The time taken is independent of the number of characters that match. Do
    not use this function for anything other than comparison with known
    length targets.

    This should be implemented in C in order to get it completely right.

    This is an alias of :func:`hmac.compare_digest` on Python>=2.7,3.3.
    """
    len_eq = len(val1) == len(val2)
    if len_eq:
        result = 0
        left = val1
    else:
        result = 1
        left = val2
    for x, y in izip(bytearray(left), bytearray(val2)):
        result |= x ^ y
    return result == 0
[ "def", "_constant_time_compare", "(", "val1", ",", "val2", ")", ":", "len_eq", "=", "len", "(", "val1", ")", "==", "len", "(", "val2", ")", "if", "len_eq", ":", "result", "=", "0", "left", "=", "val1", "else", ":", "result", "=", "1", "left", "=", "val2", "for", "x", ",", "y", "in", "izip", "(", "bytearray", "(", "left", ")", ",", "bytearray", "(", "val2", ")", ")", ":", "result", "|=", "x", "^", "y", "return", "result", "==", "0" ]
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/itsdangerous/_compat.py#L19-L41
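As the docstring notes, on recent Pythons this is just an alias, so the stdlib primitive can be used directly:

import hmac

# Runtime is independent of where the first mismatching byte occurs.
print(hmac.compare_digest(b'signature-a', b'signature-a'))  # True
print(hmac.compare_digest(b'signature-a', b'signature-b'))  # False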
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/protocols/tls.py
python
TLSMemoryBIOProtocol.getHandle
(self)
return self._tlsConnection
Return the L{OpenSSL.SSL.Connection} object being used to encrypt and decrypt this connection. This is done for the benefit of L{twisted.internet.ssl.Certificate}'s C{peerFromTransport} and C{hostFromTransport} methods only. A different system handle may be returned by future versions of this method.
Return the L{OpenSSL.SSL.Connection} object being used to encrypt and decrypt this connection.
[ "Return", "the", "L", "{", "OpenSSL", ".", "SSL", ".", "Connection", "}", "object", "being", "used", "to", "encrypt", "and", "decrypt", "this", "connection", "." ]
def getHandle(self): """ Return the L{OpenSSL.SSL.Connection} object being used to encrypt and decrypt this connection. This is done for the benefit of L{twisted.internet.ssl.Certificate}'s C{peerFromTransport} and C{hostFromTransport} methods only. A different system handle may be returned by future versions of this method. """ return self._tlsConnection
[ "def", "getHandle", "(", "self", ")", ":", "return", "self", ".", "_tlsConnection" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/protocols/tls.py#L113-L123
tav/pylibs
3c16b843681f54130ee6a022275289cadb2f2a69
paramiko/sftp_file.py
python
SFTPFile._check_exception
(self)
if there's a saved exception, raise & clear it
if there's a saved exception, raise & clear it
[ "if", "there", "s", "a", "saved", "exception", "raise", "&", "clear", "it" ]
def _check_exception(self): "if there's a saved exception, raise & clear it" if self._saved_exception is not None: x = self._saved_exception self._saved_exception = None raise x
[ "def", "_check_exception", "(", "self", ")", ":", "if", "self", ".", "_saved_exception", "is", "not", "None", ":", "x", "=", "self", ".", "_saved_exception", "self", ".", "_saved_exception", "=", "None", "raise", "x" ]
https://github.com/tav/pylibs/blob/3c16b843681f54130ee6a022275289cadb2f2a69/paramiko/sftp_file.py#L471-L476
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
sklearn/_loss/glm_distribution.py
python
ExponentialDispersionModel.unit_variance
(self, y_pred)
r"""Compute the unit variance function. The unit variance :math:`v(y_\textrm{pred})` determines the variance as a function of the mean :math:`y_\textrm{pred}` by :math:`\mathrm{Var}[Y_i] = \phi/s_i*v(y_\textrm{pred}_i)`. It can also be derived from the unit deviance :math:`d(y,y_\textrm{pred})` as .. math:: v(y_\textrm{pred}) = \frac{2}{ \frac{\partial^2 d(y,y_\textrm{pred})}{ \partialy_\textrm{pred}^2}}\big|_{y=y_\textrm{pred}} See also :func:`variance`. Parameters ---------- y_pred : array of shape (n_samples,) Predicted mean.
r"""Compute the unit variance function.
[ "r", "Compute", "the", "unit", "variance", "function", "." ]
def unit_variance(self, y_pred):
    r"""Compute the unit variance function.

    The unit variance :math:`v(y_\textrm{pred})` determines the variance as
    a function of the mean :math:`y_\textrm{pred}` by
    :math:`\mathrm{Var}[Y_i] = \phi/s_i*v(y_\textrm{pred}_i)`.
    It can also be derived from the unit deviance
    :math:`d(y,y_\textrm{pred})` as

    .. math:: v(y_\textrm{pred}) = \frac{2}{
        \frac{\partial^2 d(y,y_\textrm{pred})}{
        \partial y_\textrm{pred}^2}}\big|_{y=y_\textrm{pred}}

    See also :func:`variance`.

    Parameters
    ----------
    y_pred : array of shape (n_samples,)
        Predicted mean.
    """
[ "def", "unit_variance", "(", "self", ",", "y_pred", ")", ":" ]
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/_loss/glm_distribution.py#L69-L88
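The deviance/variance relation in the docstring can be checked symbolically. The Poisson unit deviance used below is a standard textbook example, not something defined in this snippet.

import sympy as sp

y, mu = sp.symbols('y mu', positive=True)
d = 2 * (y * sp.log(y / mu) - y + mu)    # Poisson unit deviance d(y, mu)
second = sp.diff(d, mu, 2)               # d^2 d / d mu^2 = 2*y/mu**2
v = sp.simplify(2 / second.subs(y, mu))  # evaluate at y = mu
print(v)                                 # mu, i.e. v(mu) = mu for Poisson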
jstockwin/py-pdf-parser
29d4656799876b35d95c82fd1230a2a92d24d6a6
py_pdf_parser/components.py
python
PDFElement.font
(self)
return self.__font
The name and size of the font, separated by a comma with no spaces. This will be taken from the pdf itself, using the first character in the element. If you have provided a font_mapping, this is the string you should map. If the string is mapped in your font_mapping then the mapped value will be returned. font_mapping can have regexes as keys. Returns: str: The font of the element.
The name and size of the font, separated by a comma with no spaces.
[ "The", "name", "and", "size", "of", "the", "font", "separated", "by", "a", "comma", "with", "no", "spaces", "." ]
def font(self) -> str: """ The name and size of the font, separated by a comma with no spaces. This will be taken from the pdf itself, using the first character in the element. If you have provided a font_mapping, this is the string you should map. If the string is mapped in your font_mapping then the mapped value will be returned. font_mapping can have regexes as keys. Returns: str: The font of the element. """ if self.__font is not None: return self.__font font = f"{self.font_name},{self.font_size}" if self.document._font_mapping_is_regex: for pattern, font_name in self.document._font_mapping.items(): if re.match(pattern, font, self.document._regex_flags): self.__font = font_name return self.__font self.__font = self.document._font_mapping.get(font) or font return self.__font
[ "def", "font", "(", "self", ")", "->", "str", ":", "if", "self", ".", "__font", "is", "not", "None", ":", "return", "self", ".", "__font", "font", "=", "f\"{self.font_name},{self.font_size}\"", "if", "self", ".", "document", ".", "_font_mapping_is_regex", ":", "for", "pattern", ",", "font_name", "in", "self", ".", "document", ".", "_font_mapping", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "pattern", ",", "font", ",", "self", ".", "document", ".", "_regex_flags", ")", ":", "self", ".", "__font", "=", "font_name", "return", "self", ".", "__font", "self", ".", "__font", "=", "self", ".", "document", ".", "_font_mapping", ".", "get", "(", "font", ")", "or", "font", "return", "self", ".", "__font" ]
https://github.com/jstockwin/py-pdf-parser/blob/29d4656799876b35d95c82fd1230a2a92d24d6a6/py_pdf_parser/components.py#L213-L237
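A hedged usage sketch: PDFs usually report subset-prefixed names such as 'ABCDEF+Helvetica-Bold', so the regex form of font_mapping is convenient for normalising them. The file name and mapping below are invented.

from py_pdf_parser.loaders import load_file

document = load_file(
    'report.pdf',
    font_mapping={r'\w{6}\+Helvetica-Bold,\d+': 'heading'},
    font_mapping_is_regex=True,
)
headings = document.elements.filter_by_font('heading')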
googlearchive/appengine-flask-skeleton
8c25461d003a0bd99a9ff3b339c2791ee6919242
lib/jinja2/bccache.py
python
Bucket.load_bytecode
(self, f)
Loads bytecode from a file or file like object.
Loads bytecode from a file or file like object.
[ "Loads", "bytecode", "from", "a", "file", "or", "file", "like", "object", "." ]
def load_bytecode(self, f): """Loads bytecode from a file or file like object.""" # make sure the magic header is correct magic = f.read(len(bc_magic)) if magic != bc_magic: self.reset() return # the source code of the file changed, we need to reload checksum = pickle.load(f) if self.checksum != checksum: self.reset() return # if marshal_load fails then we need to reload try: self.code = marshal_load(f) except (EOFError, ValueError, TypeError): self.reset() return
[ "def", "load_bytecode", "(", "self", ",", "f", ")", ":", "# make sure the magic header is correct", "magic", "=", "f", ".", "read", "(", "len", "(", "bc_magic", ")", ")", "if", "magic", "!=", "bc_magic", ":", "self", ".", "reset", "(", ")", "return", "# the source code of the file changed, we need to reload", "checksum", "=", "pickle", ".", "load", "(", "f", ")", "if", "self", ".", "checksum", "!=", "checksum", ":", "self", ".", "reset", "(", ")", "return", "# if marshal_load fails then we need to reload", "try", ":", "self", ".", "code", "=", "marshal_load", "(", "f", ")", "except", "(", "EOFError", ",", "ValueError", ",", "TypeError", ")", ":", "self", ".", "reset", "(", ")", "return" ]
https://github.com/googlearchive/appengine-flask-skeleton/blob/8c25461d003a0bd99a9ff3b339c2791ee6919242/lib/jinja2/bccache.py#L79-L96
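In practice this Bucket machinery is driven by a bytecode cache attached to the environment; a minimal sketch with placeholder paths.

from jinja2 import Environment, FileSystemLoader, FileSystemBytecodeCache

env = Environment(
    loader=FileSystemLoader('templates'),
    bytecode_cache=FileSystemBytecodeCache('/tmp/jinja_cache'),
)
# The first render compiles and marshals the template to disk; later runs
# reuse it unless the magic-header or source-checksum check above fails.
template = env.get_template('index.html')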
G-Wang/WaveRNN-Pytorch
7149631c777ad9c64d502e3628250830c30cefda
preprocess.py
python
process_data
(wav_dir, output_path, mel_path, wav_path)
Given a wav directory and an output directory, process the wav files and save the quantized wav and mel spectrogram arrays to the output directory.
Given a wav directory and an output directory, process the wav files and save the quantized wav and mel spectrogram arrays to the output directory.
[ "Given", "a", "wav", "directory", "and", "an", "output", "directory", "process", "the", "wav", "files", "and", "save", "the", "quantized", "wav", "and", "mel", "spectrogram", "arrays", "to", "the", "output", "directory", "." ]
def process_data(wav_dir, output_path, mel_path, wav_path):
    """
    Given a wav directory and an output directory, process the wav files and
    save the quantized wav and mel spectrogram arrays to the output directory
    """
    dataset_ids = []
    # get list of wav files
    wav_files = os.listdir(wav_dir)
    # require a non-empty directory of .wav files
    assert len(wav_files) != 0 and wav_files[0][-4:] == '.wav', "no wav files found!"
    # create training and testing splits
    test_wav_files = wav_files[:4]
    wav_files = wav_files[4:]
    for i, wav_file in enumerate(tqdm(wav_files)):
        # get the file id
        file_id = '{:d}'.format(i).zfill(5)
        wav, mel = get_wav_mel(os.path.join(wav_dir,wav_file))
        # save
        np.save(os.path.join(mel_path,file_id+".npy"), mel)
        np.save(os.path.join(wav_path,file_id+".npy"), wav)
        # add to dataset_ids
        dataset_ids.append(file_id)
    # save dataset_ids
    with open(os.path.join(output_path,'dataset_ids.pkl'), 'wb') as f:
        pickle.dump(dataset_ids, f)
    # process testing_wavs
    test_path = os.path.join(output_path,'test')
    os.makedirs(test_path, exist_ok=True)
    for i, wav_file in enumerate(test_wav_files):
        wav, mel = get_wav_mel(os.path.join(wav_dir,wav_file))
        # save test_wavs
        np.save(os.path.join(test_path,"test_{}_mel.npy".format(i)),mel)
        np.save(os.path.join(test_path,"test_{}_wav.npy".format(i)),wav)
    print("\npreprocessing done, total processed wav files:{}.\nProcessed files are located in:{}".format(len(wav_files), os.path.abspath(output_path)))
[ "def", "process_data", "(", "wav_dir", ",", "output_path", ",", "mel_path", ",", "wav_path", ")", ":", "dataset_ids", "=", "[", "]", "# get list of wav files", "wav_files", "=", "os", ".", "listdir", "(", "wav_dir", ")", "# check wav_file", "assert", "len", "(", "wav_files", ")", "!=", "0", "or", "wav_files", "[", "0", "]", "[", "-", "4", ":", "]", "==", "'.wav'", ",", "\"no wav files found!\"", "# create training and testing splits", "test_wav_files", "=", "wav_files", "[", ":", "4", "]", "wav_files", "=", "wav_files", "[", "4", ":", "]", "for", "i", ",", "wav_file", "in", "enumerate", "(", "tqdm", "(", "wav_files", ")", ")", ":", "# get the file id", "file_id", "=", "'{:d}'", ".", "format", "(", "i", ")", ".", "zfill", "(", "5", ")", "wav", ",", "mel", "=", "get_wav_mel", "(", "os", ".", "path", ".", "join", "(", "wav_dir", ",", "wav_file", ")", ")", "# save", "np", ".", "save", "(", "os", ".", "path", ".", "join", "(", "mel_path", ",", "file_id", "+", "\".npy\"", ")", ",", "mel", ")", "np", ".", "save", "(", "os", ".", "path", ".", "join", "(", "wav_path", ",", "file_id", "+", "\".npy\"", ")", ",", "wav", ")", "# add to dataset_ids", "dataset_ids", ".", "append", "(", "file_id", ")", "# save dataset_ids", "with", "open", "(", "os", ".", "path", ".", "join", "(", "output_path", ",", "'dataset_ids.pkl'", ")", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "dataset_ids", ",", "f", ")", "# process testing_wavs", "test_path", "=", "os", ".", "path", ".", "join", "(", "output_path", ",", "'test'", ")", "os", ".", "makedirs", "(", "test_path", ",", "exist_ok", "=", "True", ")", "for", "i", ",", "wav_file", "in", "enumerate", "(", "test_wav_files", ")", ":", "wav", ",", "mel", "=", "get_wav_mel", "(", "os", ".", "path", ".", "join", "(", "wav_dir", ",", "wav_file", ")", ")", "# save test_wavs", "np", ".", "save", "(", "os", ".", "path", ".", "join", "(", "test_path", ",", "\"test_{}_mel.npy\"", ".", "format", "(", "i", ")", ")", ",", "mel", ")", "np", ".", "save", "(", "os", ".", "path", ".", "join", "(", "test_path", ",", "\"test_{}_wav.npy\"", ".", "format", "(", "i", ")", ")", ",", "wav", ")", "print", "(", "\"\\npreprocessing done, total processed wav files:{}.\\nProcessed files are located in:{}\"", ".", "format", "(", "len", "(", "wav_files", ")", ",", "os", ".", "path", ".", "abspath", "(", "output_path", ")", ")", ")" ]
https://github.com/G-Wang/WaveRNN-Pytorch/blob/7149631c777ad9c64d502e3628250830c30cefda/preprocess.py#L40-L77
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/messaging/v1/service/us_app_to_person.py
python
UsAppToPersonInstance._proxy
(self)
return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: UsAppToPersonContext for this UsAppToPersonInstance :rtype: twilio.rest.messaging.v1.service.us_app_to_person.UsAppToPersonContext
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: UsAppToPersonContext for this UsAppToPersonInstance :rtype: twilio.rest.messaging.v1.service.us_app_to_person.UsAppToPersonContext """ if self._context is None: self._context = UsAppToPersonContext( self._version, messaging_service_sid=self._solution['messaging_service_sid'], sid=self._solution['sid'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "UsAppToPersonContext", "(", "self", ".", "_version", ",", "messaging_service_sid", "=", "self", ".", "_solution", "[", "'messaging_service_sid'", "]", ",", "sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_context" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/messaging/v1/service/us_app_to_person.py#L331-L345
xinntao/EDVR
b02e63a0fc5854cad7b11a87f74601612e356eff
basicsr/data/data_util.py
python
paired_paths_from_meta_info_file
(folders, keys, meta_info_file, filename_tmpl)
return paths
Generate paired paths from an meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of an meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list.
Generate paired paths from a meta information file.
[ "Generate", "paired", "paths", "from", "an", "meta", "information", "file", "." ]
def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl): """Generate paired paths from a meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by whitespace. Example of a meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder paths. The order of the list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[dict]: Returned path list. """ assert len(folders) == 2, ( 'The len of folders should be 2 with [input_folder, gt_folder]. ' f'But got {len(folders)}') assert len(keys) == 2, ( 'The len of keys should be 2 with [input_key, gt_key]. ' f'But got {len(keys)}') input_folder, gt_folder = folders input_key, gt_key = keys with open(meta_info_file, 'r') as fin: gt_names = [line.split(' ')[0] for line in fin] paths = [] for gt_name in gt_names: basename, ext = osp.splitext(osp.basename(gt_name)) input_name = f'{filename_tmpl.format(basename)}{ext}' input_path = osp.join(input_folder, input_name) gt_path = osp.join(gt_folder, gt_name) paths.append( dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)])) return paths
[ "def", "paired_paths_from_meta_info_file", "(", "folders", ",", "keys", ",", "meta_info_file", ",", "filename_tmpl", ")", ":", "assert", "len", "(", "folders", ")", "==", "2", ",", "(", "'The len of folders should be 2 with [input_folder, gt_folder]. '", "f'But got {len(folders)}'", ")", "assert", "len", "(", "keys", ")", "==", "2", ",", "(", "'The len of keys should be 2 with [input_key, gt_key]. '", "f'But got {len(keys)}'", ")", "input_folder", ",", "gt_folder", "=", "folders", "input_key", ",", "gt_key", "=", "keys", "with", "open", "(", "meta_info_file", ",", "'r'", ")", "as", "fin", ":", "gt_names", "=", "[", "line", ".", "split", "(", "' '", ")", "[", "0", "]", "for", "line", "in", "fin", "]", "paths", "=", "[", "]", "for", "gt_name", "in", "gt_names", ":", "basename", ",", "ext", "=", "osp", ".", "splitext", "(", "osp", ".", "basename", "(", "gt_name", ")", ")", "input_name", "=", "f'{filename_tmpl.format(basename)}{ext}'", "input_path", "=", "osp", ".", "join", "(", "input_folder", ",", "input_name", ")", "gt_path", "=", "osp", ".", "join", "(", "gt_folder", ",", "gt_name", ")", "paths", ".", "append", "(", "dict", "(", "[", "(", "f'{input_key}_path'", ",", "input_path", ")", ",", "(", "f'{gt_key}_path'", ",", "gt_path", ")", "]", ")", ")", "return", "paths" ]
https://github.com/xinntao/EDVR/blob/b02e63a0fc5854cad7b11a87f74601612e356eff/basicsr/data/data_util.py#L157-L204
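A hedged usage sketch for `paired_paths_from_meta_info_file`; the import path comes from the record, while the folder layout, meta file, and filename template are assumptions:
```python
# Hypothetical call; only the function and its import path are from the
# record, the dataset layout below is invented for illustration.
from basicsr.data.data_util import paired_paths_from_meta_info_file

# datasets/meta_info.txt is assumed to contain lines such as:
#   0001_s001.png (480,480,3)
paths = paired_paths_from_meta_info_file(
    folders=['datasets/lq', 'datasets/gt'],  # [input_folder, gt_folder]
    keys=['lq', 'gt'],
    meta_info_file='datasets/meta_info.txt',
    filename_tmpl='{}x4',  # lq counterpart of 0001_s001.png is 0001_s001x4.png
)
# paths -> [{'lq_path': 'datasets/lq/0001_s001x4.png',
#            'gt_path': 'datasets/gt/0001_s001.png'}, ...]
```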
mdiazcl/fuzzbunch-debian
2b76c2249ade83a389ae3badb12a1bd09901fd2c
windows/Resources/Python/Core/Lib/lib-tk/Tkinter.py
python
OptionMenu.__init__
(self, master, variable, value, *values, **kwargs)
Construct an optionmenu widget with the parent MASTER, with the resource textvariable set to VARIABLE, the initially selected value VALUE, the other menu values VALUES and an additional keyword argument command.
Construct an optionmenu widget with the parent MASTER, with the resource textvariable set to VARIABLE, the initially selected value VALUE, the other menu values VALUES and an additional keyword argument command.
[ "Construct", "an", "optionmenu", "widget", "with", "the", "parent", "MASTER", "with", "the", "resource", "textvariable", "set", "to", "VARIABLE", "the", "initially", "selected", "value", "VALUE", "the", "other", "menu", "values", "VALUES", "and", "an", "additional", "keyword", "argument", "command", "." ]
def __init__(self, master, variable, value, *values, **kwargs): """Construct an optionmenu widget with the parent MASTER, with the resource textvariable set to VARIABLE, the initially selected value VALUE, the other menu values VALUES and an additional keyword argument command.""" kw = {'borderwidth': 2,'textvariable': variable,'indicatoron': 1, 'relief': RAISED,'anchor': 'c','highlightthickness': 2 } Widget.__init__(self, master, 'menubutton', kw) self.widgetName = 'tk_optionMenu' menu = self.__menu = Menu(self, name='menu', tearoff=0) self.menuname = menu._w callback = kwargs.get('command') if 'command' in kwargs: del kwargs['command'] if kwargs: raise TclError, 'unknown option -' + kwargs.keys()[0] menu.add_command(label=value, command=_setit(variable, value, callback)) for v in values: menu.add_command(label=v, command=_setit(variable, v, callback)) self['menu'] = menu
[ "def", "__init__", "(", "self", ",", "master", ",", "variable", ",", "value", ",", "*", "values", ",", "*", "*", "kwargs", ")", ":", "kw", "=", "{", "'borderwidth'", ":", "2", ",", "'textvariable'", ":", "variable", ",", "'indicatoron'", ":", "1", ",", "'relief'", ":", "RAISED", ",", "'anchor'", ":", "'c'", ",", "'highlightthickness'", ":", "2", "}", "Widget", ".", "__init__", "(", "self", ",", "master", ",", "'menubutton'", ",", "kw", ")", "self", ".", "widgetName", "=", "'tk_optionMenu'", "menu", "=", "self", ".", "__menu", "=", "Menu", "(", "self", ",", "name", "=", "'menu'", ",", "tearoff", "=", "0", ")", "self", ".", "menuname", "=", "menu", ".", "_w", "callback", "=", "kwargs", ".", "get", "(", "'command'", ")", "if", "'command'", "in", "kwargs", ":", "del", "kwargs", "[", "'command'", "]", "if", "kwargs", ":", "raise", "TclError", ",", "'unknown option -'", "+", "kwargs", ".", "keys", "(", ")", "[", "0", "]", "menu", ".", "add_command", "(", "label", "=", "value", ",", "command", "=", "_setit", "(", "variable", ",", "value", ",", "callback", ")", ")", "for", "v", "in", "values", ":", "menu", ".", "add_command", "(", "label", "=", "v", ",", "command", "=", "_setit", "(", "variable", ",", "v", ",", "callback", ")", ")", "self", "[", "'menu'", "]", "=", "menu" ]
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/lib-tk/Tkinter.py#L3558-L3579
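A minimal usage sketch for the `OptionMenu` constructor above, written for the Python 2 `Tkinter` module the record comes from (under Python 3 the import would be the lowercase `tkinter`); the widget layout and callback are illustrative:
```python
# Minimal OptionMenu demo; choices and callback are invented for illustration.
from Tkinter import Tk, StringVar, OptionMenu  # Python 2, per the record

root = Tk()
choice = StringVar(root)

def on_select(value):
    # the command callback receives the newly selected value
    print("selected: %s" % value)

# 'red' is the initially selected value; 'green' and 'blue' fill the menu
OptionMenu(root, choice, 'red', 'green', 'blue', command=on_select).pack()
root.mainloop()
```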
GoogleCloudPlatform/gsutil
5be882803e76608e2fd29cf8c504ccd1fe0a7746
gslib/utils/hashing_helper.py
python
_CrcMultiply
(p, q)
return result
Multiplies two polynomials together modulo CASTAGNOLI_POLY. Args: p: The first polynomial. q: The second polynomial. Returns: Result of the multiplication.
Multiplies two polynomials together modulo CASTAGNOLI_POLY.
[ "Multiplies", "two", "polynomials", "together", "modulo", "CASTAGNOLI_POLY", "." ]
def _CrcMultiply(p, q): """Multiplies two polynomials together modulo CASTAGNOLI_POLY. Args: p: The first polynomial. q: The second polynomial. Returns: Result of the multiplication. """ result = 0 top_bit = 1 << DEGREE for _ in range(DEGREE): if p & 1: result ^= q q <<= 1 if q & top_bit: q ^= CASTAGNOLI_POLY p >>= 1 return result
[ "def", "_CrcMultiply", "(", "p", ",", "q", ")", ":", "result", "=", "0", "top_bit", "=", "1", "<<", "DEGREE", "for", "_", "in", "range", "(", "DEGREE", ")", ":", "if", "p", "&", "1", ":", "result", "^=", "q", "q", "<<=", "1", "if", "q", "&", "top_bit", ":", "q", "^=", "CASTAGNOLI_POLY", "p", ">>=", "1", "return", "result" ]
https://github.com/GoogleCloudPlatform/gsutil/blob/5be882803e76608e2fd29cf8c504ccd1fe0a7746/gslib/utils/hashing_helper.py#L127-L147
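Because `_CrcMultiply` is pure arithmetic, it can be exercised standalone; the constants below restate gsutil's CRC32C setup (32-bit CRC, Castagnoli generator) as an assumption so the snippet runs on its own:
```python
# Self-contained demo of carry-less multiplication in GF(2)[x] modulo the
# Castagnoli polynomial; DEGREE/CASTAGNOLI_POLY are assumed to match gsutil.
DEGREE = 32
CASTAGNOLI_POLY = 0x11EDC6F41  # 33-bit generator of CRC32C

def _CrcMultiply(p, q):
    result = 0
    top_bit = 1 << DEGREE
    for _ in range(DEGREE):
        if p & 1:          # coefficient of x^i in p is set
            result ^= q    # add q * x^i (XOR is GF(2) addition)
        q <<= 1            # q *= x
        if q & top_bit:    # reduce back below degree 32
            q ^= CASTAGNOLI_POLY
        p >>= 1
    return result

a, b = 0xDEADBEEF, 0x12345678
assert _CrcMultiply(a, 1) == a                   # 1 is the identity polynomial
assert _CrcMultiply(a, b) == _CrcMultiply(b, a)  # multiplication commutes
print(hex(_CrcMultiply(a, b)))
```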