Dataset schema (field · dtype · observed lengths/values):
nwo · stringlengths · 5–106
sha · stringlengths · 40–40
path · stringlengths · 4–174
language · stringclasses · 1 value
identifier · stringlengths · 1–140
parameters · stringlengths · 0–87.7k
argument_list · stringclasses · 1 value
return_statement · stringlengths · 0–426k
docstring · stringlengths · 0–64.3k
docstring_summary · stringlengths · 0–26.3k
docstring_tokens · sequence
function · stringlengths · 18–4.83M
function_tokens · sequence
url · stringlengths · 83–304
shichao-an/leetcode-python
6c523ef4759a57433e10271b584eece16f9f05f3
construct_binary_tree_from_preorder_and_inorder_traversal/solution.py
python
Solution.buildTree
(self, preorder, inorder)
[]
def buildTree(self, preorder, inorder):
    if len(inorder) == 0:
        return None
    else:
        root_val = preorder.pop(0)
        root_index = inorder.index(root_val)
        left_tree = self.buildTree(preorder, inorder[:root_index])
        right_tree = self.buildTree(preorder, inorder[root_index + 1:])
        root = TreeNode(root_val)
        root.left = left_tree
        root.right = right_tree
        return root
[ "def", "buildTree", "(", "self", ",", "preorder", ",", "inorder", ")", ":", "if", "len", "(", "inorder", ")", "==", "0", ":", "return", "None", "else", ":", "root_val", "=", "preorder", ".", "pop", "(", "0", ")", "root_index", "=", "inorder", ".", "index", "(", "root_val", ")", "left_tree", "=", "self", ".", "buildTree", "(", "preorder", ",", "inorder", "[", ":", "root_index", "]", ")", "right_tree", "=", "self", ".", "buildTree", "(", "preorder", ",", "inorder", "[", "root_index", "+", "1", ":", "]", ")", "root", "=", "TreeNode", "(", "root_val", ")", "root", ".", "left", "=", "left_tree", "root", ".", "right", "=", "right_tree", "return", "root" ]
https://github.com/shichao-an/leetcode-python/blob/6c523ef4759a57433e10271b584eece16f9f05f3/construct_binary_tree_from_preorder_and_inorder_traversal/solution.py#L12-L23
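A minimal sketch of how this solution might be exercised, assuming the usual LeetCode TreeNode scaffolding (val/left/right), which is not part of the record above. Note that buildTree consumes the preorder list via pop(0), so each call is O(n) and the whole build is O(n^2) in the worst case.

class TreeNode(object):
    """Plain binary-tree node, as assumed by the solution above."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# Preorder [root, left..., right...] and inorder [left..., root, right...]
# for the tree 3 -> (9, 20 -> (15, 7)).
preorder = [3, 9, 20, 15, 7]
inorder = [9, 3, 15, 20, 7]
root = Solution().buildTree(preorder, inorder)
assert root.val == 3 and root.left.val == 9 and root.right.val == 20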
zzzeek/mako
12819efda61b0c478a700670575c951b6cde7383
mako/util.py
python
parse_encoding
(fp)
Deduce the encoding of a Python source file (binary mode) from magic comment. It does this in the same way as the `Python interpreter`__ .. __: http://docs.python.org/ref/encodings.html The ``fp`` argument should be a seekable file object in binary mode.
Deduce the encoding of a Python source file (binary mode) from magic comment.
[ "Deduce", "the", "encoding", "of", "a", "Python", "source", "file", "(", "binary", "mode", ")", "from", "magic", "comment", "." ]
def parse_encoding(fp):
    """Deduce the encoding of a Python source file (binary mode) from magic
    comment.

    It does this in the same way as the `Python interpreter`__

    .. __: http://docs.python.org/ref/encodings.html

    The ``fp`` argument should be a seekable file object in binary mode.

    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8) :]

        m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode("ascii", "ignore"))
        if not m:
            try:
                parse(line1.decode("ascii", "ignore"))
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source
                # is not valid python source, or line2 is a continuation of
                # line1, in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = _PYTHON_MAGIC_COMMENT_re.match(
                    line2.decode("ascii", "ignore")
                )

        if has_bom:
            if m:
                raise SyntaxError(
                    "python refuses to compile code with both a UTF8"
                    " byte-order-mark and a magic encoding comment"
                )
            return "utf_8"
        elif m:
            return m.group(1)
        else:
            return None
    finally:
        fp.seek(pos)
[ "def", "parse_encoding", "(", "fp", ")", ":", "pos", "=", "fp", ".", "tell", "(", ")", "fp", ".", "seek", "(", "0", ")", "try", ":", "line1", "=", "fp", ".", "readline", "(", ")", "has_bom", "=", "line1", ".", "startswith", "(", "codecs", ".", "BOM_UTF8", ")", "if", "has_bom", ":", "line1", "=", "line1", "[", "len", "(", "codecs", ".", "BOM_UTF8", ")", ":", "]", "m", "=", "_PYTHON_MAGIC_COMMENT_re", ".", "match", "(", "line1", ".", "decode", "(", "\"ascii\"", ",", "\"ignore\"", ")", ")", "if", "not", "m", ":", "try", ":", "parse", "(", "line1", ".", "decode", "(", "\"ascii\"", ",", "\"ignore\"", ")", ")", "except", "(", "ImportError", ",", "SyntaxError", ")", ":", "# Either it's a real syntax error, in which case the source", "# is not valid python source, or line2 is a continuation of", "# line1, in which case we don't want to scan line2 for a magic", "# comment.", "pass", "else", ":", "line2", "=", "fp", ".", "readline", "(", ")", "m", "=", "_PYTHON_MAGIC_COMMENT_re", ".", "match", "(", "line2", ".", "decode", "(", "\"ascii\"", ",", "\"ignore\"", ")", ")", "if", "has_bom", ":", "if", "m", ":", "raise", "SyntaxError", "(", "\"python refuses to compile code with both a UTF8\"", "\" byte-order-mark and a magic encoding comment\"", ")", "return", "\"utf_8\"", "elif", "m", ":", "return", "m", ".", "group", "(", "1", ")", "else", ":", "return", "None", "finally", ":", "fp", ".", "seek", "(", "pos", ")" ]
https://github.com/zzzeek/mako/blob/12819efda61b0c478a700670575c951b6cde7383/mako/util.py#L241-L287
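A short usage sketch, assuming mako.util is importable; the source bytes here are illustrative. The function reads a PEP 263 magic comment from the first two lines and restores the file position before returning:

import io
from mako.util import parse_encoding

src = b"# -*- coding: latin-1 -*-\nx = 1\n"
fp = io.BytesIO(src)          # any seekable binary file object works
print(parse_encoding(fp))     # 'latin-1'
print(fp.tell())              # 0 -- position restored by the finally block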
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/enterprise/adbapi.py
python
ConnectionPool.close
(self)
Close all pool connections and shutdown the pool.
Close all pool connections and shutdown the pool.
[ "Close", "all", "pool", "connections", "and", "shutdown", "the", "pool", "." ]
def close(self):
    """
    Close all pool connections and shutdown the pool.
    """
    if self.shutdownID:
        self._reactor.removeSystemEventTrigger(self.shutdownID)
        self.shutdownID = None
    if self.startID:
        self._reactor.removeSystemEventTrigger(self.startID)
        self.startID = None
    self.finalClose()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "shutdownID", ":", "self", ".", "_reactor", ".", "removeSystemEventTrigger", "(", "self", ".", "shutdownID", ")", "self", ".", "shutdownID", "=", "None", "if", "self", ".", "startID", ":", "self", ".", "_reactor", ".", "removeSystemEventTrigger", "(", "self", ".", "startID", ")", "self", ".", "startID", "=", "None", "self", ".", "finalClose", "(", ")" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/enterprise/adbapi.py#L374-L384
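For context, a sketch of standard adbapi usage around this method, assuming a running reactor for the query to complete; close() is the synchronous teardown that removes the start/shutdown reactor triggers before calling finalClose():

from twisted.enterprise import adbapi

# sqlite3 needs check_same_thread=False because pool queries run in
# worker threads.
pool = adbapi.ConnectionPool("sqlite3", "example.db",
                             check_same_thread=False)
d = pool.runQuery("SELECT 1")   # returns a Deferred
d.addCallback(print)
pool.close()                    # removes triggers, then finalClose()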
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/sqlalchemy/sql/elements.py
python
_is_column
(col)
return isinstance(col, ColumnElement)
True if ``col`` is an instance of :class:`.ColumnElement`.
True if ``col`` is an instance of :class:`.ColumnElement`.
[ "True", "if", "col", "is", "an", "instance", "of", ":", "class", ":", ".", "ColumnElement", "." ]
def _is_column(col):
    """True if ``col`` is an instance of :class:`.ColumnElement`."""
    return isinstance(col, ColumnElement)
[ "def", "_is_column", "(", "col", ")", ":", "return", "isinstance", "(", "col", ",", "ColumnElement", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/sqlalchemy/sql/elements.py#L3384-L3387
WPO-Foundation/wptagent
94470f007294213f900dcd9a207678b5b9fce5d3
internal/safari_ios.py
python
iWptBrowser.process_optimization_results
(self, page_data, requests, optimization_results)
Merge the data from the optimization checks file
Merge the data from the optimization checks file
[ "Merge", "the", "data", "from", "the", "optimization", "checks", "file" ]
def process_optimization_results(self, page_data, requests, optimization_results):
    """Merge the data from the optimization checks file"""
    if optimization_results and not self.must_exit:
        page_data['score_cache'] = -1
        page_data['score_cdn'] = -1
        page_data['score_gzip'] = -1
        page_data['score_cookies'] = -1
        page_data['score_keep-alive'] = -1
        page_data['score_minify'] = -1
        page_data['score_combine'] = -1
        page_data['score_compress'] = -1
        page_data['score_etags'] = -1
        page_data['score_progressive_jpeg'] = -1
        page_data['gzip_total'] = 0
        page_data['gzip_savings'] = 0
        page_data['minify_total'] = -1
        page_data['minify_savings'] = -1
        page_data['image_total'] = 0
        page_data['image_savings'] = 0
        page_data['optimization_checked'] = 1
        page_data['base_page_cdn'] = ''
        cache_count = 0
        cache_total = 0
        cdn_count = 0
        cdn_total = 0
        keep_alive_count = 0
        keep_alive_total = 0
        progressive_total_bytes = 0
        progressive_bytes = 0
        for request in requests:
            if request['responseCode'] == 200:
                request_id = str(request['id'])
                pos = request_id.find('-')
                if pos > 0:
                    request_id = request_id[:pos]
                if request_id in optimization_results:
                    opt = optimization_results[request_id]
                    if 'cache' in opt:
                        request['score_cache'] = opt['cache']['score']
                        request['cache_time'] = opt['cache']['time']
                        cache_count += 1
                        cache_total += request['score_cache']
                    if 'cdn' in opt:
                        request['score_cdn'] = opt['cdn']['score']
                        request['cdn_provider'] = opt['cdn']['provider']
                        cdn_count += 1
                        cdn_total += request['score_cdn']
                        if 'is_base_page' in request and request['is_base_page'] and \
                                request['cdn_provider'] is not None:
                            page_data['base_page_cdn'] = request['cdn_provider']
                    if 'keep_alive' in opt:
                        request['score_keep-alive'] = opt['keep_alive']['score']
                        keep_alive_count += 1
                        keep_alive_total += request['score_keep-alive']
                    if 'gzip' in opt:
                        savings = opt['gzip']['size'] - opt['gzip']['target_size']
                        request['score_gzip'] = opt['gzip']['score']
                        request['gzip_total'] = opt['gzip']['size']
                        request['gzip_save'] = savings
                        page_data['gzip_total'] += opt['gzip']['size']
                        page_data['gzip_savings'] += savings
                    if 'image' in opt:
                        savings = opt['image']['size'] - opt['image']['target_size']
                        request['score_compress'] = opt['image']['score']
                        request['image_total'] = opt['image']['size']
                        request['image_save'] = savings
                        page_data['image_total'] += opt['image']['size']
                        page_data['image_savings'] += savings
                    if 'progressive' in opt:
                        size = opt['progressive']['size']
                        request['jpeg_scan_count'] = opt['progressive']['scan_count']
                        progressive_total_bytes += size
                        if request['jpeg_scan_count'] > 1:
                            request['score_progressive_jpeg'] = 100
                            progressive_bytes += size
                        elif size < 10240:
                            request['score_progressive_jpeg'] = 50
                        else:
                            request['score_progressive_jpeg'] = 0
        if cache_count > 0:
            page_data['score_cache'] = int(round(cache_total / cache_count))
        if cdn_count > 0:
            page_data['score_cdn'] = int(round(cdn_total / cdn_count))
        if keep_alive_count > 0:
            page_data['score_keep-alive'] = int(round(keep_alive_total / keep_alive_count))
        if page_data['gzip_total'] > 0:
            page_data['score_gzip'] = 100 - int(page_data['gzip_savings'] * 100 /
                                                page_data['gzip_total'])
        if page_data['image_total'] > 0:
            page_data['score_compress'] = 100 - int(page_data['image_savings'] * 100 /
                                                    page_data['image_total'])
        if progressive_total_bytes > 0:
            page_data['score_progressive_jpeg'] = int(round(progressive_bytes * 100 /
                                                            progressive_total_bytes))
[ "def", "process_optimization_results", "(", "self", ",", "page_data", ",", "requests", ",", "optimization_results", ")", ":", "if", "optimization_results", "and", "not", "self", ".", "must_exit", ":", "page_data", "[", "'score_cache'", "]", "=", "-", "1", "page_data", "[", "'score_cdn'", "]", "=", "-", "1", "page_data", "[", "'score_gzip'", "]", "=", "-", "1", "page_data", "[", "'score_cookies'", "]", "=", "-", "1", "page_data", "[", "'score_keep-alive'", "]", "=", "-", "1", "page_data", "[", "'score_minify'", "]", "=", "-", "1", "page_data", "[", "'score_combine'", "]", "=", "-", "1", "page_data", "[", "'score_compress'", "]", "=", "-", "1", "page_data", "[", "'score_etags'", "]", "=", "-", "1", "page_data", "[", "'score_progressive_jpeg'", "]", "=", "-", "1", "page_data", "[", "'gzip_total'", "]", "=", "0", "page_data", "[", "'gzip_savings'", "]", "=", "0", "page_data", "[", "'minify_total'", "]", "=", "-", "1", "page_data", "[", "'minify_savings'", "]", "=", "-", "1", "page_data", "[", "'image_total'", "]", "=", "0", "page_data", "[", "'image_savings'", "]", "=", "0", "page_data", "[", "'optimization_checked'", "]", "=", "1", "page_data", "[", "'base_page_cdn'", "]", "=", "''", "cache_count", "=", "0", "cache_total", "=", "0", "cdn_count", "=", "0", "cdn_total", "=", "0", "keep_alive_count", "=", "0", "keep_alive_total", "=", "0", "progressive_total_bytes", "=", "0", "progressive_bytes", "=", "0", "for", "request", "in", "requests", ":", "if", "request", "[", "'responseCode'", "]", "==", "200", ":", "request_id", "=", "str", "(", "request", "[", "'id'", "]", ")", "pos", "=", "request_id", ".", "find", "(", "'-'", ")", "if", "pos", ">", "0", ":", "request_id", "=", "request_id", "[", ":", "pos", "]", "if", "request_id", "in", "optimization_results", ":", "opt", "=", "optimization_results", "[", "request_id", "]", "if", "'cache'", "in", "opt", ":", "request", "[", "'score_cache'", "]", "=", "opt", "[", "'cache'", "]", "[", "'score'", "]", "request", "[", "'cache_time'", "]", "=", "opt", "[", "'cache'", "]", "[", "'time'", "]", "cache_count", "+=", "1", "cache_total", "+=", "request", "[", "'score_cache'", "]", "if", "'cdn'", "in", "opt", ":", "request", "[", "'score_cdn'", "]", "=", "opt", "[", "'cdn'", "]", "[", "'score'", "]", "request", "[", "'cdn_provider'", "]", "=", "opt", "[", "'cdn'", "]", "[", "'provider'", "]", "cdn_count", "+=", "1", "cdn_total", "+=", "request", "[", "'score_cdn'", "]", "if", "'is_base_page'", "in", "request", "and", "request", "[", "'is_base_page'", "]", "and", "request", "[", "'cdn_provider'", "]", "is", "not", "None", ":", "page_data", "[", "'base_page_cdn'", "]", "=", "request", "[", "'cdn_provider'", "]", "if", "'keep_alive'", "in", "opt", ":", "request", "[", "'score_keep-alive'", "]", "=", "opt", "[", "'keep_alive'", "]", "[", "'score'", "]", "keep_alive_count", "+=", "1", "keep_alive_total", "+=", "request", "[", "'score_keep-alive'", "]", "if", "'gzip'", "in", "opt", ":", "savings", "=", "opt", "[", "'gzip'", "]", "[", "'size'", "]", "-", "opt", "[", "'gzip'", "]", "[", "'target_size'", "]", "request", "[", "'score_gzip'", "]", "=", "opt", "[", "'gzip'", "]", "[", "'score'", "]", "request", "[", "'gzip_total'", "]", "=", "opt", "[", "'gzip'", "]", "[", "'size'", "]", "request", "[", "'gzip_save'", "]", "=", "savings", "page_data", "[", "'gzip_total'", "]", "+=", "opt", "[", "'gzip'", "]", "[", "'size'", "]", "page_data", "[", "'gzip_savings'", "]", "+=", "savings", "if", "'image'", "in", "opt", ":", "savings", "=", "opt", "[", "'image'", "]", "[", 
"'size'", "]", "-", "opt", "[", "'image'", "]", "[", "'target_size'", "]", "request", "[", "'score_compress'", "]", "=", "opt", "[", "'image'", "]", "[", "'score'", "]", "request", "[", "'image_total'", "]", "=", "opt", "[", "'image'", "]", "[", "'size'", "]", "request", "[", "'image_save'", "]", "=", "savings", "page_data", "[", "'image_total'", "]", "+=", "opt", "[", "'image'", "]", "[", "'size'", "]", "page_data", "[", "'image_savings'", "]", "+=", "savings", "if", "'progressive'", "in", "opt", ":", "size", "=", "opt", "[", "'progressive'", "]", "[", "'size'", "]", "request", "[", "'jpeg_scan_count'", "]", "=", "opt", "[", "'progressive'", "]", "[", "'scan_count'", "]", "progressive_total_bytes", "+=", "size", "if", "request", "[", "'jpeg_scan_count'", "]", ">", "1", ":", "request", "[", "'score_progressive_jpeg'", "]", "=", "100", "progressive_bytes", "+=", "size", "elif", "size", "<", "10240", ":", "request", "[", "'score_progressive_jpeg'", "]", "=", "50", "else", ":", "request", "[", "'score_progressive_jpeg'", "]", "=", "0", "if", "cache_count", ">", "0", ":", "page_data", "[", "'score_cache'", "]", "=", "int", "(", "round", "(", "cache_total", "/", "cache_count", ")", ")", "if", "cdn_count", ">", "0", ":", "page_data", "[", "'score_cdn'", "]", "=", "int", "(", "round", "(", "cdn_total", "/", "cdn_count", ")", ")", "if", "keep_alive_count", ">", "0", ":", "page_data", "[", "'score_keep-alive'", "]", "=", "int", "(", "round", "(", "keep_alive_total", "/", "keep_alive_count", ")", ")", "if", "page_data", "[", "'gzip_total'", "]", ">", "0", ":", "page_data", "[", "'score_gzip'", "]", "=", "100", "-", "int", "(", "page_data", "[", "'gzip_savings'", "]", "*", "100", "/", "page_data", "[", "'gzip_total'", "]", ")", "if", "page_data", "[", "'image_total'", "]", ">", "0", ":", "page_data", "[", "'score_compress'", "]", "=", "100", "-", "int", "(", "page_data", "[", "'image_savings'", "]", "*", "100", "/", "page_data", "[", "'image_total'", "]", ")", "if", "progressive_total_bytes", ">", "0", ":", "page_data", "[", "'score_progressive_jpeg'", "]", "=", "int", "(", "round", "(", "progressive_bytes", "*", "100", "/", "progressive_total_bytes", ")", ")" ]
https://github.com/WPO-Foundation/wptagent/blob/94470f007294213f900dcd9a207678b5b9fce5d3/internal/safari_ios.py#L1482-L1575
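The aggregation at the end of the method reduces to two simple formulas: per-check scores are averaged over the requests that had that check, and the gzip/image scores are the percentage of bytes that could not be saved. A standalone illustration of that arithmetic, with hypothetical numbers:

# Average of per-request CDN scores.
cdn_scores = [100, 0, 100]
score_cdn = int(round(sum(cdn_scores) / float(len(cdn_scores))))  # 67

# gzip score: 100 minus achievable savings as a percent of total bytes.
gzip_total, gzip_savings = 200000, 30000
score_gzip = 100 - int(gzip_savings * 100 / gzip_total)  # 85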
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/logging/__init__.py
python
Handler.__init__
(self, level=NOTSET)
Initializes the instance - basically setting the formatter to None and the filter list to empty.
Initializes the instance - basically setting the formatter to None and the filter list to empty.
[ "Initializes", "the", "instance", "-", "basically", "setting", "the", "formatter", "to", "None", "and", "the", "filter", "list", "to", "empty", "." ]
def __init__(self, level=NOTSET):
    """
    Initializes the instance - basically setting the formatter to None
    and the filter list to empty.
    """
    Filterer.__init__(self)
    self._name = None
    self.level = _checkLevel(level)
    self.formatter = None
    # Add the handler to the global _handlerList (for cleanup on shutdown)
    _addHandlerRef(self)
    self.createLock()
[ "def", "__init__", "(", "self", ",", "level", "=", "NOTSET", ")", ":", "Filterer", ".", "__init__", "(", "self", ")", "self", ".", "_name", "=", "None", "self", ".", "level", "=", "_checkLevel", "(", "level", ")", "self", ".", "formatter", "=", "None", "# Add the handler to the global _handlerList (for cleanup on shutdown)", "_addHandlerRef", "(", "self", ")", "self", ".", "createLock", "(", ")" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/logging/__init__.py#L761-L772
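Handler is the abstract base for all logging handlers; a subclass typically overrides only emit(), and this __init__ runs via super() to register the handler for shutdown cleanup. A minimal stdlib-only sketch:

import logging

class ListHandler(logging.Handler):
    """Collects formatted records in a list (illustrative subclass)."""
    def __init__(self, level=logging.NOTSET):
        super(ListHandler, self).__init__(level)  # runs the __init__ above
        self.records = []

    def emit(self, record):
        self.records.append(self.format(record))

logger = logging.getLogger("demo")
handler = ListHandler()
logger.addHandler(handler)
logger.warning("hello")
print(handler.records)  # ['hello']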
scipy/scipy
e0a749f01e79046642ccfdc419edbf9e7ca141ad
scipy/cluster/hierarchy.py
python
is_valid_linkage
(Z, warning=False, throw=False, name=None)
return valid
Check the validity of a linkage matrix. A linkage matrix is valid if it is a 2-D array (type double) with :math:`n` rows and 4 columns. The first two columns must contain indices between 0 and :math:`2n-1`. For a given row ``i``, the following two expressions have to hold: .. math:: 0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1 0 \\leq Z[i,1] \\leq i+n-1 I.e., a cluster cannot join another cluster unless the cluster being joined has been generated. Parameters ---------- Z : array_like Linkage matrix. warning : bool, optional When True, issues a Python warning if the linkage matrix passed is invalid. throw : bool, optional When True, throws a Python exception if the linkage matrix passed is invalid. name : str, optional This string refers to the variable name of the invalid linkage matrix. Returns ------- b : bool True if the inconsistency matrix is valid. See Also -------- linkage: for a description of what a linkage matrix is. Examples -------- >>> from scipy.cluster.hierarchy import ward, is_valid_linkage >>> from scipy.spatial.distance import pdist All linkage matrices generated by the clustering methods in this module will be valid (i.e., they will have the appropriate dimensions and the two required expressions will hold for all the rows). We can check this using `scipy.cluster.hierarchy.is_valid_linkage`: >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] >>> Z = ward(pdist(X)) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 3. , 4. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 9. , 10. , 1. , 2. ], [ 2. , 12. , 1.29099445, 3. ], [ 5. , 13. , 1.29099445, 3. ], [ 8. , 14. , 1.29099445, 3. ], [11. , 15. , 1.29099445, 3. ], [16. , 17. , 5.77350269, 6. ], [18. , 19. , 5.77350269, 6. ], [20. , 21. , 8.16496581, 12. ]]) >>> is_valid_linkage(Z) True However, if we create a linkage matrix in a wrong way - or if we modify a valid one in a way that any of the required expressions don't hold anymore, then the check will fail: >>> Z[3][1] = 20 # the cluster number 20 is not defined at this point >>> is_valid_linkage(Z) False
Check the validity of a linkage matrix.
[ "Check", "the", "validity", "of", "a", "linkage", "matrix", "." ]
def is_valid_linkage(Z, warning=False, throw=False, name=None):
    """
    Check the validity of a linkage matrix.

    A linkage matrix is valid if it is a 2-D array (type double)
    with :math:`n` rows and 4 columns. The first two columns must contain
    indices between 0 and :math:`2n-1`. For a given row ``i``, the following
    two expressions have to hold:

    .. math::

        0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
        0 \\leq Z[i,1] \\leq i+n-1

    I.e., a cluster cannot join another cluster unless the cluster being joined
    has been generated.

    Parameters
    ----------
    Z : array_like
        Linkage matrix.
    warning : bool, optional
        When True, issues a Python warning if the linkage
        matrix passed is invalid.
    throw : bool, optional
        When True, throws a Python exception if the linkage
        matrix passed is invalid.
    name : str, optional
        This string refers to the variable name of the invalid
        linkage matrix.

    Returns
    -------
    b : bool
        True if the inconsistency matrix is valid.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, is_valid_linkage
    >>> from scipy.spatial.distance import pdist

    All linkage matrices generated by the clustering methods in this module
    will be valid (i.e., they will have the appropriate dimensions and the two
    required expressions will hold for all the rows).

    We can check this using `scipy.cluster.hierarchy.is_valid_linkage`:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 9.        , 10.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.29099445,  3.        ],
           [ 5.        , 13.        ,  1.29099445,  3.        ],
           [ 8.        , 14.        ,  1.29099445,  3.        ],
           [11.        , 15.        ,  1.29099445,  3.        ],
           [16.        , 17.        ,  5.77350269,  6.        ],
           [18.        , 19.        ,  5.77350269,  6.        ],
           [20.        , 21.        ,  8.16496581, 12.        ]])
    >>> is_valid_linkage(Z)
    True

    However, if we create a linkage matrix in a wrong way - or if we modify
    a valid one in a way that any of the required expressions don't hold
    anymore, then the check will fail:

    >>> Z[3][1] = 20    # the cluster number 20 is not defined at this point
    >>> is_valid_linkage(Z)
    False

    """
    Z = np.asarray(Z, order='c')
    valid = True
    name_str = "%r " % name if name else ''
    try:
        if type(Z) != np.ndarray:
            raise TypeError('Passed linkage argument %sis not a valid array.' %
                            name_str)
        if Z.dtype != np.double:
            raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
        if len(Z.shape) != 2:
            raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
                             'two-dimensional).' % name_str)
        if Z.shape[1] != 4:
            raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
        if Z.shape[0] == 0:
            raise ValueError('Linkage must be computed on at least two '
                             'observations.')
        n = Z.shape[0]
        if n > 1:
            if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
                raise ValueError('Linkage %scontains negative indices.' %
                                 name_str)
            if (Z[:, 2] < 0).any():
                raise ValueError('Linkage %scontains negative distances.' %
                                 name_str)
            if (Z[:, 3] < 0).any():
                raise ValueError('Linkage %scontains negative counts.' %
                                 name_str)
        if _check_hierarchy_uses_cluster_before_formed(Z):
            raise ValueError('Linkage %suses non-singleton cluster before '
                             'it is formed.' % name_str)
        if _check_hierarchy_uses_cluster_more_than_once(Z):
            raise ValueError('Linkage %suses the same cluster more than once.'
                             % name_str)
    except Exception as e:
        if throw:
            raise
        if warning:
            _warning(str(e))
        valid = False

    return valid
[ "def", "is_valid_linkage", "(", "Z", ",", "warning", "=", "False", ",", "throw", "=", "False", ",", "name", "=", "None", ")", ":", "Z", "=", "np", ".", "asarray", "(", "Z", ",", "order", "=", "'c'", ")", "valid", "=", "True", "name_str", "=", "\"%r \"", "%", "name", "if", "name", "else", "''", "try", ":", "if", "type", "(", "Z", ")", "!=", "np", ".", "ndarray", ":", "raise", "TypeError", "(", "'Passed linkage argument %sis not a valid array.'", "%", "name_str", ")", "if", "Z", ".", "dtype", "!=", "np", ".", "double", ":", "raise", "TypeError", "(", "'Linkage matrix %smust contain doubles.'", "%", "name_str", ")", "if", "len", "(", "Z", ".", "shape", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Linkage matrix %smust have shape=2 (i.e. be '", "'two-dimensional).'", "%", "name_str", ")", "if", "Z", ".", "shape", "[", "1", "]", "!=", "4", ":", "raise", "ValueError", "(", "'Linkage matrix %smust have 4 columns.'", "%", "name_str", ")", "if", "Z", ".", "shape", "[", "0", "]", "==", "0", ":", "raise", "ValueError", "(", "'Linkage must be computed on at least two '", "'observations.'", ")", "n", "=", "Z", ".", "shape", "[", "0", "]", "if", "n", ">", "1", ":", "if", "(", "(", "Z", "[", ":", ",", "0", "]", "<", "0", ")", ".", "any", "(", ")", "or", "(", "Z", "[", ":", ",", "1", "]", "<", "0", ")", ".", "any", "(", ")", ")", ":", "raise", "ValueError", "(", "'Linkage %scontains negative indices.'", "%", "name_str", ")", "if", "(", "Z", "[", ":", ",", "2", "]", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'Linkage %scontains negative distances.'", "%", "name_str", ")", "if", "(", "Z", "[", ":", ",", "3", "]", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'Linkage %scontains negative counts.'", "%", "name_str", ")", "if", "_check_hierarchy_uses_cluster_before_formed", "(", "Z", ")", ":", "raise", "ValueError", "(", "'Linkage %suses non-singleton cluster before '", "'it is formed.'", "%", "name_str", ")", "if", "_check_hierarchy_uses_cluster_more_than_once", "(", "Z", ")", ":", "raise", "ValueError", "(", "'Linkage %suses the same cluster more than once.'", "%", "name_str", ")", "except", "Exception", "as", "e", ":", "if", "throw", ":", "raise", "if", "warning", ":", "_warning", "(", "str", "(", "e", ")", ")", "valid", "=", "False", "return", "valid" ]
https://github.com/scipy/scipy/blob/e0a749f01e79046642ccfdc419edbf9e7ca141ad/scipy/cluster/hierarchy.py#L2178-L2300
DxCx/plugin.video.9anime
34358c2f701e5ddf19d3276926374a16f63f7b6a
resources/lib/ui/js2py/es6/babel.py
python
PyJs_anonymous_1425_
(require, module, exports, this, arguments, var=var)
[]
def PyJs_anonymous_1425_(require, module, exports, this, arguments, var=var):
    var = Scope({u'this': this, u'require': require, u'exports': exports,
                 u'module': module, u'arguments': arguments}, var)
    var.registers([u'require', u'exports', u'module'])
    var.get(u'require')(Js(u'../modules/es6.object.to-string'))
    var.get(u'require')(Js(u'../modules/web.dom.iterable'))
    var.get(u'require')(Js(u'../modules/es6.weak-map'))
    var.get(u'module').put(u'exports',
                           var.get(u'require')(Js(u'../modules/_core')).get(u'WeakMap'))
[ "def", "PyJs_anonymous_1425_", "(", "require", ",", "module", ",", "exports", ",", "this", ",", "arguments", ",", "var", "=", "var", ")", ":", "var", "=", "Scope", "(", "{", "u'this'", ":", "this", ",", "u'require'", ":", "require", ",", "u'exports'", ":", "exports", ",", "u'module'", ":", "module", ",", "u'arguments'", ":", "arguments", "}", ",", "var", ")", "var", ".", "registers", "(", "[", "u'require'", ",", "u'exports'", ",", "u'module'", "]", ")", "var", ".", "get", "(", "u'require'", ")", "(", "Js", "(", "u'../modules/es6.object.to-string'", ")", ")", "var", ".", "get", "(", "u'require'", ")", "(", "Js", "(", "u'../modules/web.dom.iterable'", ")", ")", "var", ".", "get", "(", "u'require'", ")", "(", "Js", "(", "u'../modules/es6.weak-map'", ")", ")", "var", ".", "get", "(", "u'module'", ")", ".", "put", "(", "u'exports'", ",", "var", ".", "get", "(", "u'require'", ")", "(", "Js", "(", "u'../modules/_core'", ")", ")", ".", "get", "(", "u'WeakMap'", ")", ")" ]
https://github.com/DxCx/plugin.video.9anime/blob/34358c2f701e5ddf19d3276926374a16f63f7b6a/resources/lib/ui/js2py/es6/babel.py#L17012-L17018
Breakthrough/DVR-Scan
ea5f5f68cd48d03f7fcfb12b5c77fdb8147895a5
dvr_scan/scanner.py
python
ScanContext.set_output
(self, scan_only=True, comp_file=None, codec='XVID', draw_timecode=False)
Sets the path and encoder codec to use when exporting videos. Arguments: scan_only (bool): If True, only scans input for motion, but does not write any video(s) to disk. In this case, comp_file and codec are ignored. Note that the default value here is the opposite of the CLI default. comp_file (str): If set, represents the path that all concatenated motion events will be written to. If None, each motion event will be saved as a separate file. codec (str): The four-letter identifier of the encoder/video codec to use when exporting motion events as videos. Possible values are: XVID, MP4V, MP42, H264. draw_timecode (bool): If True, draws timecode on each frame. Raises: ValueError if codec is not four characters.
Sets the path and encoder codec to use when exporting videos.
[ "Sets", "the", "path", "and", "encoder", "codec", "to", "use", "when", "exporting", "videos", "." ]
def set_output(self, scan_only=True, comp_file=None, codec='XVID',
               draw_timecode=False):
    # type: (bool, str, str) -> None
    """ Sets the path and encoder codec to use when exporting videos.

    Arguments:
        scan_only (bool): If True, only scans input for motion, but
            does not write any video(s) to disk.  In this case,
            comp_file and codec are ignored. Note that the default
            value here is the opposite of the CLI default.
        comp_file (str): If set, represents the path that all
            concatenated motion events will be written to.
            If None, each motion event will be saved as a separate file.
        codec (str): The four-letter identifier of the encoder/video
            codec to use when exporting motion events as videos.
            Possible values are: XVID, MP4V, MP42, H264.
        draw_timecode (bool): If True, draws timecode on each frame.

    Raises:
        ValueError if codec is not four characters.
    """
    self._scan_only = scan_only
    self._comp_file = comp_file
    if len(codec) != 4:
        raise ValueError("codec must be exactly four (4) characters")
    self._fourcc = cv2.VideoWriter_fourcc(*codec.upper())
    self._draw_timecode = draw_timecode
[ "def", "set_output", "(", "self", ",", "scan_only", "=", "True", ",", "comp_file", "=", "None", ",", "codec", "=", "'XVID'", ",", "draw_timecode", "=", "False", ")", ":", "# type: (bool, str, str) -> None", "self", ".", "_scan_only", "=", "scan_only", "self", ".", "_comp_file", "=", "comp_file", "if", "len", "(", "codec", ")", "!=", "4", ":", "raise", "ValueError", "(", "\"codec must be exactly four (4) characters\"", ")", "self", ".", "_fourcc", "=", "cv2", ".", "VideoWriter_fourcc", "(", "*", "codec", ".", "upper", "(", ")", ")", "self", ".", "_draw_timecode", "=", "draw_timecode" ]
https://github.com/Breakthrough/DVR-Scan/blob/ea5f5f68cd48d03f7fcfb12b5c77fdb8147895a5/dvr_scan/scanner.py#L117-L142
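A usage sketch; the ScanContext construction below is hypothetical (its constructor is not part of this record), and only the set_output() call reflects the code above:

# Hypothetical setup -- constructor arguments are assumptions.
ctx = ScanContext(args)
ctx.set_output(scan_only=False, comp_file="events.avi", codec="xvid")
# The codec string is upper-cased before the fourcc is built, so 'xvid'
# and 'XVID' are equivalent; any string not exactly 4 characters long
# raises ValueError.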
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
plexpy/helpers.py
python
sort_attrs
(attr)
return len(a), a
[]
def sort_attrs(attr):
    if isinstance(attr, (list, tuple)):
        a = attr[0].split('.')
    else:
        a = attr.split('.')
    return len(a), a
[ "def", "sort_attrs", "(", "attr", ")", ":", "if", "isinstance", "(", "attr", ",", "(", "list", ",", "tuple", ")", ")", ":", "a", "=", "attr", "[", "0", "]", ".", "split", "(", "'.'", ")", "else", ":", "a", "=", "attr", ".", "split", "(", "'.'", ")", "return", "len", "(", "a", ")", ",", "a" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/plexpy/helpers.py#L1300-L1305
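sort_attrs is a sort key: it orders dotted attribute paths by depth first, then lexicographically, and accepts either a bare string or a tuple whose first element is the path. For example:

attrs = ['media.title', 'title', 'media.parent.title']
print(sorted(attrs, key=sort_attrs))
# ['title', 'media.title', 'media.parent.title']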
duerrp/pyexperiment
c426565d870d944bd5b9712629d8f1ba2527c67f
pyexperiment/Logger.py
python
Logger.__init__
(self, console_level=logging.INFO, filename=None, file_level=logging.DEBUG, no_backups=5)
Initializer
Initializer
[ "Initializer" ]
def __init__(self,
             console_level=logging.INFO,
             filename=None,
             file_level=logging.DEBUG,
             no_backups=5):
    """Initializer
    """
    # Initialize the base class, the minimal level makes sure no
    # logs are missed
    super(Logger, self).__init__(
        self,
        level=min(console_level, file_level))

    # Setup console logging
    def expand_format_tags(message, use_color=True):
        """Expands $*SEQ tags in a string. If use_color is False, removes
        the tags.
        """
        if use_color:
            message = message.replace(
                "$RESET", printers.RESET_SEQ).replace(
                "$BOLD", printers.COLORS['bold'])
        else:
            message = message.replace("$RESET", "").replace("$BOLD", "")
        return message

    color_formatter = ColorFormatter(
        expand_format_tags(CONSOLE_FORMAT, True))
    console_handler = CONSOLE_STREAM_HANDLER
    console_handler.setLevel(console_level)
    console_handler.setFormatter(color_formatter)
    self.addHandler(console_handler)

    # If needed add file handler
    if filename is not None:
        self.addHandler(MPRotLogHandler(filename=filename,
                                        level=file_level,
                                        no_backups=no_backups))

    # Emit the messages saved by the pre-init logger
    for logg in PreInitLogHandler.get_instance().pre_init_logs:
        for handler in self.handlers:
            if logg.levelno >= handler.level:
                handler.emit(logg)
    PreInitLogHandler.reset_instance()
[ "def", "__init__", "(", "self", ",", "console_level", "=", "logging", ".", "INFO", ",", "filename", "=", "None", ",", "file_level", "=", "logging", ".", "DEBUG", ",", "no_backups", "=", "5", ")", ":", "# Initialize the base class, the minimal level makes sure no", "# logs are missed", "super", "(", "Logger", ",", "self", ")", ".", "__init__", "(", "self", ",", "level", "=", "min", "(", "console_level", ",", "file_level", ")", ")", "# Setup console logging", "def", "expand_format_tags", "(", "message", ",", "use_color", "=", "True", ")", ":", "\"\"\"Expands $*SEQ tags in a string. If use_color is False, removes\n the tags.\n \"\"\"", "if", "use_color", ":", "message", "=", "message", ".", "replace", "(", "\"$RESET\"", ",", "printers", ".", "RESET_SEQ", ")", ".", "replace", "(", "\"$BOLD\"", ",", "printers", ".", "COLORS", "[", "'bold'", "]", ")", "else", ":", "message", "=", "message", ".", "replace", "(", "\"$RESET\"", ",", "\"\"", ")", ".", "replace", "(", "\"$BOLD\"", ",", "\"\"", ")", "return", "message", "color_formatter", "=", "ColorFormatter", "(", "expand_format_tags", "(", "CONSOLE_FORMAT", ",", "True", ")", ")", "console_handler", "=", "CONSOLE_STREAM_HANDLER", "console_handler", ".", "setLevel", "(", "console_level", ")", "console_handler", ".", "setFormatter", "(", "color_formatter", ")", "self", ".", "addHandler", "(", "console_handler", ")", "# If needed add file handler", "if", "filename", "is", "not", "None", ":", "self", ".", "addHandler", "(", "MPRotLogHandler", "(", "filename", "=", "filename", ",", "level", "=", "file_level", ",", "no_backups", "=", "no_backups", ")", ")", "# Emit the messages saved by the pre-init logger", "for", "logg", "in", "PreInitLogHandler", ".", "get_instance", "(", ")", ".", "pre_init_logs", ":", "for", "handler", "in", "self", ".", "handlers", ":", "if", "logg", ".", "levelno", ">=", "handler", ".", "level", ":", "handler", ".", "emit", "(", "logg", ")", "PreInitLogHandler", ".", "reset_instance", "(", ")" ]
https://github.com/duerrp/pyexperiment/blob/c426565d870d944bd5b9712629d8f1ba2527c67f/pyexperiment/Logger.py#L196-L242
OctoPrint/OctoPrint
4b12b0e6f06c3abfb31b1840a0605e2de8e911d2
src/octoprint/vendor/zeroconf.py
python
DNSCache.get
(self, entry)
Gets an entry by key. Will return None if there is no matching entry.
Gets an entry by key. Will return None if there is no matching entry.
[ "Gets", "an", "entry", "by", "key", ".", "Will", "return", "None", "if", "there", "is", "no", "matching", "entry", "." ]
def get(self, entry):
    """Gets an entry by key.  Will return None if there is no
    matching entry."""
    try:
        list_ = self.cache[entry.key]
        for cached_entry in list_:
            if entry.__eq__(cached_entry):
                return cached_entry
    except (KeyError, ValueError):
        return None
[ "def", "get", "(", "self", ",", "entry", ")", ":", "try", ":", "list_", "=", "self", ".", "cache", "[", "entry", ".", "key", "]", "for", "cached_entry", "in", "list_", ":", "if", "entry", ".", "__eq__", "(", "cached_entry", ")", ":", "return", "cached_entry", "except", "(", "KeyError", ",", "ValueError", ")", ":", "return", "None" ]
https://github.com/OctoPrint/OctoPrint/blob/4b12b0e6f06c3abfb31b1840a0605e2de8e911d2/src/octoprint/vendor/zeroconf.py#L1078-L1087
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/idlelib/ParenMatch.py
python
ParenMatch.restore_event
(self, event=None)
[]
def restore_event(self, event=None):
    self.text.tag_delete("paren")
    self.deactivate_restore()
    self.counter += 1
[ "def", "restore_event", "(", "self", ",", "event", "=", "None", ")", ":", "self", ".", "text", ".", "tag_delete", "(", "\"paren\"", ")", "self", ".", "deactivate_restore", "(", ")", "self", ".", "counter", "+=", "1" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/idlelib/ParenMatch.py#L117-L120
tensorflow/tensor2tensor
2a33b152d7835af66a6d20afe7961751047e28dd
tensor2tensor/layers/message_passing_attention.py
python
_compute_edge_transforms
(node_states, depth, num_transforms, name="transform")
return x
Helper function that computes transformation for keys and values. Let B be the number of batches. Let N be the number of nodes in the graph. Let D be the size of the node hidden states. Let K be the size of the attention keys/queries (total_key_depth). Let V be the size of the attention values (total_value_depth). Let T be the total number of transforms (num_transforms). Computes the transforms for keys or values for attention. * For each node N_j and edge type t, a key K_jt of size K is computed. When an edge of type t goes from node N_j to any other node, K_jt is the key that is in the attention process. * For each node N_j and edge type t, a value V_jt of size V is computed. When an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt) produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt. Args: node_states: A tensor of shape [B, L, D] depth: An integer (K or V) num_transforms: An integer (T), name: A name for the function Returns: x: A The attention keys or values for each node and edge type (shape [B, N*T, K or V])
Helper function that computes transformation for keys and values.
[ "Helper", "function", "that", "computes", "transformation", "for", "keys", "and", "values", "." ]
def _compute_edge_transforms(node_states,
                             depth,
                             num_transforms,
                             name="transform"):
    """Helper function that computes transformation for keys and values.

    Let B be the number of batches.
    Let N be the number of nodes in the graph.
    Let D be the size of the node hidden states.
    Let K be the size of the attention keys/queries (total_key_depth).
    Let V be the size of the attention values (total_value_depth).
    Let T be the total number of transforms (num_transforms).

    Computes the transforms for keys or values for attention.
    * For each node N_j and edge type t, a key K_jt of size K is computed. When
      an edge of type t goes from node N_j to any other node, K_jt is the key
      that is in the attention process.
    * For each node N_j and edge type t, a value V_jt of size V is computed.
      When an edge of type t goes from node N_j to node N_i,
      Attention(Q_i, K_jt) produces a weight w_ijt. The message sent along this
      edge is w_ijt * V_jt.

    Args:
      node_states: A tensor of shape [B, L, D]
      depth: An integer (K or V)
      num_transforms: An integer (T),
      name: A name for the function

    Returns:
      x: A The attention keys or values for each node and edge type
        (shape [B, N*T, K or V])
    """
    node_shapes = common_layers.shape_list(node_states)
    x = common_layers.dense(
        node_states,
        depth * num_transforms,
        use_bias=False,
        name=name)

    batch = node_shapes[0]  # B.
    length = node_shapes[1]  # N.

    # Making the fourth dimension explicit by separating the vectors of size
    # K*T (in k) and V*T (in v) into two-dimensional matrices with shape [K, T]
    # (in k) and [V, T] in v.
    #
    x = tf.reshape(x, [batch, length, num_transforms, depth])

    # Flatten out the fourth dimension.
    x = tf.reshape(x, [batch, length * num_transforms, depth])

    return x
[ "def", "_compute_edge_transforms", "(", "node_states", ",", "depth", ",", "num_transforms", ",", "name", "=", "\"transform\"", ")", ":", "node_shapes", "=", "common_layers", ".", "shape_list", "(", "node_states", ")", "x", "=", "common_layers", ".", "dense", "(", "node_states", ",", "depth", "*", "num_transforms", ",", "use_bias", "=", "False", ",", "name", "=", "name", ")", "batch", "=", "node_shapes", "[", "0", "]", "# B.", "length", "=", "node_shapes", "[", "1", "]", "# N.", "# Making the fourth dimension explicit by separating the vectors of size", "# K*T (in k) and V*T (in v) into two-dimensional matrices with shape [K, T]", "# (in k) and [V, T] in v.", "#", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch", ",", "length", ",", "num_transforms", ",", "depth", "]", ")", "# Flatten out the fourth dimension.", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch", ",", "length", "*", "num_transforms", ",", "depth", "]", ")", "return", "x" ]
https://github.com/tensorflow/tensor2tensor/blob/2a33b152d7835af66a6d20afe7961751047e28dd/tensor2tensor/layers/message_passing_attention.py#L251-L301
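The two reshapes amount to: project each node state to T vectors of size depth, then lay those T vectors out along the length axis, giving one key/value per (node, edge type) pair. A numpy sketch of the same shape bookkeeping (illustrative only; the real code uses a learned dense layer for the projection):

import numpy as np

B, N, T, depth = 2, 5, 3, 4
x = np.random.rand(B, N, depth * T)   # stand-in for the dense-layer output
x = x.reshape(B, N, T, depth)         # make the transform axis explicit
x = x.reshape(B, N * T, depth)        # one vector per (node, edge type)
print(x.shape)                        # (2, 15, 4)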
clalancette/oz
1ab43e88033efa774815c6ebb59ae2841f2980e9
oz/OpenSUSE.py
python
get_class
(tdl, config, auto, output_disk=None, netdev=None, diskbus=None, macaddress=None)
Factory method for OpenSUSE installs.
Factory method for OpenSUSE installs.
[ "Factory", "method", "for", "OpenSUSE", "installs", "." ]
def get_class(tdl, config, auto, output_disk=None, netdev=None, diskbus=None,
              macaddress=None):
    """
    Factory method for OpenSUSE installs.
    """
    if tdl.update in version_to_config.keys():
        return OpenSUSEGuest(tdl, config, auto, output_disk, netdev, diskbus,
                             macaddress)
[ "def", "get_class", "(", "tdl", ",", "config", ",", "auto", ",", "output_disk", "=", "None", ",", "netdev", "=", "None", ",", "diskbus", "=", "None", ",", "macaddress", "=", "None", ")", ":", "if", "tdl", ".", "update", "in", "version_to_config", ".", "keys", "(", ")", ":", "return", "OpenSUSEGuest", "(", "tdl", ",", "config", ",", "auto", ",", "output_disk", ",", "netdev", ",", "diskbus", ",", "macaddress", ")" ]
https://github.com/clalancette/oz/blob/1ab43e88033efa774815c6ebb59ae2841f2980e9/oz/OpenSUSE.py#L538-L545
arrow-py/arrow
e43524088f78efacb425524445a886600660d854
arrow/locales.py
python
HebrewLocale.describe_multi
( self, timeframes: Sequence[Tuple[TimeFrameLiteral, Union[int, float]]], only_distance: bool = False, )
return humanized
Describes a delta within multiple timeframes in plain language. In Hebrew, the and word behaves a bit differently. :param timeframes: a list of string, quantity pairs each representing a timeframe and delta. :param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords
Describes a delta within multiple timeframes in plain language. In Hebrew, the and word behaves a bit differently.
[ "Describes", "a", "delta", "within", "multiple", "timeframes", "in", "plain", "language", ".", "In", "Hebrew", "the", "and", "word", "behaves", "a", "bit", "differently", "." ]
def describe_multi(
    self,
    timeframes: Sequence[Tuple[TimeFrameLiteral, Union[int, float]]],
    only_distance: bool = False,
) -> str:
    """Describes a delta within multiple timeframes in plain language.
    In Hebrew, the and word behaves a bit differently.

    :param timeframes: a list of string, quantity pairs each representing a
        timeframe and delta.
    :param only_distance: return only distance eg: "2 hours and 11 seconds"
        without "in" or "ago" keywords
    """

    humanized = ""
    for index, (timeframe, delta) in enumerate(timeframes):
        last_humanized = self._format_timeframe(timeframe, trunc(delta))
        if index == 0:
            humanized = last_humanized
        elif index == len(timeframes) - 1:  # Must have at least 2 items
            humanized += " " + self.and_word
            if last_humanized[0].isdecimal():
                humanized += "־"
            humanized += last_humanized
        else:  # Don't add for the last one
            humanized += ", " + last_humanized

    if not only_distance:
        humanized = self._format_relative(humanized, timeframe, trunc(delta))

    return humanized
[ "def", "describe_multi", "(", "self", ",", "timeframes", ":", "Sequence", "[", "Tuple", "[", "TimeFrameLiteral", ",", "Union", "[", "int", ",", "float", "]", "]", "]", ",", "only_distance", ":", "bool", "=", "False", ",", ")", "->", "str", ":", "humanized", "=", "\"\"", "for", "index", ",", "(", "timeframe", ",", "delta", ")", "in", "enumerate", "(", "timeframes", ")", ":", "last_humanized", "=", "self", ".", "_format_timeframe", "(", "timeframe", ",", "trunc", "(", "delta", ")", ")", "if", "index", "==", "0", ":", "humanized", "=", "last_humanized", "elif", "index", "==", "len", "(", "timeframes", ")", "-", "1", ":", "# Must have at least 2 items", "humanized", "+=", "\" \"", "+", "self", ".", "and_word", "if", "last_humanized", "[", "0", "]", ".", "isdecimal", "(", ")", ":", "humanized", "+=", "\"־\"", "humanized", "+=", "last_humanized", "else", ":", "# Don't add for the last one", "humanized", "+=", "\", \"", "+", "last_humanized", "if", "not", "only_distance", ":", "humanized", "=", "self", ".", "_format_relative", "(", "humanized", ",", "timeframe", ",", "trunc", "(", "delta", ")", ")", "return", "humanized" ]
https://github.com/arrow-py/arrow/blob/e43524088f78efacb425524445a886600660d854/arrow/locales.py#L3489-L3517
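describe_multi is normally reached through the public humanize() API rather than called directly. A sketch, assuming an arrow version (1.x era, as suggested by the type hints) where granularity accepts a list and the "he" locale is installed:

import arrow

earlier = arrow.utcnow().shift(hours=-2, minutes=-11)
print(earlier.humanize(locale="he", granularity=["hour", "minute"]))
# Hebrew output joins the final timeframe with the and-word, inserting
# the maqaf (־) before a leading digit, as implemented above.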
OpenShot/openshot-qt
bbd2dd040a4e2a6120791e6c65ae0ddf212cb73d
src/classes/info.py
python
get_default_path
(varname)
return _path_defaults.get(varname, None)
Return the default value of the named info.FOO_PATH attribute, even if it's been modified
Return the default value of the named info.FOO_PATH attribute, even if it's been modified
[ "Return", "the", "default", "value", "of", "the", "named", "info", ".", "FOO_PATH", "attribute", "even", "if", "it", "s", "been", "modified" ]
def get_default_path(varname):
    """Return the default value of the named info.FOO_PATH attribute, even
    if it's been modified"""
    return _path_defaults.get(varname, None)
[ "def", "get_default_path", "(", "varname", ")", ":", "return", "_path_defaults", ".", "get", "(", "varname", ",", "None", ")" ]
https://github.com/OpenShot/openshot-qt/blob/bbd2dd040a4e2a6120791e6c65ae0ddf212cb73d/src/classes/info.py#L223-L226
huawei-noah/vega
d9f13deede7f2b584e4b1d32ffdb833856129989
vega/datasets/transforms/Compose.py
python
ComposeAll.__init__
(self, transforms)
Construct the Compose class.
Construct the Compose class.
[ "Construct", "the", "Compose", "class", "." ]
def __init__(self, transforms):
    """Construct the Compose class."""
    self.transforms = transforms
[ "def", "__init__", "(", "self", ",", "transforms", ")", ":", "self", ".", "transforms", "=", "transforms" ]
https://github.com/huawei-noah/vega/blob/d9f13deede7f2b584e4b1d32ffdb833856129989/vega/datasets/transforms/Compose.py#L46-L48
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/plugwise/binary_sensor.py
python
SmileBinarySensor.__init__
(self, api, coordinator, name, dev_id, binary_sensor)
Initialise the binary_sensor.
Initialise the binary_sensor.
[ "Initialise", "the", "binary_sensor", "." ]
def __init__(self, api, coordinator, name, dev_id, binary_sensor):
    """Initialise the binary_sensor."""
    super().__init__(api, coordinator, name, dev_id)

    self._binary_sensor = binary_sensor

    self._icon = None
    self._is_on = False

    if dev_id == self._api.heater_id:
        self._entity_name = "Auxiliary"

    sensorname = binary_sensor.replace("_", " ").title()
    self._name = f"{self._entity_name} {sensorname}"

    if dev_id == self._api.gateway_id:
        self._entity_name = f"Smile {self._entity_name}"

    self._unique_id = f"{dev_id}-{binary_sensor}"
[ "def", "__init__", "(", "self", ",", "api", ",", "coordinator", ",", "name", ",", "dev_id", ",", "binary_sensor", ")", ":", "super", "(", ")", ".", "__init__", "(", "api", ",", "coordinator", ",", "name", ",", "dev_id", ")", "self", ".", "_binary_sensor", "=", "binary_sensor", "self", ".", "_icon", "=", "None", "self", ".", "_is_on", "=", "False", "if", "dev_id", "==", "self", ".", "_api", ".", "heater_id", ":", "self", ".", "_entity_name", "=", "\"Auxiliary\"", "sensorname", "=", "binary_sensor", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", ".", "title", "(", ")", "self", ".", "_name", "=", "f\"{self._entity_name} {sensorname}\"", "if", "dev_id", "==", "self", ".", "_api", ".", "gateway_id", ":", "self", ".", "_entity_name", "=", "f\"Smile {self._entity_name}\"", "self", ".", "_unique_id", "=", "f\"{dev_id}-{binary_sensor}\"" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/plugwise/binary_sensor.py#L78-L96
1012598167/flask_mongodb_game
60c7e0351586656ec38f851592886338e50b4110
python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/_backport/tarfile.py
python
TarFile.extractall
(self, path=".", members=None)
Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers().
Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers().
[ "Extract", "all", "members", "from", "the", "archive", "to", "the", "current", "working", "directory", "and", "set", "owner", "modification", "time", "and", "permissions", "on", "directories", "afterwards", ".", "path", "specifies", "a", "different", "directory", "to", "extract", "to", ".", "members", "is", "optional", "and", "must", "be", "a", "subset", "of", "the", "list", "returned", "by", "getmembers", "()", "." ]
def extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
       directory and set owner, modification time and permissions on
       directories afterwards. `path' specifies a different directory
       to extract to. `members' is optional and must be a subset of the
       list returned by getmembers().
    """
    directories = []

    if members is None:
        members = self

    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 0o700
        # Do not set_attrs directories, as we will do that further down
        self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())

    # Reverse sort directories.
    directories.sort(key=lambda a: a.name)
    directories.reverse()

    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError as e:
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
[ "def", "extractall", "(", "self", ",", "path", "=", "\".\"", ",", "members", "=", "None", ")", ":", "directories", "=", "[", "]", "if", "members", "is", "None", ":", "members", "=", "self", "for", "tarinfo", "in", "members", ":", "if", "tarinfo", ".", "isdir", "(", ")", ":", "# Extract directories with a safe mode.", "directories", ".", "append", "(", "tarinfo", ")", "tarinfo", "=", "copy", ".", "copy", "(", "tarinfo", ")", "tarinfo", ".", "mode", "=", "0o700", "# Do not set_attrs directories, as we will do that further down", "self", ".", "extract", "(", "tarinfo", ",", "path", ",", "set_attrs", "=", "not", "tarinfo", ".", "isdir", "(", ")", ")", "# Reverse sort directories.", "directories", ".", "sort", "(", "key", "=", "lambda", "a", ":", "a", ".", "name", ")", "directories", ".", "reverse", "(", ")", "# Set correct owner, mtime and filemode on directories.", "for", "tarinfo", "in", "directories", ":", "dirpath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "tarinfo", ".", "name", ")", "try", ":", "self", ".", "chown", "(", "tarinfo", ",", "dirpath", ")", "self", ".", "utime", "(", "tarinfo", ",", "dirpath", ")", "self", ".", "chmod", "(", "tarinfo", ",", "dirpath", ")", "except", "ExtractError", "as", "e", ":", "if", "self", ".", "errorlevel", ">", "1", ":", "raise", "else", ":", "self", ".", "_dbg", "(", "1", ",", "\"tarfile: %s\"", "%", "e", ")" ]
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/_backport/tarfile.py#L2126-L2162
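Typical usage of this TarFile backport: directories are first created with mode 0o700 so files can be written into them regardless of the archive's recorded permissions, which are then applied in reverse depth order. As with the stdlib tarfile of that era, members are extracted wherever their names point, so for untrusted archives the member list should be vetted first, e.g.:

tar = TarFile.open("archive.tar.gz")
try:
    # Illustrative guard against absolute paths and parent-dir escapes.
    safe = [m for m in tar.getmembers()
            if not (m.name.startswith("/") or ".." in m.name.split("/"))]
    tar.extractall(path="out", members=safe)
finally:
    tar.close()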
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_adm_csr.py
python
Yedit.run_ansible
(params)
return {'failed': True, 'msg': 'Unkown state passed'}
perform the idempotent crud operations
perform the idempotent crud operations
[ "perform", "the", "idempotent", "crud", "operations" ]
def run_ansible(params):
    '''perform the idempotent crud operations'''
    yamlfile = Yedit(filename=params['src'],
                     backup=params['backup'],
                     content_type=params['content_type'],
                     backup_ext=params['backup_ext'],
                     separator=params['separator'])

    state = params['state']

    if params['src']:
        rval = yamlfile.load()

        if yamlfile.yaml_dict is None and state != 'present':
            return {'failed': True,
                    'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it is has correct permissions, and is valid yaml.'}

    if state == 'list':
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])
            yamlfile.yaml_dict = content

        if params['key']:
            rval = yamlfile.get(params['key'])

        return {'changed': False, 'result': rval, 'state': state}

    elif state == 'absent':
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])
            yamlfile.yaml_dict = content

        if params['update']:
            rval = yamlfile.pop(params['key'], params['value'])
        else:
            rval = yamlfile.delete(params['key'], params['index'], params['value'])

        if rval[0] and params['src']:
            yamlfile.write()

        return {'changed': rval[0], 'result': rval[1], 'state': state}

    elif state == 'present':
        # check if content is different than what is in the file
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])

            # We had no edits to make and the contents are the same
            if yamlfile.yaml_dict == content and \
               params['value'] is None:
                return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

            yamlfile.yaml_dict = content

        # If we were passed a key, value then
        # we enapsulate it in a list and process it
        # Key, Value passed to the module : Converted to Edits list #
        edits = []
        _edit = {}
        if params['value'] is not None:
            _edit['value'] = params['value']
            _edit['value_type'] = params['value_type']
            _edit['key'] = params['key']

            if params['update']:
                _edit['action'] = 'update'
                _edit['curr_value'] = params['curr_value']
                _edit['curr_value_format'] = params['curr_value_format']
                _edit['index'] = params['index']

            elif params['append']:
                _edit['action'] = 'append'

            edits.append(_edit)

        elif params['edits'] is not None:
            edits = params['edits']

        if edits:
            results = Yedit.process_edits(edits, yamlfile)

            # if there were changes and a src provided to us we need to write
            if results['changed'] and params['src']:
                yamlfile.write()

            return {'changed': results['changed'], 'result': results['results'], 'state': state}

        # no edits to make
        if params['src']:
            # pylint: disable=redefined-variable-type
            rval = yamlfile.write()
            return {'changed': rval[0],
                    'result': rval[1],
                    'state': state}

        # We were passed content but no src, key or value, or edits. Return contents in memory
        return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
    return {'failed': True, 'msg': 'Unkown state passed'}
[ "def", "run_ansible", "(", "params", ")", ":", "yamlfile", "=", "Yedit", "(", "filename", "=", "params", "[", "'src'", "]", ",", "backup", "=", "params", "[", "'backup'", "]", ",", "content_type", "=", "params", "[", "'content_type'", "]", ",", "backup_ext", "=", "params", "[", "'backup_ext'", "]", ",", "separator", "=", "params", "[", "'separator'", "]", ")", "state", "=", "params", "[", "'state'", "]", "if", "params", "[", "'src'", "]", ":", "rval", "=", "yamlfile", ".", "load", "(", ")", "if", "yamlfile", ".", "yaml_dict", "is", "None", "and", "state", "!=", "'present'", ":", "return", "{", "'failed'", ":", "True", ",", "'msg'", ":", "'Error opening file [{}]. Verify that the '", ".", "format", "(", "params", "[", "'src'", "]", ")", "+", "'file exists, that it is has correct permissions, and is valid yaml.'", "}", "if", "state", "==", "'list'", ":", "if", "params", "[", "'content'", "]", ":", "content", "=", "Yedit", ".", "parse_value", "(", "params", "[", "'content'", "]", ",", "params", "[", "'content_type'", "]", ")", "yamlfile", ".", "yaml_dict", "=", "content", "if", "params", "[", "'key'", "]", ":", "rval", "=", "yamlfile", ".", "get", "(", "params", "[", "'key'", "]", ")", "return", "{", "'changed'", ":", "False", ",", "'result'", ":", "rval", ",", "'state'", ":", "state", "}", "elif", "state", "==", "'absent'", ":", "if", "params", "[", "'content'", "]", ":", "content", "=", "Yedit", ".", "parse_value", "(", "params", "[", "'content'", "]", ",", "params", "[", "'content_type'", "]", ")", "yamlfile", ".", "yaml_dict", "=", "content", "if", "params", "[", "'update'", "]", ":", "rval", "=", "yamlfile", ".", "pop", "(", "params", "[", "'key'", "]", ",", "params", "[", "'value'", "]", ")", "else", ":", "rval", "=", "yamlfile", ".", "delete", "(", "params", "[", "'key'", "]", ",", "params", "[", "'index'", "]", ",", "params", "[", "'value'", "]", ")", "if", "rval", "[", "0", "]", "and", "params", "[", "'src'", "]", ":", "yamlfile", ".", "write", "(", ")", "return", "{", "'changed'", ":", "rval", "[", "0", "]", ",", "'result'", ":", "rval", "[", "1", "]", ",", "'state'", ":", "state", "}", "elif", "state", "==", "'present'", ":", "# check if content is different than what is in the file", "if", "params", "[", "'content'", "]", ":", "content", "=", "Yedit", ".", "parse_value", "(", "params", "[", "'content'", "]", ",", "params", "[", "'content_type'", "]", ")", "# We had no edits to make and the contents are the same", "if", "yamlfile", ".", "yaml_dict", "==", "content", "and", "params", "[", "'value'", "]", "is", "None", ":", "return", "{", "'changed'", ":", "False", ",", "'result'", ":", "yamlfile", ".", "yaml_dict", ",", "'state'", ":", "state", "}", "yamlfile", ".", "yaml_dict", "=", "content", "# If we were passed a key, value then", "# we enapsulate it in a list and process it", "# Key, Value passed to the module : Converted to Edits list #", "edits", "=", "[", "]", "_edit", "=", "{", "}", "if", "params", "[", "'value'", "]", "is", "not", "None", ":", "_edit", "[", "'value'", "]", "=", "params", "[", "'value'", "]", "_edit", "[", "'value_type'", "]", "=", "params", "[", "'value_type'", "]", "_edit", "[", "'key'", "]", "=", "params", "[", "'key'", "]", "if", "params", "[", "'update'", "]", ":", "_edit", "[", "'action'", "]", "=", "'update'", "_edit", "[", "'curr_value'", "]", "=", "params", "[", "'curr_value'", "]", "_edit", "[", "'curr_value_format'", "]", "=", "params", "[", "'curr_value_format'", "]", "_edit", "[", "'index'", "]", "=", "params", "[", "'index'", "]", "elif", 
"params", "[", "'append'", "]", ":", "_edit", "[", "'action'", "]", "=", "'append'", "edits", ".", "append", "(", "_edit", ")", "elif", "params", "[", "'edits'", "]", "is", "not", "None", ":", "edits", "=", "params", "[", "'edits'", "]", "if", "edits", ":", "results", "=", "Yedit", ".", "process_edits", "(", "edits", ",", "yamlfile", ")", "# if there were changes and a src provided to us we need to write", "if", "results", "[", "'changed'", "]", "and", "params", "[", "'src'", "]", ":", "yamlfile", ".", "write", "(", ")", "return", "{", "'changed'", ":", "results", "[", "'changed'", "]", ",", "'result'", ":", "results", "[", "'results'", "]", ",", "'state'", ":", "state", "}", "# no edits to make", "if", "params", "[", "'src'", "]", ":", "# pylint: disable=redefined-variable-type", "rval", "=", "yamlfile", ".", "write", "(", ")", "return", "{", "'changed'", ":", "rval", "[", "0", "]", ",", "'result'", ":", "rval", "[", "1", "]", ",", "'state'", ":", "state", "}", "# We were passed content but no src, key or value, or edits. Return contents in memory", "return", "{", "'changed'", ":", "False", ",", "'result'", ":", "yamlfile", ".", "yaml_dict", ",", "'state'", ":", "state", "}", "return", "{", "'failed'", ":", "True", ",", "'msg'", ":", "'Unkown state passed'", "}" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_adm_csr.py#L741-L839
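A minimal usage sketch for the run_ansible entry point above. The params dict mirrors every key the function reads; the file path and key/value are hypothetical, and it assumes run_ansible (plus the Yedit class it drives) is importable from the vendored module linked above.

```python
# Hypothetical params for an idempotent 'present' edit of one YAML key.
params = {
    'src': '/tmp/example.yml',      # hypothetical YAML file
    'backup': False,
    'backup_ext': '.bak',
    'content_type': 'yaml',
    'separator': '.',
    'state': 'present',             # one of: 'list', 'absent', 'present'
    'content': None,
    'key': 'spec.replicas',         # dotted path into the document
    'value': 3,
    'value_type': '',
    'update': False,
    'append': False,
    'index': None,
    'curr_value': None,
    'curr_value_format': 'yaml',
    'edits': None,
}

result = run_ansible(params)
print(result['changed'], result['result'])
```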
openstack/sahara
c4f4d29847d5bcca83d49ef7e9a3378458462a79
sahara/service/castellan/sahara_key_manager.py
python
SaharaKeyManager.get
(self, context, key_id, **kwargs)
return key.Passphrase(passphrase=key_id)
get a key since sahara is not actually storing key UUIDs, the key_id passed to this function should actually be the key payload. this function will simply return a new SaharaKey based on that value.
get a key
[ "get", "a", "key" ]
def get(self, context, key_id, **kwargs): """get a key since sahara is not actually storing key UUIDs, the key_id passed to this function should actually be the key payload. this function will simply return a new SaharaKey based on that value. """ return key.Passphrase(passphrase=key_id)
[ "def", "get", "(", "self", ",", "context", ",", "key_id", ",", "*", "*", "kwargs", ")", ":", "return", "key", ".", "Passphrase", "(", "passphrase", "=", "key_id", ")" ]
https://github.com/openstack/sahara/blob/c4f4d29847d5bcca83d49ef7e9a3378458462a79/sahara/service/castellan/sahara_key_manager.py#L61-L68
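Because SaharaKeyManager never persists keys, get() treats the "key id" as the key payload itself. A small sketch of that pass-through behaviour, assuming the manager can be constructed without arguments:

```python
from sahara.service.castellan.sahara_key_manager import SaharaKeyManager

km = SaharaKeyManager()
k = km.get(context=None, key_id='s3cr3t-passphrase')
print(k.get_encoded())  # the original passphrase comes straight back
```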
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/combinatorics/permutations.py
python
Permutation.__invert__
(self)
return _af_new(_af_invert(self._array_form))
Return the inverse of the permutation. A permutation multiplied by its inverse is the identity permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([[2,0], [3,1]]) >>> ~p Permutation([2, 3, 0, 1]) >>> _ == p**-1 True >>> p*~p == ~p*p == Permutation([0, 1, 2, 3]) True
Return the inverse of the permutation.
[ "Return", "the", "inverse", "of", "the", "permutation", "." ]
def __invert__(self): """ Return the inverse of the permutation. A permutation multiplied by its inverse is the identity permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([[2,0], [3,1]]) >>> ~p Permutation([2, 3, 0, 1]) >>> _ == p**-1 True >>> p*~p == ~p*p == Permutation([0, 1, 2, 3]) True """ return _af_new(_af_invert(self._array_form))
[ "def", "__invert__", "(", "self", ")", ":", "return", "_af_new", "(", "_af_invert", "(", "self", ".", "_array_form", ")", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/combinatorics/permutations.py#L1449-L1467
inkandswitch/livebook
93c8d467734787366ad084fc3566bf5cbe249c51
public/pypyjs/modules/inspect.py
python
getdoc
(object)
return cleandoc(doc)
Get the documentation string for an object. All tabs are expanded to spaces. To clean up docstrings that are indented to line up with blocks of code, any whitespace that can be uniformly removed from the second line onwards is removed.
Get the documentation string for an object.
[ "Get", "the", "documentation", "string", "for", "an", "object", "." ]
def getdoc(object): """Get the documentation string for an object. All tabs are expanded to spaces. To clean up docstrings that are indented to line up with blocks of code, any whitespace that can be uniformly removed from the second line onwards is removed.""" try: doc = object.__doc__ except AttributeError: return None if not isinstance(doc, types.StringTypes): return None return cleandoc(doc)
[ "def", "getdoc", "(", "object", ")", ":", "try", ":", "doc", "=", "object", ".", "__doc__", "except", "AttributeError", ":", "return", "None", "if", "not", "isinstance", "(", "doc", ",", "types", ".", "StringTypes", ")", ":", "return", "None", "return", "cleandoc", "(", "doc", ")" ]
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/inspect.py#L355-L367
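A quick illustration of the cleanup getdoc performs: the uniform indentation of the second line onwards is stripped. The standard-library inspect module behaves the same way as the bundled Python 2 copy above:

```python
import inspect

def sample():
    """First line.
        Indented continuation.
    """

print(inspect.getdoc(sample))
# First line.
# Indented continuation.
```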
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/jc-weather/requests/api.py
python
head
(url, **kwargs)
return request('head', url, **kwargs)
Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes.
Sends a HEAD request. Returns :class:`Response` object.
[ "Sends", "a", "HEAD", "request", ".", "Returns", ":", "class", ":", "Response", "object", "." ]
def head(url, **kwargs): """Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('head', url, **kwargs)
[ "def", "head", "(", "url", ",", "*", "*", "kwargs", ")", ":", "return", "request", "(", "'head'", ",", "url", ",", "*", "*", "kwargs", ")" ]
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/jc-weather/requests/api.py#L69-L76
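The head() helper is exposed as requests.head; a typical use is fetching headers without downloading a body (example.com is a placeholder URL):

```python
import requests

r = requests.head('https://example.com')
print(r.status_code)
print(r.headers.get('Content-Type'))
```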
googleapis/python-dialogflow
e48ea001b7c8a4a5c1fe4b162bad49ea397458e9
google/cloud/dialogflow_v2/services/entity_types/transports/grpc.py
python
EntityTypesGrpcTransport.close
(self)
[]
def close(self): self.grpc_channel.close()
[ "def", "close", "(", "self", ")", ":", "self", ".", "grpc_channel", ".", "close", "(", ")" ]
https://github.com/googleapis/python-dialogflow/blob/e48ea001b7c8a4a5c1fe4b162bad49ea397458e9/google/cloud/dialogflow_v2/services/entity_types/transports/grpc.py#L603-L604
pajbot/pajbot
d387a48b799ae600c0dcf42788f7d3667ced275c
pajbot/managers/emote.py
python
GenericChannelEmoteManager.match_global_emote
(self, word: str)
return self.global_lookup_table.get(word, None)
Attempts to find a matching emote equaling the given word from the global emotes known to this manager. Returns None if no emote was matched
Attempts to find a matching emote equaling the given word from the global emotes known to this manager. Returns None if no emote was matched
[ "Attempts", "to", "find", "a", "matching", "emote", "equaling", "the", "given", "word", "from", "the", "global", "emotes", "known", "to", "this", "manager", ".", "Returns", "None", "if", "no", "emote", "was", "matched" ]
def match_global_emote(self, word: str) -> Optional[Emote]: """Attempts to find a matching emote equaling the given word from the global emotes known to this manager. Returns None if no emote was matched""" return self.global_lookup_table.get(word, None)
[ "def", "match_global_emote", "(", "self", ",", "word", ":", "str", ")", "->", "Optional", "[", "Emote", "]", ":", "return", "self", ".", "global_lookup_table", ".", "get", "(", "word", ",", "None", ")" ]
https://github.com/pajbot/pajbot/blob/d387a48b799ae600c0dcf42788f7d3667ced275c/pajbot/managers/emote.py#L101-L104
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
dist/lib/python2.7/numbers.py
python
Real.imag
(self)
return 0
Real numbers have no imaginary component.
Real numbers have no imaginary component.
[ "Real", "numbers", "have", "no", "imaginary", "component", "." ]
def imag(self): """Real numbers have no imaginary component.""" return 0
[ "def", "imag", "(", "self", ")", ":", "return", "0" ]
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/numbers.py#L259-L261
TropComplique/FaceBoxes-tensorflow
0dde35eda1cb3dab6586b94c583029162ec37aa5
src/training_target_creation.py
python
_create_targets
(anchors, groundtruth_boxes, matches)
return reg_targets
Returns regression targets for each anchor. Arguments: anchors: a float tensor with shape [num_anchors, 4]. groundtruth_boxes: a float tensor with shape [N, 4]. matches: an int tensor with shape [num_anchors]. Returns: reg_targets: a float tensor with shape [num_anchors, 4].
Returns regression targets for each anchor.
[ "Returns", "regression", "targets", "for", "each", "anchor", "." ]
def _create_targets(anchors, groundtruth_boxes, matches): """Returns regression targets for each anchor. Arguments: anchors: a float tensor with shape [num_anchors, 4]. groundtruth_boxes: a float tensor with shape [N, 4]. matches: an int tensor with shape [num_anchors]. Returns: reg_targets: a float tensor with shape [num_anchors, 4]. """ matched_anchor_indices = tf.where(tf.greater_equal(matches, 0)) # shape [num_matches, 1] matched_anchor_indices = tf.squeeze(matched_anchor_indices, axis=1) matched_gt_indices = tf.gather(matches, matched_anchor_indices) # shape [num_matches] matched_anchors = tf.gather(anchors, matched_anchor_indices) # shape [num_matches, 4] matched_gt_boxes = tf.gather(groundtruth_boxes, matched_gt_indices) # shape [num_matches, 4] matched_reg_targets = encode(matched_gt_boxes, matched_anchors) # shape [num_matches, 4] unmatched_anchor_indices = tf.where(tf.equal(matches, -1)) unmatched_anchor_indices = tf.squeeze(unmatched_anchor_indices, axis=1) # it has shape [num_anchors - num_matches] unmatched_reg_targets = tf.zeros([tf.size(unmatched_anchor_indices), 4]) # it has shape [num_anchors - num_matches, 4] matched_anchor_indices = tf.to_int32(matched_anchor_indices) unmatched_anchor_indices = tf.to_int32(unmatched_anchor_indices) reg_targets = tf.dynamic_stitch( [matched_anchor_indices, unmatched_anchor_indices], [matched_reg_targets, unmatched_reg_targets] ) return reg_targets
[ "def", "_create_targets", "(", "anchors", ",", "groundtruth_boxes", ",", "matches", ")", ":", "matched_anchor_indices", "=", "tf", ".", "where", "(", "tf", ".", "greater_equal", "(", "matches", ",", "0", ")", ")", "# shape [num_matches, 1]", "matched_anchor_indices", "=", "tf", ".", "squeeze", "(", "matched_anchor_indices", ",", "axis", "=", "1", ")", "matched_gt_indices", "=", "tf", ".", "gather", "(", "matches", ",", "matched_anchor_indices", ")", "# shape [num_matches]", "matched_anchors", "=", "tf", ".", "gather", "(", "anchors", ",", "matched_anchor_indices", ")", "# shape [num_matches, 4]", "matched_gt_boxes", "=", "tf", ".", "gather", "(", "groundtruth_boxes", ",", "matched_gt_indices", ")", "# shape [num_matches, 4]", "matched_reg_targets", "=", "encode", "(", "matched_gt_boxes", ",", "matched_anchors", ")", "# shape [num_matches, 4]", "unmatched_anchor_indices", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "matches", ",", "-", "1", ")", ")", "unmatched_anchor_indices", "=", "tf", ".", "squeeze", "(", "unmatched_anchor_indices", ",", "axis", "=", "1", ")", "# it has shape [num_anchors - num_matches]", "unmatched_reg_targets", "=", "tf", ".", "zeros", "(", "[", "tf", ".", "size", "(", "unmatched_anchor_indices", ")", ",", "4", "]", ")", "# it has shape [num_anchors - num_matches, 4]", "matched_anchor_indices", "=", "tf", ".", "to_int32", "(", "matched_anchor_indices", ")", "unmatched_anchor_indices", "=", "tf", ".", "to_int32", "(", "unmatched_anchor_indices", ")", "reg_targets", "=", "tf", ".", "dynamic_stitch", "(", "[", "matched_anchor_indices", ",", "unmatched_anchor_indices", "]", ",", "[", "matched_reg_targets", ",", "unmatched_reg_targets", "]", ")", "return", "reg_targets" ]
https://github.com/TropComplique/FaceBoxes-tensorflow/blob/0dde35eda1cb3dab6586b94c583029162ec37aa5/src/training_target_creation.py#L83-L115
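A NumPy sketch of the stitch pattern used above: matched anchors get encoded regression targets, unmatched anchors get zeros, and both sets are scattered back into anchor order (the role tf.dynamic_stitch plays). The encode() here is a stand-in, not the real box coder from the repo:

```python
import numpy as np

def encode(gt_boxes, anchors):
    return gt_boxes - anchors  # placeholder box coder

anchors = np.zeros((4, 4), dtype=np.float32)
gt = np.array([[1, 1, 2, 2], [3, 3, 4, 4]], dtype=np.float32)
matches = np.array([1, -1, 0, -1])  # per-anchor gt index, -1 = unmatched

reg_targets = np.zeros((4, 4), dtype=np.float32)  # zeros for unmatched rows
matched = np.where(matches >= 0)[0]
reg_targets[matched] = encode(gt[matches[matched]], anchors[matched])
print(reg_targets)
```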
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/subprocess.py
python
Popen.poll
(self)
return self._internal_poll()
[]
def poll(self): return self._internal_poll()
[ "def", "poll", "(", "self", ")", ":", "return", "self", ".", "_internal_poll", "(", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/subprocess.py#L822-L823
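poll() is the non-blocking counterpart of wait(): it returns None while the child is still running and the exit code once it has finished. A small POSIX-only demo (uses the sleep command):

```python
import subprocess
import time

p = subprocess.Popen(['sleep', '1'])
print(p.poll())   # None: still running
time.sleep(1.5)
print(p.poll())   # 0: exited
```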
anxiangsir/deeplabv3-Tensorflow
f7f13da2705bacfab1cf4afe37923be5e914bac3
color_utils.py
python
color_predicts
(img)
return color
Color the class map
Color the class map
[ "给class图上色" ]
def color_predicts(img): ''' Color the class map ''' # img = cv2.imread(label_path,cv2.CAP_MODE_GRAY) color = np.ones([img.shape[0], img.shape[1], 3]) color[img==0] = [255, 255, 255] # other, white, 0 color[img==1] = [0, 255, 0] # vegetation, green, 1 color[img==2] = [0, 0, 0] # road, black, 2 color[img==3] = [131, 139, 139] # building, yellow, 3 color[img==4] = [139, 69, 19] # water, blue, 4 return color
[ "def", "color_predicts", "(", "img", ")", ":", "# img = cv2.imread(label_path,cv2.CAP_MODE_GRAY)", "color", "=", "np", ".", "ones", "(", "[", "img", ".", "shape", "[", "0", "]", ",", "img", ".", "shape", "[", "1", "]", ",", "3", "]", ")", "color", "[", "img", "==", "0", "]", "=", "[", "255", ",", "255", ",", "255", "]", "#其他,白色,0", "color", "[", "img", "==", "1", "]", "=", "[", "0", ",", "255", ",", "0", "]", "#植被,绿色,1", "color", "[", "img", "==", "2", "]", "=", "[", "0", ",", "0", ",", "0", "]", "#道路,黑色,2", "color", "[", "img", "==", "3", "]", "=", "[", "131", ",", "139", ",", "139", "]", "#建筑,黄色,3", "color", "[", "img", "==", "4", "]", "=", "[", "139", ",", "69", ",", "19", "]", "#水体,蓝色,4", "return", "color" ]
https://github.com/anxiangsir/deeplabv3-Tensorflow/blob/f7f13da2705bacfab1cf4afe37923be5e914bac3/color_utils.py#L5-L19
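A tiny demo of color_predicts on a 2x3 map of class ids, assuming the function above is in scope together with numpy as np:

```python
import numpy as np

img = np.array([[0, 1, 2],
                [3, 4, 0]])
color = color_predicts(img)
print(color.shape)   # (2, 3, 3)
print(color[0, 1])   # [  0. 255.   0.] -> vegetation, green
```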
rougier/ten-rules
fc1cce48f19a3ca85986a509b62493b3be7926b4
projections.py
python
polar_to_logpolar
(rho, theta)
return x, y
Polar to logpolar coordinates.
Polar to logpolar coordinates.
[ "Polar", "to", "logpolar", "coordinates", "." ]
def polar_to_logpolar(rho, theta): ''' Polar to logpolar coordinates. ''' # Shift in the SC mapping function in deg A = 3.0 # Collicular magnification along u axe in mm/rad Bx = 1.4 # Collicular magnification along v axe in mm/rad By = 1.8 xmin, xmax = 0.0, 4.80743279742 ymin, ymax = -2.76745559565, 2.76745559565 rho = rho*90.0 x = Bx*np.log(np.sqrt(rho*rho+2*A*rho*np.cos(theta)+A*A)/A) y = By*np.arctan(rho*np.sin(theta)/(rho*np.cos(theta)+A)) x = (x-xmin)/(xmax-xmin) y = (y-ymin)/(ymax-ymin) return x, y
[ "def", "polar_to_logpolar", "(", "rho", ",", "theta", ")", ":", "# Shift in the SC mapping function in deg", "A", "=", "3.0", "# Collicular magnification along u axe in mm/rad", "Bx", "=", "1.4", "# Collicular magnification along v axe in mm/rad", "By", "=", "1.8", "xmin", ",", "xmax", "=", "0.0", ",", "4.80743279742", "ymin", ",", "ymax", "=", "-", "2.76745559565", ",", "2.76745559565", "rho", "=", "rho", "*", "90.0", "x", "=", "Bx", "*", "np", ".", "log", "(", "np", ".", "sqrt", "(", "rho", "*", "rho", "+", "2", "*", "A", "*", "rho", "*", "np", ".", "cos", "(", "theta", ")", "+", "A", "*", "A", ")", "/", "A", ")", "y", "=", "By", "*", "np", ".", "arctan", "(", "rho", "*", "np", ".", "sin", "(", "theta", ")", "/", "(", "rho", "*", "np", ".", "cos", "(", "theta", ")", "+", "A", ")", ")", "x", "=", "(", "x", "-", "xmin", ")", "/", "(", "xmax", "-", "xmin", ")", "y", "=", "(", "y", "-", "ymin", ")", "/", "(", "ymax", "-", "ymin", ")", "return", "x", ",", "y" ]
https://github.com/rougier/ten-rules/blob/fc1cce48f19a3ca85986a509b62493b3be7926b4/projections.py#L52-L68
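An example call, assuming the function above (from projections.py) is in scope with numpy as np. From the internal rho*90.0 rescaling, rho appears to be a normalized eccentricity in [0, 1]; theta is in radians, and both outputs are normalized to [0, 1]:

```python
import numpy as np

x, y = polar_to_logpolar(0.5, np.pi / 4)
print(x, y)  # normalized log-polar coordinates
```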
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/matrices/densesolve.py
python
forward_substitution
(lower_triangle, variable, constant, K)
return variable
Performs forward substitution given a lower triangular matrix, a vector of variables and a vector of constants. Examples ======== >>> from sympy.matrices.densesolve import forward_substitution >>> from sympy import QQ >>> from sympy import Dummy >>> x, y, z = Dummy('x'), Dummy('y'), Dummy('z') >>> a = [ ... [QQ(1), QQ(0), QQ(0)], ... [QQ(-2), QQ(1), QQ(0)], ... [QQ(-2), QQ(-1), QQ(1)]] >>> variables = [ ... [x], ... [y], ... [z]] >>> constants = [ ... [QQ(-1)], ... [QQ(13)], ... [QQ(-6)]] >>> forward_substitution(a, variables, constants, QQ) [[-1], [11], [3]] See Also ======== LU_solve cholesky_solve
Performs forward substitution given a lower triangular matrix, a vector of variables and a vector of constants.
[ "Performs", "forward", "substitution", "given", "a", "lower", "triangular", "matrix", "a", "vector", "of", "variables", "and", "a", "vector", "of", "constants", "." ]
def forward_substitution(lower_triangle, variable, constant, K): """ Performs forward substitution given a lower triangular matrix, a vector of variables and a vector of constants. Examples ======== >>> from sympy.matrices.densesolve import forward_substitution >>> from sympy import QQ >>> from sympy import Dummy >>> x, y, z = Dummy('x'), Dummy('y'), Dummy('z') >>> a = [ ... [QQ(1), QQ(0), QQ(0)], ... [QQ(-2), QQ(1), QQ(0)], ... [QQ(-2), QQ(-1), QQ(1)]] >>> variables = [ ... [x], ... [y], ... [z]] >>> constants = [ ... [QQ(-1)], ... [QQ(13)], ... [QQ(-6)]] >>> forward_substitution(a, variables, constants, QQ) [[-1], [11], [3]] See Also ======== LU_solve cholesky_solve """ copy_lower_triangle = copy.deepcopy(lower_triangle) nrow = len(copy_lower_triangle) for i in range(nrow): a = K.zero for j in range(i): a += copy_lower_triangle[i][j]*variable[j][0] variable[i][0] = (constant[i][0] - a)/copy_lower_triangle[i][i] return variable
[ "def", "forward_substitution", "(", "lower_triangle", ",", "variable", ",", "constant", ",", "K", ")", ":", "copy_lower_triangle", "=", "copy", ".", "deepcopy", "(", "lower_triangle", ")", "nrow", "=", "len", "(", "copy_lower_triangle", ")", "for", "i", "in", "range", "(", "nrow", ")", ":", "a", "=", "K", ".", "zero", "for", "j", "in", "range", "(", "i", ")", ":", "a", "+=", "copy_lower_triangle", "[", "i", "]", "[", "j", "]", "*", "variable", "[", "j", "]", "[", "0", "]", "variable", "[", "i", "]", "[", "0", "]", "=", "(", "constant", "[", "i", "]", "[", "0", "]", "-", "a", ")", "/", "copy_lower_triangle", "[", "i", "]", "[", "i", "]", "return", "variable" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/matrices/densesolve.py#L366-L406
khalim19/gimp-plugin-export-layers
b37255f2957ad322f4d332689052351cdea6e563
export_layers/pygimplib/_lib/python_standard_modules/logging/handlers.py
python
NTEventLogHandler.emit
(self, record)
Emit a record. Determine the message ID, event category and event type. Then log the message in the NT event log.
Emit a record.
[ "Emit", "a", "record", "." ]
def emit(self, record): """ Emit a record. Determine the message ID, event category and event type. Then log the message in the NT event log. """ if self._welu: try: id = self.getMessageID(record) cat = self.getEventCategory(record) type = self.getEventType(record) msg = self.format(record) self._welu.ReportEvent(self.appname, id, cat, type, [msg]) except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "if", "self", ".", "_welu", ":", "try", ":", "id", "=", "self", ".", "getMessageID", "(", "record", ")", "cat", "=", "self", ".", "getEventCategory", "(", "record", ")", "type", "=", "self", ".", "getEventType", "(", "record", ")", "msg", "=", "self", ".", "format", "(", "record", ")", "self", ".", "_welu", ".", "ReportEvent", "(", "self", ".", "appname", ",", "id", ",", "cat", ",", "type", ",", "[", "msg", "]", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "raise", "except", ":", "self", ".", "handleError", "(", "record", ")" ]
https://github.com/khalim19/gimp-plugin-export-layers/blob/b37255f2957ad322f4d332689052351cdea6e563/export_layers/pygimplib/_lib/python_standard_modules/logging/handlers.py#L1033-L1050
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/patched/notpip/_vendor/pkg_resources/__init__.py
python
NullProvider.get_resource_stream
(self, manager, resource_name)
return io.BytesIO(self.get_resource_string(manager, resource_name))
[]
def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name))
[ "def", "get_resource_stream", "(", "self", ",", "manager", ",", "resource_name", ")", ":", "return", "io", ".", "BytesIO", "(", "self", ".", "get_resource_string", "(", "manager", ",", "resource_name", ")", ")" ]
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py#L1397-L1398
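NullProvider.get_resource_stream wraps the raw resource bytes in a BytesIO; in user code this is normally reached through the pkg_resources front-end shown below. 'mypkg' and 'data.txt' are hypothetical package and resource names:

```python
import pkg_resources

with pkg_resources.resource_stream('mypkg', 'data.txt') as stream:
    print(stream.read()[:64])
```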
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/sre_parse.py
python
State.checklookbehindgroup
(self, gid, source)
[]
def checklookbehindgroup(self, gid, source): if self.lookbehindgroups is not None: if not self.checkgroup(gid): raise source.error('cannot refer to an open group') if gid >= self.lookbehindgroups: raise source.error('cannot refer to group defined in the same ' 'lookbehind subpattern')
[ "def", "checklookbehindgroup", "(", "self", ",", "gid", ",", "source", ")", ":", "if", "self", ".", "lookbehindgroups", "is", "not", "None", ":", "if", "not", "self", ".", "checkgroup", "(", "gid", ")", ":", "raise", "source", ".", "error", "(", "'cannot refer to an open group'", ")", "if", "gid", ">=", "self", ".", "lookbehindgroups", ":", "raise", "source", ".", "error", "(", "'cannot refer to group defined in the same '", "'lookbehind subpattern'", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/sre_parse.py#L101-L107
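The second error branch above fires when a backreference inside a lookbehind points at a group that was opened in that same lookbehind, for example:

```python
import re

try:
    re.compile(r'(?<=(a)\1)b')  # group 1 is defined inside the lookbehind
except re.error as exc:
    print(exc)  # e.g. "cannot refer to group defined in the same lookbehind subpattern"
```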
Maicius/QQZoneMood
e529f386865bed141f43c5825a3f1dc7cfa161b9
src/spider/QQZoneFriendSpider.py
python
QQZoneFriendSpider.clean_friend_data
(self)
Clean the friend data and generate a CSV file :return:
Clean the friend data and generate a CSV file :return:
[ "清洗好友数据,生成csv", ":", "return", ":" ]
def clean_friend_data(self): """ Clean the friend data and generate a CSV file :return: """ try: if len(self.friend_list) == 0: self.load_friend_data() friend_total_num = len(self.friend_list) print("valid friend num:", friend_total_num) friend_list_df = pd.DataFrame(self.friend_list) self.friend_detail_list = [] if friend_total_num == 0: print("该用户没有好友") return False for friend in self.friend_detail: try: friend_uin = friend['friendUin'] add_friend_time = friend['addFriendTime'] img = friend_list_df.loc[friend_list_df['uin'] == friend_uin, 'img'].values[0] nick = friend['nick'] nick_name = remove_special_tag(nick[str(friend_uin)]) common_friend_num = len(friend['common']['friend']) common_group_num = len(friend['common']['group']) common_group_names = friend['common']['group'] self.friend_detail_list.append( dict(uin=self.username, friend_uin=friend_uin, add_friend_time=add_friend_time, nick_name=nick_name, common_friend_num=common_friend_num, common_group_num=common_group_num, common_group_names=common_group_names, img=img)) except BaseException as e: if self.debug: print("单向好友:", friend) self.friend_detail_list.append( dict(uin=0, friend_uin=friend['friendUin'], add_friend_time=0, nick_name='单向好友', common_friend_num=0, common_group_num=0, common_group_names='', img='')) friend_df = pd.DataFrame(self.friend_detail_list) friend_df.sort_values(by='add_friend_time', inplace=True) friend_df['add_friend_time2'] = friend_df['add_friend_time'].apply(lambda x: util.get_full_time_from_mktime(x)) friend_df.fillna('', inplace=True) if self.export_excel: friend_df.to_excel(self.FRIEND_DETAIL_EXCEL_FILE_NAME) if self.export_csv: friend_df.to_csv(self.FRIEND_DETAIL_LIST_FILE_NAME) if self.debug: print("Finish to clean friend data...") print("File Name:", self.FRIEND_DETAIL_LIST_FILE_NAME) self.friend_df = friend_df return True except BaseException as e: self.format_error(e, "Failed to parse friend_info") return False
[ "def", "clean_friend_data", "(", "self", ")", ":", "try", ":", "if", "len", "(", "self", ".", "friend_list", ")", "==", "0", ":", "self", ".", "load_friend_data", "(", ")", "friend_total_num", "=", "len", "(", "self", ".", "friend_list", ")", "print", "(", "\"valid friend num:\"", ",", "friend_total_num", ")", "friend_list_df", "=", "pd", ".", "DataFrame", "(", "self", ".", "friend_list", ")", "self", ".", "friend_detail_list", "=", "[", "]", "if", "friend_total_num", "==", "0", ":", "print", "(", "\"该用户没有好友\")", "", "return", "False", "for", "friend", "in", "self", ".", "friend_detail", ":", "try", ":", "friend_uin", "=", "friend", "[", "'friendUin'", "]", "add_friend_time", "=", "friend", "[", "'addFriendTime'", "]", "img", "=", "friend_list_df", ".", "loc", "[", "friend_list_df", "[", "'uin'", "]", "==", "friend_uin", ",", "'img'", "]", ".", "values", "[", "0", "]", "nick", "=", "friend", "[", "'nick'", "]", "nick_name", "=", "remove_special_tag", "(", "nick", "[", "str", "(", "friend_uin", ")", "]", ")", "common_friend_num", "=", "len", "(", "friend", "[", "'common'", "]", "[", "'friend'", "]", ")", "common_group_num", "=", "len", "(", "friend", "[", "'common'", "]", "[", "'group'", "]", ")", "common_group_names", "=", "friend", "[", "'common'", "]", "[", "'group'", "]", "self", ".", "friend_detail_list", ".", "append", "(", "dict", "(", "uin", "=", "self", ".", "username", ",", "friend_uin", "=", "friend_uin", ",", "add_friend_time", "=", "add_friend_time", ",", "nick_name", "=", "nick_name", ",", "common_friend_num", "=", "common_friend_num", ",", "common_group_num", "=", "common_group_num", ",", "common_group_names", "=", "common_group_names", ",", "img", "=", "img", ")", ")", "except", "BaseException", "as", "e", ":", "if", "self", ".", "debug", ":", "print", "(", "\"单向好友:\", friend", ")", "", "", "self", ".", "friend_detail_list", ".", "append", "(", "dict", "(", "uin", "=", "0", ",", "friend_uin", "=", "friend", "[", "'friendUin'", "]", ",", "add_friend_time", "=", "0", ",", "nick_name", "=", "'单向好友', common", "_", "riend_num=0,", "", "", "", "common_group_num", "=", "0", ",", "common_group_names", "=", "''", ",", "img", "=", "''", ")", ")", "friend_df", "=", "pd", ".", "DataFrame", "(", "self", ".", "friend_detail_list", ")", "friend_df", ".", "sort_values", "(", "by", "=", "'add_friend_time'", ",", "inplace", "=", "True", ")", "friend_df", "[", "'add_friend_time2'", "]", "=", "friend_df", "[", "'add_friend_time'", "]", ".", "apply", "(", "lambda", "x", ":", "util", ".", "get_full_time_from_mktime", "(", "x", ")", ")", "friend_df", ".", "fillna", "(", "''", ",", "inplace", "=", "True", ")", "if", "self", ".", "export_excel", ":", "friend_df", ".", "to_excel", "(", "self", ".", "FRIEND_DETAIL_EXCEL_FILE_NAME", ")", "if", "self", ".", "export_csv", ":", "friend_df", ".", "to_csv", "(", "self", ".", "FRIEND_DETAIL_LIST_FILE_NAME", ")", "if", "self", ".", "debug", ":", "print", "(", "\"Finish to clean friend data...\"", ")", "print", "(", "\"File Name:\"", ",", "self", ".", "FRIEND_DETAIL_LIST_FILE_NAME", ")", "self", ".", "friend_df", "=", "friend_df", "return", "True", "except", "BaseException", "as", "e", ":", "self", ".", "format_error", "(", "e", ",", "\"Failed to parse friend_info\"", ")", "return", "False" ]
https://github.com/Maicius/QQZoneMood/blob/e529f386865bed141f43c5825a3f1dc7cfa161b9/src/spider/QQZoneFriendSpider.py#L234-L289
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/logging/handlers.py
python
NTEventLogHandler.emit
(self, record)
Emit a record. Determine the message ID, event category and event type. Then log the message in the NT event log.
Emit a record.
[ "Emit", "a", "record", "." ]
def emit(self, record): """ Emit a record. Determine the message ID, event category and event type. Then log the message in the NT event log. """ if self._welu: try: id = self.getMessageID(record) cat = self.getEventCategory(record) type = self.getEventType(record) msg = self.format(record) self._welu.ReportEvent(self.appname, id, cat, type, [msg]) except Exception: self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "if", "self", ".", "_welu", ":", "try", ":", "id", "=", "self", ".", "getMessageID", "(", "record", ")", "cat", "=", "self", ".", "getEventCategory", "(", "record", ")", "type", "=", "self", ".", "getEventType", "(", "record", ")", "msg", "=", "self", ".", "format", "(", "record", ")", "self", ".", "_welu", ".", "ReportEvent", "(", "self", ".", "appname", ",", "id", ",", "cat", ",", "type", ",", "[", "msg", "]", ")", "except", "Exception", ":", "self", ".", "handleError", "(", "record", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/logging/handlers.py#L1094-L1109
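A Windows-only sketch: NTEventLogHandler requires the pywin32 package (that is what the _welu check above guards). Each record is formatted and written to the NT event log under the given application name:

```python
import logging
import logging.handlers

logger = logging.getLogger('myapp')
logger.addHandler(logging.handlers.NTEventLogHandler('MyApp'))
logger.error('something went wrong')  # appears in the Windows event log
```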
bugy/script-server
9a57ce15903c81bcb537b872f1330ee55ba31563
src/execution/executor.py
python
ScriptExecutor.get_return_code
(self)
return self.process_wrapper.get_return_code()
[]
def get_return_code(self): return self.process_wrapper.get_return_code()
[ "def", "get_return_code", "(", "self", ")", ":", "return", "self", ".", "process_wrapper", ".", "get_return_code", "(", ")" ]
https://github.com/bugy/script-server/blob/9a57ce15903c81bcb537b872f1330ee55ba31563/src/execution/executor.py#L170-L171
facebookresearch/Large-Scale-VRD
7ababfe1023941c3653d7aebe9f835a47f5e8277
lib/utils/model_convert_utils.py
python
pairwise
(iterable)
return zip(a, b)
s -> (s0,s1), (s1,s2), (s2, s3), ...
s -> (s0,s1), (s1,s2), (s2, s3), ...
[ "s", "-", ">", "(", "s0", "s1", ")", "(", "s1", "s2", ")", "(", "s2", "s3", ")", "..." ]
def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." from itertools import tee a, b = tee(iterable) next(b, None) return zip(a, b)
[ "def", "pairwise", "(", "iterable", ")", ":", "from", "itertools", "import", "tee", "a", ",", "b", "=", "tee", "(", "iterable", ")", "next", "(", "b", ",", "None", ")", "return", "zip", "(", "a", ",", "b", ")" ]
https://github.com/facebookresearch/Large-Scale-VRD/blob/7ababfe1023941c3653d7aebe9f835a47f5e8277/lib/utils/model_convert_utils.py#L127-L132
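pairwise yields a sliding window of adjacent pairs; itertools.tee keeps two independent iterators over the same input, with the second advanced by one. Assuming the helper above is in scope:

```python
print(list(pairwise([1, 2, 3, 4])))
# [(1, 2), (2, 3), (3, 4)]
```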
postgres/pgadmin4
374c5e952fa594d749fadf1f88076c1cba8c5f64
web/pgadmin/tools/import_export_servers/__init__.py
python
save
()
return make_json_response(success=1)
This function is used to import or export based on the data
This function is used to import or export based on the data
[ "This", "function", "is", "used", "to", "import", "or", "export", "based", "on", "the", "data" ]
def save(): """ This function is used to import or export based on the data """ required_args = [ 'type', 'filename' ] data = request.form if request.form else json.loads(request.data.decode()) for arg in required_args: if arg not in data: return make_json_response( status=410, success=0, errormsg=_( "Could not find the required parameter ({})." ).format(arg) ) status = False errmsg = None if data['type'] == 'export': status, errmsg = \ dump_database_servers(data['filename'], data['selected_sever_ids']) elif data['type'] == 'import': # Clear all the existing servers if 'replace_servers' in data and data['replace_servers']: clear_database_servers() status, errmsg = \ load_database_servers(data['filename'], data['selected_sever_ids']) if not status: return internal_server_error(errmsg) return make_json_response(success=1)
[ "def", "save", "(", ")", ":", "required_args", "=", "[", "'type'", ",", "'filename'", "]", "data", "=", "request", ".", "form", "if", "request", ".", "form", "else", "json", ".", "loads", "(", "request", ".", "data", ".", "decode", "(", ")", ")", "for", "arg", "in", "required_args", ":", "if", "arg", "not", "in", "data", ":", "return", "make_json_response", "(", "status", "=", "410", ",", "success", "=", "0", ",", "errormsg", "=", "_", "(", "\"Could not find the required parameter ({}).\"", ")", ".", "format", "(", "arg", ")", ")", "status", "=", "False", "errmsg", "=", "None", "if", "data", "[", "'type'", "]", "==", "'export'", ":", "status", ",", "errmsg", "=", "dump_database_servers", "(", "data", "[", "'filename'", "]", ",", "data", "[", "'selected_sever_ids'", "]", ")", "elif", "data", "[", "'type'", "]", "==", "'import'", ":", "# Clear all the existing servers", "if", "'replace_servers'", "in", "data", "and", "data", "[", "'replace_servers'", "]", ":", "clear_database_servers", "(", ")", "status", ",", "errmsg", "=", "load_database_servers", "(", "data", "[", "'filename'", "]", ",", "data", "[", "'selected_sever_ids'", "]", ")", "if", "not", "status", ":", "return", "internal_server_error", "(", "errmsg", ")", "return", "make_json_response", "(", "success", "=", "1", ")" ]
https://github.com/postgres/pgadmin4/blob/374c5e952fa594d749fadf1f88076c1cba8c5f64/web/pgadmin/tools/import_export_servers/__init__.py#L188-L222
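A hedged sketch of the request body this pgAdmin endpoint expects. The URL and server ids are hypothetical, authentication and CSRF handling are omitted, and the 'selected_sever_ids' key is spelled exactly as the handler reads it:

```python
import requests

payload = {
    'type': 'export',                  # or 'import'
    'filename': '/tmp/servers.json',
    'selected_sever_ids': [1, 2],
    # 'replace_servers': True,         # only honoured for 'import'
}
requests.post('http://localhost:5050/import_export_servers/save',
              json=payload)
```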
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py
python
EntryPoint.__str__
(self)
return s
[]
def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s
[ "def", "__str__", "(", "self", ")", ":", "s", "=", "\"%s = %s\"", "%", "(", "self", ".", "name", ",", "self", ".", "module_name", ")", "if", "self", ".", "attrs", ":", "s", "+=", "':'", "+", "'.'", ".", "join", "(", "self", ".", "attrs", ")", "if", "self", ".", "extras", ":", "s", "+=", "' [%s]'", "%", "','", ".", "join", "(", "self", ".", "extras", ")", "return", "s" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L2294-L2300
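__str__ reproduces the entry-point syntax that EntryPoint.parse consumes, so parsing and stringifying round-trip:

```python
from pkg_resources import EntryPoint

ep = EntryPoint.parse('mycmd = mypkg.cli:main [fancy]')
print(str(ep))  # mycmd = mypkg.cli:main [fancy]
```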
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/pandas-0.24.2-py3.7-macosx-10.9-x86_64.egg/pandas/io/pytables.py
python
HDFStore.select_column
(self, key, column, **kwargs)
return self.get_storer(key).read_column(column=column, **kwargs)
return a single column from the table. This is generally only useful to select an indexable Parameters ---------- key : object column: the column of interest Exceptions ---------- raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block)
return a single column from the table. This is generally only useful to select an indexable
[ "return", "a", "single", "column", "from", "the", "table", ".", "This", "is", "generally", "only", "useful", "to", "select", "an", "indexable" ]
def select_column(self, key, column, **kwargs): """ return a single column from the table. This is generally only useful to select an indexable Parameters ---------- key : object column: the column of interest Exceptions ---------- raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block) """ return self.get_storer(key).read_column(column=column, **kwargs)
[ "def", "select_column", "(", "self", ",", "key", ",", "column", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "get_storer", "(", "key", ")", ".", "read_column", "(", "column", "=", "column", ",", "*", "*", "kwargs", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pandas-0.24.2-py3.7-macosx-10.9-x86_64.egg/pandas/io/pytables.py#L758-L776
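select_column only works against 'table'-format stores, and only for the index or columns stored as data_columns. A minimal sketch (requires the PyTables dependency; the file path is a placeholder):

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': list('xyz')})
with pd.HDFStore('/tmp/demo.h5') as store:
    store.put('df', df, format='table', data_columns=['a'])
    print(store.select_column('df', 'a'))
```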
hippich/Bitcoin-Poker-Room
b89e8c2df7a57d19d1aa6deff3f6a92ebe8134fa
lib/ppn/pokernetwork/pokerservice.py
python
PokerService.getTableBestByCriteria
(self, serial, currency_serial = None, variant = None, betting_structure = None, min_players = 0)
return bestTable
Return a PokerTable object optimal table based on the given criteria in list_table_query_str plus the amount of currency the user represented by serial has. The caller is assured that the user represented by serial can afford at least the minimum buy-in for the table returned (if one is returned). Arguments in order: serial: a user_serial for the user who wants a good table. currency_serial: must be a positive integer or None variant: must be a string or None betting_structure: must be a string or None min_players: must be a positive integer or None General algorithm used by this method: First, a list of tables is requested from self.searchTables(currency_serial, variant, betting_structure, min_players). If an empty list is returned by searchTables(), then None is returned here. Second, this method iterates across the tables returned from searchTables() and eliminates any tables for which the user represented by serial has less than the minimum buy-in, and those that have too many users sitting out such that the criteria from the user is effectively not met. If there are multiple tables found, the first one from the list coming from searchTables() that the user can buy into is returned. Methods of interest used by this method: self.getMoney() : used to find out how much money the serial has. table.game.sitCount() : used to find out how many users are sitting out. table.game.all() : used to shortcut away from tables that are full and should not be considered.
Return a PokerTable object optimal table based on the given criteria in list_table_query_str plus the amount of currency the user represented by serial has. The caller is assured that the user represented by serial can afford at least the minimum buy-in for the table returned (if one is returned).
[ "Return", "a", "PokerTable", "object", "optimal", "table", "based", "on", "the", "given", "criteria", "in", "list_table_query_str", "plus", "the", "amount", "of", "currency", "the", "user", "represented", "by", "serial", "has", ".", "The", "caller", "is", "assured", "that", "the", "user", "represented", "by", "serial", "can", "afford", "at", "least", "the", "minimum", "buy", "-", "in", "for", "the", "table", "returned", "(", "if", "one", "is", "returned", ")", "." ]
def getTableBestByCriteria(self, serial, currency_serial = None, variant = None, betting_structure = None, min_players = 0): """Return a PokerTable object optimal table based on the given criteria in list_table_query_str plus the amount of currency the user represented by serial has. The caller is assured that the user represented by serial can afford at least the minimum buy-in for the table returned (if one is returned). Arguments in order: serial: a user_serial for the user who wants a good table. currency_serial: must be a positive integer or None variant: must be a string or None betting_structure: must be a string or None min_players: must be a positive integer or None General algorithm used by this method: First, a list of tables is requested from self.searchTables(currency_serial, variant, betting_structure, min_players). If an empty list is returned by searchTables(), then None is returned here. Second, this method iterates across the tables returned from searchTables() and eliminates any tables for which the user represented by serial has less than the minimum buy-in, and those that have too many users sitting out such that the criteria from the user is effectively not met. If there are multiple tables found, the first one from the list coming from searchTables() that the user can buy into is returned. Methods of interest used by this method: self.getMoney() : used to find out how much money the serial has. table.game.sitCount() : used to find out how many users are sitting out. table.game.all() : used to shortcut away from tables that are full and should not be considered. """ bestTable = None # money_results dict is used to store lookups made to self.getMoney() money_results = {} # A bit of a cheat, listTables() caches the value for min_players # when it does parsing so that we can use it here. for rr in self.searchTables(currency_serial, variant, betting_structure, min_players): table = self.getTable(rr['serial']) # Skip considering table entirely if it is full. if table.game.full(): continue # Check to see that the players sitting out don't effectively # cause the user to fail to meet the number of players # criteria. if table.game.sitCount() < min_players: continue buy_in = table.game.buyIn(serial) currency_serial = rr['currency_serial'] if not money_results.has_key(currency_serial): money_results[currency_serial] = self.getMoney(serial, currency_serial) if money_results[currency_serial] > buy_in: # If the user can afford the buy_in, we've found a table for them! bestTable = table break return bestTable
[ "def", "getTableBestByCriteria", "(", "self", ",", "serial", ",", "currency_serial", "=", "None", ",", "variant", "=", "None", ",", "betting_structure", "=", "None", ",", "min_players", "=", "0", ")", ":", "bestTable", "=", "None", "# money_results dict is used to store lookups made to self.getMoney()", "money_results", "=", "{", "}", "# A bit of a cheat, listTables() caches the value for min_players", "# when it does parsing so that we can use it here.", "for", "rr", "in", "self", ".", "searchTables", "(", "currency_serial", ",", "variant", ",", "betting_structure", ",", "min_players", ")", ":", "table", "=", "self", ".", "getTable", "(", "rr", "[", "'serial'", "]", ")", "# Skip considering table entirely if it is full.", "if", "table", ".", "game", ".", "full", "(", ")", ":", "continue", "# Check to see that the players sitting out don't effecitvely", "# cause the user to fail to meet the number of players", "# criteria.", "if", "table", ".", "game", ".", "sitCount", "(", ")", "<", "min_players", ":", "continue", "buy_in", "=", "table", ".", "game", ".", "buyIn", "(", "serial", ")", "currency_serial", "=", "rr", "[", "'currency_serial'", "]", "if", "not", "money_results", ".", "has_key", "(", "currency_serial", ")", ":", "money_results", "[", "currency_serial", "]", "=", "self", ".", "getMoney", "(", "serial", ",", "currency_serial", ")", "if", "money_results", "[", "currency_serial", "]", ">", "buy_in", ":", "# If the user can afford the buy_in, we've found a table for them!", "bestTable", "=", "table", "break", "return", "bestTable" ]
https://github.com/hippich/Bitcoin-Poker-Room/blob/b89e8c2df7a57d19d1aa6deff3f6a92ebe8134fa/lib/ppn/pokernetwork/pokerservice.py#L2551-L2625
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/printing/pretty/pretty.py
python
PrettyPrinter._print_matrix_contents
(self, e)
return D
This method factors out what is essentially grid printing.
This method factors out what is essentially grid printing.
[ "This", "method", "factors", "out", "what", "is", "essentially", "grid", "printing", "." ]
def _print_matrix_contents(self, e): """ This method factors out what is essentially grid printing. """ M = e # matrix Ms = {} # i,j -> pretty(M[i,j]) for i in range(M.rows): for j in range(M.cols): Ms[i, j] = self._print(M[i, j]) # h- and v- spacers hsep = 2 vsep = 1 # max width for columns maxw = [-1] * M.cols for j in range(M.cols): maxw[j] = max([Ms[i, j].width() for i in range(M.rows)] or [0]) # drawing result D = None for i in range(M.rows): D_row = None for j in range(M.cols): s = Ms[i, j] # reshape s to maxw # XXX this should be generalized, and go to stringPict.reshape ? assert s.width() <= maxw[j] # hcenter it, +0.5 to the right 2 # ( it's better to align formula starts for say 0 and r ) # XXX this is not good in all cases -- maybe introduce vbaseline? wdelta = maxw[j] - s.width() wleft = wdelta // 2 wright = wdelta - wleft s = prettyForm(*s.right(' '*wright)) s = prettyForm(*s.left(' '*wleft)) # we don't need vcenter cells -- this is automatically done in # a pretty way because when their baselines are taking into # account in .right() if D_row is None: D_row = s # first box in a row continue D_row = prettyForm(*D_row.right(' '*hsep)) # h-spacer D_row = prettyForm(*D_row.right(s)) if D is None: D = D_row # first row in a picture continue # v-spacer for _ in range(vsep): D = prettyForm(*D.below(' ')) D = prettyForm(*D.below(D_row)) if D is None: D = prettyForm('') # Empty Matrix return D
[ "def", "_print_matrix_contents", "(", "self", ",", "e", ")", ":", "M", "=", "e", "# matrix", "Ms", "=", "{", "}", "# i,j -> pretty(M[i,j])", "for", "i", "in", "range", "(", "M", ".", "rows", ")", ":", "for", "j", "in", "range", "(", "M", ".", "cols", ")", ":", "Ms", "[", "i", ",", "j", "]", "=", "self", ".", "_print", "(", "M", "[", "i", ",", "j", "]", ")", "# h- and v- spacers", "hsep", "=", "2", "vsep", "=", "1", "# max width for columns", "maxw", "=", "[", "-", "1", "]", "*", "M", ".", "cols", "for", "j", "in", "range", "(", "M", ".", "cols", ")", ":", "maxw", "[", "j", "]", "=", "max", "(", "[", "Ms", "[", "i", ",", "j", "]", ".", "width", "(", ")", "for", "i", "in", "range", "(", "M", ".", "rows", ")", "]", "or", "[", "0", "]", ")", "# drawing result", "D", "=", "None", "for", "i", "in", "range", "(", "M", ".", "rows", ")", ":", "D_row", "=", "None", "for", "j", "in", "range", "(", "M", ".", "cols", ")", ":", "s", "=", "Ms", "[", "i", ",", "j", "]", "# reshape s to maxw", "# XXX this should be generalized, and go to stringPict.reshape ?", "assert", "s", ".", "width", "(", ")", "<=", "maxw", "[", "j", "]", "# hcenter it, +0.5 to the right 2", "# ( it's better to align formula starts for say 0 and r )", "# XXX this is not good in all cases -- maybe introduce vbaseline?", "wdelta", "=", "maxw", "[", "j", "]", "-", "s", ".", "width", "(", ")", "wleft", "=", "wdelta", "//", "2", "wright", "=", "wdelta", "-", "wleft", "s", "=", "prettyForm", "(", "*", "s", ".", "right", "(", "' '", "*", "wright", ")", ")", "s", "=", "prettyForm", "(", "*", "s", ".", "left", "(", "' '", "*", "wleft", ")", ")", "# we don't need vcenter cells -- this is automatically done in", "# a pretty way because when their baselines are taking into", "# account in .right()", "if", "D_row", "is", "None", ":", "D_row", "=", "s", "# first box in a row", "continue", "D_row", "=", "prettyForm", "(", "*", "D_row", ".", "right", "(", "' '", "*", "hsep", ")", ")", "# h-spacer", "D_row", "=", "prettyForm", "(", "*", "D_row", ".", "right", "(", "s", ")", ")", "if", "D", "is", "None", ":", "D", "=", "D_row", "# first row in a picture", "continue", "# v-spacer", "for", "_", "in", "range", "(", "vsep", ")", ":", "D", "=", "prettyForm", "(", "*", "D", ".", "below", "(", "' '", ")", ")", "D", "=", "prettyForm", "(", "*", "D", ".", "below", "(", "D_row", ")", ")", "if", "D", "is", "None", ":", "D", "=", "prettyForm", "(", "''", ")", "# Empty Matrix", "return", "D" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/printing/pretty/pretty.py#L657-L724
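The grid layout built above is what drives sympy's pretty matrix output; per-column widths come from the widest cell, with hsep/vsep spacing between cells. A quick way to see it (the comment shows the rough shape of the unicode output):

```python
from sympy import Matrix, Symbol, pprint

x = Symbol('x')
pprint(Matrix([[1, x], [x**2, 1]]))
# roughly:
# [1    x]
# [ 2    ]
# [x    1]
```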
mesonbuild/meson
a22d0f9a0a787df70ce79b05d0c45de90a970048
mesonbuild/modules/windows.py
python
initialize
(interp: 'Interpreter')
return WindowsModule(interp)
[]
def initialize(interp: 'Interpreter') -> WindowsModule: return WindowsModule(interp)
[ "def", "initialize", "(", "interp", ":", "'Interpreter'", ")", "->", "WindowsModule", ":", "return", "WindowsModule", "(", "interp", ")" ]
https://github.com/mesonbuild/meson/blob/a22d0f9a0a787df70ce79b05d0c45de90a970048/mesonbuild/modules/windows.py#L207-L208
WerWolv/EdiZon_CheatsConfigsAndScripts
d16d36c7509c01dca770f402babd83ff2e9ae6e7
Scripts/lib/python3.5/email/generator.py
python
Generator.__init__
(self, outfp, mangle_from_=None, maxheaderlen=None, *, policy=None)
Create the generator for message flattening. outfp is the output file-like object for writing the message to. It must have a write() method. Optional mangle_from_ is a flag that, when True (the default if policy is not set), escapes From_ lines in the body of the message by putting a `>' in front of them. Optional maxheaderlen specifies the longest length for a non-continued header. When a header line is longer (in characters, with tabs expanded to 8 spaces) than maxheaderlen, the header will split as defined in the Header class. Set maxheaderlen to zero to disable header wrapping. The default is 78, as recommended (but not required) by RFC 2822. The policy keyword specifies a policy object that controls a number of aspects of the generator's operation. If no policy is specified, the policy associated with the Message object passed to the flatten method is used.
Create the generator for message flattening.
[ "Create", "the", "generator", "for", "message", "flattening", "." ]
def __init__(self, outfp, mangle_from_=None, maxheaderlen=None, *, policy=None): """Create the generator for message flattening. outfp is the output file-like object for writing the message to. It must have a write() method. Optional mangle_from_ is a flag that, when True (the default if policy is not set), escapes From_ lines in the body of the message by putting a `>' in front of them. Optional maxheaderlen specifies the longest length for a non-continued header. When a header line is longer (in characters, with tabs expanded to 8 spaces) than maxheaderlen, the header will split as defined in the Header class. Set maxheaderlen to zero to disable header wrapping. The default is 78, as recommended (but not required) by RFC 2822. The policy keyword specifies a policy object that controls a number of aspects of the generator's operation. If no policy is specified, the policy associated with the Message object passed to the flatten method is used. """ if mangle_from_ is None: mangle_from_ = True if policy is None else policy.mangle_from_ self._fp = outfp self._mangle_from_ = mangle_from_ self.maxheaderlen = maxheaderlen self.policy = policy
[ "def", "__init__", "(", "self", ",", "outfp", ",", "mangle_from_", "=", "None", ",", "maxheaderlen", "=", "None", ",", "*", ",", "policy", "=", "None", ")", ":", "if", "mangle_from_", "is", "None", ":", "mangle_from_", "=", "True", "if", "policy", "is", "None", "else", "policy", ".", "mangle_from_", "self", ".", "_fp", "=", "outfp", "self", ".", "_mangle_from_", "=", "mangle_from_", "self", ".", "maxheaderlen", "=", "maxheaderlen", "self", ".", "policy", "=", "policy" ]
https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/email/generator.py#L36-L66
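Flattening a message with the stdlib Generator; mangle_from_ and maxheaderlen map directly onto the constructor arguments documented above:

```python
import sys
from email.generator import Generator
from email.message import Message

msg = Message()
msg['Subject'] = 'Hello'
msg.set_payload('From here on out...\n')

gen = Generator(sys.stdout, mangle_from_=True, maxheaderlen=60)
gen.flatten(msg)  # the body line starting with 'From ' is escaped as '>From '
```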
postlund/pyatv
4ed1f5539f37d86d80272663d1f2ea34a6c41ec4
pyatv/scripts/atvproxy.py
python
CompanionAppleTVProxy.enable_encryption
(self, output_key: bytes, input_key: bytes)
Enable encryption with the specified keys.
Enable encryption with the specified keys.
[ "Enable", "encryption", "with", "the", "specified", "keys", "." ]
def enable_encryption(self, output_key: bytes, input_key: bytes) -> None: """Enable encryption with the specified keys.""" self.chacha = chacha20.Chacha20Cipher(output_key, input_key, nonce_length=12)
[ "def", "enable_encryption", "(", "self", ",", "output_key", ":", "bytes", ",", "input_key", ":", "bytes", ")", "->", "None", ":", "self", ".", "chacha", "=", "chacha20", ".", "Chacha20Cipher", "(", "output_key", ",", "input_key", ",", "nonce_length", "=", "12", ")" ]
https://github.com/postlund/pyatv/blob/4ed1f5539f37d86d80272663d1f2ea34a6c41ec4/pyatv/scripts/atvproxy.py#L178-L180
realpython/book2-exercises
cde325eac8e6d8cff2316601c2e5b36bb46af7d0
web2py-rest/gluon/html.py
python
DIV.__setitem__
(self, i, value)
Sets attribute with name 'i' or component #i. Args: i: index. If i is a string, it is the name of the attribute; otherwise it refers to the index of the component value: the new value
Sets attribute with name 'i' or component #i.
[ "Sets", "attribute", "with", "name", "i", "or", "component", "#i", "." ]
def __setitem__(self, i, value): """ Sets attribute with name 'i' or component #i. Args: i: index. If i is a string, it is the name of the attribute; otherwise it refers to the index of the component value: the new value """ self._setnode(value) if isinstance(i, (str, unicode)): self.attributes[i] = value else: self.components[i] = value
[ "def", "__setitem__", "(", "self", ",", "i", ",", "value", ")", ":", "self", ".", "_setnode", "(", "value", ")", "if", "isinstance", "(", "i", ",", "(", "str", ",", "unicode", ")", ")", ":", "self", ".", "attributes", "[", "i", "]", "=", "value", "else", ":", "self", ".", "components", "[", "i", "]", "=", "value" ]
https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py-rest/gluon/html.py#L791-L804
spyder-ide/spyder
55da47c032dfcf519600f67f8b30eab467f965e7
spyder/utils/programs.py
python
_get_mac_application_icon_path
(app_bundle_path)
return icon_path
Parse mac application bundle and return path for *.icns file.
Parse mac application bundle and return path for *.icns file.
[ "Parse", "mac", "application", "bundle", "and", "return", "path", "for", "*", ".", "icns", "file", "." ]
def _get_mac_application_icon_path(app_bundle_path): """Parse mac application bundle and return path for *.icns file.""" import plistlib contents_path = info_path = os.path.join(app_bundle_path, 'Contents') info_path = os.path.join(contents_path, 'Info.plist') pl = {} if os.path.isfile(info_path): try: # readPlist is deprecated but needed for py27 compat pl = plistlib.readPlist(info_path) except Exception: pass icon_file = pl.get('CFBundleIconFile') icon_path = None if icon_file: icon_path = os.path.join(contents_path, 'Resources', icon_file) # Some app bundles seem to list the icon name without extension if not icon_path.endswith('.icns'): icon_path = icon_path + '.icns' if not os.path.isfile(icon_path): icon_path = None return icon_path
[ "def", "_get_mac_application_icon_path", "(", "app_bundle_path", ")", ":", "import", "plistlib", "contents_path", "=", "info_path", "=", "os", ".", "path", ".", "join", "(", "app_bundle_path", ",", "'Contents'", ")", "info_path", "=", "os", ".", "path", ".", "join", "(", "contents_path", ",", "'Info.plist'", ")", "pl", "=", "{", "}", "if", "os", ".", "path", ".", "isfile", "(", "info_path", ")", ":", "try", ":", "# readPlist is deprecated but needed for py27 compat", "pl", "=", "plistlib", ".", "readPlist", "(", "info_path", ")", "except", "Exception", ":", "pass", "icon_file", "=", "pl", ".", "get", "(", "'CFBundleIconFile'", ")", "icon_path", "=", "None", "if", "icon_file", ":", "icon_path", "=", "os", ".", "path", ".", "join", "(", "contents_path", ",", "'Resources'", ",", "icon_file", ")", "# Some app bundles seem to list the icon name without extension", "if", "not", "icon_path", ".", "endswith", "(", "'.icns'", ")", ":", "icon_path", "=", "icon_path", "+", "'.icns'", "if", "not", "os", ".", "path", ".", "isfile", "(", "icon_path", ")", ":", "icon_path", "=", "None", "return", "icon_path" ]
https://github.com/spyder-ide/spyder/blob/55da47c032dfcf519600f67f8b30eab467f965e7/spyder/utils/programs.py#L311-L337
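A macOS-only sketch, assuming the (private) helper above is importable from spyder.utils.programs; Safari.app is just an example bundle:

```python
from spyder.utils.programs import _get_mac_application_icon_path

icns = _get_mac_application_icon_path('/Applications/Safari.app')
print(icns)  # e.g. /Applications/Safari.app/Contents/Resources/AppIcon.icns
```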
samdroid-apps/something-for-reddit
071733a9e6050d0d171e6620c189b7860af56bab
redditisgtk/gtkutil.py
python
process_shortcuts
(shortcuts, event: Gdk.Event)
Shortcuts is a dict of: accelerator string: (self._function, [arguments]) Accelerator is passed to Gtk.accelerator_parse Event is the GdkEvent
Shortcuts is a dict of: accelerator string: (self._function, [arguments])
[ "Shortcuts", "is", "a", "dict", "of", ":", "accelerator", "string", ":", "(", "self", ".", "_function", "[", "arguments", "]", ")" ]
def process_shortcuts(shortcuts, event: Gdk.Event): ''' Shortcuts is a dict of: accelerator string: (self._function, [arguments]) Accelerator is passed to Gtk.accelerator_parse Event is the GdkEvent ''' if event.type != Gdk.EventType.KEY_PRESS: return for accel_string, value in shortcuts.items(): key, mods = Gtk.accelerator_parse(accel_string) emods = event.state & (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.SHIFT_MASK) if event.keyval == key and (emods & mods or mods == emods == 0): func, args = value try: func(*args) except Exception as e: raise return False return True
[ "def", "process_shortcuts", "(", "shortcuts", ",", "event", ":", "Gdk", ".", "Event", ")", ":", "if", "event", ".", "type", "!=", "Gdk", ".", "EventType", ".", "KEY_PRESS", ":", "return", "for", "accel_string", ",", "value", "in", "shortcuts", ".", "items", "(", ")", ":", "key", ",", "mods", "=", "Gtk", ".", "accelerator_parse", "(", "accel_string", ")", "emods", "=", "event", ".", "state", "&", "(", "Gdk", ".", "ModifierType", ".", "CONTROL_MASK", "|", "Gdk", ".", "ModifierType", ".", "SHIFT_MASK", ")", "if", "event", ".", "keyval", "==", "key", "and", "(", "emods", "&", "mods", "or", "mods", "==", "emods", "==", "0", ")", ":", "func", ",", "args", "=", "value", "try", ":", "func", "(", "*", "args", ")", "except", "Exception", "as", "e", ":", "raise", "return", "False", "return", "True" ]
https://github.com/samdroid-apps/something-for-reddit/blob/071733a9e6050d0d171e6620c189b7860af56bab/redditisgtk/gtkutil.py#L22-L44
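A hedged usage sketch of the dict shape the docstring above describes; the callbacks and the surrounding handler are hypothetical, only the accelerator-string keys, the (callable, [arguments]) values, and the return-value convention come from the extract.

def refresh():
    print('refresh')

def close_tab(index):
    print('close tab', index)

shortcuts = {
    '<Ctrl>r': (refresh, []),            # accelerator string -> (callable, [args])
    '<Ctrl><Shift>k': (close_tab, [0]),
}

def on_key_press(widget, event):
    # False means a shortcut fired; True means no accelerator matched.
    return process_shortcuts(shortcuts, event)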
AzureAD/microsoft-authentication-library-for-python
a18c2231896d8a050ad181461928f4dbd818049f
msal/oauth2cli/oauth2.py
python
Client.obtain_token_by_device_flow
(self, flow, exit_condition=lambda flow: flow.get("expires_at", 0) < time.time(), **kwargs)
Obtain token by a device flow object, with customizable polling effect. Args: flow (dict): An object previously generated by initiate_device_flow(...). Its content WILL BE CHANGED by this method during each run. We share this object with you, so that you could implement your own loop, should you choose to do so. exit_condition (Callable): This method implements a loop to provide polling effect. The loop's exit condition is calculated by this callback. The default callback makes the loop run until the flow expires. Therefore, one of the ways to exit the polling early, is to change the flow["expires_at"] to a small number such as 0. In case you are doing async programming, you may want to completely turn off the loop. You can do so by using a callback as: exit_condition = lambda flow: True to make the loop run only once, i.e. no polling, hence non-block.
Obtain token by a device flow object, with customizable polling effect.
[ "Obtain", "token", "by", "a", "device", "flow", "object", "with", "customizable", "polling", "effect", "." ]
def obtain_token_by_device_flow(self, flow, exit_condition=lambda flow: flow.get("expires_at", 0) < time.time(), **kwargs): # type: (dict, Callable) -> dict """Obtain token by a device flow object, with customizable polling effect. Args: flow (dict): An object previously generated by initiate_device_flow(...). Its content WILL BE CHANGED by this method during each run. We share this object with you, so that you could implement your own loop, should you choose to do so. exit_condition (Callable): This method implements a loop to provide polling effect. The loop's exit condition is calculated by this callback. The default callback makes the loop run until the flow expires. Therefore, one of the ways to exit the polling early, is to change the flow["expires_at"] to a small number such as 0. In case you are doing async programming, you may want to completely turn off the loop. You can do so by using a callback as: exit_condition = lambda flow: True to make the loop run only once, i.e. no polling, hence non-block. """ while True: result = self._obtain_token_by_device_flow(flow, **kwargs) if result.get("error") not in self.DEVICE_FLOW_RETRIABLE_ERRORS: return result for i in range(flow.get("interval", 5)): # Wait interval seconds if exit_condition(flow): return result time.sleep(1)
[ "def", "obtain_token_by_device_flow", "(", "self", ",", "flow", ",", "exit_condition", "=", "lambda", "flow", ":", "flow", ".", "get", "(", "\"expires_at\"", ",", "0", ")", "<", "time", ".", "time", "(", ")", ",", "*", "*", "kwargs", ")", ":", "# type: (dict, Callable) -> dict", "while", "True", ":", "result", "=", "self", ".", "_obtain_token_by_device_flow", "(", "flow", ",", "*", "*", "kwargs", ")", "if", "result", ".", "get", "(", "\"error\"", ")", "not", "in", "self", ".", "DEVICE_FLOW_RETRIABLE_ERRORS", ":", "return", "result", "for", "i", "in", "range", "(", "flow", ".", "get", "(", "\"interval\"", ",", "5", ")", ")", ":", "# Wait interval seconds", "if", "exit_condition", "(", "flow", ")", ":", "return", "result", "time", ".", "sleep", "(", "1", ")" ]
https://github.com/AzureAD/microsoft-authentication-library-for-python/blob/a18c2231896d8a050ad181461928f4dbd818049f/msal/oauth2cli/oauth2.py#L354-L390
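A hedged usage sketch of the polling contract described above; client is assumed to be an already-constructed oauth2cli Client and the scope is illustrative, while the exit_condition and expires_at tricks come straight from the docstring.

flow = client.initiate_device_flow(scope=["profile"])        # scope is an assumption
print(flow.get("verification_uri"), flow.get("user_code"))   # show these to the user

result = client.obtain_token_by_device_flow(flow)            # default: poll until expiry

# Non-blocking variant from the docstring: run the loop body exactly once.
result = client.obtain_token_by_device_flow(
    flow, exit_condition=lambda flow: True)

# Cancel an in-progress poll from elsewhere by expiring the shared flow object.
flow["expires_at"] = 0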
pyg-team/pytorch_geometric
b920e9a3a64e22c8356be55301c88444ff051cae
torch_geometric/nn/fx.py
python
Transformer.find_by_name
(self, name: str)
return None
[]
def find_by_name(self, name: str) -> Optional[Node]: for node in self.graph.nodes: if node.name == name: return node return None
[ "def", "find_by_name", "(", "self", ",", "name", ":", "str", ")", "->", "Optional", "[", "Node", "]", ":", "for", "node", "in", "self", ".", "graph", ".", "nodes", ":", "if", "node", ".", "name", "==", "name", ":", "return", "node", "return", "None" ]
https://github.com/pyg-team/pytorch_geometric/blob/b920e9a3a64e22c8356be55301c88444ff051cae/torch_geometric/nn/fx.py#L211-L215
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/idlelib/squeezer.py
python
count_lines_with_wrapping
(s, linewidth=80)
return linecount
Count the number of lines in a given string. Lines are counted as if the string was wrapped so that lines are never over linewidth characters long. Tabs are considered tabwidth characters long.
Count the number of lines in a given string.
[ "Count", "the", "number", "of", "lines", "in", "a", "given", "string", "." ]
def count_lines_with_wrapping(s, linewidth=80): """Count the number of lines in a given string. Lines are counted as if the string was wrapped so that lines are never over linewidth characters long. Tabs are considered tabwidth characters long. """ tabwidth = 8 # Currently always true in Shell. pos = 0 linecount = 1 current_column = 0 for m in re.finditer(r"[\t\n]", s): # Process the normal chars up to tab or newline. numchars = m.start() - pos pos += numchars current_column += numchars # Deal with tab or newline. if s[pos] == '\n': # Avoid the `current_column == 0` edge-case, and while we're # at it, don't bother adding 0. if current_column > linewidth: # If the current column was exactly linewidth, divmod # would give (1,0), even though a new line hadn't yet # been started. The same is true if length is any exact # multiple of linewidth. Therefore, subtract 1 before # dividing a non-empty line. linecount += (current_column - 1) // linewidth linecount += 1 current_column = 0 else: assert s[pos] == '\t' current_column += tabwidth - (current_column % tabwidth) # If a tab passes the end of the line, consider the entire # tab as being on the next line. if current_column > linewidth: linecount += 1 current_column = tabwidth pos += 1 # After the tab or newline. # Process remaining chars (no more tabs or newlines). current_column += len(s) - pos # Avoid divmod(-1, linewidth). if current_column > 0: linecount += (current_column - 1) // linewidth else: # Text ended with newline; don't count an extra line after it. linecount -= 1 return linecount
[ "def", "count_lines_with_wrapping", "(", "s", ",", "linewidth", "=", "80", ")", ":", "tabwidth", "=", "8", "# Currently always true in Shell.", "pos", "=", "0", "linecount", "=", "1", "current_column", "=", "0", "for", "m", "in", "re", ".", "finditer", "(", "r\"[\\t\\n]\"", ",", "s", ")", ":", "# Process the normal chars up to tab or newline.", "numchars", "=", "m", ".", "start", "(", ")", "-", "pos", "pos", "+=", "numchars", "current_column", "+=", "numchars", "# Deal with tab or newline.", "if", "s", "[", "pos", "]", "==", "'\\n'", ":", "# Avoid the `current_column == 0` edge-case, and while we're", "# at it, don't bother adding 0.", "if", "current_column", ">", "linewidth", ":", "# If the current column was exactly linewidth, divmod", "# would give (1,0), even though a new line hadn't yet", "# been started. The same is true if length is any exact", "# multiple of linewidth. Therefore, subtract 1 before", "# dividing a non-empty line.", "linecount", "+=", "(", "current_column", "-", "1", ")", "//", "linewidth", "linecount", "+=", "1", "current_column", "=", "0", "else", ":", "assert", "s", "[", "pos", "]", "==", "'\\t'", "current_column", "+=", "tabwidth", "-", "(", "current_column", "%", "tabwidth", ")", "# If a tab passes the end of the line, consider the entire", "# tab as being on the next line.", "if", "current_column", ">", "linewidth", ":", "linecount", "+=", "1", "current_column", "=", "tabwidth", "pos", "+=", "1", "# After the tab or newline.", "# Process remaining chars (no more tabs or newlines).", "current_column", "+=", "len", "(", "s", ")", "-", "pos", "# Avoid divmod(-1, linewidth).", "if", "current_column", ">", "0", ":", "linecount", "+=", "(", "current_column", "-", "1", ")", "//", "linewidth", "else", ":", "# Text ended with newline; don't count an extra line after it.", "linecount", "-=", "1", "return", "linecount" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/idlelib/squeezer.py#L28-L81
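A few worked calls that follow directly from the logic above (tabwidth is fixed at 8 in the code):

assert count_lines_with_wrapping("a" * 80) == 1    # exactly fills one line
assert count_lines_with_wrapping("a" * 81) == 2    # the 81st char wraps
assert count_lines_with_wrapping("ab\ncd") == 2
assert count_lines_with_wrapping("ab\n") == 1      # trailing newline adds no line
assert count_lines_with_wrapping("\t" * 11) == 2   # the 11th tab passes column 80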
ncbi-nlp/bluebert
ccc828c74e0eed942fd1ececff3638babc38269c
bert/tokenization.py
python
BasicTokenizer.tokenize
(self, text)
return output_tokens
Tokenizes a piece of text.
Tokenizes a piece of text.
[ "Tokenizes", "a", "piece", "of", "text", "." ]
def tokenize(self, text): """Tokenizes a piece of text.""" text = convert_to_unicode(text) text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens
[ "def", "tokenize", "(", "self", ",", "text", ")", ":", "text", "=", "convert_to_unicode", "(", "text", ")", "text", "=", "self", ".", "_clean_text", "(", "text", ")", "# This was added on November 1st, 2018 for the multilingual and Chinese", "# models. This is also applied to the English models now, but it doesn't", "# matter since the English models were not trained on any Chinese data", "# and generally don't have any Chinese data in them (there are Chinese", "# characters in the vocabulary because Wikipedia does have some Chinese", "# words in the English Wikipedia.).", "text", "=", "self", ".", "_tokenize_chinese_chars", "(", "text", ")", "orig_tokens", "=", "whitespace_tokenize", "(", "text", ")", "split_tokens", "=", "[", "]", "for", "token", "in", "orig_tokens", ":", "if", "self", ".", "do_lower_case", ":", "token", "=", "token", ".", "lower", "(", ")", "token", "=", "self", ".", "_run_strip_accents", "(", "token", ")", "split_tokens", ".", "extend", "(", "self", ".", "_run_split_on_punc", "(", "token", ")", ")", "output_tokens", "=", "whitespace_tokenize", "(", "\" \"", ".", "join", "(", "split_tokens", ")", ")", "return", "output_tokens" ]
https://github.com/ncbi-nlp/bluebert/blob/ccc828c74e0eed942fd1ececff3638babc38269c/bert/tokenization.py#L196-L218
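An illustrative call showing the pipeline above end to end (cleaning, lowercasing, accent stripping, punctuation splitting); the expected output follows from the code:

tokenizer = BasicTokenizer(do_lower_case=True)
print(tokenizer.tokenize(u"Hello, WORLD!"))   # ['hello', ',', 'world', '!']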
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/models/visual_bert/modeling_visual_bert.py
python
VisualBertSelfOutput.forward
(self, hidden_states, input_tensor)
return hidden_states
[]
def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
[ "def", "forward", "(", "self", ",", "hidden_states", ",", "input_tensor", ")", ":", "hidden_states", "=", "self", ".", "dense", "(", "hidden_states", ")", "hidden_states", "=", "self", ".", "dropout", "(", "hidden_states", ")", "hidden_states", "=", "self", ".", "LayerNorm", "(", "hidden_states", "+", "input_tensor", ")", "return", "hidden_states" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/models/visual_bert/modeling_visual_bert.py#L276-L280
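The block above is the standard Transformer post-attention residual-plus-LayerNorm step. A minimal self-contained sketch of the same pattern; the hidden size, dropout rate, and eps below are illustrative, not taken from the VisualBERT config.

import torch
from torch import nn

class SelfOutput(nn.Module):
    def __init__(self, hidden_size=768, dropout=0.1):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states, input_tensor):
        # project, regularize, add the residual, then normalize
        return self.LayerNorm(self.dropout(self.dense(hidden_states)) + input_tensor)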
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/netrc.py
python
netrc._parse
(self, file, fp, default_netrc)
[]
def _parse(self, file, fp, default_netrc): lexer = shlex.shlex(fp) lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" lexer.commenters = lexer.commenters.replace('#', '') while 1: # Look for a machine, default, or macdef top-level keyword saved_lineno = lexer.lineno toplevel = tt = lexer.get_token() if not tt: break elif tt[0] == '#': if lexer.lineno == saved_lineno and len(tt) == 1: lexer.instream.readline() continue elif tt == 'machine': entryname = lexer.get_token() elif tt == 'default': entryname = 'default' elif tt == 'macdef': # Just skip to end of macdefs entryname = lexer.get_token() self.macros[entryname] = [] lexer.whitespace = ' \t' while 1: line = lexer.instream.readline() if not line or line == '\012': lexer.whitespace = ' \t\r\n' break self.macros[entryname].append(line) continue else: raise NetrcParseError( "bad toplevel token %r" % tt, file, lexer.lineno) # We're looking at start of an entry for a named machine or default. login = '' account = password = None self.hosts[entryname] = {} while 1: tt = lexer.get_token() if (tt.startswith('#') or tt in {'', 'machine', 'default', 'macdef'}): if password: self.hosts[entryname] = (login, account, password) lexer.push_token(tt) break else: raise NetrcParseError( "malformed %s entry %s terminated by %s" % (toplevel, entryname, repr(tt)), file, lexer.lineno) elif tt == 'login' or tt == 'user': login = lexer.get_token() elif tt == 'account': account = lexer.get_token() elif tt == 'password': if os.name == 'posix' and default_netrc: prop = os.fstat(fp.fileno()) if prop.st_uid != os.getuid(): import pwd try: fowner = pwd.getpwuid(prop.st_uid)[0] except KeyError: fowner = 'uid %s' % prop.st_uid try: user = pwd.getpwuid(os.getuid())[0] except KeyError: user = 'uid %s' % os.getuid() raise NetrcParseError( ("~/.netrc file owner (%s) does not match" " current user (%s)") % (fowner, user), file, lexer.lineno) if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)): raise NetrcParseError( "~/.netrc access too permissive: access" " permissions must restrict access to only" " the owner", file, lexer.lineno) password = lexer.get_token() else: raise NetrcParseError("bad follower token %r" % tt, file, lexer.lineno)
[ "def", "_parse", "(", "self", ",", "file", ",", "fp", ",", "default_netrc", ")", ":", "lexer", "=", "shlex", ".", "shlex", "(", "fp", ")", "lexer", ".", "wordchars", "+=", "r\"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\"\"", "lexer", ".", "commenters", "=", "lexer", ".", "commenters", ".", "replace", "(", "'#'", ",", "''", ")", "while", "1", ":", "# Look for a machine, default, or macdef top-level keyword", "saved_lineno", "=", "lexer", ".", "lineno", "toplevel", "=", "tt", "=", "lexer", ".", "get_token", "(", ")", "if", "not", "tt", ":", "break", "elif", "tt", "[", "0", "]", "==", "'#'", ":", "if", "lexer", ".", "lineno", "==", "saved_lineno", "and", "len", "(", "tt", ")", "==", "1", ":", "lexer", ".", "instream", ".", "readline", "(", ")", "continue", "elif", "tt", "==", "'machine'", ":", "entryname", "=", "lexer", ".", "get_token", "(", ")", "elif", "tt", "==", "'default'", ":", "entryname", "=", "'default'", "elif", "tt", "==", "'macdef'", ":", "# Just skip to end of macdefs", "entryname", "=", "lexer", ".", "get_token", "(", ")", "self", ".", "macros", "[", "entryname", "]", "=", "[", "]", "lexer", ".", "whitespace", "=", "' \\t'", "while", "1", ":", "line", "=", "lexer", ".", "instream", ".", "readline", "(", ")", "if", "not", "line", "or", "line", "==", "'\\012'", ":", "lexer", ".", "whitespace", "=", "' \\t\\r\\n'", "break", "self", ".", "macros", "[", "entryname", "]", ".", "append", "(", "line", ")", "continue", "else", ":", "raise", "NetrcParseError", "(", "\"bad toplevel token %r\"", "%", "tt", ",", "file", ",", "lexer", ".", "lineno", ")", "# We're looking at start of an entry for a named machine or default.", "login", "=", "''", "account", "=", "password", "=", "None", "self", ".", "hosts", "[", "entryname", "]", "=", "{", "}", "while", "1", ":", "tt", "=", "lexer", ".", "get_token", "(", ")", "if", "(", "tt", ".", "startswith", "(", "'#'", ")", "or", "tt", "in", "{", "''", ",", "'machine'", ",", "'default'", ",", "'macdef'", "}", ")", ":", "if", "password", ":", "self", ".", "hosts", "[", "entryname", "]", "=", "(", "login", ",", "account", ",", "password", ")", "lexer", ".", "push_token", "(", "tt", ")", "break", "else", ":", "raise", "NetrcParseError", "(", "\"malformed %s entry %s terminated by %s\"", "%", "(", "toplevel", ",", "entryname", ",", "repr", "(", "tt", ")", ")", ",", "file", ",", "lexer", ".", "lineno", ")", "elif", "tt", "==", "'login'", "or", "tt", "==", "'user'", ":", "login", "=", "lexer", ".", "get_token", "(", ")", "elif", "tt", "==", "'account'", ":", "account", "=", "lexer", ".", "get_token", "(", ")", "elif", "tt", "==", "'password'", ":", "if", "os", ".", "name", "==", "'posix'", "and", "default_netrc", ":", "prop", "=", "os", ".", "fstat", "(", "fp", ".", "fileno", "(", ")", ")", "if", "prop", ".", "st_uid", "!=", "os", ".", "getuid", "(", ")", ":", "import", "pwd", "try", ":", "fowner", "=", "pwd", ".", "getpwuid", "(", "prop", ".", "st_uid", ")", "[", "0", "]", "except", "KeyError", ":", "fowner", "=", "'uid %s'", "%", "prop", ".", "st_uid", "try", ":", "user", "=", "pwd", ".", "getpwuid", "(", "os", ".", "getuid", "(", ")", ")", "[", "0", "]", "except", "KeyError", ":", "user", "=", "'uid %s'", "%", "os", ".", "getuid", "(", ")", "raise", "NetrcParseError", "(", "(", "\"~/.netrc file owner (%s) does not match\"", "\" current user (%s)\"", ")", "%", "(", "fowner", ",", "user", ")", ",", "file", ",", "lexer", ".", "lineno", ")", "if", "(", "prop", ".", "st_mode", "&", "(", "stat", ".", "S_IRWXG", "|", "stat", ".", "S_IRWXO", ")", ")", ":", "raise", 
"NetrcParseError", "(", "\"~/.netrc access too permissive: access\"", "\" permissions must restrict access to only\"", "\" the owner\"", ",", "file", ",", "lexer", ".", "lineno", ")", "password", "=", "lexer", ".", "get_token", "(", ")", "else", ":", "raise", "NetrcParseError", "(", "\"bad follower token %r\"", "%", "tt", ",", "file", ",", "lexer", ".", "lineno", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/netrc.py#L32-L111
PaddlePaddle/PaddleX
2bab73f81ab54e328204e7871e6ae4a82e719f5d
paddlex/ppdet/data/transform/autoaugment_utils.py
python
unwrap
(image, replace)
return image.astype(np.uint8)
Unwraps an image produced by wrap. Where there is a 0 in the last channel for every spatial position, the rest of the three channels in that spatial dimension are grayed (set to 128). Operations like translate and shear on a wrapped Tensor will leave 0s in empty locations. Some transformations look at the intensity of values to do preprocessing, and we want these empty pixels to assume the 'average' value, rather than pure black. Args: image: A 3D Image Tensor with 4 channels. replace: A one or three value 1D tensor to fill empty pixels. Returns: image: A 3D image Tensor with 3 channels.
Unwraps an image produced by wrap.
[ "Unwraps", "an", "image", "produced", "by", "wrap", "." ]
def unwrap(image, replace): """Unwraps an image produced by wrap. Where there is a 0 in the last channel for every spatial position, the rest of the three channels in that spatial dimension are grayed (set to 128). Operations like translate and shear on a wrapped Tensor will leave 0s in empty locations. Some transformations look at the intensity of values to do preprocessing, and we want these empty pixels to assume the 'average' value, rather than pure black. Args: image: A 3D Image Tensor with 4 channels. replace: A one or three value 1D tensor to fill empty pixels. Returns: image: A 3D image Tensor with 3 channels. """ image_shape = image.shape # Flatten the spatial dimensions. flattened_image = np.reshape(image, [-1, image_shape[2]]) # Find all pixels where the last channel is zero. alpha_channel = flattened_image[:, 3] replace = np.concatenate([replace, np.ones([1], image.dtype)], 0) # Where they are zero, fill them in with 'replace'. alpha_channel = np.reshape(alpha_channel, (-1, 1)) alpha_channel = np.tile(alpha_channel, reps=(1, flattened_image.shape[1])) flattened_image = np.where( np.equal(alpha_channel, 0), np.ones_like( flattened_image, dtype=image.dtype) * replace, flattened_image) image = np.reshape(flattened_image, image_shape) image = image[:, :, :3] return image.astype(np.uint8)
[ "def", "unwrap", "(", "image", ",", "replace", ")", ":", "image_shape", "=", "image", ".", "shape", "# Flatten the spatial dimensions.", "flattened_image", "=", "np", ".", "reshape", "(", "image", ",", "[", "-", "1", ",", "image_shape", "[", "2", "]", "]", ")", "# Find all pixels where the last channel is zero.", "alpha_channel", "=", "flattened_image", "[", ":", ",", "3", "]", "replace", "=", "np", ".", "concatenate", "(", "[", "replace", ",", "np", ".", "ones", "(", "[", "1", "]", ",", "image", ".", "dtype", ")", "]", ",", "0", ")", "# Where they are zero, fill them in with 'replace'.", "alpha_channel", "=", "np", ".", "reshape", "(", "alpha_channel", ",", "(", "-", "1", ",", "1", ")", ")", "alpha_channel", "=", "np", ".", "tile", "(", "alpha_channel", ",", "reps", "=", "(", "1", ",", "flattened_image", ".", "shape", "[", "1", "]", ")", ")", "flattened_image", "=", "np", ".", "where", "(", "np", ".", "equal", "(", "alpha_channel", ",", "0", ")", ",", "np", ".", "ones_like", "(", "flattened_image", ",", "dtype", "=", "image", ".", "dtype", ")", "*", "replace", ",", "flattened_image", ")", "image", "=", "np", ".", "reshape", "(", "flattened_image", ",", "image_shape", ")", "image", "=", "image", "[", ":", ",", ":", ",", ":", "3", "]", "return", "image", ".", "astype", "(", "np", ".", "uint8", ")" ]
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/paddlex/ppdet/data/transform/autoaugment_utils.py#L1149-L1188
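A hedged round-trip sketch; the wrap helper here is an assumption (the usual AutoAugment counterpart that appends an all-ones channel), matching the 4-channel input the docstring expects.

import numpy as np

def wrap(image):
    # assumed counterpart of unwrap: append an all-ones "validity" channel
    ones = np.ones(image.shape[:2] + (1,), image.dtype)
    return np.concatenate([image, ones], axis=2)

img = np.zeros((4, 4, 3), np.uint8)
wrapped = wrap(img)                      # shape (4, 4, 4)
wrapped[0, 0, :] = 0                     # a pixel a transform left empty
restored = unwrap(wrapped, np.array([128, 128, 128], np.uint8))
# restored[0, 0] == [128, 128, 128]; all other pixels are unchanged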
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-35/fabmetheus_utilities/geometry/geometry_utilities/booleansolid.py
python
addLoopsXSegmentIntersections
( lineLoopsIntersections, loops, segmentFirstX, segmentSecondX, segmentYMirror, y )
Add intersections of the loops with the x segment.
Add intersections of the loops with the x segment.
[ "Add", "intersections", "of", "the", "loops", "with", "the", "x", "segment", "." ]
def addLoopsXSegmentIntersections( lineLoopsIntersections, loops, segmentFirstX, segmentSecondX, segmentYMirror, y ): "Add intersections of the loops with the x segment." for loop in loops: addLoopXSegmentIntersections( lineLoopsIntersections, loop, segmentFirstX, segmentSecondX, segmentYMirror, y )
[ "def", "addLoopsXSegmentIntersections", "(", "lineLoopsIntersections", ",", "loops", ",", "segmentFirstX", ",", "segmentSecondX", ",", "segmentYMirror", ",", "y", ")", ":", "for", "loop", "in", "loops", ":", "addLoopXSegmentIntersections", "(", "lineLoopsIntersections", ",", "loop", ",", "segmentFirstX", ",", "segmentSecondX", ",", "segmentYMirror", ",", "y", ")" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-35/fabmetheus_utilities/geometry/geometry_utilities/booleansolid.py#L82-L85
open-mmlab/mmskeleton
b4c076baa9e02e69b5876c49fa7c509866d902c7
mmskeleton/datasets/utils/video_demo.py
python
VideoDemo.skeleton_preprocess
(image, bboxes, skeleton_cfg)
return result, meta
[]
def skeleton_preprocess(image, bboxes, skeleton_cfg): # output collector result_list = [] meta = dict() meta['scale'] = [] meta['rotation'] = [] meta['center'] = [] meta['score'] = [] # preprocess config image_size = skeleton_cfg.image_size image_width = image_size[0] image_height = image_size[1] aspect_ratio = image_width * 1.0 / image_height pixel_std = skeleton_cfg.pixel_std image_mean = skeleton_cfg.image_mean image_std = skeleton_cfg.image_std for idx, bbox in enumerate(bboxes): x1, y1, x2, y2 = bbox[:4] w, h = x2 - x1, y2 - y1 center, scale = xywh2cs(x1, y1, h, w, aspect_ratio, pixel_std) trans = get_affine_transform(center, scale, 0, image_size) transformed_image = cv2.warpAffine( image, trans, (int(image_size[0]), int(image_size[1])), flags=cv2.INTER_LINEAR) # transfer into Torch.Tensor transformed_image = transformed_image / 255.0 transformed_image = transformed_image - image_mean transformed_image = transformed_image / image_std transformed_image = transformed_image.transpose(2, 0, 1) result_list.append(transformed_image) # from IPython import embed; embed() meta['scale'].append(scale) meta['rotation'].append(0) meta['center'].append(center) meta['score'].append(bbox[4]) result = torch.from_numpy(np.array(result_list)).float() for name, data in meta.items(): meta[name] = torch.from_numpy(np.array(data)).float() return result, meta
[ "def", "skeleton_preprocess", "(", "image", ",", "bboxes", ",", "skeleton_cfg", ")", ":", "# output collector", "result_list", "=", "[", "]", "meta", "=", "dict", "(", ")", "meta", "[", "'scale'", "]", "=", "[", "]", "meta", "[", "'rotation'", "]", "=", "[", "]", "meta", "[", "'center'", "]", "=", "[", "]", "meta", "[", "'score'", "]", "=", "[", "]", "# preprocess config", "image_size", "=", "skeleton_cfg", ".", "image_size", "image_width", "=", "image_size", "[", "0", "]", "image_height", "=", "image_size", "[", "1", "]", "aspect_ratio", "=", "image_width", "*", "1.0", "/", "image_height", "pixel_std", "=", "skeleton_cfg", ".", "pixel_std", "image_mean", "=", "skeleton_cfg", ".", "image_mean", "image_std", "=", "skeleton_cfg", ".", "image_std", "for", "idx", ",", "bbox", "in", "enumerate", "(", "bboxes", ")", ":", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "bbox", "[", ":", "4", "]", "w", ",", "h", "=", "x2", "-", "x1", ",", "y2", "-", "y1", "center", ",", "scale", "=", "xywh2cs", "(", "x1", ",", "y1", ",", "h", ",", "w", ",", "aspect_ratio", ",", "pixel_std", ")", "trans", "=", "get_affine_transform", "(", "center", ",", "scale", ",", "0", ",", "image_size", ")", "transformed_image", "=", "cv2", ".", "warpAffine", "(", "image", ",", "trans", ",", "(", "int", "(", "image_size", "[", "0", "]", ")", ",", "int", "(", "image_size", "[", "1", "]", ")", ")", ",", "flags", "=", "cv2", ".", "INTER_LINEAR", ")", "# transfer into Torch.Tensor", "transformed_image", "=", "transformed_image", "/", "255.0", "transformed_image", "=", "transformed_image", "-", "image_mean", "transformed_image", "=", "transformed_image", "/", "image_std", "transformed_image", "=", "transformed_image", ".", "transpose", "(", "2", ",", "0", ",", "1", ")", "result_list", ".", "append", "(", "transformed_image", ")", "# from IPython import embed; embed()", "meta", "[", "'scale'", "]", ".", "append", "(", "scale", ")", "meta", "[", "'rotation'", "]", ".", "append", "(", "0", ")", "meta", "[", "'center'", "]", ".", "append", "(", "center", ")", "meta", "[", "'score'", "]", ".", "append", "(", "bbox", "[", "4", "]", ")", "result", "=", "torch", ".", "from_numpy", "(", "np", ".", "array", "(", "result_list", ")", ")", ".", "float", "(", ")", "for", "name", ",", "data", "in", "meta", ".", "items", "(", ")", ":", "meta", "[", "name", "]", "=", "torch", ".", "from_numpy", "(", "np", ".", "array", "(", "data", ")", ")", ".", "float", "(", ")", "return", "result", ",", "meta" ]
https://github.com/open-mmlab/mmskeleton/blob/b4c076baa9e02e69b5876c49fa7c509866d902c7/mmskeleton/datasets/utils/video_demo.py#L36-L79
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/requests/requests/packages/urllib3/packages/six.py
python
_add_doc
(func, doc)
Add documentation to a function.
Add documentation to a function.
[ "Add", "documentation", "to", "a", "function", "." ]
def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc
[ "def", "_add_doc", "(", "func", ",", "doc", ")", ":", "func", ".", "__doc__", "=", "doc" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/requests/requests/packages/urllib3/packages/six.py#L67-L69
comadan/FM_FTRL
a86c51d11d86b7e4a9e6b3db5b50351849e61fb2
FM_FTRL_machine.py
python
FM_FTRL_machine.init_fm
(self, i)
initialize the factorization weight vector for variable i.
initialize the factorization weight vector for variable i.
[ "initialize", "the", "factorization", "weight", "vector", "for", "variable", "i", "." ]
def init_fm(self, i): ''' initialize the factorization weight vector for variable i. ''' if i not in self.n_fm: self.n_fm[i] = [0.] * self.fm_dim self.w_fm[i] = [0.] * self.fm_dim self.z_fm[i] = [0.] * self.fm_dim for k in range(self.fm_dim): self.z_fm[i][k] = random.gauss(0., self.fm_initDev)
[ "def", "init_fm", "(", "self", ",", "i", ")", ":", "if", "i", "not", "in", "self", ".", "n_fm", ":", "self", ".", "n_fm", "[", "i", "]", "=", "[", "0.", "]", "*", "self", ".", "fm_dim", "self", ".", "w_fm", "[", "i", "]", "=", "[", "0.", "]", "*", "self", ".", "fm_dim", "self", ".", "z_fm", "[", "i", "]", "=", "[", "0.", "]", "*", "self", ".", "fm_dim", "for", "k", "in", "range", "(", "self", ".", "fm_dim", ")", ":", "self", ".", "z_fm", "[", "i", "]", "[", "k", "]", "=", "random", ".", "gauss", "(", "0.", ",", "self", ".", "fm_initDev", ")" ]
https://github.com/comadan/FM_FTRL/blob/a86c51d11d86b7e4a9e6b3db5b50351849e61fb2/FM_FTRL_machine.py#L43-L52
WerWolv/EdiZon_CheatsConfigsAndScripts
d16d36c7509c01dca770f402babd83ff2e9ae6e7
Scripts/lib/python3.5/pydoc.py
python
HTMLDoc.section
(self, title, fgcol, bgcol, contents, width=6, prelude='', marginalia=None, gap='&nbsp;')
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
Format a section with a heading.
Format a section with a heading.
[ "Format", "a", "section", "with", "a", "heading", "." ]
def section(self, title, fgcol, bgcol, contents, width=6, prelude='', marginalia=None, gap='&nbsp;'): """Format a section with a heading.""" if marginalia is None: marginalia = '<tt>' + '&nbsp;' * width + '</tt>' result = '''<p> <table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section"> <tr bgcolor="%s"> <td colspan=3 valign=bottom>&nbsp;<br> <font color="%s" face="helvetica, arial">%s</font></td></tr> ''' % (bgcol, fgcol, title) if prelude: result = result + ''' <tr bgcolor="%s"><td rowspan=2>%s</td> <td colspan=2>%s</td></tr> <tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap) else: result = result + ''' <tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap) return result + '\n<td width="100%%">%s</td></tr></table>' % contents
[ "def", "section", "(", "self", ",", "title", ",", "fgcol", ",", "bgcol", ",", "contents", ",", "width", "=", "6", ",", "prelude", "=", "''", ",", "marginalia", "=", "None", ",", "gap", "=", "'&nbsp;'", ")", ":", "if", "marginalia", "is", "None", ":", "marginalia", "=", "'<tt>'", "+", "'&nbsp;'", "*", "width", "+", "'</tt>'", "result", "=", "'''<p>\n<table width=\"100%%\" cellspacing=0 cellpadding=2 border=0 summary=\"section\">\n<tr bgcolor=\"%s\">\n<td colspan=3 valign=bottom>&nbsp;<br>\n<font color=\"%s\" face=\"helvetica, arial\">%s</font></td></tr>\n '''", "%", "(", "bgcol", ",", "fgcol", ",", "title", ")", "if", "prelude", ":", "result", "=", "result", "+", "'''\n<tr bgcolor=\"%s\"><td rowspan=2>%s</td>\n<td colspan=2>%s</td></tr>\n<tr><td>%s</td>'''", "%", "(", "bgcol", ",", "marginalia", ",", "prelude", ",", "gap", ")", "else", ":", "result", "=", "result", "+", "'''\n<tr><td bgcolor=\"%s\">%s</td><td>%s</td>'''", "%", "(", "bgcol", ",", "marginalia", ",", "gap", ")", "return", "result", "+", "'\\n<td width=\"100%%\">%s</td></tr></table>'", "%", "contents" ]
https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/pydoc.py#L488-L508
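A hedged usage sketch; HTMLDoc is instantiable directly from pydoc, and the colors below are merely illustrative values of the kind pydoc itself passes.

import pydoc

html = pydoc.HTMLDoc().section(
    'Example Section', '#ffffff', '#7799ee', '<p>body contents</p>')
# html is a <table> snippet: a colored heading row plus a full-width contents cell
print(html)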
Kozea/WeasyPrint
6cce2978165134e37683cb5b3d156cac6a11a7f9
weasyprint/css/validation/expanders.py
python
expand_flex_flow
(name, tokens)
Expand the ``flex-flow`` property.
Expand the ``flex-flow`` property.
[ "Expand", "the", "flex", "-", "flow", "property", "." ]
def expand_flex_flow(name, tokens): """Expand the ``flex-flow`` property.""" if len(tokens) == 2: for sorted_tokens in tokens, tokens[::-1]: direction = flex_direction([sorted_tokens[0]]) wrap = flex_wrap([sorted_tokens[1]]) if direction and wrap: yield 'flex-direction', [sorted_tokens[0]] yield 'flex-wrap', [sorted_tokens[1]] break else: raise InvalidValues elif len(tokens) == 1: direction = flex_direction([tokens[0]]) if direction: yield 'flex-direction', [tokens[0]] else: wrap = flex_wrap([tokens[0]]) if wrap: yield 'flex-wrap', [tokens[0]] else: raise InvalidValues else: raise InvalidValues
[ "def", "expand_flex_flow", "(", "name", ",", "tokens", ")", ":", "if", "len", "(", "tokens", ")", "==", "2", ":", "for", "sorted_tokens", "in", "tokens", ",", "tokens", "[", ":", ":", "-", "1", "]", ":", "direction", "=", "flex_direction", "(", "[", "sorted_tokens", "[", "0", "]", "]", ")", "wrap", "=", "flex_wrap", "(", "[", "sorted_tokens", "[", "1", "]", "]", ")", "if", "direction", "and", "wrap", ":", "yield", "'flex-direction'", ",", "[", "sorted_tokens", "[", "0", "]", "]", "yield", "'flex-wrap'", ",", "[", "sorted_tokens", "[", "1", "]", "]", "break", "else", ":", "raise", "InvalidValues", "elif", "len", "(", "tokens", ")", "==", "1", ":", "direction", "=", "flex_direction", "(", "[", "tokens", "[", "0", "]", "]", ")", "if", "direction", ":", "yield", "'flex-direction'", ",", "[", "tokens", "[", "0", "]", "]", "else", ":", "wrap", "=", "flex_wrap", "(", "[", "tokens", "[", "0", "]", "]", ")", "if", "wrap", ":", "yield", "'flex-wrap'", ",", "[", "tokens", "[", "0", "]", "]", "else", ":", "raise", "InvalidValues", "else", ":", "raise", "InvalidValues" ]
https://github.com/Kozea/WeasyPrint/blob/6cce2978165134e37683cb5b3d156cac6a11a7f9/weasyprint/css/validation/expanders.py#L626-L649
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/Lib/MimeWriter.py
python
MimeWriter.flushheaders
(self)
Writes out and forgets all headers accumulated so far. This is useful if you don't need a body part at all; for example, for a subpart of type message/rfc822 that's (mis)used to store some header-like information.
Writes out and forgets all headers accumulated so far.
[ "Writes", "out", "and", "forgets", "all", "headers", "accumulated", "so", "far", "." ]
def flushheaders(self): """Writes out and forgets all headers accumulated so far. This is useful if you don't need a body part at all; for example, for a subpart of type message/rfc822 that's (mis)used to store some header-like information. """ self._fp.writelines(self._headers) self._headers = []
[ "def", "flushheaders", "(", "self", ")", ":", "self", ".", "_fp", ".", "writelines", "(", "self", ".", "_headers", ")", "self", ".", "_headers", "=", "[", "]" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/Lib/MimeWriter.py#L117-L126
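A hedged Python 2 sketch (MimeWriter is a legacy module removed in Python 3) of the header-only use case the docstring mentions; the exact output string is an expectation, not verified against this bundled copy.

from StringIO import StringIO   # Python 2 only, like MimeWriter itself

fp = StringIO()
writer = MimeWriter(fp)
writer.addheader('Subject', 'header-only part')
writer.flushheaders()           # emits the headers without starting a body
print(fp.getvalue())            # expected: 'Subject: header-only part\n'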
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py
python
subqueryload
(loadopt, attr)
return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"})
Indicate that the given attribute should be loaded using subquery eager loading. This function is part of the :class:`.Load` interface and supports both method-chained and standalone operation. examples:: # subquery-load the "orders" collection on "User" query(User).options(subqueryload(User.orders)) # subquery-load Order.items and then Item.keywords query(Order).options(subqueryload(Order.items).subqueryload(Item.keywords)) # lazily load Order.items, but when Items are loaded, # subquery-load the keywords collection query(Order).options(lazyload(Order.items).subqueryload(Item.keywords)) .. seealso:: :ref:`loading_toplevel` :func:`.orm.joinedload` :func:`.orm.lazyload` :paramref:`.relationship.lazy`
Indicate that the given attribute should be loaded using subquery eager loading.
[ "Indicate", "that", "the", "given", "attribute", "should", "be", "loaded", "using", "subquery", "eager", "loading", "." ]
def subqueryload(loadopt, attr): """Indicate that the given attribute should be loaded using subquery eager loading. This function is part of the :class:`.Load` interface and supports both method-chained and standalone operation. examples:: # subquery-load the "orders" collection on "User" query(User).options(subqueryload(User.orders)) # subquery-load Order.items and then Item.keywords query(Order).options(subqueryload(Order.items).subqueryload(Item.keywords)) # lazily load Order.items, but when Items are loaded, # subquery-load the keywords collection query(Order).options(lazyload(Order.items).subqueryload(Item.keywords)) .. seealso:: :ref:`loading_toplevel` :func:`.orm.joinedload` :func:`.orm.lazyload` :paramref:`.relationship.lazy` """ return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"})
[ "def", "subqueryload", "(", "loadopt", ",", "attr", ")", ":", "return", "loadopt", ".", "set_relationship_strategy", "(", "attr", ",", "{", "\"lazy\"", ":", "\"subquery\"", "}", ")" ]
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py#L770-L801
opensistemas-hub/osbrain
a9abc82fb194348cceaabb897b394821fee2f135
osbrain/agent.py
python
Agent.log_info
(self, message, logger='_logger')
Log an info message. Parameters ---------- message : str Message to log. logger : str Alias of the logger.
Log an info message.
[ "Log", "an", "info", "message", "." ]
def log_info(self, message, logger='_logger'): """ Log an info message. Parameters ---------- message : str Message to log. logger : str Alias of the logger. """ self._log_message('INFO', message, logger)
[ "def", "log_info", "(", "self", ",", "message", ",", "logger", "=", "'_logger'", ")", ":", "self", ".", "_log_message", "(", "'INFO'", ",", "message", ",", "logger", ")" ]
https://github.com/opensistemas-hub/osbrain/blob/a9abc82fb194348cceaabb897b394821fee2f135/osbrain/agent.py#L551-L562
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/Centos_5.9/Crypto/Random/Fortuna/SHAd256.py
python
_SHAd256.update
(self, data)
[]
def update(self, data): self._h.update(data)
[ "def", "update", "(", "self", ",", "data", ")", ":", "self", ".", "_h", ".", "update", "(", "data", ")" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Centos_5.9/Crypto/Random/Fortuna/SHAd256.py#L83-L84
bendmorris/static-python
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
Lib/decimal.py
python
Decimal.__ceil__
(self)
return int(self._rescale(0, ROUND_CEILING))
Return the ceiling of self, as an integer. For a finite Decimal instance self, return the least integer n such that n >= self. If self is infinite or a NaN then a Python exception is raised.
Return the ceiling of self, as an integer.
[ "Return", "the", "ceiling", "of", "self", "as", "an", "integer", "." ]
def __ceil__(self): """Return the ceiling of self, as an integer. For a finite Decimal instance self, return the least integer n such that n >= self. If self is infinite or a NaN then a Python exception is raised. """ if self._is_special: if self.is_nan(): raise ValueError("cannot round a NaN") else: raise OverflowError("cannot round an infinity") return int(self._rescale(0, ROUND_CEILING))
[ "def", "__ceil__", "(", "self", ")", ":", "if", "self", ".", "_is_special", ":", "if", "self", ".", "is_nan", "(", ")", ":", "raise", "ValueError", "(", "\"cannot round a NaN\"", ")", "else", ":", "raise", "OverflowError", "(", "\"cannot round an infinity\"", ")", "return", "int", "(", "self", ".", "_rescale", "(", "0", ",", "ROUND_CEILING", ")", ")" ]
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/decimal.py#L1897-L1910
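Worked examples; math.ceil delegates to this method:

import math
from decimal import Decimal

assert math.ceil(Decimal('2.1')) == 3
assert math.ceil(Decimal('-2.1')) == -2
# Decimal('NaN').__ceil__() raises ValueError,
# Decimal('Infinity').__ceil__() raises OverflowError.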
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/xml/dom/minidom.py
python
NamedNodeMap.itemsNS
(self)
return L
[]
def itemsNS(self): L = [] for node in self._attrs.values(): L.append(((node.namespaceURI, node.localName), node.value)) return L
[ "def", "itemsNS", "(", "self", ")", ":", "L", "=", "[", "]", "for", "node", "in", "self", ".", "_attrs", ".", "values", "(", ")", ":", "L", ".", "append", "(", "(", "(", "node", ".", "namespaceURI", ",", "node", ".", "localName", ")", ",", "node", ".", "value", ")", ")", "return", "L" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/xml/dom/minidom.py#L502-L506
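An illustrative call on an attribute NamedNodeMap, using namespace-aware construction so that namespaceURI and localName are populated; the namespace and names are made up for the example.

from xml.dom.minidom import getDOMImplementation

doc = getDOMImplementation().createDocument('urn:example', 'p:root', None)
doc.documentElement.setAttributeNS('urn:example', 'p:attr', '1')
print(doc.documentElement.attributes.itemsNS())
# -> [(('urn:example', 'attr'), '1')], i.e. ((namespaceURI, localName), value) pairs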
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/motion_blinds/cover.py
python
MotionTDBUDevice.stop_cover
(self, **kwargs)
Stop the cover.
Stop the cover.
[ "Stop", "the", "cover", "." ]
def stop_cover(self, **kwargs): """Stop the cover.""" self._blind.Stop(motor=self._motor_key)
[ "def", "stop_cover", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_blind", ".", "Stop", "(", "motor", "=", "self", ".", "_motor_key", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/motion_blinds/cover.py#L316-L318
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AdminServer/appscale/admin/instance_manager/utils.py
python
remove_logrotate
(project_id)
Removes logrotate script for the given project. Args: project_id: A string, the name of the project to remove logrotate for.
Removes logrotate script for the given project.
[ "Removes", "logrotate", "script", "for", "the", "given", "project", "." ]
def remove_logrotate(project_id): """ Removes logrotate script for the given project. Args: project_id: A string, the name of the project to remove logrotate for. """ app_logrotate_script = "{0}/appscale-{1}".\ format(LOGROTATE_CONFIG_DIR, project_id) logger.debug("Removing script: {}".format(app_logrotate_script)) try: os.remove(app_logrotate_script) except OSError: logging.error("Error while removing log rotation for application: {}". format(project_id))
[ "def", "remove_logrotate", "(", "project_id", ")", ":", "app_logrotate_script", "=", "\"{0}/appscale-{1}\"", ".", "format", "(", "LOGROTATE_CONFIG_DIR", ",", "project_id", ")", "logger", ".", "debug", "(", "\"Removing script: {}\"", ".", "format", "(", "app_logrotate_script", ")", ")", "try", ":", "os", ".", "remove", "(", "app_logrotate_script", ")", "except", "OSError", ":", "logging", ".", "error", "(", "\"Error while removing log rotation for application: {}\"", ".", "format", "(", "project_id", ")", ")" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AdminServer/appscale/admin/instance_manager/utils.py#L106-L120
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/plat-irix6/IN.py
python
IN_CLASSB
(i)
return (((__int32_t)(i) & 0xc0000000) == 0x80000000)
[]
def IN_CLASSB(i): return (((__int32_t)(i) & 0xc0000000) == 0x80000000)
[ "def", "IN_CLASSB", "(", "i", ")", ":", "return", "(", "(", "(", "__int32_t", ")", "(", "i", ")", "&", "0xc0000000", ")", "==", "0x80000000", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/plat-irix6/IN.py#L191-L191
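The generated line above keeps the C macro's cast syntax, so it is not meaningfully callable as Python. A plain-Python restatement of the same class-B test, as an illustration rather than part of the module:

def in_classb(addr):
    # class B addresses start with bits 10 (128.0.0.0 through 191.255.255.255)
    return (addr & 0xc0000000) == 0x80000000

assert in_classb(0x80000001)        # 128.0.0.1 is class B
assert not in_classb(0xC0000001)    # 192.0.0.1 is class C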
osmr/imgclsmob
f2993d3ce73a2f7ddba05da3891defb08547d504
keras_/kerascv/models/common.py
python
channel_shuffle_lambda
(channels, groups, **kwargs)
return nn.Lambda(channel_shuffle, arguments={"groups": groups}, **kwargs)
Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. Returns: ------- Layer Channel shuffle layer.
Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups.
[ "Channel", "shuffle", "layer", ".", "This", "is", "a", "wrapper", "over", "the", "same", "operation", ".", "It", "is", "designed", "to", "save", "the", "number", "of", "groups", "." ]
def channel_shuffle_lambda(channels, groups, **kwargs): """ Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. Returns: ------- Layer Channel shuffle layer. """ assert (channels % groups == 0) return nn.Lambda(channel_shuffle, arguments={"groups": groups}, **kwargs)
[ "def", "channel_shuffle_lambda", "(", "channels", ",", "groups", ",", "*", "*", "kwargs", ")", ":", "assert", "(", "channels", "%", "groups", "==", "0", ")", "return", "nn", ".", "Lambda", "(", "channel_shuffle", ",", "arguments", "=", "{", "\"groups\"", ":", "groups", "}", ",", "*", "*", "kwargs", ")" ]
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/keras_/kerascv/models/common.py#L1322-L1342
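A hedged NumPy illustration of the channel_shuffle operation the Lambda wraps: the standard reshape-transpose-reshape trick, shown here for channels-last data (the Keras backend implementation is assumed to be equivalent).

import numpy as np

def channel_shuffle_np(x, groups):
    h, w, c = x.shape
    assert c % groups == 0
    x = x.reshape(h, w, groups, c // groups)
    x = x.transpose(0, 1, 3, 2)      # swap the group and per-group axes
    return x.reshape(h, w, c)

x = np.arange(6).reshape(1, 1, 6)    # channels [0, 1, 2, 3, 4, 5]
print(channel_shuffle_np(x, groups=2).ravel())   # [0 3 1 4 2 5]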
rlvaugh/Impractical_Python_Projects
ff9065e94430dc4ecf76d2c9e78f05fae499213e
Chapter_13/practice_45.py
python
Particle.vector
(self)
Calculate particle vector at launch.
Calculate particle vector at launch.
[ "Calculate", "particle", "vector", "at", "launch", "." ]
def vector(self): """Calculate particle vector at launch.""" angles = [65, 55, 45, 35, 25] # 90 is vertical orient = random.choice(angles) if orient == 45: self.color = WHITE else: self.color = GRAY radians = math.radians(orient) self.dx = self.vel * math.cos(radians) self.dy = -self.vel * math.sin(radians)
[ "def", "vector", "(", "self", ")", ":", "angles", "=", "[", "65", ",", "55", ",", "45", ",", "35", ",", "25", "]", "# 90 is vertical", "orient", "=", "random", ".", "choice", "(", "angles", ")", "if", "orient", "==", "45", ":", "self", ".", "color", "=", "WHITE", "else", ":", "self", ".", "color", "=", "GRAY", "radians", "=", "math", ".", "radians", "(", "orient", ")", "self", ".", "dx", "=", "self", ".", "vel", "*", "math", ".", "cos", "(", "radians", ")", "self", ".", "dy", "=", "-", "self", ".", "vel", "*", "math", ".", "sin", "(", "radians", ")" ]
https://github.com/rlvaugh/Impractical_Python_Projects/blob/ff9065e94430dc4ecf76d2c9e78f05fae499213e/Chapter_13/practice_45.py#L40-L50
inasafe/inasafe
355eb2ce63f516b9c26af0c86a24f99e53f63f87
safe/gui/tools/wizard/step_kw43_threshold.py
python
StepKwThreshold.set_widgets
(self)
Set widgets on the Threshold tab.
Set widgets on the Threshold tab.
[ "Set", "widgets", "on", "the", "Threshold", "tab", "." ]
def set_widgets(self): """Set widgets on the Threshold tab.""" clear_layout(self.gridLayoutThreshold) # Set text in the label layer_purpose = self.parent.step_kw_purpose.selected_purpose() layer_subcategory = self.parent.step_kw_subcategory.\ selected_subcategory() classification = self.parent.step_kw_classification. \ selected_classification() if is_raster_layer(self.parent.layer): statistics = self.parent.layer.dataProvider().bandStatistics( 1, QgsRasterBandStats.All, self.parent.layer.extent(), 0) text = continuous_raster_question % ( layer_purpose['name'], layer_subcategory['name'], classification['name'], statistics.minimumValue, statistics.maximumValue) else: field_name = self.parent.step_kw_field.selected_fields() field_index = self.parent.layer.fields().lookupField(field_name) min_value_layer = self.parent.layer.minimumValue(field_index) max_value_layer = self.parent.layer.maximumValue(field_index) text = continuous_vector_question % ( layer_purpose['name'], layer_subcategory['name'], field_name, classification['name'], min_value_layer, max_value_layer) self.lblThreshold.setText(text) thresholds = self.parent.get_existing_keyword('thresholds') selected_unit = self.parent.step_kw_unit.selected_unit()['key'] self.classes = OrderedDict() classes = classification.get('classes') # Sort by value, put the lowest first classes = sorted(classes, key=lambda k: k['value']) for i, the_class in enumerate(classes): class_layout = QHBoxLayout() # Class label class_label = QLabel(the_class['name']) # Min label min_label = QLabel(tr('Min >')) # Min value as double spin min_value_input = QDoubleSpinBox() # TODO(IS) We can set the min and max depends on the unit, later min_value_input.setMinimum(0) min_value_input.setMaximum(999999) if thresholds.get(the_class['key']): min_value_input.setValue(thresholds[the_class['key']][0]) else: default_min = the_class['numeric_default_min'] if isinstance(default_min, dict): default_min = the_class[ 'numeric_default_min'][selected_unit] min_value_input.setValue(default_min) min_value_input.setSingleStep(0.1) # Max label max_label = QLabel(tr('Max <=')) # Max value as double spin max_value_input = QDoubleSpinBox() # TODO(IS) We can set the min and max depends on the unit, later max_value_input.setMinimum(0) max_value_input.setMaximum(999999) if thresholds.get(the_class['key']): max_value_input.setValue(thresholds[the_class['key']][1]) else: default_max = the_class['numeric_default_max'] if isinstance(default_max, dict): default_max = the_class[ 'numeric_default_max'][selected_unit] max_value_input.setValue(default_max) max_value_input.setSingleStep(0.1) # Add to class_layout class_layout.addWidget(min_label) class_layout.addWidget(min_value_input) # class_layout.addStretch(1) class_layout.addWidget(max_label) class_layout.addWidget(max_value_input) # Add to grid_layout self.gridLayoutThreshold.addWidget(class_label, i, 0) self.gridLayoutThreshold.addLayout(class_layout, i, 1) self.classes[the_class['key']] = [min_value_input, max_value_input] self.gridLayoutThreshold.setSpacing(0) def min_max_changed(index, the_string): """Slot when min or max value change. :param index: The index of the double spin. :type index: int :param the_string: The flag to indicate the min or max value. :type the_string: str """ if the_string == 'Max value': current_max_value = list(self.classes.values())[index][1] target_min_value = list(self.classes.values())[index + 1][0] if current_max_value.value() != target_min_value.value(): target_min_value.setValue(current_max_value.value()) elif the_string == 'Min value': current_min_value = list(self.classes.values())[index][0] target_max_value = list(self.classes.values())[index - 1][1] if current_min_value.value() != target_max_value.value(): target_max_value.setValue(current_min_value.value()) # Set behaviour for k, v in list(self.classes.items()): index = list(self.classes.keys()).index(k) if index < len(self.classes) - 1: # Max value changed v[1].valueChanged.connect(partial( min_max_changed, index=index, the_string='Max value')) if index > 0: # Min value v[0].valueChanged.connect(partial( min_max_changed, index=index, the_string='Min value'))
[ "def", "set_widgets", "(", "self", ")", ":", "clear_layout", "(", "self", ".", "gridLayoutThreshold", ")", "# Set text in the label", "layer_purpose", "=", "self", ".", "parent", ".", "step_kw_purpose", ".", "selected_purpose", "(", ")", "layer_subcategory", "=", "self", ".", "parent", ".", "step_kw_subcategory", ".", "selected_subcategory", "(", ")", "classification", "=", "self", ".", "parent", ".", "step_kw_classification", ".", "selected_classification", "(", ")", "if", "is_raster_layer", "(", "self", ".", "parent", ".", "layer", ")", ":", "statistics", "=", "self", ".", "parent", ".", "layer", ".", "dataProvider", "(", ")", ".", "bandStatistics", "(", "1", ",", "QgsRasterBandStats", ".", "All", ",", "self", ".", "parent", ".", "layer", ".", "extent", "(", ")", ",", "0", ")", "text", "=", "continuous_raster_question", "%", "(", "layer_purpose", "[", "'name'", "]", ",", "layer_subcategory", "[", "'name'", "]", ",", "classification", "[", "'name'", "]", ",", "statistics", ".", "minimumValue", ",", "statistics", ".", "maximumValue", ")", "else", ":", "field_name", "=", "self", ".", "parent", ".", "step_kw_field", ".", "selected_fields", "(", ")", "field_index", "=", "self", ".", "parent", ".", "layer", ".", "fields", "(", ")", ".", "lookupField", "(", "field_name", ")", "min_value_layer", "=", "self", ".", "parent", ".", "layer", ".", "minimumValue", "(", "field_index", ")", "max_value_layer", "=", "self", ".", "parent", ".", "layer", ".", "maximumValue", "(", "field_index", ")", "text", "=", "continuous_vector_question", "%", "(", "layer_purpose", "[", "'name'", "]", ",", "layer_subcategory", "[", "'name'", "]", ",", "field_name", ",", "classification", "[", "'name'", "]", ",", "min_value_layer", ",", "max_value_layer", ")", "self", ".", "lblThreshold", ".", "setText", "(", "text", ")", "thresholds", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "'thresholds'", ")", "selected_unit", "=", "self", ".", "parent", ".", "step_kw_unit", ".", "selected_unit", "(", ")", "[", "'key'", "]", "self", ".", "classes", "=", "OrderedDict", "(", ")", "classes", "=", "classification", ".", "get", "(", "'classes'", ")", "# Sort by value, put the lowest first", "classes", "=", "sorted", "(", "classes", ",", "key", "=", "lambda", "k", ":", "k", "[", "'value'", "]", ")", "for", "i", ",", "the_class", "in", "enumerate", "(", "classes", ")", ":", "class_layout", "=", "QHBoxLayout", "(", ")", "# Class label", "class_label", "=", "QLabel", "(", "the_class", "[", "'name'", "]", ")", "# Min label", "min_label", "=", "QLabel", "(", "tr", "(", "'Min >'", ")", ")", "# Min value as double spin", "min_value_input", "=", "QDoubleSpinBox", "(", ")", "# TODO(IS) We can set the min and max depends on the unit, later", "min_value_input", ".", "setMinimum", "(", "0", ")", "min_value_input", ".", "setMaximum", "(", "999999", ")", "if", "thresholds", ".", "get", "(", "the_class", "[", "'key'", "]", ")", ":", "min_value_input", ".", "setValue", "(", "thresholds", "[", "the_class", "[", "'key'", "]", "]", "[", "0", "]", ")", "else", ":", "default_min", "=", "the_class", "[", "'numeric_default_min'", "]", "if", "isinstance", "(", "default_min", ",", "dict", ")", ":", "default_min", "=", "the_class", "[", "'numeric_default_min'", "]", "[", "selected_unit", "]", "min_value_input", ".", "setValue", "(", "default_min", ")", "min_value_input", ".", "setSingleStep", "(", "0.1", ")", "# Max label", "max_label", "=", "QLabel", "(", "tr", "(", "'Max <='", ")", ")", "# Max value as double spin", "max_value_input", "=", 
"QDoubleSpinBox", "(", ")", "# TODO(IS) We can set the min and max depends on the unit, later", "max_value_input", ".", "setMinimum", "(", "0", ")", "max_value_input", ".", "setMaximum", "(", "999999", ")", "if", "thresholds", ".", "get", "(", "the_class", "[", "'key'", "]", ")", ":", "max_value_input", ".", "setValue", "(", "thresholds", "[", "the_class", "[", "'key'", "]", "]", "[", "1", "]", ")", "else", ":", "default_max", "=", "the_class", "[", "'numeric_default_max'", "]", "if", "isinstance", "(", "default_max", ",", "dict", ")", ":", "default_max", "=", "the_class", "[", "'numeric_default_max'", "]", "[", "selected_unit", "]", "max_value_input", ".", "setValue", "(", "default_max", ")", "max_value_input", ".", "setSingleStep", "(", "0.1", ")", "# Add to class_layout", "class_layout", ".", "addWidget", "(", "min_label", ")", "class_layout", ".", "addWidget", "(", "min_value_input", ")", "# class_layout.addStretch(1)", "class_layout", ".", "addWidget", "(", "max_label", ")", "class_layout", ".", "addWidget", "(", "max_value_input", ")", "# Add to grid_layout", "self", ".", "gridLayoutThreshold", ".", "addWidget", "(", "class_label", ",", "i", ",", "0", ")", "self", ".", "gridLayoutThreshold", ".", "addLayout", "(", "class_layout", ",", "i", ",", "1", ")", "self", ".", "classes", "[", "the_class", "[", "'key'", "]", "]", "=", "[", "min_value_input", ",", "max_value_input", "]", "self", ".", "gridLayoutThreshold", ".", "setSpacing", "(", "0", ")", "def", "min_max_changed", "(", "index", ",", "the_string", ")", ":", "\"\"\"Slot when min or max value change.\n\n :param index: The index of the double spin.\n :type index: int\n\n :param the_string: The flag to indicate the min or max value.\n :type the_string: str\n \"\"\"", "if", "the_string", "==", "'Max value'", ":", "current_max_value", "=", "list", "(", "self", ".", "classes", ".", "values", "(", ")", ")", "[", "index", "]", "[", "1", "]", "target_min_value", "=", "list", "(", "self", ".", "classes", ".", "values", "(", ")", ")", "[", "index", "+", "1", "]", "[", "0", "]", "if", "current_max_value", ".", "value", "(", ")", "!=", "target_min_value", ".", "value", "(", ")", ":", "target_min_value", ".", "setValue", "(", "current_max_value", ".", "value", "(", ")", ")", "elif", "the_string", "==", "'Min value'", ":", "current_min_value", "=", "list", "(", "self", ".", "classes", ".", "values", "(", ")", ")", "[", "index", "]", "[", "0", "]", "target_max_value", "=", "list", "(", "self", ".", "classes", ".", "values", "(", ")", ")", "[", "index", "-", "1", "]", "[", "1", "]", "if", "current_min_value", ".", "value", "(", ")", "!=", "target_max_value", ".", "value", "(", ")", ":", "target_max_value", ".", "setValue", "(", "current_min_value", ".", "value", "(", ")", ")", "# Set behaviour", "for", "k", ",", "v", "in", "list", "(", "self", ".", "classes", ".", "items", "(", ")", ")", ":", "index", "=", "list", "(", "self", ".", "classes", ".", "keys", "(", ")", ")", ".", "index", "(", "k", ")", "if", "index", "<", "len", "(", "self", ".", "classes", ")", "-", "1", ":", "# Max value changed", "v", "[", "1", "]", ".", "valueChanged", ".", "connect", "(", "partial", "(", "min_max_changed", ",", "index", "=", "index", ",", "the_string", "=", "'Max value'", ")", ")", "if", "index", ">", "0", ":", "# Min value", "v", "[", "0", "]", ".", "valueChanged", ".", "connect", "(", "partial", "(", "min_max_changed", ",", "index", "=", "index", ",", "the_string", "=", "'Min value'", ")", ")" ]
https://github.com/inasafe/inasafe/blob/355eb2ce63f516b9c26af0c86a24f99e53f63f87/safe/gui/tools/wizard/step_kw43_threshold.py#L93-L222
feisuzhu/thbattle
ac0dee1b2d86de7664289cf432b157ef25427ba1
src/pyglet/app/base.py
python
EventLoop.idle
(self)
return self.clock.get_sleep_time(True)
Called during each iteration of the event loop. The method is called immediately after any window events (i.e., after any user input). The method can return a duration after which the idle method will be called again. The method may be called earlier if the user creates more input events. The method can return `None` to only wait for user events. For example, return ``1.0`` to have the idle method called every second, or immediately after any user events. The default implementation dispatches the `pyglet.window.Window.on_draw` event for all windows and uses `pyglet.clock.tick` and `pyglet.clock.get_sleep_time` on the default clock to determine the return value. This method should be overridden by advanced users only. To have code execute at regular intervals, use the `pyglet.clock.schedule` methods. :rtype: float :return: The number of seconds before the idle method should be called again, or `None` to block for user input.
Called during each iteration of the event loop.
[ "Called", "during", "each", "iteration", "of", "the", "event", "loop", "." ]
def idle(self): '''Called during each iteration of the event loop. The method is called immediately after any window events (i.e., after any user input). The method can return a duration after which the idle method will be called again. The method may be called earlier if the user creates more input events. The method can return `None` to only wait for user events. For example, return ``1.0`` to have the idle method called every second, or immediately after any user events. The default implementation dispatches the `pyglet.window.Window.on_draw` event for all windows and uses `pyglet.clock.tick` and `pyglet.clock.get_sleep_time` on the default clock to determine the return value. This method should be overridden by advanced users only. To have code execute at regular intervals, use the `pyglet.clock.schedule` methods. :rtype: float :return: The number of seconds before the idle method should be called again, or `None` to block for user input. ''' dt = self.clock.update_time() redraw_all = self.clock.call_scheduled_functions(dt) # Redraw all windows for window in app.windows: if redraw_all or (window._legacy_invalid and window.invalid): window.switch_to() window.dispatch_event('on_draw') window.flip() window._legacy_invalid = False # Update timeout return self.clock.get_sleep_time(True)
[ "def", "idle", "(", "self", ")", ":", "dt", "=", "self", ".", "clock", ".", "update_time", "(", ")", "redraw_all", "=", "self", ".", "clock", ".", "call_scheduled_functions", "(", "dt", ")", "# Redraw all windows", "for", "window", "in", "app", ".", "windows", ":", "if", "redraw_all", "or", "(", "window", ".", "_legacy_invalid", "and", "window", ".", "invalid", ")", ":", "window", ".", "switch_to", "(", ")", "window", ".", "dispatch_event", "(", "'on_draw'", ")", "window", ".", "flip", "(", ")", "window", ".", "_legacy_invalid", "=", "False", "# Update timeout", "return", "self", ".", "clock", ".", "get_sleep_time", "(", "True", ")" ]
https://github.com/feisuzhu/thbattle/blob/ac0dee1b2d86de7664289cf432b157ef25427ba1/src/pyglet/app/base.py#L248-L285
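The docstring above steers application code away from overriding idle() and toward the clock-scheduling API. A minimal sketch of that recommended pattern, assuming a standard pyglet installation (window caption and the update callback are illustrative):

import pyglet

window = pyglet.window.Window(caption="idle() demo")

def update(dt):
    # dt: seconds since the previous call; the default EventLoop.idle()
    # dispatches this through the clock, as described above.
    print("tick after %.3f s" % dt)

pyglet.clock.schedule_interval(update, 1.0)  # preferred over overriding idle()

@window.event
def on_draw():
    window.clear()

pyglet.app.run()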
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/words/protocols/irc.py
python
IRCClient.joined
(self, channel)
Called when I finish joining a channel. channel has the starting character (C{'#'}, C{'&'}, C{'!'}, or C{'+'}) intact.
Called when I finish joining a channel.
[ "Called", "when", "I", "finish", "joining", "a", "channel", "." ]
def joined(self, channel): """ Called when I finish joining a channel. channel has the starting character (C{'#'}, C{'&'}, C{'!'}, or C{'+'}) intact. """
[ "def", "joined", "(", "self", ",", "channel", ")", ":" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/words/protocols/irc.py#L1182-L1188
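joined() is a callback hook meant to be overridden in an IRCClient subclass. A hedged sketch of the usual pattern (nickname, channel, and greeting are placeholders, not part of this record):

from twisted.words.protocols import irc

class GreetingBot(irc.IRCClient):
    nickname = "greeter"

    def signedOn(self):
        # Registration finished; ask the server to join the channel.
        self.join("#example")

    def joined(self, channel):
        # Fires once the join completes; the channel prefix is intact.
        self.msg(channel, "hello, %s" % channel)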
rietveld-codereview/rietveld
82e415f6a291c58c714d3869c7a1de818546c7d5
third_party/oauth2client/client.py
python
OAuth2Credentials.refresh
(self, http)
Forces a refresh of the access_token. Args: http: httplib2.Http, an http object to be used to make the refresh request.
Forces a refresh of the access_token.
[ "Forces", "a", "refresh", "of", "the", "access_token", "." ]
def refresh(self, http): """Forces a refresh of the access_token. Args: http: httplib2.Http, an http object to be used to make the refresh request. """ self._refresh(http.request)
[ "def", "refresh", "(", "self", ",", "http", ")", ":", "self", ".", "_refresh", "(", "http", ".", "request", ")" ]
https://github.com/rietveld-codereview/rietveld/blob/82e415f6a291c58c714d3869c7a1de818546c7d5/third_party/oauth2client/client.py#L509-L516
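A short usage sketch: refresh() only needs an httplib2-compatible transport, so forcing a new access token looks like the following (the credentials object is an assumption, obtained elsewhere from a completed OAuth2 flow):

import httplib2

# "credentials": an OAuth2Credentials instance from a completed flow (assumed).
http = httplib2.Http()
credentials.refresh(http)        # performs the token refresh request now
print(credentials.access_token)  # the freshly minted token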
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/django/views/generic/list.py
python
MultipleObjectMixin.paginate_queryset
(self, queryset, page_size)
Paginate the queryset, if needed.
Paginate the queryset, if needed.
[ "Paginate", "the", "queryset", "if", "needed", "." ]
def paginate_queryset(self, queryset, page_size): """ Paginate the queryset, if needed. """ paginator = self.get_paginator( queryset, page_size, orphans=self.get_paginate_orphans(), allow_empty_first_page=self.get_allow_empty()) page_kwarg = self.page_kwarg page = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1 try: page_number = int(page) except ValueError: if page == 'last': page_number = paginator.num_pages else: raise Http404(_("Page is not 'last', nor can it be converted to an int.")) try: page = paginator.page(page_number) return (paginator, page, page.object_list, page.has_other_pages()) except InvalidPage as e: raise Http404(_('Invalid page (%(page_number)s): %(message)s') % { 'page_number': page_number, 'message': force_text(e), })
[ "def", "paginate_queryset", "(", "self", ",", "queryset", ",", "page_size", ")", ":", "paginator", "=", "self", ".", "get_paginator", "(", "queryset", ",", "page_size", ",", "orphans", "=", "self", ".", "get_paginate_orphans", "(", ")", ",", "allow_empty_first_page", "=", "self", ".", "get_allow_empty", "(", ")", ")", "page_kwarg", "=", "self", ".", "page_kwarg", "page", "=", "self", ".", "kwargs", ".", "get", "(", "page_kwarg", ")", "or", "self", ".", "request", ".", "GET", ".", "get", "(", "page_kwarg", ")", "or", "1", "try", ":", "page_number", "=", "int", "(", "page", ")", "except", "ValueError", ":", "if", "page", "==", "'last'", ":", "page_number", "=", "paginator", ".", "num_pages", "else", ":", "raise", "Http404", "(", "_", "(", "\"Page is not 'last', nor can it be converted to an int.\"", ")", ")", "try", ":", "page", "=", "paginator", ".", "page", "(", "page_number", ")", "return", "(", "paginator", ",", "page", ",", "page", ".", "object_list", ",", "page", ".", "has_other_pages", "(", ")", ")", "except", "InvalidPage", "as", "e", ":", "raise", "Http404", "(", "_", "(", "'Invalid page (%(page_number)s): %(message)s'", ")", "%", "{", "'page_number'", ":", "page_number", ",", "'message'", ":", "force_text", "(", "e", ")", ",", "}", ")" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/views/generic/list.py#L62-L85
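Callers rarely invoke paginate_queryset() directly; it runs automatically when a ListView sets paginate_by. A minimal sketch (the app and Article model are hypothetical):

from django.views.generic import ListView
from myapp.models import Article  # hypothetical app and model

class ArticleListView(ListView):
    model = Article
    paginate_by = 25  # makes the mixin call paginate_queryset(qs, 25)

# /articles/?page=2 serves the second page, ?page=last the final one, and a
# non-numeric page that is not 'last' raises Http404, per the code above.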
ChangeMyUsername/algorithms-sedgewick-python
d3ccd86c93016c7fee270ad02e1a823d205cea80
chapter_2/module_2_2.py
python
MergeSort.insertion_sort
( self, seq: MutableSequence[CT], low: int, high: int)
Insertion sort. Apply this method to arrays below a certain size to improve performance. Args: seq (MutableSequence[CT]): sorting array low (int): start index high (int): end index >>> merge_sort = MergeSort() >>> sequence = [0, 4, 7, 1, -3, 9] >>> merge_sort.insertion_sort(sequence, 0, len(sequence) - 1) >>> merge_sort.is_sorted(sequence) True >>> merge_sort.show(sequence) -3 0 1 4 7 9
Insertion sort. Apply this method to arrays below a certain size to improve performance.
[ "Insertion", "sort", ".", "Apply", "this", "method", "to", "arrays", "below", "a", "certain", "size", "to", "improve", "performance", "." ]
def insertion_sort( self, seq: MutableSequence[CT], low: int, high: int) -> None: """Insertion sort. Apply this method to arrays below a certain size to improve performance. Args: seq (MutableSequence[CT]): sorting array low (int): start index high (int): end index >>> merge_sort = MergeSort() >>> sequence = [0, 4, 7, 1, -3, 9] >>> merge_sort.insertion_sort(sequence, 0, len(sequence) - 1) >>> merge_sort.is_sorted(sequence) True >>> merge_sort.show(sequence) -3 0 1 4 7 9 """ for i in range(low + 1, high + 1): j = i while j > low and seq[j] < seq[j - 1]: self.exch(seq, j, j - 1) j -= 1
[ "def", "insertion_sort", "(", "self", ",", "seq", ":", "MutableSequence", "[", "CT", "]", ",", "low", ":", "int", ",", "high", ":", "int", ")", "->", "None", ":", "for", "i", "in", "range", "(", "low", "+", "1", ",", "high", "+", "1", ")", ":", "j", "=", "i", "while", "j", ">", "low", "and", "seq", "[", "j", "]", "<", "seq", "[", "j", "-", "1", "]", ":", "self", ".", "exch", "(", "seq", ",", "j", ",", "j", "-", "1", ")", "j", "-=", "1" ]
https://github.com/ChangeMyUsername/algorithms-sedgewick-python/blob/d3ccd86c93016c7fee270ad02e1a823d205cea80/chapter_2/module_2_2.py#L65-L87
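The method sorts only the slice seq[low..high] in place, which is what makes it usable as a small-subarray cutoff inside merge sort. A self-contained sketch of the same logic outside the class, with exch inlined as a tuple swap:

def insertion_sort_range(seq, low, high):
    # Sort seq[low..high] (inclusive) in place by repeated exchanges.
    for i in range(low + 1, high + 1):
        j = i
        while j > low and seq[j] < seq[j - 1]:
            seq[j], seq[j - 1] = seq[j - 1], seq[j]
            j -= 1

data = [9, 5, 7, 1, 3, 8]
insertion_sort_range(data, 1, 4)  # only indices 1..4 are sorted
print(data)                       # [9, 1, 3, 5, 7, 8]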
pytorch/opacus
5c83d59fc169e93667946204f7a6859827a38ace
opacus/accountants/analysis/rdp.py
python
compute_rdp
( *, q: float, noise_multiplier: float, steps: int, orders: Union[List[float], float] )
return rdp * steps
r"""Computes Renyi Differential Privacy (RDP) guarantees of the Sampled Gaussian Mechanism (SGM) iterated ``steps`` times. Args: q: Sampling rate of SGM. noise_multiplier: The ratio of the standard deviation of the additive Gaussian noise to the L2-sensitivity of the function to which it is added. Note that this is same as the standard deviation of the additive Gaussian noise when the L2-sensitivity of the function is 1. steps: The number of iterations of the mechanism. orders: An array (or a scalar) of RDP orders. Returns: The RDP guarantees at all orders; can be ``np.inf``.
r"""Computes Renyi Differential Privacy (RDP) guarantees of the Sampled Gaussian Mechanism (SGM) iterated ``steps`` times.
[ "r", "Computes", "Renyi", "Differential", "Privacy", "(", "RDP", ")", "guarantees", "of", "the", "Sampled", "Gaussian", "Mechanism", "(", "SGM", ")", "iterated", "steps", "times", "." ]
def compute_rdp( *, q: float, noise_multiplier: float, steps: int, orders: Union[List[float], float] ) -> Union[List[float], float]: r"""Computes Renyi Differential Privacy (RDP) guarantees of the Sampled Gaussian Mechanism (SGM) iterated ``steps`` times. Args: q: Sampling rate of SGM. noise_multiplier: The ratio of the standard deviation of the additive Gaussian noise to the L2-sensitivity of the function to which it is added. Note that this is same as the standard deviation of the additive Gaussian noise when the L2-sensitivity of the function is 1. steps: The number of iterations of the mechanism. orders: An array (or a scalar) of RDP orders. Returns: The RDP guarantees at all orders; can be ``np.inf``. """ if isinstance(orders, float): rdp = _compute_rdp(q, noise_multiplier, orders) else: rdp = np.array([_compute_rdp(q, noise_multiplier, order) for order in orders]) return rdp * steps
[ "def", "compute_rdp", "(", "*", ",", "q", ":", "float", ",", "noise_multiplier", ":", "float", ",", "steps", ":", "int", ",", "orders", ":", "Union", "[", "List", "[", "float", "]", ",", "float", "]", ")", "->", "Union", "[", "List", "[", "float", "]", ",", "float", "]", ":", "if", "isinstance", "(", "orders", ",", "float", ")", ":", "rdp", "=", "_compute_rdp", "(", "q", ",", "noise_multiplier", ",", "orders", ")", "else", ":", "rdp", "=", "np", ".", "array", "(", "[", "_compute_rdp", "(", "q", ",", "noise_multiplier", ",", "order", ")", "for", "order", "in", "orders", "]", ")", "return", "rdp", "*", "steps" ]
https://github.com/pytorch/opacus/blob/5c83d59fc169e93667946204f7a6859827a38ace/opacus/accountants/analysis/rdp.py#L250-L274
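A hedged usage sketch: compute_rdp is keyword-only, and the result is typically fed to a conversion routine to obtain an (epsilon, order) pair; get_privacy_spent lives in the same module in recent opacus releases, but treat that import as an assumption:

from opacus.accountants.analysis.rdp import compute_rdp, get_privacy_spent  # import path assumed

orders = [1.5, 2.0, 4.0, 8.0, 16.0, 32.0]
rdp = compute_rdp(q=0.01, noise_multiplier=1.1, steps=10000, orders=orders)  # keyword-only
eps, best_order = get_privacy_spent(orders=orders, rdp=rdp, delta=1e-5)
print(eps, best_order)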
numenta/nupic
b9ebedaf54f49a33de22d8d44dff7c765cdb5548
src/nupic/frameworks/opf/prediction_metrics_manager.py
python
MetricsManager.getMetrics
(self)
return result
Gets the current metric values :returns: (dict) where each key is the metric-name, and the value is its scalar value. Same as the output of :meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update`
Gets the current metric values
[ "Gets", "the", "current", "metric", "values" ]
def getMetrics(self): """ Gets the current metric values :returns: (dict) where each key is the metric-name, and the value is its scalar value. Same as the output of :meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update` """ result = {} for metricObj, label in zip(self.__metrics, self.__metricLabels): value = metricObj.getMetric() result[label] = value['value'] return result
[ "def", "getMetrics", "(", "self", ")", ":", "result", "=", "{", "}", "for", "metricObj", ",", "label", "in", "zip", "(", "self", ".", "__metrics", ",", "self", ".", "__metricLabels", ")", ":", "value", "=", "metricObj", ".", "getMetric", "(", ")", "result", "[", "label", "]", "=", "value", "[", "'value'", "]", "return", "result" ]
https://github.com/numenta/nupic/blob/b9ebedaf54f49a33de22d8d44dff7c765cdb5548/src/nupic/frameworks/opf/prediction_metrics_manager.py#L159-L174
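Since getMetrics() returns a plain dict keyed by metric label, consuming it is just iteration. A sketch assuming a MetricsManager has already been constructed elsewhere from metric specs:

# metrics_manager: a MetricsManager built elsewhere from metric specs (assumed).
for label, value in metrics_manager.getMetrics().items():
    print("%s = %s" % (label, value))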
google/qkeras
e0faa69b3a3d99850b06c5793e6831b7d9b83306
qkeras/autoqkeras/autoqkeras_internal.py
python
AutoQKeras.fit
(self, *fit_args, **fit_kwargs)
Invokes tuner fit algorithm.
Invokes tuner fit algorithm.
[ "Invokes", "tuner", "fit", "algorithm", "." ]
def fit(self, *fit_args, **fit_kwargs): """Invokes tuner fit algorithm.""" callbacks = fit_kwargs.get("callbacks", None) if callbacks is None: callbacks = [] epochs = fit_kwargs.get("epochs", None) if epochs is None: epochs = 10 if not self._has_earlystopping(callbacks): callbacks = callbacks + [ tf.keras.callbacks.EarlyStopping( "val_loss", patience=min(20, epochs//5)) ] fit_kwargs["callbacks"] = callbacks self.tuner.search(*fit_args, **fit_kwargs)
[ "def", "fit", "(", "self", ",", "*", "fit_args", ",", "*", "*", "fit_kwargs", ")", ":", "callbacks", "=", "fit_kwargs", ".", "get", "(", "\"callbacks\"", ",", "None", ")", "if", "callbacks", "is", "None", ":", "callbacks", "=", "[", "]", "epochs", "=", "fit_kwargs", ".", "get", "(", "\"epochs\"", ",", "None", ")", "if", "epochs", "is", "None", ":", "epochs", "=", "10", "if", "not", "self", ".", "_has_earlystopping", "(", "callbacks", ")", ":", "callbacks", "=", "callbacks", "+", "[", "tf", ".", "keras", ".", "callbacks", ".", "EarlyStopping", "(", "\"val_loss\"", ",", "patience", "=", "min", "(", "20", ",", "epochs", "//", "5", ")", ")", "]", "fit_kwargs", "[", "\"callbacks\"", "]", "=", "callbacks", "self", ".", "tuner", ".", "search", "(", "*", "fit_args", ",", "*", "*", "fit_kwargs", ")" ]
https://github.com/google/qkeras/blob/e0faa69b3a3d99850b06c5793e6831b7d9b83306/qkeras/autoqkeras/autoqkeras_internal.py#L948-L968
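The method forwards *fit_args/**fit_kwargs to the tuner's search, injecting an EarlyStopping callback when none is present. A hedged call sketch (the autoqk object and data arrays are assumed to exist; the interface mirrors keras fit):

# autoqk: an AutoQKeras instance built elsewhere (assumed).
autoqk.fit(
    x_train, y_train,
    validation_data=(x_val, y_val),
    epochs=50,  # with no EarlyStopping given, one is added with patience=min(20, 50//5)
)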
mrlesmithjr/Ansible
d44f0dc0d942bdf3bf7334b307e6048f0ee16e36
roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/requests/utils.py
python
from_key_val_list
(value)
return OrderedDict(value)
Take an object and test to see if it can be represented as a dictionary. If it cannot be represented as such, a ValueError is raised; otherwise an OrderedDict is returned, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict
Take an object and test to see if it can be represented as a dictionary. If it cannot be represented as such, a ValueError is raised; otherwise an OrderedDict is returned, e.g.,
[ "Take", "an", "object", "and", "test", "to", "see", "if", "it", "can", "be", "represented", "as", "a", "dictionary", ".", "If", "it", "cannot", "be", "represented", "as", "such", "a", "ValueError", "is", "raised", ";", "otherwise", "an", "OrderedDict", "is", "returned", "e", ".", "g", "." ]
def from_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. If it cannot be represented as such, a ValueError is raised; otherwise an OrderedDict is returned, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') return OrderedDict(value)
[ "def", "from_key_val_list", "(", "value", ")", ":", "if", "value", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "(", "str", ",", "bytes", ",", "bool", ",", "int", ")", ")", ":", "raise", "ValueError", "(", "'cannot encode objects that are not 2-tuples'", ")", "return", "OrderedDict", "(", "value", ")" ]
https://github.com/mrlesmithjr/Ansible/blob/d44f0dc0d942bdf3bf7334b307e6048f0ee16e36/roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/requests/utils.py#L219-L241
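The doctest above summarizes the contract; exercising all three branches explicitly (the import path is where requests exposes this helper):

from requests.utils import from_key_val_list

print(from_key_val_list([("key", "val")]))  # OrderedDict([('key', 'val')])
print(from_key_val_list(None))              # None passes straight through
try:
    from_key_val_list(42)                   # scalars hit the isinstance guard
except ValueError as exc:
    print(exc)                              # cannot encode objects that are not 2-tuples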
KhronosGroup/OpenXR-SDK-Source
76756e2e7849b15466d29bee7d80cada92865550
specification/scripts/spec_tools/consistency_tools.py
python
XMLChecker.is_api_type
(self, member_elem)
return self.conventions.type_prefix in membertext
Return true if the member/parameter ElementTree passed is from this API. May override or extend.
Return true if the member/parameter ElementTree passed is from this API.
[ "Return", "true", "if", "the", "member", "/", "parameter", "ElementTree", "passed", "is", "from", "this", "API", "." ]
def is_api_type(self, member_elem): """Return true if the member/parameter ElementTree passed is from this API. May override or extend.""" membertext = "".join(member_elem.itertext()) return self.conventions.type_prefix in membertext
[ "def", "is_api_type", "(", "self", ",", "member_elem", ")", ":", "membertext", "=", "\"\"", ".", "join", "(", "member_elem", ".", "itertext", "(", ")", ")", "return", "self", ".", "conventions", ".", "type_prefix", "in", "membertext" ]
https://github.com/KhronosGroup/OpenXR-SDK-Source/blob/76756e2e7849b15466d29bee7d80cada92865550/specification/scripts/spec_tools/consistency_tools.py#L118-L124
Lawouach/WebSocket-for-Python
a3e6d157b7bb1da1009e66aa750170f1c07aa143
example/websensors/app.py
python
render_template
(template)
Renders a mako template to HTML and sets the CherryPy response's body with it.
Renders a mako template to HTML and sets the CherryPy response's body with it.
[ "Renders", "a", "mako", "template", "to", "HTML", "and", "sets", "the", "CherryPy", "response", "s", "body", "with", "it", "." ]
def render_template(template): """ Renders a mako template to HTML and sets the CherryPy response's body with it. """ if cherrypy.response.status > 399: return data = cherrypy.response.body or {} template = lookup.get_template(template) if template and isinstance(data, dict): cherrypy.response.body = template.render(**data)
[ "def", "render_template", "(", "template", ")", ":", "if", "cherrypy", ".", "response", ".", "status", ">", "399", ":", "return", "data", "=", "cherrypy", ".", "response", ".", "body", "or", "{", "}", "template", "=", "lookup", ".", "get_template", "(", "template", ")", "if", "template", "and", "isinstance", "(", "data", ",", "dict", ")", ":", "cherrypy", ".", "response", ".", "body", "=", "template", ".", "render", "(", "*", "*", "data", ")" ]
https://github.com/Lawouach/WebSocket-for-Python/blob/a3e6d157b7bb1da1009e66aa750170f1c07aa143/example/websensors/app.py#L194-L206
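The function is written as a CherryPy tool callback: it runs after a handler has populated response.body with a dict and swaps in the rendered HTML. A hedged registration sketch (the hook point, tool name, and template name are the usual choices for this pattern, not mandated by the snippet):

import cherrypy

# render_template comes from the app module above; hook point is an assumption.
cherrypy.tools.template = cherrypy.Tool('before_finalize', render_template)

class Root(object):
    @cherrypy.expose
    @cherrypy.tools.template(template='index.html')  # template name is illustrative
    def index(self):
        return {'title': 'Sensors'}  # becomes **data in template.render(**data)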
riffnshred/nhl-led-scoreboard
14baa7f0691ca507e4c6f7f2ec02e50ccd1ed9e1
src/nhl_api/game.py
python
Overview.__init__
(self, data)
[]
def __init__(self, data): # loop through data for x in data: # set information as correct data type try: setattr(self, x, int(data[x])) except ValueError: try: setattr(self, x, float(data[x])) except ValueError: # string if not number setattr(self, x, str(data[x])) except TypeError: obj = nhl_api.object.Object(data[x]) setattr(self, x, obj) # calculate the winning team if self.home_score > self.away_score: self.w_team = self.home_team_id self.w_score = self.home_score self.l_team = self.away_team_id self.l_score = self.away_score elif self.away_score > self.home_score: self.w_team = self.away_team_id self.w_score = self.away_score self.l_team = self.home_team_id self.l_score = self.home_score
[ "def", "__init__", "(", "self", ",", "data", ")", ":", "# loop through data", "for", "x", "in", "data", ":", "# set information as correct data type", "try", ":", "setattr", "(", "self", ",", "x", ",", "int", "(", "data", "[", "x", "]", ")", ")", "except", "ValueError", ":", "try", ":", "setattr", "(", "self", ",", "x", ",", "float", "(", "data", "[", "x", "]", ")", ")", "except", "ValueError", ":", "# string if not number", "setattr", "(", "self", ",", "x", ",", "str", "(", "data", "[", "x", "]", ")", ")", "except", "TypeError", ":", "obj", "=", "nhl_api", ".", "object", ".", "Object", "(", "data", "[", "x", "]", ")", "setattr", "(", "self", ",", "x", ",", "obj", ")", "# calculate the winning team", "if", "self", ".", "home_score", ">", "self", ".", "away_score", ":", "self", ".", "w_team", "=", "self", ".", "home_team_id", "self", ".", "w_score", "=", "self", ".", "home_score", "self", ".", "l_team", "=", "self", ".", "away_team_id", "self", ".", "l_score", "=", "self", ".", "away_score", "elif", "self", ".", "away_score", ">", "self", ".", "home_score", ":", "self", ".", "w_team", "=", "self", ".", "away_team_id", "self", ".", "w_score", "=", "self", ".", "away_score", "self", ".", "l_team", "=", "self", ".", "home_team_id", "self", ".", "l_score", "=", "self", ".", "home_score" ]
https://github.com/riffnshred/nhl-led-scoreboard/blob/14baa7f0691ca507e4c6f7f2ec02e50ccd1ed9e1/src/nhl_api/game.py#L172-L198
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/distlib/util.py
python
resolve
(module_name, dotted_path)
return result
[]
def resolve(module_name, dotted_path): if module_name in sys.modules: mod = sys.modules[module_name] else: mod = __import__(module_name) if dotted_path is None: result = mod else: parts = dotted_path.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result
[ "def", "resolve", "(", "module_name", ",", "dotted_path", ")", ":", "if", "module_name", "in", "sys", ".", "modules", ":", "mod", "=", "sys", ".", "modules", "[", "module_name", "]", "else", ":", "mod", "=", "__import__", "(", "module_name", ")", "if", "dotted_path", "is", "None", ":", "result", "=", "mod", "else", ":", "parts", "=", "dotted_path", ".", "split", "(", "'.'", ")", "result", "=", "getattr", "(", "mod", ",", "parts", ".", "pop", "(", "0", ")", ")", "for", "p", "in", "parts", ":", "result", "=", "getattr", "(", "result", ",", "p", ")", "return", "result" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/distlib/util.py#L519-L531
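A short usage sketch: the first argument names the module to import, the second a dotted attribute path within it, and None returns the module itself. The vendored import path below is taken from this record and assumed importable:

from pip._vendor.distlib.util import resolve  # vendored path from this record

get_logger = resolve('logging', 'getLogger')  # walks the dotted attribute path
get_logger('demo').warning('resolved dynamically')
print(resolve('logging', None))               # dotted_path=None returns the module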
mautrix/telegram
9f48eca5a6654bc38012cb761ecaaaf416aabdd0
mautrix_telegram/portal.py
python
Portal._send_bridge_error
( self, sender: u.User, err: Exception, event_id: EventID, event_type: EventType, message_type: MessageType | None = None, msg: str | None = None, )
[]
async def _send_bridge_error( self, sender: u.User, err: Exception, event_id: EventID, event_type: EventType, message_type: MessageType | None = None, msg: str | None = None, ) -> None: sender.send_remote_checkpoint( MessageSendCheckpointStatus.PERM_FAILURE, event_id, self.mxid, event_type, message_type=message_type, error=err, ) if msg and self.config["bridge.delivery_error_reports"]: await self._send_message( self.main_intent, TextMessageEventContent(msgtype=MessageType.NOTICE, body=msg) )
[ "async", "def", "_send_bridge_error", "(", "self", ",", "sender", ":", "u", ".", "User", ",", "err", ":", "Exception", ",", "event_id", ":", "EventID", ",", "event_type", ":", "EventType", ",", "message_type", ":", "MessageType", "|", "None", "=", "None", ",", "msg", ":", "str", "|", "None", "=", "None", ",", ")", "->", "None", ":", "sender", ".", "send_remote_checkpoint", "(", "MessageSendCheckpointStatus", ".", "PERM_FAILURE", ",", "event_id", ",", "self", ".", "mxid", ",", "event_type", ",", "message_type", "=", "message_type", ",", "error", "=", "err", ",", ")", "if", "msg", "and", "self", ".", "config", "[", "\"bridge.delivery_error_reports\"", "]", ":", "await", "self", ".", "_send_message", "(", "self", ".", "main_intent", ",", "TextMessageEventContent", "(", "msgtype", "=", "MessageType", ".", "NOTICE", ",", "body", "=", "msg", ")", ")" ]
https://github.com/mautrix/telegram/blob/9f48eca5a6654bc38012cb761ecaaaf416aabdd0/mautrix_telegram/portal.py#L1685-L1706
bikalims/bika.lims
35e4bbdb5a3912cae0b5eb13e51097c8b0486349
bika/lims/browser/widgets/artemplateanalyseswidget.py
python
ARTemplateAnalysesView.__init__
(self, context, request, fieldvalue=[], allow_edit=False)
[]
def __init__(self, context, request, fieldvalue=[], allow_edit=False): super(ARTemplateAnalysesView, self).__init__(context, request) self.catalog = "bika_setup_catalog" self.contentFilter = {'portal_type': 'AnalysisService', 'sort_on': 'sortable_title', 'inactive_state': 'active',} self.context_actions = {} self.base_url = self.context.absolute_url() self.view_url = self.base_url self.show_sort_column = False self.show_select_row = False self.show_select_all_checkbox = False self.show_column_toggles = False self.show_select_column = True self.pagesize = 999999 self.allow_edit = allow_edit self.form_id = "analyses" self.categories = [] self.do_cats = self.context.bika_setup.getCategoriseAnalysisServices() if self.do_cats: self.pagesize = 999999 # hide batching controls self.show_categories = True self.expand_all_categories = False self.ajax_categories = True self.ajax_categories_url = self.context.absolute_url() + \ "/artemplate_analysesview" self.category_index = 'getCategoryTitle' self.columns = { 'Title': {'title': _('Service'), 'index': 'sortable_title', 'sortable': False,}, 'Price': {'title': _('Price'), 'sortable': False,}, 'Partition': {'title': _('Partition'), 'sortable': False,}, } self.review_states = [ {'id':'default', 'title': _('All'), 'contentFilter':{}, 'columns': ['Title', 'Price', 'Partition', ], 'transitions': [{'id':'empty'}, ], # none }, ] if not self.context.bika_setup.getShowPrices(): self.review_states[0]['columns'].remove('Price') self.fieldvalue = fieldvalue self.selected = [x['service_uid'] for x in fieldvalue] if self.aq_parent.portal_type == 'ARTemplate': # Custom settings for the Analysis Services assigned to # the Analysis Request Template # https://jira.bikalabs.com/browse/LIMS-1324 self.artemplate = self.aq_parent self.columns['Hidden'] = {'title': _('Hidden'), 'sortable': False, 'type': 'boolean'} self.review_states[0]['columns'].insert(1, 'Hidden')
[ "def", "__init__", "(", "self", ",", "context", ",", "request", ",", "fieldvalue", "=", "[", "]", ",", "allow_edit", "=", "False", ")", ":", "super", "(", "ARTemplateAnalysesView", ",", "self", ")", ".", "__init__", "(", "context", ",", "request", ")", "self", ".", "catalog", "=", "\"bika_setup_catalog\"", "self", ".", "contentFilter", "=", "{", "'portal_type'", ":", "'AnalysisService'", ",", "'sort_on'", ":", "'sortable_title'", ",", "'inactive_state'", ":", "'active'", ",", "}", "self", ".", "context_actions", "=", "{", "}", "self", ".", "base_url", "=", "self", ".", "context", ".", "absolute_url", "(", ")", "self", ".", "view_url", "=", "self", ".", "base_url", "self", ".", "show_sort_column", "=", "False", "self", ".", "show_select_row", "=", "False", "self", ".", "show_select_all_checkbox", "=", "False", "self", ".", "show_column_toggles", "=", "False", "self", ".", "show_select_column", "=", "True", "self", ".", "pagesize", "=", "999999", "self", ".", "allow_edit", "=", "allow_edit", "self", ".", "form_id", "=", "\"analyses\"", "self", ".", "categories", "=", "[", "]", "self", ".", "do_cats", "=", "self", ".", "context", ".", "bika_setup", ".", "getCategoriseAnalysisServices", "(", ")", "if", "self", ".", "do_cats", ":", "self", ".", "pagesize", "=", "999999", "# hide batching controls", "self", ".", "show_categories", "=", "True", "self", ".", "expand_all_categories", "=", "False", "self", ".", "ajax_categories", "=", "True", "self", ".", "ajax_categories_url", "=", "self", ".", "context", ".", "absolute_url", "(", ")", "+", "\"/artemplate_analysesview\"", "self", ".", "category_index", "=", "'getCategoryTitle'", "self", ".", "columns", "=", "{", "'Title'", ":", "{", "'title'", ":", "_", "(", "'Service'", ")", ",", "'index'", ":", "'sortable_title'", ",", "'sortable'", ":", "False", ",", "}", ",", "'Price'", ":", "{", "'title'", ":", "_", "(", "'Price'", ")", ",", "'sortable'", ":", "False", ",", "}", ",", "'Partition'", ":", "{", "'title'", ":", "_", "(", "'Partition'", ")", ",", "'sortable'", ":", "False", ",", "}", ",", "}", "self", ".", "review_states", "=", "[", "{", "'id'", ":", "'default'", ",", "'title'", ":", "_", "(", "'All'", ")", ",", "'contentFilter'", ":", "{", "}", ",", "'columns'", ":", "[", "'Title'", ",", "'Price'", ",", "'Partition'", ",", "]", ",", "'transitions'", ":", "[", "{", "'id'", ":", "'empty'", "}", ",", "]", ",", "# none", "}", ",", "]", "if", "not", "self", ".", "context", ".", "bika_setup", ".", "getShowPrices", "(", ")", ":", "self", ".", "review_states", "[", "0", "]", "[", "'columns'", "]", ".", "remove", "(", "'Price'", ")", "self", ".", "fieldvalue", "=", "fieldvalue", "self", ".", "selected", "=", "[", "x", "[", "'service_uid'", "]", "for", "x", "in", "fieldvalue", "]", "if", "self", ".", "aq_parent", ".", "portal_type", "==", "'ARTemplate'", ":", "# Custom settings for the Analysis Services assigned to", "# the Analysis Request Template", "# https://jira.bikalabs.com/browse/LIMS-1324", "self", ".", "artemplate", "=", "self", ".", "aq_parent", "self", ".", "columns", "[", "'Hidden'", "]", "=", "{", "'title'", ":", "_", "(", "'Hidden'", ")", ",", "'sortable'", ":", "False", ",", "'type'", ":", "'boolean'", "}", "self", ".", "review_states", "[", "0", "]", "[", "'columns'", "]", ".", "insert", "(", "1", ",", "'Hidden'", ")" ]
https://github.com/bikalims/bika.lims/blob/35e4bbdb5a3912cae0b5eb13e51097c8b0486349/bika/lims/browser/widgets/artemplateanalyseswidget.py#L25-L90
DetectionTeamUCAS/RRPN_Faster-RCNN_Tensorflow
cca574844df5fc8bbf380227725a4e3106fbc48c
libs/losses/losses.py
python
smooth_l1_loss_rcnn
(bbox_pred, bbox_targets, label, num_classes, sigma=1.0)
return bbox_loss
:param bbox_pred: [-1, (cfgs.CLS_NUM +1) * 5] :param bbox_targets:[-1, (cfgs.CLS_NUM +1) * 5] :param label:[-1] :param num_classes: :param sigma: :return:
[]
def smooth_l1_loss_rcnn(bbox_pred, bbox_targets, label, num_classes, sigma=1.0): ''' :param bbox_pred: [-1, (cfgs.CLS_NUM +1) * 5] :param bbox_targets:[-1, (cfgs.CLS_NUM +1) * 5] :param label:[-1] :param num_classes: :param sigma: :return: ''' outside_mask = tf.stop_gradient(tf.to_float(tf.greater(label, 0))) bbox_pred = tf.reshape(bbox_pred, [-1, num_classes, 5]) bbox_targets = tf.reshape(bbox_targets, [-1, num_classes, 5]) value = _smooth_l1_loss_base(bbox_pred, bbox_targets, sigma=sigma) value = tf.reduce_sum(value, 2) value = tf.reshape(value, [-1, num_classes]) inside_mask = tf.one_hot(tf.reshape(label, [-1, 1]), depth=num_classes, axis=1) inside_mask = tf.stop_gradient( tf.to_float(tf.reshape(inside_mask, [-1, num_classes]))) normalizer = tf.to_float(tf.shape(bbox_pred)[0]) bbox_loss = tf.reduce_sum( tf.reduce_sum(value * inside_mask, 1)*outside_mask) / normalizer return bbox_loss
[ "def", "smooth_l1_loss_rcnn", "(", "bbox_pred", ",", "bbox_targets", ",", "label", ",", "num_classes", ",", "sigma", "=", "1.0", ")", ":", "outside_mask", "=", "tf", ".", "stop_gradient", "(", "tf", ".", "to_float", "(", "tf", ".", "greater", "(", "label", ",", "0", ")", ")", ")", "bbox_pred", "=", "tf", ".", "reshape", "(", "bbox_pred", ",", "[", "-", "1", ",", "num_classes", ",", "5", "]", ")", "bbox_targets", "=", "tf", ".", "reshape", "(", "bbox_targets", ",", "[", "-", "1", ",", "num_classes", ",", "5", "]", ")", "value", "=", "_smooth_l1_loss_base", "(", "bbox_pred", ",", "bbox_targets", ",", "sigma", "=", "sigma", ")", "value", "=", "tf", ".", "reduce_sum", "(", "value", ",", "2", ")", "value", "=", "tf", ".", "reshape", "(", "value", ",", "[", "-", "1", ",", "num_classes", "]", ")", "inside_mask", "=", "tf", ".", "one_hot", "(", "tf", ".", "reshape", "(", "label", ",", "[", "-", "1", ",", "1", "]", ")", ",", "depth", "=", "num_classes", ",", "axis", "=", "1", ")", "inside_mask", "=", "tf", ".", "stop_gradient", "(", "tf", ".", "to_float", "(", "tf", ".", "reshape", "(", "inside_mask", ",", "[", "-", "1", ",", "num_classes", "]", ")", ")", ")", "normalizer", "=", "tf", ".", "to_float", "(", "tf", ".", "shape", "(", "bbox_pred", ")", "[", "0", "]", ")", "bbox_loss", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "reduce_sum", "(", "value", "*", "inside_mask", ",", "1", ")", "*", "outside_mask", ")", "/", "normalizer", "return", "bbox_loss" ]
https://github.com/DetectionTeamUCAS/RRPN_Faster-RCNN_Tensorflow/blob/cca574844df5fc8bbf380227725a4e3106fbc48c/libs/losses/losses.py#L60-L92
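The helper _smooth_l1_loss_base is not shown in this record, but in the Fast R-CNN lineage this codebase follows, the sigma-parameterised smooth L1 it conventionally implements is, with d = bbox_pred - bbox_targets:

\mathrm{smooth}_{L_1}(d) =
\begin{cases}
0.5\,(\sigma d)^2 & \text{if } |d| < 1/\sigma^2,\\
|d| - 0.5/\sigma^2 & \text{otherwise.}
\end{cases}

The per-class sums are then masked by the one-hot label (inside_mask) and the positive-label mask (outside_mask) before normalising by the batch size.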
lutris/lutris
66675a4d5537f6b2a2ba2b6df0b3cdf8924c823a
lutris/installer/commands.py
python
CommandsMixin._killable_process
(self, func, *args, **kwargs)
Run function `func` in a separate, killable process.
Run function `func` in a separate, killable process.
[ "Run", "function", "func", "in", "a", "separate", "killable", "process", "." ]
def _killable_process(self, func, *args, **kwargs): """Run function `func` in a separate, killable process.""" with multiprocessing.Pool(1) as process: result_obj = process.apply_async(func, args, kwargs) self.abort_current_task = process.terminate result = result_obj.get() # Wait process end & re-raise exceptions self.abort_current_task = None logger.debug("Process %s returned: %s", func, result) return result
[ "def", "_killable_process", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "multiprocessing", ".", "Pool", "(", "1", ")", "as", "process", ":", "result_obj", "=", "process", ".", "apply_async", "(", "func", ",", "args", ",", "kwargs", ")", "self", ".", "abort_current_task", "=", "process", ".", "terminate", "result", "=", "result_obj", ".", "get", "(", ")", "# Wait process end & re-raise exceptions", "self", ".", "abort_current_task", "=", "None", "logger", ".", "debug", "(", "\"Process %s returned: %s\"", ",", "func", ",", "result", ")", "return", "result" ]
https://github.com/lutris/lutris/blob/66675a4d5537f6b2a2ba2b6df0b3cdf8924c823a/lutris/installer/commands.py#L516-L524
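The pattern generalises outside the class: a one-worker Pool gives you a handle (pool.terminate) that other code can call to abort the task. A self-contained sketch; note the target must be a module-level function so it can be pickled:

import multiprocessing

def slow_square(x):
    # Stand-in for an installer step; module-level so the Pool can pickle it.
    return x * x

def run_killable(func, *args):
    with multiprocessing.Pool(1) as pool:
        result = pool.apply_async(func, args)
        abort = pool.terminate   # hand this to a "cancel" handler to kill the task
        return result.get()      # blocks; re-raises exceptions from the worker

if __name__ == "__main__":
    print(run_killable(slow_square, 7))  # 49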
alberanid/imdbpy
88cf37772186e275eff212857f512669086b382c
imdb/parser/http/movieParser.py
python
makeSplitter
(lstrip=None, sep='|', comments=True, origNotesSep=' (', newNotesSep='::(', strip=None)
return splitter
Return a splitter function suitable for a given set of data.
Return a splitter function suitable for a given set of data.
[ "Return", "a", "splitter", "function", "suitable", "for", "a", "given", "set", "of", "data", "." ]
def makeSplitter(lstrip=None, sep='|', comments=True, origNotesSep=' (', newNotesSep='::(', strip=None): """Return a splitter function suitable for a given set of data.""" def splitter(x): if not x: return x x = x.strip() if not x: return x if lstrip is not None: x = x.lstrip(lstrip).lstrip() lx = x.split(sep) lx[:] = [_f for _f in [j.strip() for j in lx] if _f] if comments: lx[:] = [j.replace(origNotesSep, newNotesSep, 1) for j in lx] if strip: lx[:] = [j.strip(strip) for j in lx] return lx return splitter
[ "def", "makeSplitter", "(", "lstrip", "=", "None", ",", "sep", "=", "'|'", ",", "comments", "=", "True", ",", "origNotesSep", "=", "' ('", ",", "newNotesSep", "=", "'::('", ",", "strip", "=", "None", ")", ":", "def", "splitter", "(", "x", ")", ":", "if", "not", "x", ":", "return", "x", "x", "=", "x", ".", "strip", "(", ")", "if", "not", "x", ":", "return", "x", "if", "lstrip", "is", "not", "None", ":", "x", "=", "x", ".", "lstrip", "(", "lstrip", ")", ".", "lstrip", "(", ")", "lx", "=", "x", ".", "split", "(", "sep", ")", "lx", "[", ":", "]", "=", "[", "_f", "for", "_f", "in", "[", "j", ".", "strip", "(", ")", "for", "j", "in", "lx", "]", "if", "_f", "]", "if", "comments", ":", "lx", "[", ":", "]", "=", "[", "j", ".", "replace", "(", "origNotesSep", ",", "newNotesSep", ",", "1", ")", "for", "j", "in", "lx", "]", "if", "strip", ":", "lx", "[", ":", "]", "=", "[", "j", ".", "strip", "(", "strip", ")", "for", "j", "in", "lx", "]", "return", "lx", "return", "splitter" ]
https://github.com/alberanid/imdbpy/blob/88cf37772186e275eff212857f512669086b382c/imdb/parser/http/movieParser.py#L144-L164
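A quick usage sketch, assuming makeSplitter is imported from the module above; the expected output is worked out by hand from the code (' (' notes are rewritten to '::('):

split_pipe = makeSplitter()  # defaults: sep='|', comments=True
print(split_pipe('John Doe | Jane Roe (uncredited)'))
# -> ['John Doe', 'Jane Roe::(uncredited)']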
kirthevasank/nasbot
3c745dc986be30e3721087c8fa768099032a0802
utils/oper_utils.py
python
random_sample
(obj, bounds, max_evals, vectorised=True)
return rand_pts, obj_vals
Optimises a function by randomly sampling and choosing its maximum.
Optimises a function by randomly sampling and choosing its maximum.
[ "Optimises", "a", "function", "by", "randomly", "sampling", "and", "choosing", "its", "maximum", "." ]
def random_sample(obj, bounds, max_evals, vectorised=True): """ Optimises a function by randomly sampling and choosing its maximum. """ dim = len(bounds) rand_pts = map_to_bounds(np.random.random((int(max_evals), dim)), bounds) if vectorised: obj_vals = obj(rand_pts) else: obj_vals = np.array([obj(x) for x in rand_pts]) return rand_pts, obj_vals
[ "def", "random_sample", "(", "obj", ",", "bounds", ",", "max_evals", ",", "vectorised", "=", "True", ")", ":", "dim", "=", "len", "(", "bounds", ")", "rand_pts", "=", "map_to_bounds", "(", "np", ".", "random", ".", "random", "(", "(", "int", "(", "max_evals", ")", ",", "dim", ")", ")", ",", "bounds", ")", "if", "vectorised", ":", "obj_vals", "=", "obj", "(", "rand_pts", ")", "else", ":", "obj_vals", "=", "np", ".", "array", "(", "[", "obj", "(", "x", ")", "for", "x", "in", "rand_pts", "]", ")", "return", "rand_pts", ",", "obj_vals" ]
https://github.com/kirthevasank/nasbot/blob/3c745dc986be30e3721087c8fa768099032a0802/utils/oper_utils.py#L44-L52
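A hedged usage sketch, maximising a simple concave function over the unit box; the import path mirrors this record and bounds are (min, max) pairs per dimension:

import numpy as np
from utils.oper_utils import random_sample  # path from this record (assumed importable)

obj = lambda X: -np.sum(X ** 2, axis=1)        # vectorised: one value per sample row
bounds = np.array([[-1.0, 1.0], [-1.0, 1.0]])  # per-dimension (min, max)
pts, vals = random_sample(obj, bounds, max_evals=500)
print(pts[np.argmax(vals)])                    # best point; should sit near the origin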
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/agw/ultimatelistctrl.py
python
UltimateListItemAttr.HasFooterFont
(self)
return self._footerFont.IsOk()
Returns ``True`` if the currently set font for the footer item is valid.
Returns ``True`` if the currently set font for the footer item is valid.
[ "Returns", "True", "if", "the", "currently", "set", "font", "for", "the", "footer", "item", "is", "valid", "." ]
def HasFooterFont(self): """ Returns ``True`` if the currently set font for the footer item is valid. """ return self._footerFont.IsOk()
[ "def", "HasFooterFont", "(", "self", ")", ":", "return", "self", ".", "_footerFont", ".", "IsOk", "(", ")" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/ultimatelistctrl.py#L1286-L1292
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/recorder/util.py
python
async_migration_in_progress
(hass: HomeAssistant)
return hass.data[DATA_INSTANCE].migration_in_progress
Determine if a migration is in progress. This is a thin wrapper that allows us to change out the implementation later.
Determine if a migration is in progress.
[ "Determine", "if", "a", "migration", "is", "in", "progress", "." ]
def async_migration_in_progress(hass: HomeAssistant) -> bool: """Determine if a migration is in progress. This is a thin wrapper that allows us to change out the implementation later. """ if DATA_INSTANCE not in hass.data: return False return hass.data[DATA_INSTANCE].migration_in_progress
[ "def", "async_migration_in_progress", "(", "hass", ":", "HomeAssistant", ")", "->", "bool", ":", "if", "DATA_INSTANCE", "not", "in", "hass", ".", "data", ":", "return", "False", "return", "hass", ".", "data", "[", "DATA_INSTANCE", "]", ".", "migration_in_progress" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/recorder/util.py#L482-L490
SoCo/SoCo
e83fef84d2645d05265dbd574598518655a9c125
soco/soap.py
python
SoapMessage.prepare
(self)
return (headers, data)
Prepare the SOAP message for sending to the server.
Prepare the SOAP message for sending to the server.
[ "Prepare", "the", "SOAP", "message", "for", "sending", "to", "the", "server", "." ]
def prepare(self): """Prepare the SOAP message for sending to the server.""" headers = self.prepare_headers(self.http_headers, self.soap_action) soap_header = self.prepare_soap_header(self.soap_header) soap_body = self.prepare_soap_body(self.method, self.parameters, self.namespace) data = self.prepare_soap_envelope(soap_header, soap_body) return (headers, data)
[ "def", "prepare", "(", "self", ")", ":", "headers", "=", "self", ".", "prepare_headers", "(", "self", ".", "http_headers", ",", "self", ".", "soap_action", ")", "soap_header", "=", "self", ".", "prepare_soap_header", "(", "self", ".", "soap_header", ")", "soap_body", "=", "self", ".", "prepare_soap_body", "(", "self", ".", "method", ",", "self", ".", "parameters", ",", "self", ".", "namespace", ")", "data", "=", "self", ".", "prepare_soap_envelope", "(", "soap_header", ",", "soap_body", ")", "return", "(", "headers", ",", "data", ")" ]
https://github.com/SoCo/SoCo/blob/e83fef84d2645d05265dbd574598518655a9c125/soco/soap.py#L257-L264
annoviko/pyclustering
bf4f51a472622292627ec8c294eb205585e50f52
pyclustering/core/som_wrapper.py
python
som_get_awards
(som_pointer)
return result
! @brief Returns a list with the number of objects captured by each neuron. @param[in] som_pointer (c_pointer): pointer to the self-organized map object.
!
[ "!" ]
def som_get_awards(som_pointer): """! @brief Returns a list with the number of objects captured by each neuron. @param[in] som_pointer (c_pointer): pointer to the self-organized map object. """ ccore = ccore_library.get() ccore.som_get_awards.restype = POINTER(pyclustering_package) package = ccore.som_get_awards(som_pointer) result = package_extractor(package).extract() return result
[ "def", "som_get_awards", "(", "som_pointer", ")", ":", "ccore", "=", "ccore_library", ".", "get", "(", ")", "ccore", ".", "som_get_awards", ".", "restype", "=", "POINTER", "(", "pyclustering_package", ")", "package", "=", "ccore", ".", "som_get_awards", "(", "som_pointer", ")", "result", "=", "package_extractor", "(", "package", ")", ".", "extract", "(", ")", "return", "result" ]
https://github.com/annoviko/pyclustering/blob/bf4f51a472622292627ec8c294eb205585e50f52/pyclustering/core/som_wrapper.py#L204-L218
dcramer/django-sphinx
0071d1cae5390d0ec8c669786ca3c7275abb6410
djangosphinx/apis/api278/__init__.py
python
SphinxClient.SetFilter
( self, attribute, values, exclude=0 )
Set values set filter. Only match records where 'attribute' value is in given 'values' set.
Set values set filter. Only match records where 'attribute' value is in given 'values' set.
[ "Set", "values", "set", "filter", ".", "Only", "match", "records", "where", "attribute", "value", "is", "in", "given", "values", "set", "." ]
def SetFilter ( self, attribute, values, exclude=0 ): """ Set values set filter. Only match records where 'attribute' value is in given 'values' set. """ assert(isinstance(attribute, str)) assert iter(values) for value in values: assert(isinstance(value, (int, long))) self._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )
[ "def", "SetFilter", "(", "self", ",", "attribute", ",", "values", ",", "exclude", "=", "0", ")", ":", "assert", "(", "isinstance", "(", "attribute", ",", "str", ")", ")", "assert", "iter", "(", "values", ")", "for", "value", "in", "values", ":", "assert", "(", "isinstance", "(", "value", ",", "(", "int", ",", "long", ")", ")", ")", "self", ".", "_filters", ".", "append", "(", "{", "'type'", ":", "SPH_FILTER_VALUES", ",", "'attr'", ":", "attribute", ",", "'exclude'", ":", "exclude", ",", "'values'", ":", "values", "}", ")" ]
https://github.com/dcramer/django-sphinx/blob/0071d1cae5390d0ec8c669786ca3c7275abb6410/djangosphinx/apis/api278/__init__.py#L364-L375
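In the classic sphinxapi workflow the filter is set on the client before issuing the query. A hedged sketch assuming SphinxClient comes from the same module; server address, attribute names, and index name are placeholders:

cl = SphinxClient()
cl.SetServer('localhost', 9312)               # host/port are placeholders
cl.SetFilter('group_id', [1, 2, 3])           # keep docs whose group_id is 1, 2 or 3
cl.SetFilter('deleted', [1], exclude=1)       # drop docs flagged as deleted
result = cl.Query('search terms', 'myindex')  # attribute and index names assumed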