Dataset columns (field type and value-length statistics):

    nwo                 string     length 5 to 106
    sha                 string     length 40 (fixed)
    path                string     length 4 to 174
    language            string     1 distinct value
    identifier          string     length 1 to 140
    parameters          string     length 0 to 87.7k
    argument_list       string     1 distinct value
    return_statement    string     length 0 to 426k
    docstring           string     length 0 to 64.3k
    docstring_summary   string     length 0 to 26.3k
    docstring_tokens    sequence
    function            string     length 18 to 4.83M
    function_tokens     sequence
    url                 string     length 83 to 304
diyjac/SDC-P5
818a2de532c37f16761e2913ca3ff18d2de9f828
vehicleLab/chogtrainingRGB5.py
python
get_hog_features
(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True)
[]
def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
    # Call with two outputs if vis==True
    if vis == True:
        features, hog_image = hog(img, orientations=orient,
                                  pixels_per_cell=(pix_per_cell, pix_per_cell),
                                  cells_per_block=(cell_per_block, cell_per_block),
                                  transform_sqrt=True,
                                  visualise=vis, feature_vector=feature_vec)
        return features, hog_image
    # Otherwise call with one output
    else:
        features = hog(img, orientations=orient,
                       pixels_per_cell=(pix_per_cell, pix_per_cell),
                       cells_per_block=(cell_per_block, cell_per_block),
                       transform_sqrt=True,
                       visualise=vis, feature_vector=feature_vec)
        return features
[ "def", "get_hog_features", "(", "img", ",", "orient", ",", "pix_per_cell", ",", "cell_per_block", ",", "vis", "=", "False", ",", "feature_vec", "=", "True", ")", ":", "# Call with two outputs if vis==True", "if", "vis", "==", "True", ":", "features", ",", "hog_image", "=", "hog", "(", "img", ",", "orientations", "=", "orient", ",", "pixels_per_cell", "=", "(", "pix_per_cell", ",", "pix_per_cell", ")", ",", "cells_per_block", "=", "(", "cell_per_block", ",", "cell_per_block", ")", ",", "transform_sqrt", "=", "True", ",", "visualise", "=", "vis", ",", "feature_vector", "=", "feature_vec", ")", "return", "features", ",", "hog_image", "# Otherwise call with one output", "else", ":", "features", "=", "hog", "(", "img", ",", "orientations", "=", "orient", ",", "pixels_per_cell", "=", "(", "pix_per_cell", ",", "pix_per_cell", ")", ",", "cells_per_block", "=", "(", "cell_per_block", ",", "cell_per_block", ")", ",", "transform_sqrt", "=", "True", ",", "visualise", "=", "vis", ",", "feature_vector", "=", "feature_vec", ")", "return", "features" ]
https://github.com/diyjac/SDC-P5/blob/818a2de532c37f16761e2913ca3ff18d2de9f828/vehicleLab/chogtrainingRGB5.py#L161-L174
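A minimal usage sketch for the get_hog_features helper in this row. The hog call inside it is assumed to come from skimage.feature (the import sits outside the excerpt), the file name test_car.png is a hypothetical placeholder, and the visualise keyword it forwards is only accepted by older scikit-image releases.

    import matplotlib.image as mpimg
    import numpy as np
    from skimage.feature import hog  # needed because get_hog_features calls hog()

    img = mpimg.imread('test_car.png')   # hypothetical sample image
    gray = np.mean(img, axis=2)          # reduce to a single channel for HOG
    features, hog_image = get_hog_features(gray, orient=9, pix_per_cell=8,
                                           cell_per_block=2, vis=True,
                                           feature_vec=True)
    print(features.shape)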
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/document/document.py
python
Document.apply_json_patch_string
(self, patch)
Apply a JSON patch provided as a string. Args: patch (str) : Returns: None
Apply a JSON patch provided as a string.
[ "Apply", "a", "JSON", "patch", "provided", "as", "a", "string", "." ]
def apply_json_patch_string(self, patch):
    ''' Apply a JSON patch provided as a string.

    Args:
        patch (str) :

    Returns:
        None

    '''
    json_parsed = loads(patch)
    self.apply_json_patch(json_parsed)
[ "def", "apply_json_patch_string", "(", "self", ",", "patch", ")", ":", "json_parsed", "=", "loads", "(", "patch", ")", "self", ".", "apply_json_patch", "(", "json_parsed", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/document/document.py#L437-L448
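A tiny, hedged sketch of calling the method above on a bokeh Document; the empty patch string is illustrative only and assumes the patch layout used by bokeh 1.4 (top-level "events" and "references" lists).

    from bokeh.document import Document

    doc = Document()
    # An empty patch: nothing to apply, but it exercises the string -> JSON path.
    doc.apply_json_patch_string('{"events": [], "references": []}')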
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
sklearn/utils/__init__.py
python
gen_batches
(n, batch_size, *, min_batch_size=0)
Generator to create slices containing batch_size elements, from 0 to n. The last slice may contain less than batch_size elements, when batch_size does not divide n. Parameters ---------- n : int batch_size : int Number of element in each batch. min_batch_size : int, default=0 Minimum batch size to produce. Yields ------ slice of batch_size elements See Also -------- gen_even_slices: Generator to create n_packs slices going up to n. Examples -------- >>> from sklearn.utils import gen_batches >>> list(gen_batches(7, 3)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(6, 3)) [slice(0, 3, None), slice(3, 6, None)] >>> list(gen_batches(2, 3)) [slice(0, 2, None)] >>> list(gen_batches(7, 3, min_batch_size=0)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(7, 3, min_batch_size=2)) [slice(0, 3, None), slice(3, 7, None)]
Generator to create slices containing batch_size elements, from 0 to n.
[ "Generator", "to", "create", "slices", "containing", "batch_size", "elements", "from", "0", "to", "n", "." ]
def gen_batches(n, batch_size, *, min_batch_size=0):
    """Generator to create slices containing batch_size elements, from 0 to n.

    The last slice may contain less than batch_size elements, when
    batch_size does not divide n.

    Parameters
    ----------
    n : int
    batch_size : int
        Number of element in each batch.
    min_batch_size : int, default=0
        Minimum batch size to produce.

    Yields
    ------
    slice of batch_size elements

    See Also
    --------
    gen_even_slices: Generator to create n_packs slices going up to n.

    Examples
    --------
    >>> from sklearn.utils import gen_batches
    >>> list(gen_batches(7, 3))
    [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
    >>> list(gen_batches(6, 3))
    [slice(0, 3, None), slice(3, 6, None)]
    >>> list(gen_batches(2, 3))
    [slice(0, 2, None)]
    >>> list(gen_batches(7, 3, min_batch_size=0))
    [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
    >>> list(gen_batches(7, 3, min_batch_size=2))
    [slice(0, 3, None), slice(3, 7, None)]
    """
    if not isinstance(batch_size, numbers.Integral):
        raise TypeError(
            "gen_batches got batch_size=%s, must be an integer" % batch_size
        )
    if batch_size <= 0:
        raise ValueError("gen_batches got batch_size=%s, must be positive" % batch_size)
    start = 0
    for _ in range(int(n // batch_size)):
        end = start + batch_size
        if end + min_batch_size > n:
            continue
        yield slice(start, end)
        start = end
    if start < n:
        yield slice(start, n)
[ "def", "gen_batches", "(", "n", ",", "batch_size", ",", "*", ",", "min_batch_size", "=", "0", ")", ":", "if", "not", "isinstance", "(", "batch_size", ",", "numbers", ".", "Integral", ")", ":", "raise", "TypeError", "(", "\"gen_batches got batch_size=%s, must be an integer\"", "%", "batch_size", ")", "if", "batch_size", "<=", "0", ":", "raise", "ValueError", "(", "\"gen_batches got batch_size=%s, must be positive\"", "%", "batch_size", ")", "start", "=", "0", "for", "_", "in", "range", "(", "int", "(", "n", "//", "batch_size", ")", ")", ":", "end", "=", "start", "+", "batch_size", "if", "end", "+", "min_batch_size", ">", "n", ":", "continue", "yield", "slice", "(", "start", ",", "end", ")", "start", "=", "end", "if", "start", "<", "n", ":", "yield", "slice", "(", "start", ",", "n", ")" ]
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/utils/__init__.py#L665-L715
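A short, self-contained sketch of the typical use of gen_batches above: walking an array in fixed-size chunks. The array and batch size are illustrative.

    import numpy as np
    from sklearn.utils import gen_batches

    X = np.arange(10).reshape(10, 1)          # toy data: 10 samples
    for batch in gen_batches(X.shape[0], 4):
        print(X[batch].ravel())               # -> [0 1 2 3], [4 5 6 7], [8 9]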
CGCookie/retopoflow
3d8b3a47d1d661f99ab0aeb21d31370bf15de35e
addon_common/common/drawing.py
python
CC_DRAW.border
(cls, *, width=None, color=None)
[]
def border(cls, *, width=None, color=None):
    s = Drawing._instance.scale
    if width is not None:
        CC_DRAW._border_width = s(width)
    if color is not None:
        CC_DRAW._border_color = color
    cls.update()
[ "def", "border", "(", "cls", ",", "*", ",", "width", "=", "None", ",", "color", "=", "None", ")", ":", "s", "=", "Drawing", ".", "_instance", ".", "scale", "if", "width", "is", "not", "None", ":", "CC_DRAW", ".", "_border_width", "=", "s", "(", "width", ")", "if", "color", "is", "not", "None", ":", "CC_DRAW", ".", "_border_color", "=", "color", "cls", ".", "update", "(", ")" ]
https://github.com/CGCookie/retopoflow/blob/3d8b3a47d1d661f99ab0aeb21d31370bf15de35e/addon_common/common/drawing.py#L883-L889
plastex/plastex
af1628719b50cf25fbe80f16a3e100d566e9bc32
plasTeX/Renderers/PageTemplate/simpletal/simpleTAL.py
python
SubTemplate.getProgram
(self)
return (self.commandList, self.startRange, self.symbolTable[self.endRangeSymbol]+1, self.symbolTable)
Returns a tuple of (commandList, startPoint, endPoint, symbolTable)
Returns a tuple of (commandList, startPoint, endPoint, symbolTable)
[ "Returns", "a", "tuple", "of", "(", "commandList", "startPoint", "endPoint", "symbolTable", ")" ]
def getProgram (self):
    """ Returns a tuple of (commandList, startPoint, endPoint, symbolTable) """
    return (self.commandList, self.startRange, self.symbolTable[self.endRangeSymbol]+1, self.symbolTable)
[ "def", "getProgram", "(", "self", ")", ":", "return", "(", "self", ".", "commandList", ",", "self", ".", "startRange", ",", "self", ".", "symbolTable", "[", "self", ".", "endRangeSymbol", "]", "+", "1", ",", "self", ".", "symbolTable", ")" ]
https://github.com/plastex/plastex/blob/af1628719b50cf25fbe80f16a3e100d566e9bc32/plasTeX/Renderers/PageTemplate/simpletal/simpleTAL.py#L677-L679
uvemas/ViTables
2ce8ec26f85c7392677cf0c7c83ad1ddd7d071e0
vitables/plugins/dbstreesort/dbs_tree_sort.py
python
customiseDBsTreeModel
()
Slot connected to the convenience dbtree_model_created signal.
Slot connected to the convenience dbtree_model_created signal.
[ "Slot", "connected", "to", "the", "convenience", "dbtree_model_created", "signal", "." ]
def customiseDBsTreeModel():
    """Slot connected to the convenience dbtree_model_created signal.
    """

    # The absolute path of the INI file
    ini_filename = os.path.join(os.path.dirname(__file__),
                                'sorting_algorithm.ini')
    config = configparser.ConfigParser()
    try:
        config.read(ini_filename)
        initial_sorting = config.get('DBsTreeSorting', 'algorithm')
    except (IOError, configparser.ParsingError):
        log.error(
            translate('DBsTreeSort', 'The configuration file of the '
                      'dbs_tree_sort plugin cannot be read.',
                      'DBsTreeSort error message'))
        return

    # The essence of the plugin is pretty simple, just monkeypatch
    # the insertRows() method of the model to get the desired result.
    # TODO how can the nodes be chronologically sorted?
    if initial_sorting == 'human':
        dbstreemodel.DBsTreeModel.insertRows = humanSort
    elif initial_sorting == 'alphabetical':
        dbstreemodel.DBsTreeModel.insertRows = alphabeticalSort
    else:
        log.warning(
            translate('DBsTreeSort', 'Unknown sorting algorithm: {}.',
                      'DBsTreeSort error message').format(initial_sorting))
[ "def", "customiseDBsTreeModel", "(", ")", ":", "# The absolute path of the INI file", "ini_filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'sorting_algorithm.ini'", ")", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "try", ":", "config", ".", "read", "(", "ini_filename", ")", "initial_sorting", "=", "config", ".", "get", "(", "'DBsTreeSorting'", ",", "'algorithm'", ")", "except", "(", "IOError", ",", "configparser", ".", "ParsingError", ")", ":", "log", ".", "error", "(", "translate", "(", "'DBsTreeSort'", ",", "'The configuration file of the '", "'dbs_tree_sort plugin cannot be read.'", ",", "'DBsTreeSort error message'", ")", ")", "return", "# The essence of the plugin is pretty simple, just monkeypatch", "# the insertRows() method of the model to get the desired result.", "# TODO how can the nodes be chronologically sorted?", "if", "initial_sorting", "==", "'human'", ":", "dbstreemodel", ".", "DBsTreeModel", ".", "insertRows", "=", "humanSort", "elif", "initial_sorting", "==", "'alphabetical'", ":", "dbstreemodel", ".", "DBsTreeModel", ".", "insertRows", "=", "alphabeticalSort", "else", ":", "log", ".", "warning", "(", "translate", "(", "'DBsTreeSort'", ",", "'Unknown sorting algorithm: {}.'", ",", "'DBsTreeSort error message'", ")", ".", "format", "(", "initial_sorting", ")", ")" ]
https://github.com/uvemas/ViTables/blob/2ce8ec26f85c7392677cf0c7c83ad1ddd7d071e0/vitables/plugins/dbstreesort/dbs_tree_sort.py#L56-L84
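The plugin function above reads a sorting_algorithm.ini file that sits next to the module. A minimal sketch of the INI content it expects, inferred from the config.get('DBsTreeSorting', 'algorithm') call and the two values it recognises ('human', 'alphabetical'):

    import configparser

    # Illustrative sorting_algorithm.ini content; the section and option names are
    # taken from the plugin code, 'human' is one of its two accepted values.
    ini_text = "[DBsTreeSorting]\nalgorithm = human\n"

    config = configparser.ConfigParser()
    config.read_string(ini_text)
    print(config.get('DBsTreeSorting', 'algorithm'))   # -> human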
treigerm/WaterNet
5f30e796b03519b1d79be2ac1f148b873bf9e877
waterNet/model.py
python
get_matrix_form
(features, labels, tile_size)
return np.array(features), np.array(labels)
Transform a list of triples of features and labels. To a matrix which contains only the tiles used for training the model.
Transform a list of triples of features and labels. To a matrix which contains only the tiles used for training the model.
[ "Transform", "a", "list", "of", "triples", "of", "features", "and", "labels", ".", "To", "a", "matrix", "which", "contains", "only", "the", "tiles", "used", "for", "training", "the", "model", "." ]
def get_matrix_form(features, labels, tile_size):
    """Transform a list of triples of features and labels. To a matrix which contains
    only the tiles used for training the model."""
    features = [tile for tile, position, path in features]
    labels = [tile for tile, position, path in labels]

    # The model will have one output corresponding to each pixel in the feature tile.
    # So we need to transform the labels which are given as a 2D bitmap into a vector.
    labels = np.reshape(labels, (len(labels), tile_size * tile_size))
    return np.array(features), np.array(labels)
[ "def", "get_matrix_form", "(", "features", ",", "labels", ",", "tile_size", ")", ":", "features", "=", "[", "tile", "for", "tile", ",", "position", ",", "path", "in", "features", "]", "labels", "=", "[", "tile", "for", "tile", ",", "position", ",", "path", "in", "labels", "]", "# The model will have one output corresponding to each pixel in the feature tile.", "# So we need to transform the labels which are given as a 2D bitmap into a vector.", "labels", "=", "np", ".", "reshape", "(", "labels", ",", "(", "len", "(", "labels", ")", ",", "tile_size", "*", "tile_size", ")", ")", "return", "np", ".", "array", "(", "features", ")", ",", "np", ".", "array", "(", "labels", ")" ]
https://github.com/treigerm/WaterNet/blob/5f30e796b03519b1d79be2ac1f148b873bf9e877/waterNet/model.py#L140-L149
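A small sketch of the (tile, position, path) triples that get_matrix_form above unpacks, using random arrays in place of real image tiles; tile_size, shapes, and file names are illustrative.

    import numpy as np

    tile_size = 64
    # Each entry is a (tile, position, path) triple, exactly as unpacked in the function.
    features = [(np.random.rand(tile_size, tile_size, 3), (0, 0), 'raw.tif')]
    labels = [(np.random.randint(0, 2, (tile_size, tile_size)), (0, 0), 'label.tif')]

    X, y = get_matrix_form(features, labels, tile_size)
    print(X.shape, y.shape)   # -> (1, 64, 64, 3) (1, 4096)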
mtianyan/OnlineMooc
51a910e27c8d2808a8a5198b4db31f463e646bf6
tyadmin_api/utils.py
python
random_str
(random_length=8)
return str_base
[]
def random_str(random_length=8):
    str_base = ''
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    length = len(chars) - 1
    random = Random()
    for i in range(random_length):
        str_base += chars[random.randint(0, length)]
    return str_base
[ "def", "random_str", "(", "random_length", "=", "8", ")", ":", "str_base", "=", "''", "chars", "=", "'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'", "length", "=", "len", "(", "chars", ")", "-", "1", "random", "=", "Random", "(", ")", "for", "i", "in", "range", "(", "random_length", ")", ":", "str_base", "+=", "chars", "[", "random", ".", "randint", "(", "0", ",", "length", ")", "]", "return", "str_base" ]
https://github.com/mtianyan/OnlineMooc/blob/51a910e27c8d2808a8a5198b4db31f463e646bf6/tyadmin_api/utils.py#L28-L35
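A quick usage sketch of random_str above; it assumes an import equivalent to `from random import Random` is in scope (the helper instantiates Random() internally), and the sample output is illustrative since no seed is set.

    from random import Random   # assumed import; the helper calls Random()

    token = random_str()                     # default length 8, e.g. 'aK3xZq0B'
    longer = random_str(random_length=16)
    print(len(token), len(longer))           # -> 8 16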
Source-Python-Dev-Team/Source.Python
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
addons/source-python/packages/site-packages/pytz/tzinfo.py
python
StaticTzInfo.fromutc
(self, dt)
return (dt + self._utcoffset).replace(tzinfo=self)
See datetime.tzinfo.fromutc
See datetime.tzinfo.fromutc
[ "See", "datetime", ".", "tzinfo", ".", "fromutc" ]
def fromutc(self, dt):
    '''See datetime.tzinfo.fromutc'''
    if dt.tzinfo is not None and dt.tzinfo is not self:
        raise ValueError('fromutc: dt.tzinfo is not self')
    return (dt + self._utcoffset).replace(tzinfo=self)
[ "def", "fromutc", "(", "self", ",", "dt", ")", ":", "if", "dt", ".", "tzinfo", "is", "not", "None", "and", "dt", ".", "tzinfo", "is", "not", "self", ":", "raise", "ValueError", "(", "'fromutc: dt.tzinfo is not self'", ")", "return", "(", "dt", "+", "self", ".", "_utcoffset", ")", ".", "replace", "(", "tzinfo", "=", "self", ")" ]
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/packages/site-packages/pytz/tzinfo.py#L75-L79
learningequality/ka-lite
571918ea668013dcf022286ea85eff1c5333fb8b
kalite/packages/bundled/django/contrib/gis/geos/geometry.py
python
GEOSGeometry.__init__
(self, geo_input, srid=None)
The base constructor for GEOS geometry objects, and may take the following inputs: * strings: - WKT - HEXEWKB (a PostGIS-specific canonical form) - GeoJSON (requires GDAL) * buffer: - WKB The `srid` keyword is used to specify the Source Reference Identifier (SRID) number for this Geometry. If not set, the SRID will be None.
The base constructor for GEOS geometry objects, and may take the following inputs:
[ "The", "base", "constructor", "for", "GEOS", "geometry", "objects", "and", "may", "take", "the", "following", "inputs", ":" ]
def __init__(self, geo_input, srid=None): """ The base constructor for GEOS geometry objects, and may take the following inputs: * strings: - WKT - HEXEWKB (a PostGIS-specific canonical form) - GeoJSON (requires GDAL) * buffer: - WKB The `srid` keyword is used to specify the Source Reference Identifier (SRID) number for this Geometry. If not set, the SRID will be None. """ if isinstance(geo_input, bytes): geo_input = force_text(geo_input) if isinstance(geo_input, six.string_types): wkt_m = wkt_regex.match(geo_input) if wkt_m: # Handling WKT input. if wkt_m.group('srid'): srid = int(wkt_m.group('srid')) g = wkt_r().read(force_bytes(wkt_m.group('wkt'))) elif hex_regex.match(geo_input): # Handling HEXEWKB input. g = wkb_r().read(force_bytes(geo_input)) elif gdal.HAS_GDAL and json_regex.match(geo_input): # Handling GeoJSON input. g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb) else: raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.') elif isinstance(geo_input, GEOM_PTR): # When the input is a pointer to a geomtry (GEOM_PTR). g = geo_input elif isinstance(geo_input, memoryview): # When the input is a buffer (WKB). g = wkb_r().read(geo_input) elif isinstance(geo_input, GEOSGeometry): g = capi.geom_clone(geo_input.ptr) else: # Invalid geometry type. raise TypeError('Improper geometry input type: %s' % str(type(geo_input))) if bool(g): # Setting the pointer object with a valid pointer. self.ptr = g else: raise GEOSException('Could not initialize GEOS Geometry with given input.') # Post-initialization setup. self._post_init(srid)
[ "def", "__init__", "(", "self", ",", "geo_input", ",", "srid", "=", "None", ")", ":", "if", "isinstance", "(", "geo_input", ",", "bytes", ")", ":", "geo_input", "=", "force_text", "(", "geo_input", ")", "if", "isinstance", "(", "geo_input", ",", "six", ".", "string_types", ")", ":", "wkt_m", "=", "wkt_regex", ".", "match", "(", "geo_input", ")", "if", "wkt_m", ":", "# Handling WKT input.", "if", "wkt_m", ".", "group", "(", "'srid'", ")", ":", "srid", "=", "int", "(", "wkt_m", ".", "group", "(", "'srid'", ")", ")", "g", "=", "wkt_r", "(", ")", ".", "read", "(", "force_bytes", "(", "wkt_m", ".", "group", "(", "'wkt'", ")", ")", ")", "elif", "hex_regex", ".", "match", "(", "geo_input", ")", ":", "# Handling HEXEWKB input.", "g", "=", "wkb_r", "(", ")", ".", "read", "(", "force_bytes", "(", "geo_input", ")", ")", "elif", "gdal", ".", "HAS_GDAL", "and", "json_regex", ".", "match", "(", "geo_input", ")", ":", "# Handling GeoJSON input.", "g", "=", "wkb_r", "(", ")", ".", "read", "(", "gdal", ".", "OGRGeometry", "(", "geo_input", ")", ".", "wkb", ")", "else", ":", "raise", "ValueError", "(", "'String or unicode input unrecognized as WKT EWKT, and HEXEWKB.'", ")", "elif", "isinstance", "(", "geo_input", ",", "GEOM_PTR", ")", ":", "# When the input is a pointer to a geomtry (GEOM_PTR).", "g", "=", "geo_input", "elif", "isinstance", "(", "geo_input", ",", "memoryview", ")", ":", "# When the input is a buffer (WKB).", "g", "=", "wkb_r", "(", ")", ".", "read", "(", "geo_input", ")", "elif", "isinstance", "(", "geo_input", ",", "GEOSGeometry", ")", ":", "g", "=", "capi", ".", "geom_clone", "(", "geo_input", ".", "ptr", ")", "else", ":", "# Invalid geometry type.", "raise", "TypeError", "(", "'Improper geometry input type: %s'", "%", "str", "(", "type", "(", "geo_input", ")", ")", ")", "if", "bool", "(", "g", ")", ":", "# Setting the pointer object with a valid pointer.", "self", ".", "ptr", "=", "g", "else", ":", "raise", "GEOSException", "(", "'Could not initialize GEOS Geometry with given input.'", ")", "# Post-initialization setup.", "self", ".", "_post_init", "(", "srid", ")" ]
https://github.com/learningequality/ka-lite/blob/571918ea668013dcf022286ea85eff1c5333fb8b/kalite/packages/bundled/django/contrib/gis/geos/geometry.py#L47-L97
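A brief sketch of the constructor documented above, using the WKT, EWKT, and HEXEWKB inputs its docstring lists; it assumes a Django install with the GEOS library available, and the coordinates are placeholders.

    from django.contrib.gis.geos import GEOSGeometry

    pt = GEOSGeometry('POINT(5 23)')                   # plain WKT, srid stays None
    pt_srid = GEOSGeometry('SRID=4326;POINT(5 23)')    # EWKT, srid parsed out as 4326
    wkb_pt = GEOSGeometry('0101000000000000000000F03F000000000000F03F')  # HEXEWKB for POINT(1 1)
    print(pt.srid, pt_srid.srid)                       # -> None 4326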
CellProfiler/CellProfiler
a90e17e4d258c6f3900238be0f828e0b4bd1b293
cellprofiler/modules/untangleworms.py
python
UntangleWorms.get_graph_from_branching_areas_and_segments
( self, branch_areas_binary, segments_binary )
return Result( branch_areas_binary, counts, i, j, branch_ij, branch_counts, incidence_matrix, incidence_directions, )
Turn branches + segments into a graph branch_areas_binary - binary mask of branch areas segments_binary - binary mask of segments != branch_areas Given two binary images, one containing "branch areas" one containing "segments", returns a structure describing the incidence relations between the branch areas and the segments. Output is same format as get_graph_from_binary(), so for details, see get_graph_from_binary
Turn branches + segments into a graph
[ "Turn", "branches", "+", "segments", "into", "a", "graph" ]
def get_graph_from_branching_areas_and_segments( self, branch_areas_binary, segments_binary ): """Turn branches + segments into a graph branch_areas_binary - binary mask of branch areas segments_binary - binary mask of segments != branch_areas Given two binary images, one containing "branch areas" one containing "segments", returns a structure describing the incidence relations between the branch areas and the segments. Output is same format as get_graph_from_binary(), so for details, see get_graph_from_binary """ branch_areas_labeled, num_branch_areas = scipy.ndimage.label( branch_areas_binary, centrosome.cpmorphology.eight_connect ) i, j, labels, order, distance, num_segments = self.trace_segments( segments_binary ) ooo = numpy.lexsort((order, labels)) i = i[ooo] j = j[ooo] labels = labels[ooo] order = order[ooo] distance = distance[ooo] counts = ( numpy.zeros(0, int) if len(labels) == 0 else numpy.bincount(labels.flatten())[1:] ) branch_ij = numpy.argwhere(branch_areas_binary) if len(branch_ij) > 0: ooo = numpy.lexsort( [ branch_ij[:, 0], branch_ij[:, 1], branch_areas_labeled[branch_ij[:, 0], branch_ij[:, 1]], ] ) branch_ij = branch_ij[ooo] branch_labels = branch_areas_labeled[branch_ij[:, 0], branch_ij[:, 1]] branch_counts = numpy.bincount(branch_areas_labeled.flatten())[1:] else: branch_labels = numpy.zeros(0, int) branch_counts = numpy.zeros(0, int) # # "find" the segment starts # starts = order == 0 start_labels = numpy.zeros(segments_binary.shape, int) start_labels[i[starts], j[starts]] = labels[starts] # # incidence_directions = True for starts # incidence_directions = self.make_incidence_matrix( branch_areas_labeled, num_branch_areas, start_labels, num_segments ) # # Get the incidence matrix for the ends # ends = numpy.cumsum(counts) - 1 end_labels = numpy.zeros(segments_binary.shape, int) end_labels[i[ends], j[ends]] = labels[ends] incidence_matrix = self.make_incidence_matrix( branch_areas_labeled, num_branch_areas, end_labels, num_segments ) incidence_matrix |= incidence_directions class Result(object): """A result graph: image_size: size of input image segments: a list for each segment of a forward (index = 0) and reverse N x 2 array of coordinates of pixels in a segment segment_indexes: the index of label X into segments segment_counts: # of points per segment segment_order: for each pixel, its order when tracing branch_areas: an N x 2 array of branch point coordinates branch_area_indexes: index into the branch areas per branchpoint branch_area_counts: # of points in each branch incidence_matrix: matrix of areas x segments indicating connections incidence_directions: direction of each connection """ def __init__( self, branch_areas_binary, counts, i, j, branch_ij, branch_counts, incidence_matrix, incidence_directions, ): self.image_size = tuple(branch_areas_binary.shape) self.segment_coords = numpy.column_stack((i, j)) self.segment_indexes = numpy.cumsum(counts) - counts self.segment_counts = counts self.segment_order = order self.segments = [ ( self.segment_coords[ self.segment_indexes[i] : ( self.segment_indexes[i] + self.segment_counts[i] ) ], self.segment_coords[ self.segment_indexes[i] : ( self.segment_indexes[i] + self.segment_counts[i] ) ][::-1], ) for i in range(len(counts)) ] self.branch_areas = branch_ij self.branch_area_indexes = numpy.cumsum(branch_counts) - branch_counts self.branch_area_counts = branch_counts self.incidence_matrix = incidence_matrix self.incidence_directions = incidence_directions return Result( branch_areas_binary, counts, i, j, branch_ij, 
branch_counts, incidence_matrix, incidence_directions, )
[ "def", "get_graph_from_branching_areas_and_segments", "(", "self", ",", "branch_areas_binary", ",", "segments_binary", ")", ":", "branch_areas_labeled", ",", "num_branch_areas", "=", "scipy", ".", "ndimage", ".", "label", "(", "branch_areas_binary", ",", "centrosome", ".", "cpmorphology", ".", "eight_connect", ")", "i", ",", "j", ",", "labels", ",", "order", ",", "distance", ",", "num_segments", "=", "self", ".", "trace_segments", "(", "segments_binary", ")", "ooo", "=", "numpy", ".", "lexsort", "(", "(", "order", ",", "labels", ")", ")", "i", "=", "i", "[", "ooo", "]", "j", "=", "j", "[", "ooo", "]", "labels", "=", "labels", "[", "ooo", "]", "order", "=", "order", "[", "ooo", "]", "distance", "=", "distance", "[", "ooo", "]", "counts", "=", "(", "numpy", ".", "zeros", "(", "0", ",", "int", ")", "if", "len", "(", "labels", ")", "==", "0", "else", "numpy", ".", "bincount", "(", "labels", ".", "flatten", "(", ")", ")", "[", "1", ":", "]", ")", "branch_ij", "=", "numpy", ".", "argwhere", "(", "branch_areas_binary", ")", "if", "len", "(", "branch_ij", ")", ">", "0", ":", "ooo", "=", "numpy", ".", "lexsort", "(", "[", "branch_ij", "[", ":", ",", "0", "]", ",", "branch_ij", "[", ":", ",", "1", "]", ",", "branch_areas_labeled", "[", "branch_ij", "[", ":", ",", "0", "]", ",", "branch_ij", "[", ":", ",", "1", "]", "]", ",", "]", ")", "branch_ij", "=", "branch_ij", "[", "ooo", "]", "branch_labels", "=", "branch_areas_labeled", "[", "branch_ij", "[", ":", ",", "0", "]", ",", "branch_ij", "[", ":", ",", "1", "]", "]", "branch_counts", "=", "numpy", ".", "bincount", "(", "branch_areas_labeled", ".", "flatten", "(", ")", ")", "[", "1", ":", "]", "else", ":", "branch_labels", "=", "numpy", ".", "zeros", "(", "0", ",", "int", ")", "branch_counts", "=", "numpy", ".", "zeros", "(", "0", ",", "int", ")", "#", "# \"find\" the segment starts", "#", "starts", "=", "order", "==", "0", "start_labels", "=", "numpy", ".", "zeros", "(", "segments_binary", ".", "shape", ",", "int", ")", "start_labels", "[", "i", "[", "starts", "]", ",", "j", "[", "starts", "]", "]", "=", "labels", "[", "starts", "]", "#", "# incidence_directions = True for starts", "#", "incidence_directions", "=", "self", ".", "make_incidence_matrix", "(", "branch_areas_labeled", ",", "num_branch_areas", ",", "start_labels", ",", "num_segments", ")", "#", "# Get the incidence matrix for the ends", "#", "ends", "=", "numpy", ".", "cumsum", "(", "counts", ")", "-", "1", "end_labels", "=", "numpy", ".", "zeros", "(", "segments_binary", ".", "shape", ",", "int", ")", "end_labels", "[", "i", "[", "ends", "]", ",", "j", "[", "ends", "]", "]", "=", "labels", "[", "ends", "]", "incidence_matrix", "=", "self", ".", "make_incidence_matrix", "(", "branch_areas_labeled", ",", "num_branch_areas", ",", "end_labels", ",", "num_segments", ")", "incidence_matrix", "|=", "incidence_directions", "class", "Result", "(", "object", ")", ":", "\"\"\"A result graph:\n\n image_size: size of input image\n\n segments: a list for each segment of a forward (index = 0) and\n reverse N x 2 array of coordinates of pixels in a segment\n\n segment_indexes: the index of label X into segments\n\n segment_counts: # of points per segment\n\n segment_order: for each pixel, its order when tracing\n\n branch_areas: an N x 2 array of branch point coordinates\n\n branch_area_indexes: index into the branch areas per branchpoint\n\n branch_area_counts: # of points in each branch\n\n incidence_matrix: matrix of areas x segments indicating connections\n\n incidence_directions: direction of 
each connection\n \"\"\"", "def", "__init__", "(", "self", ",", "branch_areas_binary", ",", "counts", ",", "i", ",", "j", ",", "branch_ij", ",", "branch_counts", ",", "incidence_matrix", ",", "incidence_directions", ",", ")", ":", "self", ".", "image_size", "=", "tuple", "(", "branch_areas_binary", ".", "shape", ")", "self", ".", "segment_coords", "=", "numpy", ".", "column_stack", "(", "(", "i", ",", "j", ")", ")", "self", ".", "segment_indexes", "=", "numpy", ".", "cumsum", "(", "counts", ")", "-", "counts", "self", ".", "segment_counts", "=", "counts", "self", ".", "segment_order", "=", "order", "self", ".", "segments", "=", "[", "(", "self", ".", "segment_coords", "[", "self", ".", "segment_indexes", "[", "i", "]", ":", "(", "self", ".", "segment_indexes", "[", "i", "]", "+", "self", ".", "segment_counts", "[", "i", "]", ")", "]", ",", "self", ".", "segment_coords", "[", "self", ".", "segment_indexes", "[", "i", "]", ":", "(", "self", ".", "segment_indexes", "[", "i", "]", "+", "self", ".", "segment_counts", "[", "i", "]", ")", "]", "[", ":", ":", "-", "1", "]", ",", ")", "for", "i", "in", "range", "(", "len", "(", "counts", ")", ")", "]", "self", ".", "branch_areas", "=", "branch_ij", "self", ".", "branch_area_indexes", "=", "numpy", ".", "cumsum", "(", "branch_counts", ")", "-", "branch_counts", "self", ".", "branch_area_counts", "=", "branch_counts", "self", ".", "incidence_matrix", "=", "incidence_matrix", "self", ".", "incidence_directions", "=", "incidence_directions", "return", "Result", "(", "branch_areas_binary", ",", "counts", ",", "i", ",", "j", ",", "branch_ij", ",", "branch_counts", ",", "incidence_matrix", ",", "incidence_directions", ",", ")" ]
https://github.com/CellProfiler/CellProfiler/blob/a90e17e4d258c6f3900238be0f828e0b4bd1b293/cellprofiler/modules/untangleworms.py#L1568-L1714
kubernetes-client/python
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
kubernetes/client/models/v1_ingress_rule.py
python
V1IngressRule.http
(self, http)
Sets the http of this V1IngressRule. :param http: The http of this V1IngressRule. # noqa: E501 :type: V1HTTPIngressRuleValue
Sets the http of this V1IngressRule.
[ "Sets", "the", "http", "of", "this", "V1IngressRule", "." ]
def http(self, http):
    """Sets the http of this V1IngressRule.


    :param http: The http of this V1IngressRule.  # noqa: E501
    :type: V1HTTPIngressRuleValue
    """

    self._http = http
[ "def", "http", "(", "self", ",", "http", ")", ":", "self", ".", "_http", "=", "http" ]
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1_ingress_rule.py#L94-L102
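A small, hedged sketch of the setter above in use; V1HTTPIngressRuleValue is named in the docstring, while the host value and the empty paths list are illustrative placeholders.

    from kubernetes.client import V1HTTPIngressRuleValue, V1IngressRule

    rule = V1IngressRule(host='example.com')
    rule.http = V1HTTPIngressRuleValue(paths=[])   # goes through the setter shown above
    print(rule.http)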
pyannote/pyannote-audio
a448164b4abe56a2c0da11e143648d4fed5967f8
pyannote/audio/pipeline/overlap_detection.py
python
OverlapDetection.to_overlap
(reference: Annotation)
return overlap.support().to_annotation()
Get overlapped speech reference annotation Parameters ---------- reference : Annotation File yielded by pyannote.database protocols. Returns ------- overlap : `pyannote.core.Annotation` Overlapped speech reference.
Get overlapped speech reference annotation
[ "Get", "overlapped", "speech", "reference", "annotation" ]
def to_overlap(reference: Annotation) -> Annotation:
    """Get overlapped speech reference annotation

    Parameters
    ----------
    reference : Annotation
        File yielded by pyannote.database protocols.

    Returns
    -------
    overlap : `pyannote.core.Annotation`
        Overlapped speech reference.
    """

    overlap = Timeline(uri=reference.uri)
    for (s1, t1), (s2, t2) in reference.co_iter(reference):
        l1 = reference[s1, t1]
        l2 = reference[s2, t2]
        if l1 == l2:
            continue
        overlap.add(s1 & s2)

    return overlap.support().to_annotation()
[ "def", "to_overlap", "(", "reference", ":", "Annotation", ")", "->", "Annotation", ":", "overlap", "=", "Timeline", "(", "uri", "=", "reference", ".", "uri", ")", "for", "(", "s1", ",", "t1", ")", ",", "(", "s2", ",", "t2", ")", "in", "reference", ".", "co_iter", "(", "reference", ")", ":", "l1", "=", "reference", "[", "s1", ",", "t1", "]", "l2", "=", "reference", "[", "s2", ",", "t2", "]", "if", "l1", "==", "l2", ":", "continue", "overlap", ".", "add", "(", "s1", "&", "s2", ")", "return", "overlap", ".", "support", "(", ")", ".", "to_annotation", "(", ")" ]
https://github.com/pyannote/pyannote-audio/blob/a448164b4abe56a2c0da11e143648d4fed5967f8/pyannote/audio/pipeline/overlap_detection.py#L152-L173
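A compact sketch of the input and output of to_overlap above, built from the pyannote.core primitives its docstring names; the uri, segment boundaries, and speaker labels are illustrative, and it assumes the method is exposed as the static method its signature suggests.

    from pyannote.core import Annotation, Segment

    reference = Annotation(uri='demo')
    reference[Segment(0, 10)] = 'spk_A'
    reference[Segment(5, 15)] = 'spk_B'     # overlaps spk_A between t=5 and t=10

    overlap = OverlapDetection.to_overlap(reference)
    print(overlap)   # one overlapping region, roughly Segment(5, 10)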
thunlp/OpenNE
d9cbf34aff87c9d09fa58a074907ed40a0e06146
src/openne/gcn/utils.py
python
chebyshev_polynomials
(adj, k)
return sparse_to_tuple(t_k)
Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).
Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).
[ "Calculate", "Chebyshev", "polynomials", "up", "to", "order", "k", ".", "Return", "a", "list", "of", "sparse", "matrices", "(", "tuple", "representation", ")", "." ]
def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))

    adj_normalized = normalize_adj(adj)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    scaled_laplacian = (
        2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])

    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)

    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two

    for i in range(2, k+1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))

    return sparse_to_tuple(t_k)
[ "def", "chebyshev_polynomials", "(", "adj", ",", "k", ")", ":", "print", "(", "\"Calculating Chebyshev polynomials up to order {}...\"", ".", "format", "(", "k", ")", ")", "adj_normalized", "=", "normalize_adj", "(", "adj", ")", "laplacian", "=", "sp", ".", "eye", "(", "adj", ".", "shape", "[", "0", "]", ")", "-", "adj_normalized", "largest_eigval", ",", "_", "=", "eigsh", "(", "laplacian", ",", "1", ",", "which", "=", "'LM'", ")", "scaled_laplacian", "=", "(", "2.", "/", "largest_eigval", "[", "0", "]", ")", "*", "laplacian", "-", "sp", ".", "eye", "(", "adj", ".", "shape", "[", "0", "]", ")", "t_k", "=", "list", "(", ")", "t_k", ".", "append", "(", "sp", ".", "eye", "(", "adj", ".", "shape", "[", "0", "]", ")", ")", "t_k", ".", "append", "(", "scaled_laplacian", ")", "def", "chebyshev_recurrence", "(", "t_k_minus_one", ",", "t_k_minus_two", ",", "scaled_lap", ")", ":", "s_lap", "=", "sp", ".", "csr_matrix", "(", "scaled_lap", ",", "copy", "=", "True", ")", "return", "2", "*", "s_lap", ".", "dot", "(", "t_k_minus_one", ")", "-", "t_k_minus_two", "for", "i", "in", "range", "(", "2", ",", "k", "+", "1", ")", ":", "t_k", ".", "append", "(", "chebyshev_recurrence", "(", "t_k", "[", "-", "1", "]", ",", "t_k", "[", "-", "2", "]", ",", "scaled_laplacian", ")", ")", "return", "sparse_to_tuple", "(", "t_k", ")" ]
https://github.com/thunlp/OpenNE/blob/d9cbf34aff87c9d09fa58a074907ed40a0e06146/src/openne/gcn/utils.py#L135-L156
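A small sketch of calling chebyshev_polynomials above on a toy adjacency matrix; it assumes the normalize_adj and sparse_to_tuple helpers it relies on are in scope from the same utils module (per the record's path), and the 3-node path graph is illustrative.

    import numpy as np
    import scipy.sparse as sp

    # Adjacency matrix of a 3-node path graph: 0 - 1 - 2
    adj = sp.csr_matrix(np.array([[0., 1., 0.],
                                  [1., 0., 1.],
                                  [0., 1., 0.]]))

    t_k = chebyshev_polynomials(adj, 2)   # k+1 = 3 matrices in sparse tuple form
    print(len(t_k))                       # -> 3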
jamiecaesar/securecrt-tools
f3cbb49223a485fc9af86e9799b5c940f19e8027
securecrt_tools/sessions.py
python
CRTSession.close
(self)
A method to close the SecureCRT tab associated with this CRTSession.
A method to close the SecureCRT tab associated with this CRTSession.
[ "A", "method", "to", "close", "the", "SecureCRT", "tab", "associated", "with", "this", "CRTSession", "." ]
def close(self):
    """
    A method to close the SecureCRT tab associated with this CRTSession.
    """
    if self.tab.Index != self.script.crt.GetScriptTab().Index:
        self.tab.Close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "tab", ".", "Index", "!=", "self", ".", "script", ".", "crt", ".", "GetScriptTab", "(", ")", ".", "Index", ":", "self", ".", "tab", ".", "Close", "(", ")" ]
https://github.com/jamiecaesar/securecrt-tools/blob/f3cbb49223a485fc9af86e9799b5c940f19e8027/securecrt_tools/sessions.py#L387-L392
algorhythms/LeetCode
3fb14aeea62a960442e47dfde9f964c7ffce32be
961 N-Repeated Element in Size 2N Array.py
python
Solution.repeatedNTimes
(self, A: List[int])
Counter. Straightforward. O(N) space O(1) space 2N items, N + 1 unique, 1 repeat N times N = 2 a t b t t a b t N = 3 a t b t c t window 2, cannot find the target window 3, can find the target? no [9,5,6,9] window 4, can find * There is a major element in a length 2 subarray, or; * Every length 2 subarray has exactly 1 major element, which means that a length 4 subarray that begins at a major element will have 2 major elements.
Counter. Straightforward. O(N) space
[ "Counter", ".", "Straightforward", ".", "O", "(", "N", ")", "space" ]
def repeatedNTimes(self, A: List[int]) -> int:
    """
    Counter. Straightforward. O(N) space

    O(1) space
    2N items, N + 1 unique, 1 repeat N times

    N = 2
    a t b t
    t a b t

    N = 3
    a t b t c t

    window 2, cannot find the target
    window 3, can find the target? no [9,5,6,9]
    window 4, can find

    * There is a major element in a length 2 subarray, or;
    * Every length 2 subarray has exactly 1 major element, which means that
      a length 4 subarray that begins at a major element will have 2 major
      elements.
    """
    n = len(A)
    for i in range(n - 1):
        for j in range(3):
            if A[i] == A[min(n - 1, i + 1 + j)]:
                return A[i]

    raise
[ "def", "repeatedNTimes", "(", "self", ",", "A", ":", "List", "[", "int", "]", ")", "->", "int", ":", "n", "=", "len", "(", "A", ")", "for", "i", "in", "range", "(", "n", "-", "1", ")", ":", "for", "j", "in", "range", "(", "3", ")", ":", "if", "A", "[", "i", "]", "==", "A", "[", "min", "(", "n", "-", "1", ",", "i", "+", "1", "+", "j", ")", "]", ":", "return", "A", "[", "i", "]", "raise" ]
https://github.com/algorhythms/LeetCode/blob/3fb14aeea62a960442e47dfde9f964c7ffce32be/961 N-Repeated Element in Size 2N Array.py#L32-L61
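The window argument in the docstring can be checked directly; a tiny sketch using the [9,5,6,9] case mentioned there (assumes the Solution class above is in scope, with List imported from typing for its annotation).

    sol = Solution()
    print(sol.repeatedNTimes([9, 5, 6, 9]))        # -> 9, the window-4 case from the docstring
    print(sol.repeatedNTimes([1, 2, 3, 3]))        # -> 3
    print(sol.repeatedNTimes([2, 1, 2, 5, 3, 2]))  # -> 2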
JasonKessler/scattertext
ef33f06d4c31f9d64b551a7ab86bf157aca82644
scattertext/TermDocMatrix.py
python
TermDocMatrix.get_scaled_f_scores
(self, category, scaler_algo=DEFAULT_SCALER_ALGO, beta=DEFAULT_BETA)
return np.array(scores)
Computes scaled-fscores Parameters ---------- category : str category name to score scaler_algo : str Function that scales an array to a range \in [0 and 1]. Use 'percentile', 'normcdf'. Default. beta : float Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Default. Returns ------- np.array of harmonic means of scaled P(word|category) and scaled P(category|word)
Computes scaled-fscores Parameters ---------- category : str category name to score scaler_algo : str Function that scales an array to a range \in [0 and 1]. Use 'percentile', 'normcdf'. Default. beta : float Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Default. Returns ------- np.array of harmonic means of scaled P(word|category) and scaled P(category|word)
[ "Computes", "scaled", "-", "fscores", "Parameters", "----------", "category", ":", "str", "category", "name", "to", "score", "scaler_algo", ":", "str", "Function", "that", "scales", "an", "array", "to", "a", "range", "\\", "in", "[", "0", "and", "1", "]", ".", "Use", "percentile", "normcdf", ".", "Default", ".", "beta", ":", "float", "Beta", "in", "(", "1", "+", "B^2", ")", "*", "(", "Scale", "(", "P", "(", "w|c", "))", "*", "Scale", "(", "P", "(", "c|w", ")))", "/", "(", "B^2", "*", "Scale", "(", "P", "(", "w|c", "))", "+", "Scale", "(", "P", "(", "c|w", ")))", ".", "Default", ".", "Returns", "-------", "np", ".", "array", "of", "harmonic", "means", "of", "scaled", "P", "(", "word|category", ")", "and", "scaled", "P", "(", "category|word", ")" ]
def get_scaled_f_scores(self,
                        category,
                        scaler_algo=DEFAULT_SCALER_ALGO,
                        beta=DEFAULT_BETA):
    ''' Computes scaled-fscores
    Parameters
    ----------
    category : str
        category name to score
    scaler_algo : str
        Function that scales an array to a range \in [0 and 1]. Use 'percentile', 'normcdf'. Default.
    beta : float
        Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Default.
    Returns
    -------
        np.array of harmonic means of scaled P(word|category) and scaled P(category|word)
    '''
    assert beta > 0
    cat_word_counts, not_cat_word_counts = self._get_catetgory_and_non_category_word_counts(category)
    scores = self._get_scaled_f_score_from_counts(cat_word_counts, not_cat_word_counts, scaler_algo, beta)
    return np.array(scores)
[ "def", "get_scaled_f_scores", "(", "self", ",", "category", ",", "scaler_algo", "=", "DEFAULT_SCALER_ALGO", ",", "beta", "=", "DEFAULT_BETA", ")", ":", "assert", "beta", ">", "0", "cat_word_counts", ",", "not_cat_word_counts", "=", "self", ".", "_get_catetgory_and_non_category_word_counts", "(", "category", ")", "scores", "=", "self", ".", "_get_scaled_f_score_from_counts", "(", "cat_word_counts", ",", "not_cat_word_counts", ",", "scaler_algo", ",", "beta", ")", "return", "np", ".", "array", "(", "scores", ")" ]
https://github.com/JasonKessler/scattertext/blob/ef33f06d4c31f9d64b551a7ab86bf157aca82644/scattertext/TermDocMatrix.py#L534-L555
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/pkb.py
python
SetUpPKB
()
Set globals and environment variables for PKB. After SetUpPKB() returns, it should be possible to call PKB functions, like benchmark_spec.Prepare() or benchmark_spec.Run(). SetUpPKB() also modifies the local file system by creating a temp directory and storing new SSH keys.
Set globals and environment variables for PKB.
[ "Set", "globals", "and", "environment", "variables", "for", "PKB", "." ]
def SetUpPKB(): """Set globals and environment variables for PKB. After SetUpPKB() returns, it should be possible to call PKB functions, like benchmark_spec.Prepare() or benchmark_spec.Run(). SetUpPKB() also modifies the local file system by creating a temp directory and storing new SSH keys. """ try: _InitializeRunUri() except errors.Error as e: logging.error(e) sys.exit(1) # Initialize logging. vm_util.GenTempDir() if FLAGS.use_pkb_logging: log_util.ConfigureLogging( stderr_log_level=log_util.LOG_LEVELS[FLAGS.log_level], log_path=vm_util.PrependTempDir(LOG_FILE_NAME), run_uri=FLAGS.run_uri, file_log_level=log_util.LOG_LEVELS[FLAGS.file_log_level]) logging.info('PerfKitBenchmarker version: %s', version.VERSION) # Translate deprecated flags and log all provided flag values. disk.WarnAndTranslateDiskFlags() _LogCommandLineFlags() # Register skip pending runs functionality. RegisterSkipPendingRunsCheck(_SkipPendingRunsFile) # Check environment. if not FLAGS.ignore_package_requirements: requirements.CheckBasicRequirements() for executable in REQUIRED_EXECUTABLES: if not vm_util.ExecutableOnPath(executable): raise errors.Setup.MissingExecutableError( 'Could not find required executable "%s"' % executable) # Check mutually exclusive flags if FLAGS.run_stage_iterations > 1 and FLAGS.run_stage_time > 0: raise errors.Setup.InvalidFlagConfigurationError( 'Flags run_stage_iterations and run_stage_time are mutually exclusive') vm_util.SSHKeyGen() if FLAGS.static_vm_file: with open(FLAGS.static_vm_file) as fp: static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile( fp) events.initialization_complete.send(parsed_flags=FLAGS) benchmark_lookup.SetBenchmarkModuleFunction(benchmark_sets.BenchmarkModule) package_lookup.SetPackageModuleFunction(benchmark_sets.PackageModule) # Update max_concurrent_threads to use at least as many threads as VMs. This # is important for the cluster_boot benchmark where we want to launch the VMs # in parallel. if not FLAGS.max_concurrent_threads: FLAGS.max_concurrent_threads = max( background_tasks.MAX_CONCURRENT_THREADS, FLAGS.num_vms) logging.info('Setting --max_concurrent_threads=%d.', FLAGS.max_concurrent_threads)
[ "def", "SetUpPKB", "(", ")", ":", "try", ":", "_InitializeRunUri", "(", ")", "except", "errors", ".", "Error", "as", "e", ":", "logging", ".", "error", "(", "e", ")", "sys", ".", "exit", "(", "1", ")", "# Initialize logging.", "vm_util", ".", "GenTempDir", "(", ")", "if", "FLAGS", ".", "use_pkb_logging", ":", "log_util", ".", "ConfigureLogging", "(", "stderr_log_level", "=", "log_util", ".", "LOG_LEVELS", "[", "FLAGS", ".", "log_level", "]", ",", "log_path", "=", "vm_util", ".", "PrependTempDir", "(", "LOG_FILE_NAME", ")", ",", "run_uri", "=", "FLAGS", ".", "run_uri", ",", "file_log_level", "=", "log_util", ".", "LOG_LEVELS", "[", "FLAGS", ".", "file_log_level", "]", ")", "logging", ".", "info", "(", "'PerfKitBenchmarker version: %s'", ",", "version", ".", "VERSION", ")", "# Translate deprecated flags and log all provided flag values.", "disk", ".", "WarnAndTranslateDiskFlags", "(", ")", "_LogCommandLineFlags", "(", ")", "# Register skip pending runs functionality.", "RegisterSkipPendingRunsCheck", "(", "_SkipPendingRunsFile", ")", "# Check environment.", "if", "not", "FLAGS", ".", "ignore_package_requirements", ":", "requirements", ".", "CheckBasicRequirements", "(", ")", "for", "executable", "in", "REQUIRED_EXECUTABLES", ":", "if", "not", "vm_util", ".", "ExecutableOnPath", "(", "executable", ")", ":", "raise", "errors", ".", "Setup", ".", "MissingExecutableError", "(", "'Could not find required executable \"%s\"'", "%", "executable", ")", "# Check mutually exclusive flags", "if", "FLAGS", ".", "run_stage_iterations", ">", "1", "and", "FLAGS", ".", "run_stage_time", ">", "0", ":", "raise", "errors", ".", "Setup", ".", "InvalidFlagConfigurationError", "(", "'Flags run_stage_iterations and run_stage_time are mutually exclusive'", ")", "vm_util", ".", "SSHKeyGen", "(", ")", "if", "FLAGS", ".", "static_vm_file", ":", "with", "open", "(", "FLAGS", ".", "static_vm_file", ")", "as", "fp", ":", "static_virtual_machine", ".", "StaticVirtualMachine", ".", "ReadStaticVirtualMachineFile", "(", "fp", ")", "events", ".", "initialization_complete", ".", "send", "(", "parsed_flags", "=", "FLAGS", ")", "benchmark_lookup", ".", "SetBenchmarkModuleFunction", "(", "benchmark_sets", ".", "BenchmarkModule", ")", "package_lookup", ".", "SetPackageModuleFunction", "(", "benchmark_sets", ".", "PackageModule", ")", "# Update max_concurrent_threads to use at least as many threads as VMs. This", "# is important for the cluster_boot benchmark where we want to launch the VMs", "# in parallel.", "if", "not", "FLAGS", ".", "max_concurrent_threads", ":", "FLAGS", ".", "max_concurrent_threads", "=", "max", "(", "background_tasks", ".", "MAX_CONCURRENT_THREADS", ",", "FLAGS", ".", "num_vms", ")", "logging", ".", "info", "(", "'Setting --max_concurrent_threads=%d.'", ",", "FLAGS", ".", "max_concurrent_threads", ")" ]
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/pkb.py#L1362-L1428
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
util/cmdln.py
python
RawCmdln.do_help
(self, argv)
${cmd_name}: give detailed help on a specific sub-command Usage: ${name} help [COMMAND]
${cmd_name}: give detailed help on a specific sub-command
[ "$", "{", "cmd_name", "}", ":", "give", "detailed", "help", "on", "a", "specific", "sub", "-", "command" ]
def do_help(self, argv): """${cmd_name}: give detailed help on a specific sub-command Usage: ${name} help [COMMAND] """ if len(argv) > 1: # asking for help on a particular command doc = None cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1] if not cmdname: return self.helpdefault(argv[1], False) else: helpfunc = getattr(self, "help_"+cmdname, None) if helpfunc: doc = helpfunc() else: handler = self._get_cmd_handler(cmdname) if handler: doc = handler.__doc__ if doc is None: return self.helpdefault(argv[1], handler != None) else: # bare "help" command doc = self.__class__.__doc__ # try class docstring if doc is None: # Try to provide some reasonable useful default help. if self.cmdlooping: prefix = "" else: prefix = self.name+' ' doc = """Usage: %sCOMMAND [ARGS...] %shelp [COMMAND] ${option_list} ${command_list} ${help_list} """ % (prefix, prefix) cmdname = None if doc: # *do* have help content, massage and print that doc = self._help_reindent(doc) doc = self._help_preprocess(doc, cmdname) doc = doc.rstrip() + '\n' # trim down trailing space self.stdout.write(self._str(doc)) self.stdout.flush()
[ "def", "do_help", "(", "self", ",", "argv", ")", ":", "if", "len", "(", "argv", ")", ">", "1", ":", "# asking for help on a particular command", "doc", "=", "None", "cmdname", "=", "self", ".", "_get_canonical_cmd_name", "(", "argv", "[", "1", "]", ")", "or", "argv", "[", "1", "]", "if", "not", "cmdname", ":", "return", "self", ".", "helpdefault", "(", "argv", "[", "1", "]", ",", "False", ")", "else", ":", "helpfunc", "=", "getattr", "(", "self", ",", "\"help_\"", "+", "cmdname", ",", "None", ")", "if", "helpfunc", ":", "doc", "=", "helpfunc", "(", ")", "else", ":", "handler", "=", "self", ".", "_get_cmd_handler", "(", "cmdname", ")", "if", "handler", ":", "doc", "=", "handler", ".", "__doc__", "if", "doc", "is", "None", ":", "return", "self", ".", "helpdefault", "(", "argv", "[", "1", "]", ",", "handler", "!=", "None", ")", "else", ":", "# bare \"help\" command", "doc", "=", "self", ".", "__class__", ".", "__doc__", "# try class docstring", "if", "doc", "is", "None", ":", "# Try to provide some reasonable useful default help.", "if", "self", ".", "cmdlooping", ":", "prefix", "=", "\"\"", "else", ":", "prefix", "=", "self", ".", "name", "+", "' '", "doc", "=", "\"\"\"Usage:\n %sCOMMAND [ARGS...]\n %shelp [COMMAND]\n\n ${option_list}\n ${command_list}\n ${help_list}\n \"\"\"", "%", "(", "prefix", ",", "prefix", ")", "cmdname", "=", "None", "if", "doc", ":", "# *do* have help content, massage and print that", "doc", "=", "self", ".", "_help_reindent", "(", "doc", ")", "doc", "=", "self", ".", "_help_preprocess", "(", "doc", ",", "cmdname", ")", "doc", "=", "doc", ".", "rstrip", "(", ")", "+", "'\\n'", "# trim down trailing space", "self", ".", "stdout", ".", "write", "(", "self", ".", "_str", "(", "doc", ")", ")", "self", ".", "stdout", ".", "flush", "(", ")" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/util/cmdln.py#L478-L520
donnemartin/gitsome
d7c57abc7cb66e9c910a844f15d4536866da3310
xonsh/tokenize.py
python
untokenize
(iterable)
return out
Transform tokens back into Python source code. It returns a bytes object, encoded using the ENCODING token, which is the first token sequence output by tokenize. Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor. Round-trip invariant for full input: Untokenized source will match input source exactly Round-trip invariant for limited intput: # Output bytes will tokenize the back to the input t1 = [tok[:2] for tok in tokenize(f.readline)] newcode = untokenize(t1) readline = BytesIO(newcode).readline t2 = [tok[:2] for tok in tokenize(readline)] assert t1 == t2
Transform tokens back into Python source code. It returns a bytes object, encoded using the ENCODING token, which is the first token sequence output by tokenize.
[ "Transform", "tokens", "back", "into", "Python", "source", "code", ".", "It", "returns", "a", "bytes", "object", "encoded", "using", "the", "ENCODING", "token", "which", "is", "the", "first", "token", "sequence", "output", "by", "tokenize", "." ]
def untokenize(iterable):
    """Transform tokens back into Python source code. It returns a bytes object,
    encoded using the ENCODING token, which is the first token sequence output
    by tokenize.

    Each element returned by the iterable must be a token sequence with at
    least two elements, a token number and token value.  If only two tokens
    are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited intput:
        # Output bytes will tokenize the back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
[ "def", "untokenize", "(", "iterable", ")", ":", "ut", "=", "Untokenizer", "(", ")", "out", "=", "ut", ".", "untokenize", "(", "iterable", ")", "if", "ut", ".", "encoding", "is", "not", "None", ":", "out", "=", "out", ".", "encode", "(", "ut", ".", "encoding", ")", "return", "out" ]
https://github.com/donnemartin/gitsome/blob/d7c57abc7cb66e9c910a844f15d4536866da3310/xonsh/tokenize.py#L705-L729
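The limited-input round trip described in the docstring can be reproduced with the standard-library tokenize module, which exposes the same tokenize/untokenize pair as this vendored copy; the source snippet is illustrative.

    from io import BytesIO
    from tokenize import tokenize, untokenize

    source = b'x = 1 + 2\nprint(x)\n'
    t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
    newcode = untokenize(t1)                       # bytes, encoded via the ENCODING token
    t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
    assert t1 == t2                                # round-trip invariant from the docstring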
1012598167/flask_mongodb_game
60c7e0351586656ec38f851592886338e50b4110
python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/appdirs.py
python
user_cache_dir
(appname=None, appauthor=None, version=None, opinion=True)
return path
r"""Return full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Caches/<AppName> Unix: ~/.cache/<AppName> (XDG default) Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option.
r"""Return full path to the user-specific cache dir for this application.
[ "r", "Return", "full", "path", "to", "the", "user", "-", "specific", "cache", "dir", "for", "this", "application", "." ]
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Caches/<AppName> Unix: ~/.cache/<AppName> (XDG default) Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. """ if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) if opinion: path = os.path.join(path, "Cache") elif system == 'darwin': path = os.path.expanduser('~/Library/Caches') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path
[ "def", "user_cache_dir", "(", "appname", "=", "None", ",", "appauthor", "=", "None", ",", "version", "=", "None", ",", "opinion", "=", "True", ")", ":", "if", "system", "==", "\"win32\"", ":", "if", "appauthor", "is", "None", ":", "appauthor", "=", "appname", "path", "=", "os", ".", "path", ".", "normpath", "(", "_get_win_folder", "(", "\"CSIDL_LOCAL_APPDATA\"", ")", ")", "if", "appname", ":", "if", "appauthor", "is", "not", "False", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "appauthor", ",", "appname", ")", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "appname", ")", "if", "opinion", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "\"Cache\"", ")", "elif", "system", "==", "'darwin'", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "'~/Library/Caches'", ")", "if", "appname", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "appname", ")", "else", ":", "path", "=", "os", ".", "getenv", "(", "'XDG_CACHE_HOME'", ",", "os", ".", "path", ".", "expanduser", "(", "'~/.cache'", ")", ")", "if", "appname", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "appname", ")", "if", "appname", "and", "version", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "version", ")", "return", "path" ]
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/appdirs.py#L257-L311
lsbardel/python-stdnet
78db5320bdedc3f28c5e4f38cda13a4469e35db7
stdnet/odm/related.py
python
Many2ManyThroughModel
(field)
Create a Many2Many through model with two foreign key fields and a CompositeFieldId depending on the two foreign keys.
Create a Many2Many through model with two foreign key fields and a CompositeFieldId depending on the two foreign keys.
[ "Create", "a", "Many2Many", "through", "model", "with", "two", "foreign", "key", "fields", "and", "a", "CompositeFieldId", "depending", "on", "the", "two", "foreign", "keys", "." ]
def Many2ManyThroughModel(field): '''Create a Many2Many through model with two foreign key fields and a CompositeFieldId depending on the two foreign keys.''' from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField name_model = field.model._meta.name name_relmodel = field.relmodel._meta.name # The two models are the same. if name_model == name_relmodel: name_relmodel += '2' through = field.through # Create the through model if through is None: name = '{0}_{1}'.format(name_model, name_relmodel) class Meta: app_label = field.model._meta.app_label through = ModelType(name, (StdModel,), {'Meta': Meta}) field.through = through # The first field field1 = ForeignKey(field.model, related_name=field.name, related_manager_class=makeMany2ManyRelatedManager( field.relmodel, name_model, name_relmodel) ) field1.register_with_model(name_model, through) # The second field field2 = ForeignKey(field.relmodel, related_name=field.related_name, related_manager_class=makeMany2ManyRelatedManager( field.model, name_relmodel, name_model) ) field2.register_with_model(name_relmodel, through) pk = CompositeIdField(name_model, name_relmodel) pk.register_with_model('id', through)
[ "def", "Many2ManyThroughModel", "(", "field", ")", ":", "from", "stdnet", ".", "odm", "import", "ModelType", ",", "StdModel", ",", "ForeignKey", ",", "CompositeIdField", "name_model", "=", "field", ".", "model", ".", "_meta", ".", "name", "name_relmodel", "=", "field", ".", "relmodel", ".", "_meta", ".", "name", "# The two models are the same.", "if", "name_model", "==", "name_relmodel", ":", "name_relmodel", "+=", "'2'", "through", "=", "field", ".", "through", "# Create the through model", "if", "through", "is", "None", ":", "name", "=", "'{0}_{1}'", ".", "format", "(", "name_model", ",", "name_relmodel", ")", "class", "Meta", ":", "app_label", "=", "field", ".", "model", ".", "_meta", ".", "app_label", "through", "=", "ModelType", "(", "name", ",", "(", "StdModel", ",", ")", ",", "{", "'Meta'", ":", "Meta", "}", ")", "field", ".", "through", "=", "through", "# The first field", "field1", "=", "ForeignKey", "(", "field", ".", "model", ",", "related_name", "=", "field", ".", "name", ",", "related_manager_class", "=", "makeMany2ManyRelatedManager", "(", "field", ".", "relmodel", ",", "name_model", ",", "name_relmodel", ")", ")", "field1", ".", "register_with_model", "(", "name_model", ",", "through", ")", "# The second field", "field2", "=", "ForeignKey", "(", "field", ".", "relmodel", ",", "related_name", "=", "field", ".", "related_name", ",", "related_manager_class", "=", "makeMany2ManyRelatedManager", "(", "field", ".", "model", ",", "name_relmodel", ",", "name_model", ")", ")", "field2", ".", "register_with_model", "(", "name_relmodel", ",", "through", ")", "pk", "=", "CompositeIdField", "(", "name_model", ",", "name_relmodel", ")", "pk", ".", "register_with_model", "(", "'id'", ",", "through", ")" ]
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L77-L114
SiCKRAGE/SiCKRAGE
45fb67c0c730fc22a34c695b5a62b11970621c53
sickrage/libs/adba/aniDBresponses.py
python
UptimeResponse.__init__
(self, cmd, restag, rescode, resstr, datalines)
attributes: data: uptime - udpserver uptime in milliseconds
attributes:
[ "attributes", ":" ]
def __init__(self, cmd, restag, rescode, resstr, datalines): """ attributes: data: uptime - udpserver uptime in milliseconds """ Response.__init__(self, cmd, restag, rescode, resstr, datalines) self.codestr = 'UPTIME' self.codehead = () self.codetail = ('uptime',) self.coderep = ()
[ "def", "__init__", "(", "self", ",", "cmd", ",", "restag", ",", "rescode", ",", "resstr", ",", "datalines", ")", ":", "Response", ".", "__init__", "(", "self", ",", "cmd", ",", "restag", ",", "rescode", ",", "resstr", ",", "datalines", ")", "self", ".", "codestr", "=", "'UPTIME'", "self", ".", "codehead", "=", "(", ")", "self", ".", "codetail", "=", "(", "'uptime'", ",", ")", "self", ".", "coderep", "=", "(", ")" ]
https://github.com/SiCKRAGE/SiCKRAGE/blob/45fb67c0c730fc22a34c695b5a62b11970621c53/sickrage/libs/adba/aniDBresponses.py#L207-L219
emesene/emesene
4548a4098310e21b16437bb36223a7f632a4f7bc
emesene/e3/xmpp/SleekXMPP/sleekxmpp/plugins/base.py
python
PluginManager.__iter__
(self)
return self._plugins.__iter__()
Return an iterator over the set of enabled plugins.
Return an iterator over the set of enabled plugins.
[ "Return", "an", "iterator", "over", "the", "set", "of", "enabled", "plugins", "." ]
def __iter__(self): """Return an iterator over the set of enabled plugins.""" return self._plugins.__iter__()
[ "def", "__iter__", "(", "self", ")", ":", "return", "self", ".", "_plugins", ".", "__iter__", "(", ")" ]
https://github.com/emesene/emesene/blob/4548a4098310e21b16437bb36223a7f632a4f7bc/emesene/e3/xmpp/SleekXMPP/sleekxmpp/plugins/base.py#L251-L253
reviewboard/rbtools
b4838a640b458641ffd233093ae65971d0b4d529
rbtools/clients/__init__.py
python
scan_usable_client
(config, options, client_name=None)
return repository_info, tool
Scan for a usable SCMClient. Args: config (dict): The loaded user config. options (argparse.Namespace): The parsed command line arguments. client_name (unicode, optional): A specific client name, which can come from the configuration. This can be used to disambiguate if there are nested repositories, or to speed up detection. Returns: tuple: A 2-tuple, containing the repository info structure and the tool instance.
Scan for a usable SCMClient.
[ "Scan", "for", "a", "usable", "SCMClient", "." ]
def scan_usable_client(config, options, client_name=None): """Scan for a usable SCMClient. Args: config (dict): The loaded user config. options (argparse.Namespace): The parsed command line arguments. client_name (unicode, optional): A specific client name, which can come from the configuration. This can be used to disambiguate if there are nested repositories, or to speed up detection. Returns: tuple: A 2-tuple, containing the repository info structure and the tool instance. """ repository_info = None tool = None # TODO: We should only load all of the scm clients if the client_name # isn't provided. if SCMCLIENTS is None: load_scmclients(config, options) if client_name: if client_name not in SCMCLIENTS: logging.error('The provided repository type "%s" is invalid.', client_name) sys.exit(1) else: scmclients = { client_name: SCMCLIENTS[client_name] } else: scmclients = SCMCLIENTS # First go through and see if any repositories are configured in # remote-only mode. For example, SVN can post changes purely with a remote # URL and no working directory. for name, tool in six.iteritems(scmclients): if tool.is_remote_only(): break else: tool = None # Now scan through the repositories to find any local working directories. # If there are multiple repositories which appear to be active in the CWD, # choose the deepest and emit a warning. if tool is None: candidate_repos = [] for name, tool in six.iteritems(scmclients): logging.debug('Checking for a %s repository...', tool.name) local_path = tool.get_local_path() if local_path: candidate_repos.append((local_path, tool)) if len(candidate_repos) == 1: tool = candidate_repos[0][1] elif candidate_repos: logging.debug('Finding deepest repository of multiple matching ' 'repository types.') deepest_repo_len = 0 deepest_repo_tool = None deepest_local_path = None found_multiple = False for local_path, tool in candidate_repos: if len(os.path.normpath(local_path)) > deepest_repo_len: if deepest_repo_tool is not None: found_multiple = True deepest_repo_len = len(local_path) deepest_repo_tool = tool deepest_local_path = local_path if found_multiple: logging.warn('Multiple matching repositories were found. ' 'Using %s repository at %s.', tool.name, deepest_local_path) logging.warn('Define REPOSITORY_TYPE in .reviewboardrc if ' 'you wish to use a different repository.') tool = deepest_repo_tool repository_info = tool and tool.get_repository_info() if repository_info is None: if client_name: logging.error('The provided repository type was not detected ' 'in the current directory.') elif getattr(options, 'repository_url', None): logging.error('No supported repository could be accessed at ' 'the supplied url.') else: logging.error('The current directory does not contain a checkout ' 'from a supported source code repository.') sys.exit(1) # Verify that options specific to an SCM Client have not been mis-used. if (getattr(options, 'change_only', False) and not tool.supports_changesets): logging.error('The --change-only option is not valid for the ' 'current SCM client.\n') sys.exit(1) if (getattr(options, 'parent_branch', None) and not tool.supports_parent_diffs): logging.error('The --parent option is not valid for the ' 'current SCM client.') sys.exit(1) from rbtools.clients.perforce import PerforceClient if (not isinstance(tool, PerforceClient) and (getattr(options, 'p4_client', None) or getattr(options, 'p4_port', None))): logging.error('The --p4-client and --p4-port options are not ' 'valid for the current SCM client.\n') sys.exit(1) return repository_info, tool
[ "def", "scan_usable_client", "(", "config", ",", "options", ",", "client_name", "=", "None", ")", ":", "repository_info", "=", "None", "tool", "=", "None", "# TODO: We should only load all of the scm clients if the client_name", "# isn't provided.", "if", "SCMCLIENTS", "is", "None", ":", "load_scmclients", "(", "config", ",", "options", ")", "if", "client_name", ":", "if", "client_name", "not", "in", "SCMCLIENTS", ":", "logging", ".", "error", "(", "'The provided repository type \"%s\" is invalid.'", ",", "client_name", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "scmclients", "=", "{", "client_name", ":", "SCMCLIENTS", "[", "client_name", "]", "}", "else", ":", "scmclients", "=", "SCMCLIENTS", "# First go through and see if any repositories are configured in", "# remote-only mode. For example, SVN can post changes purely with a remote", "# URL and no working directory.", "for", "name", ",", "tool", "in", "six", ".", "iteritems", "(", "scmclients", ")", ":", "if", "tool", ".", "is_remote_only", "(", ")", ":", "break", "else", ":", "tool", "=", "None", "# Now scan through the repositories to find any local working directories.", "# If there are multiple repositories which appear to be active in the CWD,", "# choose the deepest and emit a warning.", "if", "tool", "is", "None", ":", "candidate_repos", "=", "[", "]", "for", "name", ",", "tool", "in", "six", ".", "iteritems", "(", "scmclients", ")", ":", "logging", ".", "debug", "(", "'Checking for a %s repository...'", ",", "tool", ".", "name", ")", "local_path", "=", "tool", ".", "get_local_path", "(", ")", "if", "local_path", ":", "candidate_repos", ".", "append", "(", "(", "local_path", ",", "tool", ")", ")", "if", "len", "(", "candidate_repos", ")", "==", "1", ":", "tool", "=", "candidate_repos", "[", "0", "]", "[", "1", "]", "elif", "candidate_repos", ":", "logging", ".", "debug", "(", "'Finding deepest repository of multiple matching '", "'repository types.'", ")", "deepest_repo_len", "=", "0", "deepest_repo_tool", "=", "None", "deepest_local_path", "=", "None", "found_multiple", "=", "False", "for", "local_path", ",", "tool", "in", "candidate_repos", ":", "if", "len", "(", "os", ".", "path", ".", "normpath", "(", "local_path", ")", ")", ">", "deepest_repo_len", ":", "if", "deepest_repo_tool", "is", "not", "None", ":", "found_multiple", "=", "True", "deepest_repo_len", "=", "len", "(", "local_path", ")", "deepest_repo_tool", "=", "tool", "deepest_local_path", "=", "local_path", "if", "found_multiple", ":", "logging", ".", "warn", "(", "'Multiple matching repositories were found. 
'", "'Using %s repository at %s.'", ",", "tool", ".", "name", ",", "deepest_local_path", ")", "logging", ".", "warn", "(", "'Define REPOSITORY_TYPE in .reviewboardrc if '", "'you wish to use a different repository.'", ")", "tool", "=", "deepest_repo_tool", "repository_info", "=", "tool", "and", "tool", ".", "get_repository_info", "(", ")", "if", "repository_info", "is", "None", ":", "if", "client_name", ":", "logging", ".", "error", "(", "'The provided repository type was not detected '", "'in the current directory.'", ")", "elif", "getattr", "(", "options", ",", "'repository_url'", ",", "None", ")", ":", "logging", ".", "error", "(", "'No supported repository could be accessed at '", "'the supplied url.'", ")", "else", ":", "logging", ".", "error", "(", "'The current directory does not contain a checkout '", "'from a supported source code repository.'", ")", "sys", ".", "exit", "(", "1", ")", "# Verify that options specific to an SCM Client have not been mis-used.", "if", "(", "getattr", "(", "options", ",", "'change_only'", ",", "False", ")", "and", "not", "tool", ".", "supports_changesets", ")", ":", "logging", ".", "error", "(", "'The --change-only option is not valid for the '", "'current SCM client.\\n'", ")", "sys", ".", "exit", "(", "1", ")", "if", "(", "getattr", "(", "options", ",", "'parent_branch'", ",", "None", ")", "and", "not", "tool", ".", "supports_parent_diffs", ")", ":", "logging", ".", "error", "(", "'The --parent option is not valid for the '", "'current SCM client.'", ")", "sys", ".", "exit", "(", "1", ")", "from", "rbtools", ".", "clients", ".", "perforce", "import", "PerforceClient", "if", "(", "not", "isinstance", "(", "tool", ",", "PerforceClient", ")", "and", "(", "getattr", "(", "options", ",", "'p4_client'", ",", "None", ")", "or", "getattr", "(", "options", ",", "'p4_port'", ",", "None", ")", ")", ")", ":", "logging", ".", "error", "(", "'The --p4-client and --p4-port options are not '", "'valid for the current SCM client.\\n'", ")", "sys", ".", "exit", "(", "1", ")", "return", "repository_info", ",", "tool" ]
https://github.com/reviewboard/rbtools/blob/b4838a640b458641ffd233093ae65971d0b4d529/rbtools/clients/__init__.py#L994-L1122
veusz/veusz
5a1e2af5f24df0eb2a2842be51f2997c4999c7fb
veusz/setting/setting.py
python
Setting.path
(self)
return '/'.join(path)
Return full path of setting.
Return full path of setting.
[ "Return", "full", "path", "of", "setting", "." ]
def path(self): """Return full path of setting.""" path = [] obj = self while obj is not None: # logic easier to understand here # do not add settings name for settings of widget if not obj.iswidget and obj.parent.iswidget: pass else: if obj.name == '/': path.insert(0, '') else: path.insert(0, obj.name) obj = obj.parent return '/'.join(path)
[ "def", "path", "(", "self", ")", ":", "path", "=", "[", "]", "obj", "=", "self", "while", "obj", "is", "not", "None", ":", "# logic easier to understand here", "# do not add settings name for settings of widget", "if", "not", "obj", ".", "iswidget", "and", "obj", ".", "parent", ".", "iswidget", ":", "pass", "else", ":", "if", "obj", ".", "name", "==", "'/'", ":", "path", ".", "insert", "(", "0", ",", "''", ")", "else", ":", "path", ".", "insert", "(", "0", ",", "obj", ".", "name", ")", "obj", "=", "obj", ".", "parent", "return", "'/'", ".", "join", "(", "path", ")" ]
https://github.com/veusz/veusz/blob/5a1e2af5f24df0eb2a2842be51f2997c4999c7fb/veusz/setting/setting.py#L165-L180
Confusezius/Deep-Metric-Learning-Baselines
60772745e28bc90077831bb4c9f07a233e602797
losses.py
python
TripletLoss.__init__
(self, margin=1, sampling_method='random')
Basic Triplet Loss as proposed in 'FaceNet: A Unified Embedding for Face Recognition and Clustering' Args: margin: float, Triplet Margin - Ensures that positives aren't placed arbitrarily close to the anchor. Similarl, negatives should not be placed arbitrarily far away. sampling_method: Method to use for sampling training triplets. Used for the TupleSampler-class.
Basic Triplet Loss as proposed in 'FaceNet: A Unified Embedding for Face Recognition and Clustering' Args: margin: float, Triplet Margin - Ensures that positives aren't placed arbitrarily close to the anchor. Similarl, negatives should not be placed arbitrarily far away. sampling_method: Method to use for sampling training triplets. Used for the TupleSampler-class.
[ "Basic", "Triplet", "Loss", "as", "proposed", "in", "FaceNet", ":", "A", "Unified", "Embedding", "for", "Face", "Recognition", "and", "Clustering", "Args", ":", "margin", ":", "float", "Triplet", "Margin", "-", "Ensures", "that", "positives", "aren", "t", "placed", "arbitrarily", "close", "to", "the", "anchor", ".", "Similarl", "negatives", "should", "not", "be", "placed", "arbitrarily", "far", "away", ".", "sampling_method", ":", "Method", "to", "use", "for", "sampling", "training", "triplets", ".", "Used", "for", "the", "TupleSampler", "-", "class", "." ]
def __init__(self, margin=1, sampling_method='random'): """ Basic Triplet Loss as proposed in 'FaceNet: A Unified Embedding for Face Recognition and Clustering' Args: margin: float, Triplet Margin - Ensures that positives aren't placed arbitrarily close to the anchor. Similarl, negatives should not be placed arbitrarily far away. sampling_method: Method to use for sampling training triplets. Used for the TupleSampler-class. """ super(TripletLoss, self).__init__() self.margin = margin self.sampler = TupleSampler(method=sampling_method)
[ "def", "__init__", "(", "self", ",", "margin", "=", "1", ",", "sampling_method", "=", "'random'", ")", ":", "super", "(", "TripletLoss", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "margin", "=", "margin", "self", ".", "sampler", "=", "TupleSampler", "(", "method", "=", "sampling_method", ")" ]
https://github.com/Confusezius/Deep-Metric-Learning-Baselines/blob/60772745e28bc90077831bb4c9f07a233e602797/losses.py#L316-L326
IntelAI/models
1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c
models/language_modeling/tensorflow/bert_large/training/fp32/create_pretraining_data.py
python
write_instance_to_example_files
(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files)
Create TF example files from `TrainingInstance`s.
Create TF example files from `TrainingInstance`s.
[ "Create", "TF", "example", "files", "from", "TrainingInstance", "s", "." ]
def write_instance_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files): """Create TF example files from `TrainingInstance`s.""" writers = [] for output_file in output_files: writers.append(tf.io.TFRecordWriter(output_file)) writer_index = 0 total_written = 0 for (inst_index, instance) in enumerate(instances): input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) input_mask = [1] * len(input_ids) segment_ids = list(instance.segment_ids) assert len(input_ids) <= max_seq_length while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length masked_lm_positions = list(instance.masked_lm_positions) masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) masked_lm_weights = [1.0] * len(masked_lm_ids) while len(masked_lm_positions) < max_predictions_per_seq: masked_lm_positions.append(0) masked_lm_ids.append(0) masked_lm_weights.append(0.0) next_sentence_label = 1 if instance.is_random_next else 0 features = collections.OrderedDict() features["input_ids"] = create_int_feature(input_ids) features["input_mask"] = create_int_feature(input_mask) features["segment_ids"] = create_int_feature(segment_ids) features["masked_lm_positions"] = create_int_feature(masked_lm_positions) features["masked_lm_ids"] = create_int_feature(masked_lm_ids) features["masked_lm_weights"] = create_float_feature(masked_lm_weights) features["next_sentence_labels"] = create_int_feature([next_sentence_label]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writers[writer_index].write(tf_example.SerializeToString()) writer_index = (writer_index + 1) % len(writers) total_written += 1 if inst_index < 20: tf.compat.v1.logging.info("*** Example ***") tf.compat.v1.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in instance.tokens])) for feature_name in features.keys(): feature = features[feature_name] values = [] if feature.int64_list.value: values = feature.int64_list.value elif feature.float_list.value: values = feature.float_list.value tf.compat.v1.logging.info( "%s: %s" % (feature_name, " ".join([str(x) for x in values]))) for writer in writers: writer.close() tf.compat.v1.logging.info("Wrote %d total instances", total_written)
[ "def", "write_instance_to_example_files", "(", "instances", ",", "tokenizer", ",", "max_seq_length", ",", "max_predictions_per_seq", ",", "output_files", ")", ":", "writers", "=", "[", "]", "for", "output_file", "in", "output_files", ":", "writers", ".", "append", "(", "tf", ".", "io", ".", "TFRecordWriter", "(", "output_file", ")", ")", "writer_index", "=", "0", "total_written", "=", "0", "for", "(", "inst_index", ",", "instance", ")", "in", "enumerate", "(", "instances", ")", ":", "input_ids", "=", "tokenizer", ".", "convert_tokens_to_ids", "(", "instance", ".", "tokens", ")", "input_mask", "=", "[", "1", "]", "*", "len", "(", "input_ids", ")", "segment_ids", "=", "list", "(", "instance", ".", "segment_ids", ")", "assert", "len", "(", "input_ids", ")", "<=", "max_seq_length", "while", "len", "(", "input_ids", ")", "<", "max_seq_length", ":", "input_ids", ".", "append", "(", "0", ")", "input_mask", ".", "append", "(", "0", ")", "segment_ids", ".", "append", "(", "0", ")", "assert", "len", "(", "input_ids", ")", "==", "max_seq_length", "assert", "len", "(", "input_mask", ")", "==", "max_seq_length", "assert", "len", "(", "segment_ids", ")", "==", "max_seq_length", "masked_lm_positions", "=", "list", "(", "instance", ".", "masked_lm_positions", ")", "masked_lm_ids", "=", "tokenizer", ".", "convert_tokens_to_ids", "(", "instance", ".", "masked_lm_labels", ")", "masked_lm_weights", "=", "[", "1.0", "]", "*", "len", "(", "masked_lm_ids", ")", "while", "len", "(", "masked_lm_positions", ")", "<", "max_predictions_per_seq", ":", "masked_lm_positions", ".", "append", "(", "0", ")", "masked_lm_ids", ".", "append", "(", "0", ")", "masked_lm_weights", ".", "append", "(", "0.0", ")", "next_sentence_label", "=", "1", "if", "instance", ".", "is_random_next", "else", "0", "features", "=", "collections", ".", "OrderedDict", "(", ")", "features", "[", "\"input_ids\"", "]", "=", "create_int_feature", "(", "input_ids", ")", "features", "[", "\"input_mask\"", "]", "=", "create_int_feature", "(", "input_mask", ")", "features", "[", "\"segment_ids\"", "]", "=", "create_int_feature", "(", "segment_ids", ")", "features", "[", "\"masked_lm_positions\"", "]", "=", "create_int_feature", "(", "masked_lm_positions", ")", "features", "[", "\"masked_lm_ids\"", "]", "=", "create_int_feature", "(", "masked_lm_ids", ")", "features", "[", "\"masked_lm_weights\"", "]", "=", "create_float_feature", "(", "masked_lm_weights", ")", "features", "[", "\"next_sentence_labels\"", "]", "=", "create_int_feature", "(", "[", "next_sentence_label", "]", ")", "tf_example", "=", "tf", ".", "train", ".", "Example", "(", "features", "=", "tf", ".", "train", ".", "Features", "(", "feature", "=", "features", ")", ")", "writers", "[", "writer_index", "]", ".", "write", "(", "tf_example", ".", "SerializeToString", "(", ")", ")", "writer_index", "=", "(", "writer_index", "+", "1", ")", "%", "len", "(", "writers", ")", "total_written", "+=", "1", "if", "inst_index", "<", "20", ":", "tf", ".", "compat", ".", "v1", ".", "logging", ".", "info", "(", "\"*** Example ***\"", ")", "tf", ".", "compat", ".", "v1", ".", "logging", ".", "info", "(", "\"tokens: %s\"", "%", "\" \"", ".", "join", "(", "[", "tokenization", ".", "printable_text", "(", "x", ")", "for", "x", "in", "instance", ".", "tokens", "]", ")", ")", "for", "feature_name", "in", "features", ".", "keys", "(", ")", ":", "feature", "=", "features", "[", "feature_name", "]", "values", "=", "[", "]", "if", "feature", ".", "int64_list", ".", "value", ":", "values", "=", "feature", ".", 
"int64_list", ".", "value", "elif", "feature", ".", "float_list", ".", "value", ":", "values", "=", "feature", ".", "float_list", ".", "value", "tf", ".", "compat", ".", "v1", ".", "logging", ".", "info", "(", "\"%s: %s\"", "%", "(", "feature_name", ",", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "values", "]", ")", ")", ")", "for", "writer", "in", "writers", ":", "writer", ".", "close", "(", ")", "tf", ".", "compat", ".", "v1", ".", "logging", ".", "info", "(", "\"Wrote %d total instances\"", ",", "total_written", ")" ]
https://github.com/IntelAI/models/blob/1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c/models/language_modeling/tensorflow/bert_large/training/fp32/create_pretraining_data.py#L99-L169
selfteaching/selfteaching-python-camp
9982ee964b984595e7d664b07c389cddaf158f1e
exercises/1901100088/d10/mymodule/stats_word.py
python
stats_text
(text,count)
Calculate the occurrence number of the word both in english and chiness.
Calculate the occurrence number of the word both in english and chiness.
[ "Calculate", "the", "occurrence", "number", "of", "the", "word", "both", "in", "english", "and", "chiness", "." ]
def stats_text(text,count): """Calculate the occurrence number of the word both in english and chiness. """ if type(text) == str and type(count) == int: return(stats_text_en(text,count)+stats_text_cn(text,count)) else: raise ValueError('参数类型错误,需传入字符串及整数参数')
[ "def", "stats_text", "(", "text", ",", "count", ")", ":", "if", "type", "(", "text", ")", "==", "str", "and", "type", "(", "count", ")", "==", "int", ":", "return", "(", "stats_text_en", "(", "text", ",", "count", ")", "+", "stats_text_cn", "(", "text", ",", "count", ")", ")", "else", ":", "raise", "ValueError", "(", "'参数类型错误,需传入字符串及整数参数')", "" ]
https://github.com/selfteaching/selfteaching-python-camp/blob/9982ee964b984595e7d664b07c389cddaf158f1e/exercises/1901100088/d10/mymodule/stats_word.py#L42-L49
avocado-framework/avocado
1f9b3192e8ba47d029c33fe21266bd113d17811f
avocado/utils/lv_utils.py
python
lv_revert
(vg_name, lv_name, lv_snapshot_name)
Revert the origin logical volume to a snapshot. :param str vg_name: name of the volume group :param str lv_name: name of the logical volume :param str lv_snapshot_name: name of the snapshot to be reverted :raises: :py:class:`process.CmdError` on failure to revert snapshot :raises: :py:class:`LVException` if preconditions or execution fails
Revert the origin logical volume to a snapshot.
[ "Revert", "the", "origin", "logical", "volume", "to", "a", "snapshot", "." ]
def lv_revert(vg_name, lv_name, lv_snapshot_name): """ Revert the origin logical volume to a snapshot. :param str vg_name: name of the volume group :param str lv_name: name of the logical volume :param str lv_snapshot_name: name of the snapshot to be reverted :raises: :py:class:`process.CmdError` on failure to revert snapshot :raises: :py:class:`LVException` if preconditions or execution fails """ try: if not vg_check(vg_name): raise LVException("Volume group could not be found") if not lv_check(vg_name, lv_snapshot_name): raise LVException("Snapshot could not be found") if (not lv_check(vg_name, lv_snapshot_name) and not lv_check(vg_name, lv_name)): raise LVException("Snapshot and its origin could not be found") if (lv_check(vg_name, lv_snapshot_name) and not lv_check(vg_name, lv_name)): raise LVException("Snapshot origin could not be found") cmd = ("lvconvert --merge --interval 1 /dev/%s/%s" % (vg_name, lv_snapshot_name)) result = process.run(cmd, sudo=True) if (("Merging of snapshot %s will start next activation." % lv_snapshot_name) in result.stdout_text): raise LVException("The Logical volume %s is still active" % lv_name) except process.CmdError as ex: # detect if merge of snapshot was postponed # and attempt to reactivate the volume. active_lv_pattern = re.escape("%s [active]" % lv_snapshot_name) lvdisplay_output = process.run("lvdisplay", sudo=True).stdout_text if ('Snapshot could not be found' in ex.result.stderr_text and re.search(active_lv_pattern, lvdisplay_output) or "The Logical volume %s is still active" % lv_name in ex.result.stderr_text): log_msg = "Logical volume %s is still active! Attempting to deactivate..." LOGGER.debug(log_msg, lv_name) lv_reactivate(vg_name, lv_name) LOGGER.error("Continuing after reactivation") elif 'Snapshot could not be found' in ex.result.stderr_text: LOGGER.error("Could not revert to snapshot:") LOGGER.error(ex.result) else: raise ex
[ "def", "lv_revert", "(", "vg_name", ",", "lv_name", ",", "lv_snapshot_name", ")", ":", "try", ":", "if", "not", "vg_check", "(", "vg_name", ")", ":", "raise", "LVException", "(", "\"Volume group could not be found\"", ")", "if", "not", "lv_check", "(", "vg_name", ",", "lv_snapshot_name", ")", ":", "raise", "LVException", "(", "\"Snapshot could not be found\"", ")", "if", "(", "not", "lv_check", "(", "vg_name", ",", "lv_snapshot_name", ")", "and", "not", "lv_check", "(", "vg_name", ",", "lv_name", ")", ")", ":", "raise", "LVException", "(", "\"Snapshot and its origin could not be found\"", ")", "if", "(", "lv_check", "(", "vg_name", ",", "lv_snapshot_name", ")", "and", "not", "lv_check", "(", "vg_name", ",", "lv_name", ")", ")", ":", "raise", "LVException", "(", "\"Snapshot origin could not be found\"", ")", "cmd", "=", "(", "\"lvconvert --merge --interval 1 /dev/%s/%s\"", "%", "(", "vg_name", ",", "lv_snapshot_name", ")", ")", "result", "=", "process", ".", "run", "(", "cmd", ",", "sudo", "=", "True", ")", "if", "(", "(", "\"Merging of snapshot %s will start next activation.\"", "%", "lv_snapshot_name", ")", "in", "result", ".", "stdout_text", ")", ":", "raise", "LVException", "(", "\"The Logical volume %s is still active\"", "%", "lv_name", ")", "except", "process", ".", "CmdError", "as", "ex", ":", "# detect if merge of snapshot was postponed", "# and attempt to reactivate the volume.", "active_lv_pattern", "=", "re", ".", "escape", "(", "\"%s [active]\"", "%", "lv_snapshot_name", ")", "lvdisplay_output", "=", "process", ".", "run", "(", "\"lvdisplay\"", ",", "sudo", "=", "True", ")", ".", "stdout_text", "if", "(", "'Snapshot could not be found'", "in", "ex", ".", "result", ".", "stderr_text", "and", "re", ".", "search", "(", "active_lv_pattern", ",", "lvdisplay_output", ")", "or", "\"The Logical volume %s is still active\"", "%", "lv_name", "in", "ex", ".", "result", ".", "stderr_text", ")", ":", "log_msg", "=", "\"Logical volume %s is still active! Attempting to deactivate...\"", "LOGGER", ".", "debug", "(", "log_msg", ",", "lv_name", ")", "lv_reactivate", "(", "vg_name", ",", "lv_name", ")", "LOGGER", ".", "error", "(", "\"Continuing after reactivation\"", ")", "elif", "'Snapshot could not be found'", "in", "ex", ".", "result", ".", "stderr_text", ":", "LOGGER", ".", "error", "(", "\"Could not revert to snapshot:\"", ")", "LOGGER", ".", "error", "(", "ex", ".", "result", ")", "else", ":", "raise", "ex" ]
https://github.com/avocado-framework/avocado/blob/1f9b3192e8ba47d029c33fe21266bd113d17811f/avocado/utils/lv_utils.py#L531-L576
fxsjy/jiebademo
ba3e5a34cd84b612e13f4dfb9f3ec037928c4339
jiebademo/bottle.py
python
Bottle.__call__
(self, environ, start_response)
return self.wsgi(environ, start_response)
Each instance of :class:'Bottle' is a WSGI application.
Each instance of :class:'Bottle' is a WSGI application.
[ "Each", "instance", "of", ":", "class", ":", "Bottle", "is", "a", "WSGI", "application", "." ]
def __call__(self, environ, start_response): ''' Each instance of :class:'Bottle' is a WSGI application. ''' return self.wsgi(environ, start_response)
[ "def", "__call__", "(", "self", ",", "environ", ",", "start_response", ")", ":", "return", "self", ".", "wsgi", "(", "environ", ",", "start_response", ")" ]
https://github.com/fxsjy/jiebademo/blob/ba3e5a34cd84b612e13f4dfb9f3ec037928c4339/jiebademo/bottle.py#L854-L856
cylc/cylc-flow
5ec221143476c7c616c156b74158edfbcd83794a
cylc/flow/pathutil.py
python
parse_rm_dirs
(rm_dirs: Iterable[str])
return result
Parse a list of possibly colon-separated dirs (or files or globs). Return the set of all the dirs. Used by cylc clean with the --rm option.
Parse a list of possibly colon-separated dirs (or files or globs). Return the set of all the dirs.
[ "Parse", "a", "list", "of", "possibly", "colon", "-", "separated", "dirs", "(", "or", "files", "or", "globs", ")", ".", "Return", "the", "set", "of", "all", "the", "dirs", "." ]
def parse_rm_dirs(rm_dirs: Iterable[str]) -> Set[str]: """Parse a list of possibly colon-separated dirs (or files or globs). Return the set of all the dirs. Used by cylc clean with the --rm option. """ result: Set[str] = set() for item in rm_dirs: for part in item.split(':'): part = part.strip() if not part: continue is_dir = part.endswith(os.sep) part = os.path.normpath(part) if os.path.isabs(part): raise UserInputError("--rm option cannot take absolute paths") if ( part in {os.curdir, os.pardir} or part.startswith(f"{os.pardir}{os.sep}") # '../' ): raise UserInputError( "--rm option cannot take paths that point to the " "run directory or above" ) if is_dir: # Preserve trailing slash to ensure it only matches dirs, # not files, when globbing part += os.sep result.add(part) return result
[ "def", "parse_rm_dirs", "(", "rm_dirs", ":", "Iterable", "[", "str", "]", ")", "->", "Set", "[", "str", "]", ":", "result", ":", "Set", "[", "str", "]", "=", "set", "(", ")", "for", "item", "in", "rm_dirs", ":", "for", "part", "in", "item", ".", "split", "(", "':'", ")", ":", "part", "=", "part", ".", "strip", "(", ")", "if", "not", "part", ":", "continue", "is_dir", "=", "part", ".", "endswith", "(", "os", ".", "sep", ")", "part", "=", "os", ".", "path", ".", "normpath", "(", "part", ")", "if", "os", ".", "path", ".", "isabs", "(", "part", ")", ":", "raise", "UserInputError", "(", "\"--rm option cannot take absolute paths\"", ")", "if", "(", "part", "in", "{", "os", ".", "curdir", ",", "os", ".", "pardir", "}", "or", "part", ".", "startswith", "(", "f\"{os.pardir}{os.sep}\"", ")", "# '../'", ")", ":", "raise", "UserInputError", "(", "\"--rm option cannot take paths that point to the \"", "\"run directory or above\"", ")", "if", "is_dir", ":", "# Preserve trailing slash to ensure it only matches dirs,", "# not files, when globbing", "part", "+=", "os", ".", "sep", "result", ".", "add", "(", "part", ")", "return", "result" ]
https://github.com/cylc/cylc-flow/blob/5ec221143476c7c616c156b74158edfbcd83794a/cylc/flow/pathutil.py#L373-L402
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/Centos_5.9/Crypto/Util/number.py
python
getRandomNumber
(N, randfunc=None)
return getRandomNBitInteger(N, randfunc)
Deprecated. Use getRandomInteger or getRandomNBitInteger instead.
Deprecated. Use getRandomInteger or getRandomNBitInteger instead.
[ "Deprecated", ".", "Use", "getRandomInteger", "or", "getRandomNBitInteger", "instead", "." ]
def getRandomNumber(N, randfunc=None): """Deprecated. Use getRandomInteger or getRandomNBitInteger instead.""" warnings.warn("Crypto.Util.number.getRandomNumber has confusing semantics"+ "and has been deprecated. Use getRandomInteger or getRandomNBitInteger instead.", GetRandomNumber_DeprecationWarning) return getRandomNBitInteger(N, randfunc)
[ "def", "getRandomNumber", "(", "N", ",", "randfunc", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"Crypto.Util.number.getRandomNumber has confusing semantics\"", "+", "\"and has been deprecated. Use getRandomInteger or getRandomNBitInteger instead.\"", ",", "GetRandomNumber_DeprecationWarning", ")", "return", "getRandomNBitInteger", "(", "N", ",", "randfunc", ")" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Centos_5.9/Crypto/Util/number.py#L84-L89
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/webostv/device_trigger.py
python
async_validate_trigger_config
( hass: HomeAssistant, config: ConfigType )
return config
Validate config.
Validate config.
[ "Validate", "config", "." ]
async def async_validate_trigger_config( hass: HomeAssistant, config: ConfigType ) -> ConfigType: """Validate config.""" config = TRIGGER_SCHEMA(config) try: if async_is_device_config_entry_not_loaded(hass, config[CONF_DEVICE_ID]): return config except ValueError as err: raise InvalidDeviceAutomationConfig(err) from err if config[CONF_TYPE] == TURN_ON_PLATFORM_TYPE: device_id = config[CONF_DEVICE_ID] try: device = async_get_device_entry_by_device_id(hass, device_id) async_get_client_wrapper_by_device_entry(hass, device) except ValueError as err: raise InvalidDeviceAutomationConfig(err) from err return config
[ "async", "def", "async_validate_trigger_config", "(", "hass", ":", "HomeAssistant", ",", "config", ":", "ConfigType", ")", "->", "ConfigType", ":", "config", "=", "TRIGGER_SCHEMA", "(", "config", ")", "try", ":", "if", "async_is_device_config_entry_not_loaded", "(", "hass", ",", "config", "[", "CONF_DEVICE_ID", "]", ")", ":", "return", "config", "except", "ValueError", "as", "err", ":", "raise", "InvalidDeviceAutomationConfig", "(", "err", ")", "from", "err", "if", "config", "[", "CONF_TYPE", "]", "==", "TURN_ON_PLATFORM_TYPE", ":", "device_id", "=", "config", "[", "CONF_DEVICE_ID", "]", "try", ":", "device", "=", "async_get_device_entry_by_device_id", "(", "hass", ",", "device_id", ")", "async_get_client_wrapper_by_device_entry", "(", "hass", ",", "device", ")", "except", "ValueError", "as", "err", ":", "raise", "InvalidDeviceAutomationConfig", "(", "err", ")", "from", "err", "return", "config" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/webostv/device_trigger.py#L36-L56
angr/angr
4b04d56ace135018083d36d9083805be8146688b
angr/analyses/disassembly.py
python
FuncComment._render
(self, formatting=None)
return ['##', '## Function ' + self.func.name, '##']
[]
def _render(self, formatting=None): return ['##', '## Function ' + self.func.name, '##']
[ "def", "_render", "(", "self", ",", "formatting", "=", "None", ")", ":", "return", "[", "'##'", ",", "'## Function '", "+", "self", ".", "func", ".", "name", ",", "'##'", "]" ]
https://github.com/angr/angr/blob/4b04d56ace135018083d36d9083805be8146688b/angr/analyses/disassembly.py#L801-L802
phimpme/phimpme-generator
ba6d11190b9016238f27672e1ad55e6a875b74a0
Phimpme/site-packages/requests/packages/urllib3/packages/ordered_dict.py
python
OrderedDict.iterkeys
(self)
return iter(self)
od.iterkeys() -> an iterator over the keys in od
od.iterkeys() -> an iterator over the keys in od
[ "od", ".", "iterkeys", "()", "-", ">", "an", "iterator", "over", "the", "keys", "in", "od" ]
def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self)
[ "def", "iterkeys", "(", "self", ")", ":", "return", "iter", "(", "self", ")" ]
https://github.com/phimpme/phimpme-generator/blob/ba6d11190b9016238f27672e1ad55e6a875b74a0/Phimpme/site-packages/requests/packages/urllib3/packages/ordered_dict.py#L129-L131
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/choroplethmapbox/colorbar/_title.py
python
Title.font
(self)
return self["font"]
Sets this color bar's title font. Note that the title's font used to be set by the now deprecated `titlefont` attribute. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.title.Font` - A dict of string/value properties that will be passed to the Font constructor Supported dict properties: color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". size Returns ------- plotly.graph_objs.choroplethmapbox.colorbar.title.Font
Sets this color bar's title font. Note that the title's font used to be set by the now deprecated `titlefont` attribute. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.title.Font` - A dict of string/value properties that will be passed to the Font constructor Supported dict properties: color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". size
[ "Sets", "this", "color", "bar", "s", "title", "font", ".", "Note", "that", "the", "title", "s", "font", "used", "to", "be", "set", "by", "the", "now", "deprecated", "titlefont", "attribute", ".", "The", "font", "property", "is", "an", "instance", "of", "Font", "that", "may", "be", "specified", "as", ":", "-", "An", "instance", "of", ":", "class", ":", "plotly", ".", "graph_objs", ".", "choroplethmapbox", ".", "colorbar", ".", "title", ".", "Font", "-", "A", "dict", "of", "string", "/", "value", "properties", "that", "will", "be", "passed", "to", "the", "Font", "constructor", "Supported", "dict", "properties", ":", "color", "family", "HTML", "font", "family", "-", "the", "typeface", "that", "will", "be", "applied", "by", "the", "web", "browser", ".", "The", "web", "browser", "will", "only", "be", "able", "to", "apply", "a", "font", "if", "it", "is", "available", "on", "the", "system", "which", "it", "operates", ".", "Provide", "multiple", "font", "families", "separated", "by", "commas", "to", "indicate", "the", "preference", "in", "which", "to", "apply", "fonts", "if", "they", "aren", "t", "available", "on", "the", "system", ".", "The", "Chart", "Studio", "Cloud", "(", "at", "https", ":", "//", "chart", "-", "studio", ".", "plotly", ".", "com", "or", "on", "-", "premise", ")", "generates", "images", "on", "a", "server", "where", "only", "a", "select", "number", "of", "fonts", "are", "installed", "and", "supported", ".", "These", "include", "Arial", "Balto", "Courier", "New", "Droid", "Sans", "Droid", "Serif", "Droid", "Sans", "Mono", "Gravitas", "One", "Old", "Standard", "TT", "Open", "Sans", "Overpass", "PT", "Sans", "Narrow", "Raleway", "Times", "New", "Roman", ".", "size" ]
def font(self): """ Sets this color bar's title font. Note that the title's font used to be set by the now deprecated `titlefont` attribute. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.title.Font` - A dict of string/value properties that will be passed to the Font constructor Supported dict properties: color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". size Returns ------- plotly.graph_objs.choroplethmapbox.colorbar.title.Font """ return self["font"]
[ "def", "font", "(", "self", ")", ":", "return", "self", "[", "\"font\"", "]" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/choroplethmapbox/colorbar/_title.py#L16-L54
erikdubois/Aureola
005fb14b3cab0ba1929ebf9ac3ac68d2c6e1c0ef
shailen/dropbox.py
python
lansync
(argv)
u"""enables or disables LAN sync dropbox lansync [y/n] options: y dropbox will use LAN sync (default) n dropbox will not use LAN sync
u"""enables or disables LAN sync dropbox lansync [y/n]
[ "u", "enables", "or", "disables", "LAN", "sync", "dropbox", "lansync", "[", "y", "/", "n", "]" ]
def lansync(argv): u"""enables or disables LAN sync dropbox lansync [y/n] options: y dropbox will use LAN sync (default) n dropbox will not use LAN sync """ if len(argv) != 1: console_print(lansync.__doc__, linebreak=False) return s = argv[0].lower() if s.startswith('y') or s.startswith('-y'): should_lansync = True elif s.startswith('n') or s.startswith('-n'): should_lansync = False else: should_lansync = None if should_lansync is None: console_print(lansync.__doc__,linebreak=False) else: with closing(DropboxCommand()) as dc: dc.set_lan_sync(lansync='enabled' if should_lansync else 'disabled')
[ "def", "lansync", "(", "argv", ")", ":", "if", "len", "(", "argv", ")", "!=", "1", ":", "console_print", "(", "lansync", ".", "__doc__", ",", "linebreak", "=", "False", ")", "return", "s", "=", "argv", "[", "0", "]", ".", "lower", "(", ")", "if", "s", ".", "startswith", "(", "'y'", ")", "or", "s", ".", "startswith", "(", "'-y'", ")", ":", "should_lansync", "=", "True", "elif", "s", ".", "startswith", "(", "'n'", ")", "or", "s", ".", "startswith", "(", "'-n'", ")", ":", "should_lansync", "=", "False", "else", ":", "should_lansync", "=", "None", "if", "should_lansync", "is", "None", ":", "console_print", "(", "lansync", ".", "__doc__", ",", "linebreak", "=", "False", ")", "else", ":", "with", "closing", "(", "DropboxCommand", "(", ")", ")", "as", "dc", ":", "dc", ".", "set_lan_sync", "(", "lansync", "=", "'enabled'", "if", "should_lansync", "else", "'disabled'", ")" ]
https://github.com/erikdubois/Aureola/blob/005fb14b3cab0ba1929ebf9ac3ac68d2c6e1c0ef/shailen/dropbox.py#L1249-L1273
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/calendar.py
python
Calendar.itermonthdates
(self, year, month)
Return an iterator for one month. The iterator will yield datetime.date values and will always iterate through complete weeks, so it will yield dates outside the specified month.
Return an iterator for one month. The iterator will yield datetime.date values and will always iterate through complete weeks, so it will yield dates outside the specified month.
[ "Return", "an", "iterator", "for", "one", "month", ".", "The", "iterator", "will", "yield", "datetime", ".", "date", "values", "and", "will", "always", "iterate", "through", "complete", "weeks", "so", "it", "will", "yield", "dates", "outside", "the", "specified", "month", "." ]
def itermonthdates(self, year, month): """ Return an iterator for one month. The iterator will yield datetime.date values and will always iterate through complete weeks, so it will yield dates outside the specified month. """ date = datetime.date(year, month, 1) # Go back to the beginning of the week days = (date.weekday() - self.firstweekday) % 7 date -= datetime.timedelta(days=days) oneday = datetime.timedelta(days=1) while True: yield date try: date += oneday except OverflowError: # Adding one day could fail after datetime.MAXYEAR break if date.month != month and date.weekday() == self.firstweekday: break
[ "def", "itermonthdates", "(", "self", ",", "year", ",", "month", ")", ":", "date", "=", "datetime", ".", "date", "(", "year", ",", "month", ",", "1", ")", "# Go back to the beginning of the week", "days", "=", "(", "date", ".", "weekday", "(", ")", "-", "self", ".", "firstweekday", ")", "%", "7", "date", "-=", "datetime", ".", "timedelta", "(", "days", "=", "days", ")", "oneday", "=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "while", "True", ":", "yield", "date", "try", ":", "date", "+=", "oneday", "except", "OverflowError", ":", "# Adding one day could fail after datetime.MAXYEAR", "break", "if", "date", ".", "month", "!=", "month", "and", "date", ".", "weekday", "(", ")", "==", "self", ".", "firstweekday", ":", "break" ]
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/calendar.py#L151-L170
LudovicRousseau/pyscard
c0a5e2f626be69a0fc7b530631471cf014e4b20e
smartcard/pcsc/PCSCPart10.py
python
parseFeatureRequest
(response)
return features
Get the list of Part10 features supported by the reader. @param response: result of CM_IOCTL_GET_FEATURE_REQUEST commmand @rtype: list @return: a list of list [[tag1, value1], [tag2, value2]]
Get the list of Part10 features supported by the reader.
[ "Get", "the", "list", "of", "Part10", "features", "supported", "by", "the", "reader", "." ]
def parseFeatureRequest(response): """ Get the list of Part10 features supported by the reader. @param response: result of CM_IOCTL_GET_FEATURE_REQUEST commmand @rtype: list @return: a list of list [[tag1, value1], [tag2, value2]] """ features = [] while (len(response) > 0): tag = response[0] control = ((((((response[2] << 8) + response[3]) << 8) + response[4]) << 8) + response[5]) try: features.append([Features[tag], control]) except KeyError: pass del response[:6] return features
[ "def", "parseFeatureRequest", "(", "response", ")", ":", "features", "=", "[", "]", "while", "(", "len", "(", "response", ")", ">", "0", ")", ":", "tag", "=", "response", "[", "0", "]", "control", "=", "(", "(", "(", "(", "(", "(", "response", "[", "2", "]", "<<", "8", ")", "+", "response", "[", "3", "]", ")", "<<", "8", ")", "+", "response", "[", "4", "]", ")", "<<", "8", ")", "+", "response", "[", "5", "]", ")", "try", ":", "features", ".", "append", "(", "[", "Features", "[", "tag", "]", ",", "control", "]", ")", "except", "KeyError", ":", "pass", "del", "response", "[", ":", "6", "]", "return", "features" ]
https://github.com/LudovicRousseau/pyscard/blob/c0a5e2f626be69a0fc7b530631471cf014e4b20e/smartcard/pcsc/PCSCPart10.py#L110-L130
quantumlib/OpenFermion
6187085f2a7707012b68370b625acaeed547e62b
src/openfermion/circuits/low_rank.py
python
get_chemist_two_body_coefficients
(two_body_coefficients, spin_basis=True)
return one_body_correction, chemist_two_body_coefficients
r"""Convert two-body operator coefficients to low rank tensor. The input is a two-body fermionic Hamiltonian expressed as $\sum_{pqrs} h_{pqrs} a^\dagger_p a^\dagger_q a_r a_s$ We will convert this to the chemistry convention expressing it as $\sum_{pqrs} g_{pqrs} a^\dagger_p a_q a^\dagger_r a_s$ but without the spin degree of freedom. In the process of performing this conversion, constants and one-body terms come out, which will be returned as well. Args: two_body_coefficients (ndarray): an N x N x N x N numpy array giving the $h_{pqrs}$ tensor. spin_basis (bool): True if the two-body terms are passed in spin orbital basis. False if already in spatial orbital basis. Returns: one_body_correction (ndarray): an N x N array of floats giving coefficients of the $a^\dagger_p a_q$ terms that come out. chemist_two_body_coefficients (ndarray): an N x N x N x N numpy array giving the $g_{pqrs}$ tensor in chemist notation. Raises: TypeError: Input must be two-body number conserving FermionOperator or InteractionOperator.
r"""Convert two-body operator coefficients to low rank tensor.
[ "r", "Convert", "two", "-", "body", "operator", "coefficients", "to", "low", "rank", "tensor", "." ]
def get_chemist_two_body_coefficients(two_body_coefficients, spin_basis=True): r"""Convert two-body operator coefficients to low rank tensor. The input is a two-body fermionic Hamiltonian expressed as $\sum_{pqrs} h_{pqrs} a^\dagger_p a^\dagger_q a_r a_s$ We will convert this to the chemistry convention expressing it as $\sum_{pqrs} g_{pqrs} a^\dagger_p a_q a^\dagger_r a_s$ but without the spin degree of freedom. In the process of performing this conversion, constants and one-body terms come out, which will be returned as well. Args: two_body_coefficients (ndarray): an N x N x N x N numpy array giving the $h_{pqrs}$ tensor. spin_basis (bool): True if the two-body terms are passed in spin orbital basis. False if already in spatial orbital basis. Returns: one_body_correction (ndarray): an N x N array of floats giving coefficients of the $a^\dagger_p a_q$ terms that come out. chemist_two_body_coefficients (ndarray): an N x N x N x N numpy array giving the $g_{pqrs}$ tensor in chemist notation. Raises: TypeError: Input must be two-body number conserving FermionOperator or InteractionOperator. """ # Initialize. n_orbitals = two_body_coefficients.shape[0] chemist_two_body_coefficients = numpy.transpose(two_body_coefficients, [0, 3, 1, 2]) # If the specification was in spin-orbitals, chop down to spatial orbitals # assuming a spin-symmetric interaction. if spin_basis: n_orbitals = n_orbitals // 2 alpha_indices = list(range(0, n_orbitals * 2, 2)) beta_indices = list(range(1, n_orbitals * 2, 2)) chemist_two_body_coefficients = chemist_two_body_coefficients[numpy.ix_( alpha_indices, alpha_indices, beta_indices, beta_indices)] # Determine a one body correction in the spin basis from spatial basis. one_body_correction = numpy.zeros((2 * n_orbitals, 2 * n_orbitals), complex) for p, q, r, s in itertools.product(range(n_orbitals), repeat=4): for sigma, tau in itertools.product(range(2), repeat=2): if (q == r) and (sigma == tau): one_body_correction[2 * p + sigma, 2 * s + tau] -= ( chemist_two_body_coefficients[p, q, r, s]) # Return. return one_body_correction, chemist_two_body_coefficients
[ "def", "get_chemist_two_body_coefficients", "(", "two_body_coefficients", ",", "spin_basis", "=", "True", ")", ":", "# Initialize.", "n_orbitals", "=", "two_body_coefficients", ".", "shape", "[", "0", "]", "chemist_two_body_coefficients", "=", "numpy", ".", "transpose", "(", "two_body_coefficients", ",", "[", "0", ",", "3", ",", "1", ",", "2", "]", ")", "# If the specification was in spin-orbitals, chop down to spatial orbitals", "# assuming a spin-symmetric interaction.", "if", "spin_basis", ":", "n_orbitals", "=", "n_orbitals", "//", "2", "alpha_indices", "=", "list", "(", "range", "(", "0", ",", "n_orbitals", "*", "2", ",", "2", ")", ")", "beta_indices", "=", "list", "(", "range", "(", "1", ",", "n_orbitals", "*", "2", ",", "2", ")", ")", "chemist_two_body_coefficients", "=", "chemist_two_body_coefficients", "[", "numpy", ".", "ix_", "(", "alpha_indices", ",", "alpha_indices", ",", "beta_indices", ",", "beta_indices", ")", "]", "# Determine a one body correction in the spin basis from spatial basis.", "one_body_correction", "=", "numpy", ".", "zeros", "(", "(", "2", "*", "n_orbitals", ",", "2", "*", "n_orbitals", ")", ",", "complex", ")", "for", "p", ",", "q", ",", "r", ",", "s", "in", "itertools", ".", "product", "(", "range", "(", "n_orbitals", ")", ",", "repeat", "=", "4", ")", ":", "for", "sigma", ",", "tau", "in", "itertools", ".", "product", "(", "range", "(", "2", ")", ",", "repeat", "=", "2", ")", ":", "if", "(", "q", "==", "r", ")", "and", "(", "sigma", "==", "tau", ")", ":", "one_body_correction", "[", "2", "*", "p", "+", "sigma", ",", "2", "*", "s", "+", "tau", "]", "-=", "(", "chemist_two_body_coefficients", "[", "p", ",", "q", ",", "r", ",", "s", "]", ")", "# Return.", "return", "one_body_correction", ",", "chemist_two_body_coefficients" ]
https://github.com/quantumlib/OpenFermion/blob/6187085f2a7707012b68370b625acaeed547e62b/src/openfermion/circuits/low_rank.py#L21-L73
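A minimal usage sketch for the conversion above, assuming get_chemist_two_body_coefficients as shown in this record is available in scope; the tensor values are arbitrary illustrative numbers, not a physical Hamiltonian.

import numpy

# Four spin orbitals (two spatial orbitals), random illustrative coefficients.
n_spin_orbitals = 4
h_pqrs = numpy.random.rand(n_spin_orbitals, n_spin_orbitals,
                           n_spin_orbitals, n_spin_orbitals)

# Reorder h_{pqrs} into the chemist g_{pqrs} tensor; the one-body terms that
# fall out of the reordering are returned separately.
one_body_correction, g_pqrs = get_chemist_two_body_coefficients(
    h_pqrs, spin_basis=True)

print(one_body_correction.shape)  # (4, 4): still in the spin-orbital basis
print(g_pqrs.shape)               # (2, 2, 2, 2): spatial-orbital basis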
mrkipling/maraschino
c6be9286937783ae01df2d6d8cebfc8b2734a7d7
lib/sqlalchemy/ext/sqlsoup.py
python
SqlSoup.delete
(self, instance)
Mark an instance as deleted.
Mark an instance as deleted.
[ "Mark", "an", "instance", "as", "deleted", "." ]
def delete(self, instance): """Mark an instance as deleted.""" self.session.delete(instance)
[ "def", "delete", "(", "self", ",", "instance", ")", ":", "self", ".", "session", ".", "delete", "(", "instance", ")" ]
https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/sqlalchemy/ext/sqlsoup.py#L555-L558
Qiskit/qiskit-terra
b66030e3b9192efdd3eb95cf25c6545fe0a13da4
qiskit/circuit/library/standard_gates/h.py
python
HGate.__init__
(self, label: Optional[str] = None)
Create new H gate.
Create new H gate.
[ "Create", "new", "H", "gate", "." ]
def __init__(self, label: Optional[str] = None): """Create new H gate.""" super().__init__("h", 1, [], label=label)
[ "def", "__init__", "(", "self", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ")", ":", "super", "(", ")", ".", "__init__", "(", "\"h\"", ",", "1", ",", "[", "]", ",", "label", "=", "label", ")" ]
https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/circuit/library/standard_gates/h.py#L51-L53
m-rtijn/mpu6050
0626053a5e1182f4951b78b8326691a9223a5f7d
mpu6050/mpu6050.py
python
mpu6050.get_all_data
(self)
return [accel, gyro, temp]
Reads and returns all the available data.
Reads and returns all the available data.
[ "Reads", "and", "returns", "all", "the", "available", "data", "." ]
def get_all_data(self): """Reads and returns all the available data.""" temp = self.get_temp() accel = self.get_accel_data() gyro = self.get_gyro_data() return [accel, gyro, temp]
[ "def", "get_all_data", "(", "self", ")", ":", "temp", "=", "self", ".", "get_temp", "(", ")", "accel", "=", "self", ".", "get_accel_data", "(", ")", "gyro", "=", "self", ".", "get_gyro_data", "(", ")", "return", "[", "accel", ",", "gyro", ",", "temp", "]" ]
https://github.com/m-rtijn/mpu6050/blob/0626053a5e1182f4951b78b8326691a9223a5f7d/mpu6050/mpu6050.py#L255-L261
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/vendor/more_itertools/more.py
python
seekable.elements
(self)
return SequenceView(self._cache)
[]
def elements(self): return SequenceView(self._cache)
[ "def", "elements", "(", "self", ")", ":", "return", "SequenceView", "(", "self", ".", "_cache", ")" ]
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/vendor/more_itertools/more.py#L2693-L2694
spesmilo/electrum
bdbd59300fbd35b01605e66145458e5f396108e8
electrum/util.py
python
base_unit_name_to_decimal_point
(unit_name: str)
[]
def base_unit_name_to_decimal_point(unit_name: str) -> int: # e.g. "BTC" -> 8 try: return base_units[unit_name] except KeyError: raise UnknownBaseUnit(unit_name) from None
[ "def", "base_unit_name_to_decimal_point", "(", "unit_name", ":", "str", ")", "->", "int", ":", "# e.g. \"BTC\" -> 8", "try", ":", "return", "base_units", "[", "unit_name", "]", "except", "KeyError", ":", "raise", "UnknownBaseUnit", "(", "unit_name", ")", "from", "None" ]
https://github.com/spesmilo/electrum/blob/bdbd59300fbd35b01605e66145458e5f396108e8/electrum/util.py#L104-L109
paperswithcode/sota-extractor
6a13c5091900432bea7ea7cae3a12944c8d5ab57
sota_extractor/serialization.py
python
dumps
(tdb: TaskDB)
return json.dumps(tdb.export(), indent=2, sort_keys=True)
Render sota data to a json string.
Render sota data to a json string.
[ "Render", "sota", "data", "to", "a", "json", "string", "." ]
def dumps(tdb: TaskDB) -> str: """Render sota data to a json string.""" return json.dumps(tdb.export(), indent=2, sort_keys=True)
[ "def", "dumps", "(", "tdb", ":", "TaskDB", ")", "->", "str", ":", "return", "json", ".", "dumps", "(", "tdb", ".", "export", "(", ")", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ")" ]
https://github.com/paperswithcode/sota-extractor/blob/6a13c5091900432bea7ea7cae3a12944c8d5ab57/sota_extractor/serialization.py#L9-L11
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/messaging/v1/service/phone_number.py
python
PhoneNumberInstance.fetch
(self)
return self._proxy.fetch()
Fetch the PhoneNumberInstance :returns: The fetched PhoneNumberInstance :rtype: twilio.rest.messaging.v1.service.phone_number.PhoneNumberInstance
Fetch the PhoneNumberInstance
[ "Fetch", "the", "PhoneNumberInstance" ]
def fetch(self): """ Fetch the PhoneNumberInstance :returns: The fetched PhoneNumberInstance :rtype: twilio.rest.messaging.v1.service.phone_number.PhoneNumberInstance """ return self._proxy.fetch()
[ "def", "fetch", "(", "self", ")", ":", "return", "self", ".", "_proxy", ".", "fetch", "(", ")" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/messaging/v1/service/phone_number.py#L387-L394
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/mako/codegen.py
python
_GenerateRenderMethod.write_render_callable
(self, node, name, args, buffered, filtered, cached)
write a top-level render callable. this could be the main render() method or that of a top-level def.
write a top-level render callable.
[ "write", "a", "top", "-", "level", "render", "callable", "." ]
def write_render_callable(self, node, name, args, buffered, filtered, cached): """write a top-level render callable. this could be the main render() method or that of a top-level def.""" if self.in_def: decorator = node.decorator if decorator: self.printer.writeline( "@runtime._decorate_toplevel(%s)" % decorator) self.printer.start_source(node.lineno) self.printer.writelines( "def %s(%s):" % (name, ','.join(args)), # push new frame, assign current frame to __M_caller "__M_caller = context.caller_stack._push_frame()", "try:" ) if buffered or filtered or cached: self.printer.writeline("context._push_buffer()") self.identifier_stack.append( self.compiler.identifiers.branch(self.node)) if (not self.in_def or self.node.is_block) and '**pageargs' in args: self.identifier_stack[-1].argument_declared.add('pageargs') if not self.in_def and ( len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0 ): self.printer.writeline("__M_locals = __M_dict_builtin(%s)" % ','.join([ "%s=%s" % (x, x) for x in self.identifiers.argument_declared ])) self.write_variable_declares(self.identifiers, toplevel=True) for n in self.node.nodes: n.accept_visitor(self) self.write_def_finish(self.node, buffered, filtered, cached) self.printer.writeline(None) self.printer.write_blanks(2) if cached: self.write_cache_decorator( node, name, args, buffered, self.identifiers, toplevel=True)
[ "def", "write_render_callable", "(", "self", ",", "node", ",", "name", ",", "args", ",", "buffered", ",", "filtered", ",", "cached", ")", ":", "if", "self", ".", "in_def", ":", "decorator", "=", "node", ".", "decorator", "if", "decorator", ":", "self", ".", "printer", ".", "writeline", "(", "\"@runtime._decorate_toplevel(%s)\"", "%", "decorator", ")", "self", ".", "printer", ".", "start_source", "(", "node", ".", "lineno", ")", "self", ".", "printer", ".", "writelines", "(", "\"def %s(%s):\"", "%", "(", "name", ",", "','", ".", "join", "(", "args", ")", ")", ",", "# push new frame, assign current frame to __M_caller", "\"__M_caller = context.caller_stack._push_frame()\"", ",", "\"try:\"", ")", "if", "buffered", "or", "filtered", "or", "cached", ":", "self", ".", "printer", ".", "writeline", "(", "\"context._push_buffer()\"", ")", "self", ".", "identifier_stack", ".", "append", "(", "self", ".", "compiler", ".", "identifiers", ".", "branch", "(", "self", ".", "node", ")", ")", "if", "(", "not", "self", ".", "in_def", "or", "self", ".", "node", ".", "is_block", ")", "and", "'**pageargs'", "in", "args", ":", "self", ".", "identifier_stack", "[", "-", "1", "]", ".", "argument_declared", ".", "add", "(", "'pageargs'", ")", "if", "not", "self", ".", "in_def", "and", "(", "len", "(", "self", ".", "identifiers", ".", "locally_assigned", ")", ">", "0", "or", "len", "(", "self", ".", "identifiers", ".", "argument_declared", ")", ">", "0", ")", ":", "self", ".", "printer", ".", "writeline", "(", "\"__M_locals = __M_dict_builtin(%s)\"", "%", "','", ".", "join", "(", "[", "\"%s=%s\"", "%", "(", "x", ",", "x", ")", "for", "x", "in", "self", ".", "identifiers", ".", "argument_declared", "]", ")", ")", "self", ".", "write_variable_declares", "(", "self", ".", "identifiers", ",", "toplevel", "=", "True", ")", "for", "n", "in", "self", ".", "node", ".", "nodes", ":", "n", ".", "accept_visitor", "(", "self", ")", "self", ".", "write_def_finish", "(", "self", ".", "node", ",", "buffered", ",", "filtered", ",", "cached", ")", "self", ".", "printer", ".", "writeline", "(", "None", ")", "self", ".", "printer", ".", "write_blanks", "(", "2", ")", "if", "cached", ":", "self", ".", "write_cache_decorator", "(", "node", ",", "name", ",", "args", ",", "buffered", ",", "self", ".", "identifiers", ",", "toplevel", "=", "True", ")" ]
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/mako/codegen.py#L267-L316
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v8/services/services/bidding_strategy_service/client.py
python
BiddingStrategyServiceClient.common_project_path
(project: str,)
return "projects/{project}".format(project=project,)
Return a fully-qualified project string.
Return a fully-qualified project string.
[ "Return", "a", "fully", "-", "qualified", "project", "string", "." ]
def common_project_path(project: str,) -> str: """Return a fully-qualified project string.""" return "projects/{project}".format(project=project,)
[ "def", "common_project_path", "(", "project", ":", "str", ",", ")", "->", "str", ":", "return", "\"projects/{project}\"", ".", "format", "(", "project", "=", "project", ",", ")" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v8/services/services/bidding_strategy_service/client.py#L216-L218
xingyizhou/CenterTrack
d3d52145b71cb9797da2bfb78f0f1e88b286c871
src/lib/model/data_parallel.py
python
data_parallel
(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None)
return gather(outputs, output_device, dim)
r"""Evaluates module(input) in parallel across the GPUs given in device_ids. This is the functional version of the DataParallel module. Args: module: the module to evaluate in parallel inputs: inputs to the module device_ids: GPU ids on which to replicate module output_device: GPU location of the output Use -1 to indicate the CPU. (default: device_ids[0]) Returns: a Variable containing the result of module(input) located on output_device
r"""Evaluates module(input) in parallel across the GPUs given in device_ids.
[ "r", "Evaluates", "module", "(", "input", ")", "in", "parallel", "across", "the", "GPUs", "given", "in", "device_ids", "." ]
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None): r"""Evaluates module(input) in parallel across the GPUs given in device_ids. This is the functional version of the DataParallel module. Args: module: the module to evaluate in parallel inputs: inputs to the module device_ids: GPU ids on which to replicate module output_device: GPU location of the output Use -1 to indicate the CPU. (default: device_ids[0]) Returns: a Variable containing the result of module(input) located on output_device """ if not isinstance(inputs, tuple): inputs = (inputs,) if device_ids is None: device_ids = list(range(torch.cuda.device_count())) if output_device is None: output_device = device_ids[0] inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) if len(device_ids) == 1: return module(*inputs[0], **module_kwargs[0]) used_device_ids = device_ids[:len(inputs)] replicas = replicate(module, used_device_ids) outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids) return gather(outputs, output_device, dim)
[ "def", "data_parallel", "(", "module", ",", "inputs", ",", "device_ids", "=", "None", ",", "output_device", "=", "None", ",", "dim", "=", "0", ",", "module_kwargs", "=", "None", ")", ":", "if", "not", "isinstance", "(", "inputs", ",", "tuple", ")", ":", "inputs", "=", "(", "inputs", ",", ")", "if", "device_ids", "is", "None", ":", "device_ids", "=", "list", "(", "range", "(", "torch", ".", "cuda", ".", "device_count", "(", ")", ")", ")", "if", "output_device", "is", "None", ":", "output_device", "=", "device_ids", "[", "0", "]", "inputs", ",", "module_kwargs", "=", "scatter_kwargs", "(", "inputs", ",", "module_kwargs", ",", "device_ids", ",", "dim", ")", "if", "len", "(", "device_ids", ")", "==", "1", ":", "return", "module", "(", "*", "inputs", "[", "0", "]", ",", "*", "*", "module_kwargs", "[", "0", "]", ")", "used_device_ids", "=", "device_ids", "[", ":", "len", "(", "inputs", ")", "]", "replicas", "=", "replicate", "(", "module", ",", "used_device_ids", ")", "outputs", "=", "parallel_apply", "(", "replicas", ",", "inputs", ",", "module_kwargs", ",", "used_device_ids", ")", "return", "gather", "(", "outputs", ",", "output_device", ",", "dim", ")" ]
https://github.com/xingyizhou/CenterTrack/blob/d3d52145b71cb9797da2bfb78f0f1e88b286c871/src/lib/model/data_parallel.py#L87-L117
inkandswitch/livebook
93c8d467734787366ad084fc3566bf5cbe249c51
public/pypyjs/modules/numpy/core/fromnumeric.py
python
compress
(condition, a, axis=None, out=None)
return compress(condition, axis, out)
Return selected slices of an array along given axis. When working along a given axis, a slice along that axis is returned in `output` for each index where `condition` evaluates to True. When working on a 1-D array, `compress` is equivalent to `extract`. Parameters ---------- condition : 1-D array of bools Array that selects which entries to return. If len(condition) is less than the size of `a` along the given axis, then output is truncated to the length of the condition array. a : array_like Array from which to extract a part. axis : int, optional Axis along which to take slices. If None (default), work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- compressed_array : ndarray A copy of `a` without the slices along axis for which `condition` is false. See Also -------- take, choose, diag, diagonal, select ndarray.compress : Equivalent method in ndarray np.extract: Equivalent method when working on 1-D arrays numpy.doc.ufuncs : Section "Output arguments" Examples -------- >>> a = np.array([[1, 2], [3, 4], [5, 6]]) >>> a array([[1, 2], [3, 4], [5, 6]]) >>> np.compress([0, 1], a, axis=0) array([[3, 4]]) >>> np.compress([False, True, True], a, axis=0) array([[3, 4], [5, 6]]) >>> np.compress([False, True], a, axis=1) array([[2], [4], [6]]) Working on the flattened array does not return slices along an axis but selects elements. >>> np.compress([False, True], a) array([2])
Return selected slices of an array along given axis.
[ "Return", "selected", "slices", "of", "an", "array", "along", "given", "axis", "." ]
def compress(condition, a, axis=None, out=None): """ Return selected slices of an array along given axis. When working along a given axis, a slice along that axis is returned in `output` for each index where `condition` evaluates to True. When working on a 1-D array, `compress` is equivalent to `extract`. Parameters ---------- condition : 1-D array of bools Array that selects which entries to return. If len(condition) is less than the size of `a` along the given axis, then output is truncated to the length of the condition array. a : array_like Array from which to extract a part. axis : int, optional Axis along which to take slices. If None (default), work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- compressed_array : ndarray A copy of `a` without the slices along axis for which `condition` is false. See Also -------- take, choose, diag, diagonal, select ndarray.compress : Equivalent method in ndarray np.extract: Equivalent method when working on 1-D arrays numpy.doc.ufuncs : Section "Output arguments" Examples -------- >>> a = np.array([[1, 2], [3, 4], [5, 6]]) >>> a array([[1, 2], [3, 4], [5, 6]]) >>> np.compress([0, 1], a, axis=0) array([[3, 4]]) >>> np.compress([False, True, True], a, axis=0) array([[3, 4], [5, 6]]) >>> np.compress([False, True], a, axis=1) array([[2], [4], [6]]) Working on the flattened array does not return slices along an axis but selects elements. >>> np.compress([False, True], a) array([2]) """ try: compress = a.compress except AttributeError: return _wrapit(a, 'compress', condition, axis, out) return compress(condition, axis, out)
[ "def", "compress", "(", "condition", ",", "a", ",", "axis", "=", "None", ",", "out", "=", "None", ")", ":", "try", ":", "compress", "=", "a", ".", "compress", "except", "AttributeError", ":", "return", "_wrapit", "(", "a", ",", "'compress'", ",", "condition", ",", "axis", ",", "out", ")", "return", "compress", "(", "condition", ",", "axis", ",", "out", ")" ]
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/numpy/core/fromnumeric.py#L1609-L1673
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/RiskIQDigitalFootprint/Integrations/RiskIQDigitalFootprint/RiskIQDigitalFootprint.py
python
get_asset_command
(client: Client, args: Dict[str, Any])
return command_results
Retrieve the asset of the specified UUID from Global Inventory. :param client: The Client object used for request :param args: The command arguments :return: CommandResults :return: CommandResults
Retrieve the asset of the specified UUID from Global Inventory.
[ "Retrieve", "the", "asset", "of", "the", "specified", "UUID", "from", "Global", "Inventory", "." ]
def get_asset_command(client: Client, args: Dict[str, Any]) -> Union[str, List[CommandResults]]: """ Retrieve the asset of the specified UUID from Global Inventory. :param client: The Client object used for request :param args: The command arguments :return: CommandResults :return: CommandResults """ uuid, asset_type = validate_and_fetch_get_asset_arguments(args) params = get_asset_params(args) if uuid: resp = client.http_request(method='GET', url_suffix=COMMAND_URL_SUFFIX['GET_ASSET_BY_UUID'].format(uuid), params=params) else: resp = client.http_request(method='GET', url_suffix=COMMAND_URL_SUFFIX['GET_ASSET_BY_NAME_AND_TYPE'] .format(asset_type), params=params) command_results = get_asset_outputs(resp) return command_results
[ "def", "get_asset_command", "(", "client", ":", "Client", ",", "args", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Union", "[", "str", ",", "List", "[", "CommandResults", "]", "]", ":", "uuid", ",", "asset_type", "=", "validate_and_fetch_get_asset_arguments", "(", "args", ")", "params", "=", "get_asset_params", "(", "args", ")", "if", "uuid", ":", "resp", "=", "client", ".", "http_request", "(", "method", "=", "'GET'", ",", "url_suffix", "=", "COMMAND_URL_SUFFIX", "[", "'GET_ASSET_BY_UUID'", "]", ".", "format", "(", "uuid", ")", ",", "params", "=", "params", ")", "else", ":", "resp", "=", "client", ".", "http_request", "(", "method", "=", "'GET'", ",", "url_suffix", "=", "COMMAND_URL_SUFFIX", "[", "'GET_ASSET_BY_NAME_AND_TYPE'", "]", ".", "format", "(", "asset_type", ")", ",", "params", "=", "params", ")", "command_results", "=", "get_asset_outputs", "(", "resp", ")", "return", "command_results" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/RiskIQDigitalFootprint/Integrations/RiskIQDigitalFootprint/RiskIQDigitalFootprint.py#L2308-L2329
ionelmc/python-redis-lock
1842f8a6780613e8c89938218121e0ee464d2a50
src/redis_lock/__init__.py
python
reset_all
(redis_client)
Forcibly deletes all locks if its remains (like a crash reason). Use this with care. :param redis_client: An instance of :class:`~StrictRedis`.
Forcibly deletes all locks if its remains (like a crash reason). Use this with care.
[ "Forcibly", "deletes", "all", "locks", "if", "its", "remains", "(", "like", "a", "crash", "reason", ")", ".", "Use", "this", "with", "care", "." ]
def reset_all(redis_client): """ Forcibly deletes all locks if its remains (like a crash reason). Use this with care. :param redis_client: An instance of :class:`~StrictRedis`. """ Lock.register_scripts(redis_client) reset_all_script(client=redis_client)
[ "def", "reset_all", "(", "redis_client", ")", ":", "Lock", ".", "register_scripts", "(", "redis_client", ")", "reset_all_script", "(", "client", "=", "redis_client", ")" ]
https://github.com/ionelmc/python-redis-lock/blob/1842f8a6780613e8c89938218121e0ee464d2a50/src/redis_lock/__init__.py#L380-L389
rm-hull/luma.oled
5cbac38eaa3d7b06cf97f24d76877693c72d3233
luma/oled/device/__init__.py
python
sh1106.display
(self, image)
Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the SH1106 OLED display. :param image: Image to display. :type image: :py:mod:`PIL.Image`
Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the SH1106 OLED display.
[ "Takes", "a", "1", "-", "bit", ":", "py", ":", "mod", ":", "PIL", ".", "Image", "and", "dumps", "it", "to", "the", "SH1106", "OLED", "display", "." ]
def display(self, image): """ Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the SH1106 OLED display. :param image: Image to display. :type image: :py:mod:`PIL.Image` """ assert(image.mode == self.mode) assert(image.size == self.size) image = self.preprocess(image) set_page_address = 0xB0 image_data = image.getdata() pixels_per_page = self.width * 8 buf = bytearray(self.width) for y in range(0, int(self._pages * pixels_per_page), pixels_per_page): self.command(set_page_address, 0x02, 0x10) set_page_address += 1 offsets = [y + self.width * i for i in range(8)] for x in range(self.width): buf[x] = \ (image_data[x + offsets[0]] and 0x01) | \ (image_data[x + offsets[1]] and 0x02) | \ (image_data[x + offsets[2]] and 0x04) | \ (image_data[x + offsets[3]] and 0x08) | \ (image_data[x + offsets[4]] and 0x10) | \ (image_data[x + offsets[5]] and 0x20) | \ (image_data[x + offsets[6]] and 0x40) | \ (image_data[x + offsets[7]] and 0x80) self.data(list(buf))
[ "def", "display", "(", "self", ",", "image", ")", ":", "assert", "(", "image", ".", "mode", "==", "self", ".", "mode", ")", "assert", "(", "image", ".", "size", "==", "self", ".", "size", ")", "image", "=", "self", ".", "preprocess", "(", "image", ")", "set_page_address", "=", "0xB0", "image_data", "=", "image", ".", "getdata", "(", ")", "pixels_per_page", "=", "self", ".", "width", "*", "8", "buf", "=", "bytearray", "(", "self", ".", "width", ")", "for", "y", "in", "range", "(", "0", ",", "int", "(", "self", ".", "_pages", "*", "pixels_per_page", ")", ",", "pixels_per_page", ")", ":", "self", ".", "command", "(", "set_page_address", ",", "0x02", ",", "0x10", ")", "set_page_address", "+=", "1", "offsets", "=", "[", "y", "+", "self", ".", "width", "*", "i", "for", "i", "in", "range", "(", "8", ")", "]", "for", "x", "in", "range", "(", "self", ".", "width", ")", ":", "buf", "[", "x", "]", "=", "(", "image_data", "[", "x", "+", "offsets", "[", "0", "]", "]", "and", "0x01", ")", "|", "(", "image_data", "[", "x", "+", "offsets", "[", "1", "]", "]", "and", "0x02", ")", "|", "(", "image_data", "[", "x", "+", "offsets", "[", "2", "]", "]", "and", "0x04", ")", "|", "(", "image_data", "[", "x", "+", "offsets", "[", "3", "]", "]", "and", "0x08", ")", "|", "(", "image_data", "[", "x", "+", "offsets", "[", "4", "]", "]", "and", "0x10", ")", "|", "(", "image_data", "[", "x", "+", "offsets", "[", "5", "]", "]", "and", "0x20", ")", "|", "(", "image_data", "[", "x", "+", "offsets", "[", "6", "]", "]", "and", "0x40", ")", "|", "(", "image_data", "[", "x", "+", "offsets", "[", "7", "]", "]", "and", "0x80", ")", "self", ".", "data", "(", "list", "(", "buf", ")", ")" ]
https://github.com/rm-hull/luma.oled/blob/5cbac38eaa3d7b06cf97f24d76877693c72d3233/luma/oled/device/__init__.py#L95-L129
kubernetes-sigs/kubespray
cd601c77c7df953ef4f098a5c728cdd8afe9fdbd
contrib/terraform/terraform.py
python
iter_host_ips
(hosts, ips)
Update hosts that have an entry in the floating IP list
Update hosts that have an entry in the floating IP list
[ "Update", "hosts", "that", "have", "an", "entry", "in", "the", "floating", "IP", "list" ]
def iter_host_ips(hosts, ips): '''Update hosts that have an entry in the floating IP list''' for host in hosts: host_id = host[1]['id'] if host_id in ips: ip = ips[host_id] host[1].update({ 'access_ip_v4': ip, 'access_ip': ip, 'public_ipv4': ip, 'ansible_ssh_host': ip, }) if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0": host[1].pop('access_ip') yield host
[ "def", "iter_host_ips", "(", "hosts", ",", "ips", ")", ":", "for", "host", "in", "hosts", ":", "host_id", "=", "host", "[", "1", "]", "[", "'id'", "]", "if", "host_id", "in", "ips", ":", "ip", "=", "ips", "[", "host_id", "]", "host", "[", "1", "]", ".", "update", "(", "{", "'access_ip_v4'", ":", "ip", ",", "'access_ip'", ":", "ip", ",", "'public_ipv4'", ":", "ip", ",", "'ansible_ssh_host'", ":", "ip", ",", "}", ")", "if", "'use_access_ip'", "in", "host", "[", "1", "]", "[", "'metadata'", "]", "and", "host", "[", "1", "]", "[", "'metadata'", "]", "[", "'use_access_ip'", "]", "==", "\"0\"", ":", "host", "[", "1", "]", ".", "pop", "(", "'access_ip'", ")", "yield", "host" ]
https://github.com/kubernetes-sigs/kubespray/blob/cd601c77c7df953ef4f098a5c728cdd8afe9fdbd/contrib/terraform/terraform.py#L339-L357
ronf/asyncssh
ee1714c598d8c2ea6f5484e465443f38b68714aa
asyncssh/channel.py
python
SSHClientChannel.send_break
(self, msec: int)
Send a break to the remote process This method requests that the server perform a break operation on the remote process or service as described in :rfc:`4335`. :param msec: The duration of the break in milliseconds :type msec: `int` :raises: :exc:`OSError` if the channel is not open
Send a break to the remote process
[ "Send", "a", "break", "to", "the", "remote", "process" ]
def send_break(self, msec: int) -> None: """Send a break to the remote process This method requests that the server perform a break operation on the remote process or service as described in :rfc:`4335`. :param msec: The duration of the break in milliseconds :type msec: `int` :raises: :exc:`OSError` if the channel is not open """ self.logger.info('Sending %d msec break', msec) self._send_request(b'break', UInt32(msec))
[ "def", "send_break", "(", "self", ",", "msec", ":", "int", ")", "->", "None", ":", "self", ".", "logger", ".", "info", "(", "'Sending %d msec break'", ",", "msec", ")", "self", ".", "_send_request", "(", "b'break'", ",", "UInt32", "(", "msec", ")", ")" ]
https://github.com/ronf/asyncssh/blob/ee1714c598d8c2ea6f5484e465443f38b68714aa/asyncssh/channel.py#L1344-L1361
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/geometry/point.py
python
Point.is_collinear
(self, *args)
return Point.affine_rank(*points) <= 1
Returns `True` if there exists a line that contains `self` and `points`. Returns `False` otherwise. A trivially True value is returned if no points are given. Parameters ========== args : sequence of Points Returns ======= is_collinear : boolean See Also ======== sympy.geometry.line.Line Examples ======== >>> from sympy import Point >>> from sympy.abc import x >>> p1, p2 = Point(0, 0), Point(1, 1) >>> p3, p4, p5 = Point(2, 2), Point(x, x), Point(1, 2) >>> Point.is_collinear(p1, p2, p3, p4) True >>> Point.is_collinear(p1, p2, p3, p5) False
Returns `True` if there exists a line that contains `self` and `points`. Returns `False` otherwise. A trivially True value is returned if no points are given.
[ "Returns", "True", "if", "there", "exists", "a", "line", "that", "contains", "self", "and", "points", ".", "Returns", "False", "otherwise", ".", "A", "trivially", "True", "value", "is", "returned", "if", "no", "points", "are", "given", "." ]
def is_collinear(self, *args): """Returns `True` if there exists a line that contains `self` and `points`. Returns `False` otherwise. A trivially True value is returned if no points are given. Parameters ========== args : sequence of Points Returns ======= is_collinear : boolean See Also ======== sympy.geometry.line.Line Examples ======== >>> from sympy import Point >>> from sympy.abc import x >>> p1, p2 = Point(0, 0), Point(1, 1) >>> p3, p4, p5 = Point(2, 2), Point(x, x), Point(1, 2) >>> Point.is_collinear(p1, p2, p3, p4) True >>> Point.is_collinear(p1, p2, p3, p5) False """ points = (self,) + args points = Point._normalize_dimension(*[Point(i) for i in points]) points = list(uniq(points)) return Point.affine_rank(*points) <= 1
[ "def", "is_collinear", "(", "self", ",", "*", "args", ")", ":", "points", "=", "(", "self", ",", ")", "+", "args", "points", "=", "Point", ".", "_normalize_dimension", "(", "*", "[", "Point", "(", "i", ")", "for", "i", "in", "points", "]", ")", "points", "=", "list", "(", "uniq", "(", "points", ")", ")", "return", "Point", ".", "affine_rank", "(", "*", "points", ")", "<=", "1" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/geometry/point.py#L523-L559
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/utf8prober.py
python
UTF8Prober.reset
(self)
[]
def reset(self): CharSetProber.reset(self) self._mCodingSM.reset() self._mNumOfMBChar = 0
[ "def", "reset", "(", "self", ")", ":", "CharSetProber", ".", "reset", "(", "self", ")", "self", ".", "_mCodingSM", ".", "reset", "(", ")", "self", ".", "_mNumOfMBChar", "=", "0" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/utf8prober.py#L42-L45
NVIDIA/DeepLearningExamples
589604d49e016cd9ef4525f7abcc9c7b826cfc5e
PyTorch/Detection/Efficientdet/effdet/bench.py
python
_post_process
(config, cls_outputs, box_outputs)
return cls_outputs_all_after_topk, box_outputs_all_after_topk, indices_all, classes_all
Selects top-k predictions. Post-proc code adapted from Tensorflow version at: https://github.com/google/automl/tree/master/efficientdet and optimized for PyTorch. Args: config: a parameter dictionary that includes `min_level`, `max_level`, `batch_size`, and `num_classes`. cls_outputs: an OrderDict with keys representing levels and values representing logits in [batch_size, height, width, num_anchors]. box_outputs: an OrderDict with keys representing levels and values representing box regression targets in [batch_size, height, width, num_anchors * 4].
Selects top-k predictions.
[ "Selects", "top", "-", "k", "predictions", "." ]
def _post_process(config, cls_outputs, box_outputs): """Selects top-k predictions. Post-proc code adapted from Tensorflow version at: https://github.com/google/automl/tree/master/efficientdet and optimized for PyTorch. Args: config: a parameter dictionary that includes `min_level`, `max_level`, `batch_size`, and `num_classes`. cls_outputs: an OrderDict with keys representing levels and values representing logits in [batch_size, height, width, num_anchors]. box_outputs: an OrderDict with keys representing levels and values representing box regression targets in [batch_size, height, width, num_anchors * 4]. """ batch_size = cls_outputs[0].shape[0] if config.fused_focal_loss: batch_size, channels, _, _ = cls_outputs[0].shape padded_classes = (config.num_classes + 7) // 8 * 8 anchors = channels // padded_classes _cls_outputs_all = [] for level in range(config.num_levels): _, _, height, width = cls_outputs[level].shape _cls_output = cls_outputs[level].permute(0, 2, 3, 1) _cls_output = _cls_output.view(batch_size, height, width, anchors, padded_classes) _cls_output = _cls_output[..., :config.num_classes] _cls_output = _cls_output.reshape([batch_size, -1, config.num_classes]) _cls_outputs_all.append(_cls_output) cls_outputs_all = torch.cat(_cls_outputs_all, 1) else: cls_outputs_all = torch.cat([ cls_outputs[level].permute(0, 2, 3, 1).reshape([batch_size, -1, config.num_classes]) for level in range(config.num_levels)], 1) box_outputs_all = torch.cat([ box_outputs[level].permute(0, 2, 3, 1).reshape([batch_size, -1, 4]) for level in range(config.num_levels)], 1) _, cls_topk_indices_all = torch.topk(cls_outputs_all.reshape(batch_size, -1), dim=1, k=MAX_DETECTION_POINTS, sorted=False) indices_all = cls_topk_indices_all // config.num_classes classes_all = cls_topk_indices_all % config.num_classes box_outputs_all_after_topk = torch.gather( box_outputs_all, 1, indices_all.unsqueeze(2).expand(-1, -1, 4)) cls_outputs_all_after_topk = torch.gather( cls_outputs_all, 1, indices_all.unsqueeze(2).expand(-1, -1, config.num_classes)) cls_outputs_all_after_topk = torch.gather( cls_outputs_all_after_topk, 2, classes_all.unsqueeze(2)) return cls_outputs_all_after_topk, box_outputs_all_after_topk, indices_all, classes_all
[ "def", "_post_process", "(", "config", ",", "cls_outputs", ",", "box_outputs", ")", ":", "batch_size", "=", "cls_outputs", "[", "0", "]", ".", "shape", "[", "0", "]", "if", "config", ".", "fused_focal_loss", ":", "batch_size", ",", "channels", ",", "_", ",", "_", "=", "cls_outputs", "[", "0", "]", ".", "shape", "padded_classes", "=", "(", "config", ".", "num_classes", "+", "7", ")", "//", "8", "*", "8", "anchors", "=", "channels", "//", "padded_classes", "_cls_outputs_all", "=", "[", "]", "for", "level", "in", "range", "(", "config", ".", "num_levels", ")", ":", "_", ",", "_", ",", "height", ",", "width", "=", "cls_outputs", "[", "level", "]", ".", "shape", "_cls_output", "=", "cls_outputs", "[", "level", "]", ".", "permute", "(", "0", ",", "2", ",", "3", ",", "1", ")", "_cls_output", "=", "_cls_output", ".", "view", "(", "batch_size", ",", "height", ",", "width", ",", "anchors", ",", "padded_classes", ")", "_cls_output", "=", "_cls_output", "[", "...", ",", ":", "config", ".", "num_classes", "]", "_cls_output", "=", "_cls_output", ".", "reshape", "(", "[", "batch_size", ",", "-", "1", ",", "config", ".", "num_classes", "]", ")", "_cls_outputs_all", ".", "append", "(", "_cls_output", ")", "cls_outputs_all", "=", "torch", ".", "cat", "(", "_cls_outputs_all", ",", "1", ")", "else", ":", "cls_outputs_all", "=", "torch", ".", "cat", "(", "[", "cls_outputs", "[", "level", "]", ".", "permute", "(", "0", ",", "2", ",", "3", ",", "1", ")", ".", "reshape", "(", "[", "batch_size", ",", "-", "1", ",", "config", ".", "num_classes", "]", ")", "for", "level", "in", "range", "(", "config", ".", "num_levels", ")", "]", ",", "1", ")", "box_outputs_all", "=", "torch", ".", "cat", "(", "[", "box_outputs", "[", "level", "]", ".", "permute", "(", "0", ",", "2", ",", "3", ",", "1", ")", ".", "reshape", "(", "[", "batch_size", ",", "-", "1", ",", "4", "]", ")", "for", "level", "in", "range", "(", "config", ".", "num_levels", ")", "]", ",", "1", ")", "_", ",", "cls_topk_indices_all", "=", "torch", ".", "topk", "(", "cls_outputs_all", ".", "reshape", "(", "batch_size", ",", "-", "1", ")", ",", "dim", "=", "1", ",", "k", "=", "MAX_DETECTION_POINTS", ",", "sorted", "=", "False", ")", "indices_all", "=", "cls_topk_indices_all", "//", "config", ".", "num_classes", "classes_all", "=", "cls_topk_indices_all", "%", "config", ".", "num_classes", "box_outputs_all_after_topk", "=", "torch", ".", "gather", "(", "box_outputs_all", ",", "1", ",", "indices_all", ".", "unsqueeze", "(", "2", ")", ".", "expand", "(", "-", "1", ",", "-", "1", ",", "4", ")", ")", "cls_outputs_all_after_topk", "=", "torch", ".", "gather", "(", "cls_outputs_all", ",", "1", ",", "indices_all", ".", "unsqueeze", "(", "2", ")", ".", "expand", "(", "-", "1", ",", "-", "1", ",", "config", ".", "num_classes", ")", ")", "cls_outputs_all_after_topk", "=", "torch", ".", "gather", "(", "cls_outputs_all_after_topk", ",", "2", ",", "classes_all", ".", "unsqueeze", "(", "2", ")", ")", "return", "cls_outputs_all_after_topk", ",", "box_outputs_all_after_topk", ",", "indices_all", ",", "classes_all" ]
https://github.com/NVIDIA/DeepLearningExamples/blob/589604d49e016cd9ef4525f7abcc9c7b826cfc5e/PyTorch/Detection/Efficientdet/effdet/bench.py#L27-L77
zulip/python-zulip-api
70b86614bd15347e28ec2cab4c87c01122faae16
zulip/zulip/__init__.py
python
Client.get_subscriptions
(self, request: Optional[Dict[str, Any]] = None)
return self.call_endpoint( url="users/me/subscriptions", method="GET", request=request, )
See examples/get-subscriptions for example usage.
See examples/get-subscriptions for example usage.
[ "See", "examples", "/", "get", "-", "subscriptions", "for", "example", "usage", "." ]
def get_subscriptions(self, request: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: """ See examples/get-subscriptions for example usage. """ return self.call_endpoint( url="users/me/subscriptions", method="GET", request=request, )
[ "def", "get_subscriptions", "(", "self", ",", "request", ":", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", "=", "None", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "return", "self", ".", "call_endpoint", "(", "url", "=", "\"users/me/subscriptions\"", ",", "method", "=", "\"GET\"", ",", "request", "=", "request", ",", ")" ]
https://github.com/zulip/python-zulip-api/blob/70b86614bd15347e28ec2cab4c87c01122faae16/zulip/zulip/__init__.py#L1358-L1366
spesmilo/electrum
bdbd59300fbd35b01605e66145458e5f396108e8
electrum/gui/qt/util.py
python
MySortModel.lessThan
(self, source_left: QModelIndex, source_right: QModelIndex)
[]
def lessThan(self, source_left: QModelIndex, source_right: QModelIndex): item1 = self.sourceModel().itemFromIndex(source_left) item2 = self.sourceModel().itemFromIndex(source_right) data1 = item1.data(self._sort_role) data2 = item2.data(self._sort_role) if data1 is not None and data2 is not None: return data1 < data2 v1 = item1.text() v2 = item2.text() try: return Decimal(v1) < Decimal(v2) except: return v1 < v2
[ "def", "lessThan", "(", "self", ",", "source_left", ":", "QModelIndex", ",", "source_right", ":", "QModelIndex", ")", ":", "item1", "=", "self", ".", "sourceModel", "(", ")", ".", "itemFromIndex", "(", "source_left", ")", "item2", "=", "self", ".", "sourceModel", "(", ")", ".", "itemFromIndex", "(", "source_right", ")", "data1", "=", "item1", ".", "data", "(", "self", ".", "_sort_role", ")", "data2", "=", "item2", ".", "data", "(", "self", ".", "_sort_role", ")", "if", "data1", "is", "not", "None", "and", "data2", "is", "not", "None", ":", "return", "data1", "<", "data2", "v1", "=", "item1", ".", "text", "(", ")", "v2", "=", "item2", ".", "text", "(", ")", "try", ":", "return", "Decimal", "(", "v1", ")", "<", "Decimal", "(", "v2", ")", "except", ":", "return", "v1", "<", "v2" ]
https://github.com/spesmilo/electrum/blob/bdbd59300fbd35b01605e66145458e5f396108e8/electrum/gui/qt/util.py#L792-L804
espeed/bulbs
628e5b14f0249f9ca4fa1ceea6f2af2dca45f75a
bulbs/neo4jserver/index.py
python
Index.count
(self, key=None, value=None, **pair)
return total_size
Return the number of items in the index for the key and value. :param key: The index key. :type key: str :param value: The key's value. :type value: str or int :param pair: Optional key/value pair. Example: name="James" :type pair: name/value pair :rtype: int
Return the number of items in the index for the key and value.
[ "Return", "the", "number", "of", "items", "in", "the", "index", "for", "the", "key", "and", "value", "." ]
def count(self, key=None, value=None, **pair): """ Return the number of items in the index for the key and value. :param key: The index key. :type key: str :param value: The key's value. :type value: str or int :param pair: Optional key/value pair. Example: name="James" :type pair: name/value pair :rtype: int """ key, value = self._get_key_value(key,value,pair) script = self.client.scripts.get('index_count') params = dict(index_name=self.index_name,key=key,value=value) resp = self.client.gremlin(script,params) total_size = int(resp.content) return total_size
[ "def", "count", "(", "self", ",", "key", "=", "None", ",", "value", "=", "None", ",", "*", "*", "pair", ")", ":", "key", ",", "value", "=", "self", ".", "_get_key_value", "(", "key", ",", "value", ",", "pair", ")", "script", "=", "self", ".", "client", ".", "scripts", ".", "get", "(", "'index_count'", ")", "params", "=", "dict", "(", "index_name", "=", "self", ".", "index_name", ",", "key", "=", "key", ",", "value", "=", "value", ")", "resp", "=", "self", ".", "client", ".", "gremlin", "(", "script", ",", "params", ")", "total_size", "=", "int", "(", "resp", ".", "content", ")", "return", "total_size" ]
https://github.com/espeed/bulbs/blob/628e5b14f0249f9ca4fa1ceea6f2af2dca45f75a/bulbs/neo4jserver/index.py#L389-L410
JacksonWuxs/DaPy
b2bf72707ffcc92d05af1ac890e0786d5787816e
DaPy/core/base/Sheet.py
python
SeriesSet.map
(self, func, cols=None, inplace=False)
return self._map(func, cols)
apply(func, col=None, *args, **kwrds) apply a function to columns or rows Parameters ---------- func : callable object or dict-like object col : str, str in list (default='all') the columns that you expect to process inplace : True or False (default=0) update values in current dataset or return new values Returns ------- mapped_sheet : SeriesSet Notes ----- 1. Function may be locked when `inplace` is True and sheet.locked is False. When you operate the column which is an Index, it will be locked.
apply(func, col=None, *args, **kwrds) apply a function to columns or rows
[ "apply", "(", "func", "col", "=", "None", "*", "args", "**", "kwrds", ")", "apply", "a", "function", "to", "columns", "or", "rows" ]
def map(self, func, cols=None, inplace=False): '''apply(func, col=None, *args, **kwrds) apply a function to columns or rows Parameters ---------- func : callable object or dict-like object col : str, str in list (default='all') the columns that you expect to process inplace : True or False (default=0) update values in current dataset or return new values Returns ------- mapped_sheet : SeriesSet Notes ----- 1. Function may be locked when `inplace` is True and sheet.locked is False. When you operate the column which is an Index, it will be locked. ''' assert inplace in (True, False), '`inplace` must be True or False' if inplace is False: return SeriesSet(self)._map(func, cols) return self._map(func, cols)
[ "def", "map", "(", "self", ",", "func", ",", "cols", "=", "None", ",", "inplace", "=", "False", ")", ":", "assert", "inplace", "in", "(", "True", ",", "False", ")", ",", "'`inplace` must be True or False'", "if", "inplace", "is", "False", ":", "return", "SeriesSet", "(", "self", ")", ".", "_map", "(", "func", ",", "cols", ")", "return", "self", ".", "_map", "(", "func", ",", "cols", ")" ]
https://github.com/JacksonWuxs/DaPy/blob/b2bf72707ffcc92d05af1ac890e0786d5787816e/DaPy/core/base/Sheet.py#L264-L292
CouchPotato/CouchPotatoServer
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
libs/pytwitter/__init__.py
python
List.AsDict
(self)
return data
A dict representation of this twitter.List instance. The return value uses the same key names as the JSON representation. Return: A dict representing this twitter.List instance
A dict representation of this twitter.List instance.
[ "A", "dict", "representation", "of", "this", "twitter", ".", "List", "instance", "." ]
def AsDict(self): '''A dict representation of this twitter.List instance. The return value uses the same key names as the JSON representation. Return: A dict representing this twitter.List instance ''' data = {} if self.id: data['id'] = self.id if self.name: data['name'] = self.name if self.slug: data['slug'] = self.slug if self.description: data['description'] = self.description if self.full_name: data['full_name'] = self.full_name if self.mode: data['mode'] = self.mode if self.uri: data['uri'] = self.uri if self.member_count is not None: data['member_count'] = self.member_count if self.subscriber_count is not None: data['subscriber_count'] = self.subscriber_count if self.following is not None: data['following'] = self.following if self.user is not None: data['user'] = self.user.AsDict() return data
[ "def", "AsDict", "(", "self", ")", ":", "data", "=", "{", "}", "if", "self", ".", "id", ":", "data", "[", "'id'", "]", "=", "self", ".", "id", "if", "self", ".", "name", ":", "data", "[", "'name'", "]", "=", "self", ".", "name", "if", "self", ".", "slug", ":", "data", "[", "'slug'", "]", "=", "self", ".", "slug", "if", "self", ".", "description", ":", "data", "[", "'description'", "]", "=", "self", ".", "description", "if", "self", ".", "full_name", ":", "data", "[", "'full_name'", "]", "=", "self", ".", "full_name", "if", "self", ".", "mode", ":", "data", "[", "'mode'", "]", "=", "self", ".", "mode", "if", "self", ".", "uri", ":", "data", "[", "'uri'", "]", "=", "self", ".", "uri", "if", "self", ".", "member_count", "is", "not", "None", ":", "data", "[", "'member_count'", "]", "=", "self", ".", "member_count", "if", "self", ".", "subscriber_count", "is", "not", "None", ":", "data", "[", "'subscriber_count'", "]", "=", "self", ".", "subscriber_count", "if", "self", ".", "following", "is", "not", "None", ":", "data", "[", "'following'", "]", "=", "self", ".", "following", "if", "self", ".", "user", "is", "not", "None", ":", "data", "[", "'user'", "]", "=", "self", ".", "user", ".", "AsDict", "(", ")", "return", "data" ]
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/pytwitter/__init__.py#L1810-L1841
tribe29/checkmk
6260f2512e159e311f426e16b84b19d0b8e9ad0c
cmk/special_agents/agent_aws.py
python
GlacierLimits.get_live_data
(self, *args)
return self._get_response_content(response, "VaultList")
There's no API method for getting account limits thus we have to fetch all vaults.
There's no API method for getting account limits thus we have to fetch all vaults.
[ "There", "s", "no", "API", "method", "for", "getting", "account", "limits", "thus", "we", "have", "to", "fetch", "all", "vaults", "." ]
def get_live_data(self, *args): """ There's no API method for getting account limits thus we have to fetch all vaults. """ response = self._client.list_vaults() return self._get_response_content(response, "VaultList")
[ "def", "get_live_data", "(", "self", ",", "*", "args", ")", ":", "response", "=", "self", ".", "_client", ".", "list_vaults", "(", ")", "return", "self", ".", "_get_response_content", "(", "response", ",", "\"VaultList\"", ")" ]
https://github.com/tribe29/checkmk/blob/6260f2512e159e311f426e16b84b19d0b8e9ad0c/cmk/special_agents/agent_aws.py#L2166-L2172
smart-mobile-software/gitstack
d9fee8f414f202143eb6e620529e8e5539a2af56
python/Lib/site-packages/django/dispatch/dispatcher.py
python
Signal.send
(self, sender, **named)
return responses
Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop, so it is quite possible to not have all receivers called if a raises an error. Arguments: sender The sender of the signal Either a specific object or None. named Named arguments which will be passed to receivers. Returns a list of tuple pairs [(receiver, response), ... ].
Send signal from sender to all connected receivers.
[ "Send", "signal", "from", "sender", "to", "all", "connected", "receivers", "." ]
def send(self, sender, **named): """ Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop, so it is quite possible to not have all receivers called if a raises an error. Arguments: sender The sender of the signal Either a specific object or None. named Named arguments which will be passed to receivers. Returns a list of tuple pairs [(receiver, response), ... ]. """ responses = [] if not self.receivers: return responses for receiver in self._live_receivers(_make_id(sender)): response = receiver(signal=self, sender=sender, **named) responses.append((receiver, response)) return responses
[ "def", "send", "(", "self", ",", "sender", ",", "*", "*", "named", ")", ":", "responses", "=", "[", "]", "if", "not", "self", ".", "receivers", ":", "return", "responses", "for", "receiver", "in", "self", ".", "_live_receivers", "(", "_make_id", "(", "sender", ")", ")", ":", "response", "=", "receiver", "(", "signal", "=", "self", ",", "sender", "=", "sender", ",", "*", "*", "named", ")", "responses", ".", "append", "(", "(", "receiver", ",", "response", ")", ")", "return", "responses" ]
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/site-packages/django/dispatch/dispatcher.py#L149-L174
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/distutils/cmd.py
python
Command.finalize_options
(self)
Set final values for all the options that this command supports. This is always called as late as possible, ie. after any option assignments from the command-line or from other commands have been done. Thus, this is the place to code option dependencies: if 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as long as 'foo' still has the same value it was assigned in 'initialize_options()'. This method must be implemented by all command classes.
Set final values for all the options that this command supports. This is always called as late as possible, ie. after any option assignments from the command-line or from other commands have been done. Thus, this is the place to code option dependencies: if 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as long as 'foo' still has the same value it was assigned in 'initialize_options()'.
[ "Set", "final", "values", "for", "all", "the", "options", "that", "this", "command", "supports", ".", "This", "is", "always", "called", "as", "late", "as", "possible", "ie", ".", "after", "any", "option", "assignments", "from", "the", "command", "-", "line", "or", "from", "other", "commands", "have", "been", "done", ".", "Thus", "this", "is", "the", "place", "to", "code", "option", "dependencies", ":", "if", "foo", "depends", "on", "bar", "then", "it", "is", "safe", "to", "set", "foo", "from", "bar", "as", "long", "as", "foo", "still", "has", "the", "same", "value", "it", "was", "assigned", "in", "initialize_options", "()", "." ]
def finalize_options(self): """Set final values for all the options that this command supports. This is always called as late as possible, ie. after any option assignments from the command-line or from other commands have been done. Thus, this is the place to code option dependencies: if 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as long as 'foo' still has the same value it was assigned in 'initialize_options()'. This method must be implemented by all command classes. """ raise RuntimeError, \ "abstract method -- subclass %s must override" % self.__class__
[ "def", "finalize_options", "(", "self", ")", ":", "raise", "RuntimeError", ",", "\"abstract method -- subclass %s must override\"", "%", "self", ".", "__class__" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/distutils/cmd.py#L138-L150
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/stats/error_prop.py
python
variance_prop
(expr, consts=(), include_covar=False)
return var_expr
r"""Symbolically propagates variance (`\sigma^2`) for expressions. This is computed as as seen in [1]_. Parameters ========== expr : Expr A sympy expression to compute the variance for. consts : sequence of Symbols, optional Represents symbols that are known constants in the expr, and thus have zero variance. All symbols not in consts are assumed to be variant. include_covar : bool, optional Flag for whether or not to include covariances, default=False. Returns ======= var_expr : Expr An expression for the total variance of the expr. The variance for the original symbols (e.g. x) are represented via instance of the Variance symbol (e.g. Variance(x)). Examples ======== >>> from sympy import symbols, exp >>> from sympy.stats.error_prop import variance_prop >>> x, y = symbols('x y') >>> variance_prop(x + y) Variance(x) + Variance(y) >>> variance_prop(x * y) x**2*Variance(y) + y**2*Variance(x) >>> variance_prop(exp(2*x)) 4*exp(4*x)*Variance(x) References ========== .. [1] https://en.wikipedia.org/wiki/Propagation_of_uncertainty
r"""Symbolically propagates variance (`\sigma^2`) for expressions. This is computed as as seen in [1]_.
[ "r", "Symbolically", "propagates", "variance", "(", "\\", "sigma^2", ")", "for", "expressions", ".", "This", "is", "computed", "as", "as", "seen", "in", "[", "1", "]", "_", "." ]
def variance_prop(expr, consts=(), include_covar=False): r"""Symbolically propagates variance (`\sigma^2`) for expressions. This is computed as as seen in [1]_. Parameters ========== expr : Expr A sympy expression to compute the variance for. consts : sequence of Symbols, optional Represents symbols that are known constants in the expr, and thus have zero variance. All symbols not in consts are assumed to be variant. include_covar : bool, optional Flag for whether or not to include covariances, default=False. Returns ======= var_expr : Expr An expression for the total variance of the expr. The variance for the original symbols (e.g. x) are represented via instance of the Variance symbol (e.g. Variance(x)). Examples ======== >>> from sympy import symbols, exp >>> from sympy.stats.error_prop import variance_prop >>> x, y = symbols('x y') >>> variance_prop(x + y) Variance(x) + Variance(y) >>> variance_prop(x * y) x**2*Variance(y) + y**2*Variance(x) >>> variance_prop(exp(2*x)) 4*exp(4*x)*Variance(x) References ========== .. [1] https://en.wikipedia.org/wiki/Propagation_of_uncertainty """ args = expr.args if len(args) == 0: if expr in consts: return S.Zero elif isinstance(expr, RandomSymbol): return Variance(expr).doit() elif isinstance(expr, Symbol): return Variance(RandomSymbol(expr)).doit() else: return S.Zero nargs = len(args) var_args = list(map(variance_prop, args, repeat(consts, nargs), repeat(include_covar, nargs))) if isinstance(expr, Add): var_expr = Add(*var_args) if include_covar: terms = [2 * Covariance(_arg0_or_var(x), _arg0_or_var(y)).doit() \ for x, y in combinations(var_args, 2)] var_expr += Add(*terms) elif isinstance(expr, Mul): terms = [v/a**2 for a, v in zip(args, var_args)] var_expr = simplify(expr**2 * Add(*terms)) if include_covar: terms = [2*Covariance(_arg0_or_var(x), _arg0_or_var(y)).doit()/(a*b) \ for (a, b), (x, y) in zip(combinations(args, 2), combinations(var_args, 2))] var_expr += Add(*terms) elif isinstance(expr, Pow): b = args[1] v = var_args[0] * (expr * b / args[0])**2 var_expr = simplify(v) elif isinstance(expr, exp): var_expr = simplify(var_args[0] * expr**2) else: # unknown how to proceed, return variance of whole expr. var_expr = Variance(expr) return var_expr
[ "def", "variance_prop", "(", "expr", ",", "consts", "=", "(", ")", ",", "include_covar", "=", "False", ")", ":", "args", "=", "expr", ".", "args", "if", "len", "(", "args", ")", "==", "0", ":", "if", "expr", "in", "consts", ":", "return", "S", ".", "Zero", "elif", "isinstance", "(", "expr", ",", "RandomSymbol", ")", ":", "return", "Variance", "(", "expr", ")", ".", "doit", "(", ")", "elif", "isinstance", "(", "expr", ",", "Symbol", ")", ":", "return", "Variance", "(", "RandomSymbol", "(", "expr", ")", ")", ".", "doit", "(", ")", "else", ":", "return", "S", ".", "Zero", "nargs", "=", "len", "(", "args", ")", "var_args", "=", "list", "(", "map", "(", "variance_prop", ",", "args", ",", "repeat", "(", "consts", ",", "nargs", ")", ",", "repeat", "(", "include_covar", ",", "nargs", ")", ")", ")", "if", "isinstance", "(", "expr", ",", "Add", ")", ":", "var_expr", "=", "Add", "(", "*", "var_args", ")", "if", "include_covar", ":", "terms", "=", "[", "2", "*", "Covariance", "(", "_arg0_or_var", "(", "x", ")", ",", "_arg0_or_var", "(", "y", ")", ")", ".", "doit", "(", ")", "for", "x", ",", "y", "in", "combinations", "(", "var_args", ",", "2", ")", "]", "var_expr", "+=", "Add", "(", "*", "terms", ")", "elif", "isinstance", "(", "expr", ",", "Mul", ")", ":", "terms", "=", "[", "v", "/", "a", "**", "2", "for", "a", ",", "v", "in", "zip", "(", "args", ",", "var_args", ")", "]", "var_expr", "=", "simplify", "(", "expr", "**", "2", "*", "Add", "(", "*", "terms", ")", ")", "if", "include_covar", ":", "terms", "=", "[", "2", "*", "Covariance", "(", "_arg0_or_var", "(", "x", ")", ",", "_arg0_or_var", "(", "y", ")", ")", ".", "doit", "(", ")", "/", "(", "a", "*", "b", ")", "for", "(", "a", ",", "b", ")", ",", "(", "x", ",", "y", ")", "in", "zip", "(", "combinations", "(", "args", ",", "2", ")", ",", "combinations", "(", "var_args", ",", "2", ")", ")", "]", "var_expr", "+=", "Add", "(", "*", "terms", ")", "elif", "isinstance", "(", "expr", ",", "Pow", ")", ":", "b", "=", "args", "[", "1", "]", "v", "=", "var_args", "[", "0", "]", "*", "(", "expr", "*", "b", "/", "args", "[", "0", "]", ")", "**", "2", "var_expr", "=", "simplify", "(", "v", ")", "elif", "isinstance", "(", "expr", ",", "exp", ")", ":", "var_expr", "=", "simplify", "(", "var_args", "[", "0", "]", "*", "expr", "**", "2", ")", "else", ":", "# unknown how to proceed, return variance of whole expr.", "var_expr", "=", "Variance", "(", "expr", ")", "return", "var_expr" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/stats/error_prop.py#L12-L94
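A minimal usage sketch for the variance_prop record above. The first two calls simply replay the behaviour stated in the record's own docstring; the consts call is an assumption inferred from the code path that returns S.Zero for symbols listed in consts, so treat its output as illustrative rather than authoritative.

from sympy import symbols, exp
from sympy.stats.error_prop import variance_prop

x, y = symbols('x y')
print(variance_prop(x + y))       # Variance(x) + Variance(y), per the docstring
print(variance_prop(exp(2 * x)))  # 4*exp(4*x)*Variance(x), per the docstring
# Assumption: marking y as a known constant should drop its variance term,
# since the code returns S.Zero for expressions listed in consts.
print(variance_prop(x + y, consts=(y,)))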
GoogleCloudPlatform/professional-services
0c707aa97437f3d154035ef8548109b7882f71da
examples/cloudml-sklearn-pipeline/trainer/util/preprocess_utils.py
python
get_preprocess_pipeline
(feature_columns, categorical_names, numerical_names)
return preprocessor
Helper function that construct the preprocessing pipeline based on the type of the feature, i.e., numerical or categorical. Args: feature_columns: (List[string]), name of all the columns for the data goes into preprocessing pipeline categorical_names: (List[string]), name of all categorical features numerical_names: (List[string]), name of all numerical features Returns: sklearn.compose.ColumnTransformer
Helper function that construct the preprocessing pipeline based on the type of the feature, i.e., numerical or categorical.
[ "Helper", "function", "that", "construct", "the", "preprocessing", "pipeline", "based", "on", "the", "type", "of", "the", "feature", "i", ".", "e", ".", "numerical", "or", "categorical", "." ]
def get_preprocess_pipeline(feature_columns, categorical_names, numerical_names): """Helper function that construct the preprocessing pipeline based on the type of the feature, i.e., numerical or categorical. Args: feature_columns: (List[string]), name of all the columns for the data goes into preprocessing pipeline categorical_names: (List[string]), name of all categorical features numerical_names: (List[string]), name of all numerical features Returns: sklearn.compose.ColumnTransformer """ # Currently, this reply on the settings (numerical columns and # categorical columns) in metadata.py. May consider move it to a dedicated # config or setting file together with get_transform_pipeline numeric_transformer = pipeline.Pipeline([ ('imputer', impute.SimpleImputer(strategy='median')), ('scaler', preprocessing.StandardScaler()), ]) # Apply scale transformation to numerical attributes. # Log transformation is used here. numeric_log_transformer = pipeline.Pipeline([ ('imputer', impute.SimpleImputer(strategy='median')), ('log', preprocessing.FunctionTransformer( func=np.log1p, inverse_func=np.expm1, validate=True)), ('scaler', preprocessing.StandardScaler()), ]) # Bucketing numerical attributes numeric_bin_transformer = pipeline.Pipeline([ ('imputer', impute.SimpleImputer(strategy='median')), ('bin', preprocessing.KBinsDiscretizer(n_bins=3, encode='onehot-dense')), ]) categorical_transformer = pipeline.Pipeline([ ('imputer', impute.SimpleImputer( strategy='constant', fill_value=None)), ('onehot', preprocessing.OneHotEncoder( handle_unknown='ignore', sparse=False)), ]) boolean_mask = functools.partial(utils.boolean_mask, feature_columns) numerical_boolean = boolean_mask(numerical_names) categorical_boolean = boolean_mask(categorical_names) transform_list = [] # If there exist numerical columns if any(numerical_boolean): transform_list.extend([ ('numeric', numeric_transformer, numerical_boolean), ('numeric_log', numeric_log_transformer, numerical_boolean), ('numeric_bin', numeric_bin_transformer, numerical_boolean), ]) # If there exist categorical columns if any(categorical_boolean): transform_list.extend([ ('categorical', categorical_transformer, categorical_boolean), ]) preprocessor = compose.ColumnTransformer(transform_list) return preprocessor
[ "def", "get_preprocess_pipeline", "(", "feature_columns", ",", "categorical_names", ",", "numerical_names", ")", ":", "# Currently, this reply on the settings (numerical columns and", "# categorical columns) in metadata.py. May consider move it to a dedicated", "# config or setting file together with get_transform_pipeline", "numeric_transformer", "=", "pipeline", ".", "Pipeline", "(", "[", "(", "'imputer'", ",", "impute", ".", "SimpleImputer", "(", "strategy", "=", "'median'", ")", ")", ",", "(", "'scaler'", ",", "preprocessing", ".", "StandardScaler", "(", ")", ")", ",", "]", ")", "# Apply scale transformation to numerical attributes.", "# Log transformation is used here.", "numeric_log_transformer", "=", "pipeline", ".", "Pipeline", "(", "[", "(", "'imputer'", ",", "impute", ".", "SimpleImputer", "(", "strategy", "=", "'median'", ")", ")", ",", "(", "'log'", ",", "preprocessing", ".", "FunctionTransformer", "(", "func", "=", "np", ".", "log1p", ",", "inverse_func", "=", "np", ".", "expm1", ",", "validate", "=", "True", ")", ")", ",", "(", "'scaler'", ",", "preprocessing", ".", "StandardScaler", "(", ")", ")", ",", "]", ")", "# Bucketing numerical attributes", "numeric_bin_transformer", "=", "pipeline", ".", "Pipeline", "(", "[", "(", "'imputer'", ",", "impute", ".", "SimpleImputer", "(", "strategy", "=", "'median'", ")", ")", ",", "(", "'bin'", ",", "preprocessing", ".", "KBinsDiscretizer", "(", "n_bins", "=", "3", ",", "encode", "=", "'onehot-dense'", ")", ")", ",", "]", ")", "categorical_transformer", "=", "pipeline", ".", "Pipeline", "(", "[", "(", "'imputer'", ",", "impute", ".", "SimpleImputer", "(", "strategy", "=", "'constant'", ",", "fill_value", "=", "None", ")", ")", ",", "(", "'onehot'", ",", "preprocessing", ".", "OneHotEncoder", "(", "handle_unknown", "=", "'ignore'", ",", "sparse", "=", "False", ")", ")", ",", "]", ")", "boolean_mask", "=", "functools", ".", "partial", "(", "utils", ".", "boolean_mask", ",", "feature_columns", ")", "numerical_boolean", "=", "boolean_mask", "(", "numerical_names", ")", "categorical_boolean", "=", "boolean_mask", "(", "categorical_names", ")", "transform_list", "=", "[", "]", "# If there exist numerical columns", "if", "any", "(", "numerical_boolean", ")", ":", "transform_list", ".", "extend", "(", "[", "(", "'numeric'", ",", "numeric_transformer", ",", "numerical_boolean", ")", ",", "(", "'numeric_log'", ",", "numeric_log_transformer", ",", "numerical_boolean", ")", ",", "(", "'numeric_bin'", ",", "numeric_bin_transformer", ",", "numerical_boolean", ")", ",", "]", ")", "# If there exist categorical columns", "if", "any", "(", "categorical_boolean", ")", ":", "transform_list", ".", "extend", "(", "[", "(", "'categorical'", ",", "categorical_transformer", ",", "categorical_boolean", ")", ",", "]", ")", "preprocessor", "=", "compose", ".", "ColumnTransformer", "(", "transform_list", ")", "return", "preprocessor" ]
https://github.com/GoogleCloudPlatform/professional-services/blob/0c707aa97437f3d154035ef8548109b7882f71da/examples/cloudml-sklearn-pipeline/trainer/util/preprocess_utils.py#L29-L94
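The record above wires numeric (impute + scale, log, binning) and categorical (impute + one-hot) pipelines into an sklearn ColumnTransformer selected by boolean masks. The repository code depends on trainer.util helpers and metadata settings, so the following is only a self-contained sketch of the same pattern with hypothetical column names ('age', 'income', 'city'), not the project's implementation.

import numpy as np
import pandas as pd
from sklearn import compose, impute, pipeline, preprocessing

numeric = pipeline.Pipeline([
    ('imputer', impute.SimpleImputer(strategy='median')),
    ('scaler', preprocessing.StandardScaler()),
])
categorical = pipeline.Pipeline([
    ('imputer', impute.SimpleImputer(strategy='constant', fill_value='missing')),
    ('onehot', preprocessing.OneHotEncoder(handle_unknown='ignore')),
])

preprocessor = compose.ColumnTransformer([
    ('numeric', numeric, ['age', 'income']),   # hypothetical numeric columns
    ('categorical', categorical, ['city']),    # hypothetical categorical column
])

df = pd.DataFrame({'age': [25.0, np.nan, 40.0],
                   'income': [50.0, 60.0, np.nan],
                   'city': ['a', 'b', np.nan]})
print(preprocessor.fit_transform(df).shape)    # (3, 5): 2 numeric + 3 one-hot columns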
google/grr
8ad8a4d2c5a93c92729206b7771af19d92d4f915
grr/server/grr_response_server/artifact.py
python
ParseResults.Responses
(self)
return iter(self._responses)
[]
def Responses(self) -> Iterator[rdfvalue.RDFValue]: return iter(self._responses)
[ "def", "Responses", "(", "self", ")", "->", "Iterator", "[", "rdfvalue", ".", "RDFValue", "]", ":", "return", "iter", "(", "self", ".", "_responses", ")" ]
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/artifact.py#L333-L334
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/pip/wheel.py
python
_cache_for_link
(cache_dir, link)
return os.path.join(cache_dir, "wheels", *parts)
Return a directory to store cached wheels in for link. Because there are M wheels for any one sdist, we provide a directory to cache them in, and then consult that directory when looking up cache hits. We only insert things into the cache if they have plausible version numbers, so that we don't contaminate the cache with things that were not unique. E.g. ./package might have dozens of installs done for it and build a version of 0.0...and if we built and cached a wheel, we'd end up using the same wheel even if the source has been edited. :param cache_dir: The cache_dir being used by pip. :param link: The link of the sdist for which this will cache wheels.
Return a directory to store cached wheels in for link.
[ "Return", "a", "directory", "to", "store", "cached", "wheels", "in", "for", "link", "." ]
def _cache_for_link(cache_dir, link): """ Return a directory to store cached wheels in for link. Because there are M wheels for any one sdist, we provide a directory to cache them in, and then consult that directory when looking up cache hits. We only insert things into the cache if they have plausible version numbers, so that we don't contaminate the cache with things that were not unique. E.g. ./package might have dozens of installs done for it and build a version of 0.0...and if we built and cached a wheel, we'd end up using the same wheel even if the source has been edited. :param cache_dir: The cache_dir being used by pip. :param link: The link of the sdist for which this will cache wheels. """ # We want to generate an url to use as our cache key, we don't want to just # re-use the URL because it might have other items in the fragment and we # don't care about those. key_parts = [link.url_without_fragment] if link.hash_name is not None and link.hash is not None: key_parts.append("=".join([link.hash_name, link.hash])) key_url = "#".join(key_parts) # Encode our key url with sha224, we'll use this because it has similar # security properties to sha256, but with a shorter total output (and thus # less secure). However the differences don't make a lot of difference for # our use case here. hashed = hashlib.sha224(key_url.encode()).hexdigest() # We want to nest the directories some to prevent having a ton of top level # directories where we might run out of sub directories on some FS. parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] # Inside of the base location for cached wheels, expand our parts and join # them all together. return os.path.join(cache_dir, "wheels", *parts)
[ "def", "_cache_for_link", "(", "cache_dir", ",", "link", ")", ":", "# We want to generate an url to use as our cache key, we don't want to just", "# re-use the URL because it might have other items in the fragment and we", "# don't care about those.", "key_parts", "=", "[", "link", ".", "url_without_fragment", "]", "if", "link", ".", "hash_name", "is", "not", "None", "and", "link", ".", "hash", "is", "not", "None", ":", "key_parts", ".", "append", "(", "\"=\"", ".", "join", "(", "[", "link", ".", "hash_name", ",", "link", ".", "hash", "]", ")", ")", "key_url", "=", "\"#\"", ".", "join", "(", "key_parts", ")", "# Encode our key url with sha224, we'll use this because it has similar", "# security properties to sha256, but with a shorter total output (and thus", "# less secure). However the differences don't make a lot of difference for", "# our use case here.", "hashed", "=", "hashlib", ".", "sha224", "(", "key_url", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")", "# We want to nest the directories some to prevent having a ton of top level", "# directories where we might run out of sub directories on some FS.", "parts", "=", "[", "hashed", "[", ":", "2", "]", ",", "hashed", "[", "2", ":", "4", "]", ",", "hashed", "[", "4", ":", "6", "]", ",", "hashed", "[", "6", ":", "]", "]", "# Inside of the base location for cached wheels, expand our parts and join", "# them all together.", "return", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "\"wheels\"", ",", "*", "parts", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/wheel.py#L71-L109
MDudek-ICS/TRISIS-TRITON-HATMAN
15a00af7fd1040f0430729d024427601f84886a1
decompiled_code/library/logging/__init__.py
python
Logger.removeHandler
(self, hdlr)
Remove the specified handler from this logger.
Remove the specified handler from this logger.
[ "Remove", "the", "specified", "handler", "from", "this", "logger", "." ]
def removeHandler(self, hdlr): """ Remove the specified handler from this logger. """ _acquireLock() try: if hdlr in self.handlers: self.handlers.remove(hdlr) finally: _releaseLock()
[ "def", "removeHandler", "(", "self", ",", "hdlr", ")", ":", "_acquireLock", "(", ")", "try", ":", "if", "hdlr", "in", "self", ".", "handlers", ":", "self", ".", "handlers", ".", "remove", "(", "hdlr", ")", "finally", ":", "_releaseLock", "(", ")" ]
https://github.com/MDudek-ICS/TRISIS-TRITON-HATMAN/blob/15a00af7fd1040f0430729d024427601f84886a1/decompiled_code/library/logging/__init__.py#L1199-L1208
tensorflow/lattice
784eca50cbdfedf39f183cc7d298c9fe376b69c0
tensorflow_lattice/python/linear_layer.py
python
Linear.assert_constraints
(self, eps=1e-4)
return linear_lib.assert_constraints( weights=self.kernel, monotonicities=utils.canonicalize_monotonicities(self.monotonicities), monotonic_dominances=self.monotonic_dominances, range_dominances=self.range_dominances, input_min=utils.canonicalize_input_bounds(self.input_min), input_max=utils.canonicalize_input_bounds(self.input_max), normalization_order=self.normalization_order, eps=eps)
Asserts that weights satisfy all constraints. In graph mode builds and returns list of assertion ops. In eager mode directly executes assertions. Args: eps: Allowed constraints violation. Returns: List of assertion ops in graph mode or immediately asserts in eager mode.
Asserts that weights satisfy all constraints.
[ "Asserts", "that", "weights", "satisfy", "all", "constraints", "." ]
def assert_constraints(self, eps=1e-4): """Asserts that weights satisfy all constraints. In graph mode builds and returns list of assertion ops. In eager mode directly executes assertions. Args: eps: Allowed constraints violation. Returns: List of assertion ops in graph mode or immediately asserts in eager mode. """ return linear_lib.assert_constraints( weights=self.kernel, monotonicities=utils.canonicalize_monotonicities(self.monotonicities), monotonic_dominances=self.monotonic_dominances, range_dominances=self.range_dominances, input_min=utils.canonicalize_input_bounds(self.input_min), input_max=utils.canonicalize_input_bounds(self.input_max), normalization_order=self.normalization_order, eps=eps)
[ "def", "assert_constraints", "(", "self", ",", "eps", "=", "1e-4", ")", ":", "return", "linear_lib", ".", "assert_constraints", "(", "weights", "=", "self", ".", "kernel", ",", "monotonicities", "=", "utils", ".", "canonicalize_monotonicities", "(", "self", ".", "monotonicities", ")", ",", "monotonic_dominances", "=", "self", ".", "monotonic_dominances", ",", "range_dominances", "=", "self", ".", "range_dominances", ",", "input_min", "=", "utils", ".", "canonicalize_input_bounds", "(", "self", ".", "input_min", ")", ",", "input_max", "=", "utils", ".", "canonicalize_input_bounds", "(", "self", ".", "input_max", ")", ",", "normalization_order", "=", "self", ".", "normalization_order", ",", "eps", "=", "eps", ")" ]
https://github.com/tensorflow/lattice/blob/784eca50cbdfedf39f183cc7d298c9fe376b69c0/tensorflow_lattice/python/linear_layer.py#L317-L337
mesalock-linux/mesapy
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
pypy/module/__builtin__/compiling.py
python
eval
(space, w_code, w_globals=None, w_locals=None)
return w_code.exec_code(space, w_globals, w_locals)
Evaluate the source in the context of globals and locals. The source may be a string representing a Python expression or a code object as returned by compile(). The globals and locals are dictionaries, defaulting to the current current globals and locals. If only globals is given, locals defaults to it.
Evaluate the source in the context of globals and locals. The source may be a string representing a Python expression or a code object as returned by compile(). The globals and locals are dictionaries, defaulting to the current current globals and locals. If only globals is given, locals defaults to it.
[ "Evaluate", "the", "source", "in", "the", "context", "of", "globals", "and", "locals", ".", "The", "source", "may", "be", "a", "string", "representing", "a", "Python", "expression", "or", "a", "code", "object", "as", "returned", "by", "compile", "()", ".", "The", "globals", "and", "locals", "are", "dictionaries", "defaulting", "to", "the", "current", "current", "globals", "and", "locals", ".", "If", "only", "globals", "is", "given", "locals", "defaults", "to", "it", "." ]
def eval(space, w_code, w_globals=None, w_locals=None): """Evaluate the source in the context of globals and locals. The source may be a string representing a Python expression or a code object as returned by compile(). The globals and locals are dictionaries, defaulting to the current current globals and locals. If only globals is given, locals defaults to it. """ if (space.isinstance_w(w_code, space.w_bytes) or space.isinstance_w(w_code, space.w_unicode)): w_code = compile(space, space.call_method(w_code, 'lstrip', space.newtext(' \t')), "<string>", "eval") if not isinstance(w_code, PyCode): raise oefmt(space.w_TypeError, "eval() arg 1 must be a string or code object") if space.is_none(w_globals): caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: w_globals = space.newdict() if space.is_none(w_locals): w_locals = w_globals else: w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): w_locals = w_globals # xxx removed: adding '__builtins__' to the w_globals dict, if there # is none. This logic was removed as costly (it requires to get at # the gettopframe_nohidden()). I bet no test fails, and it's a really # obscure case. return w_code.exec_code(space, w_globals, w_locals)
[ "def", "eval", "(", "space", ",", "w_code", ",", "w_globals", "=", "None", ",", "w_locals", "=", "None", ")", ":", "if", "(", "space", ".", "isinstance_w", "(", "w_code", ",", "space", ".", "w_bytes", ")", "or", "space", ".", "isinstance_w", "(", "w_code", ",", "space", ".", "w_unicode", ")", ")", ":", "w_code", "=", "compile", "(", "space", ",", "space", ".", "call_method", "(", "w_code", ",", "'lstrip'", ",", "space", ".", "newtext", "(", "' \\t'", ")", ")", ",", "\"<string>\"", ",", "\"eval\"", ")", "if", "not", "isinstance", "(", "w_code", ",", "PyCode", ")", ":", "raise", "oefmt", "(", "space", ".", "w_TypeError", ",", "\"eval() arg 1 must be a string or code object\"", ")", "if", "space", ".", "is_none", "(", "w_globals", ")", ":", "caller", "=", "space", ".", "getexecutioncontext", "(", ")", ".", "gettopframe_nohidden", "(", ")", "if", "caller", "is", "None", ":", "w_globals", "=", "space", ".", "newdict", "(", ")", "if", "space", ".", "is_none", "(", "w_locals", ")", ":", "w_locals", "=", "w_globals", "else", ":", "w_globals", "=", "caller", ".", "get_w_globals", "(", ")", "if", "space", ".", "is_none", "(", "w_locals", ")", ":", "w_locals", "=", "caller", ".", "getdictscope", "(", ")", "elif", "space", ".", "is_none", "(", "w_locals", ")", ":", "w_locals", "=", "w_globals", "# xxx removed: adding '__builtins__' to the w_globals dict, if there", "# is none. This logic was removed as costly (it requires to get at", "# the gettopframe_nohidden()). I bet no test fails, and it's a really", "# obscure case.", "return", "w_code", ".", "exec_code", "(", "space", ",", "w_globals", ",", "w_locals", ")" ]
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/pypy/module/__builtin__/compiling.py#L67-L103
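The record above is PyPy's interpreter-level implementation, but the documented semantics match plain CPython's eval, so they can be illustrated directly: when only a globals mapping is given, locals defaults to it, and leading spaces/tabs in the source string are tolerated (which is what the lstrip call above emulates).

g = {'a': 2}
print(eval('a + 1', g))     # locals defaults to the globals mapping -> 3
print(eval('  a * 2', g))   # leading whitespace is stripped -> 4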
cupy/cupy
a47ad3105f0fe817a4957de87d98ddccb8c7491f
cupy/_manipulation/join.py
python
column_stack
(tup)
return concatenate(lst, axis=1)
Stacks 1-D and 2-D arrays as columns into a 2-D array. A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays are concatenated along the second axis. Args: tup (sequence of arrays): 1-D or 2-D arrays to be stacked. Returns: cupy.ndarray: A new 2-D array of stacked columns. .. seealso:: :func:`numpy.column_stack`
Stacks 1-D and 2-D arrays as columns into a 2-D array.
[ "Stacks", "1", "-", "D", "and", "2", "-", "D", "arrays", "as", "columns", "into", "a", "2", "-", "D", "array", "." ]
def column_stack(tup): """Stacks 1-D and 2-D arrays as columns into a 2-D array. A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays are concatenated along the second axis. Args: tup (sequence of arrays): 1-D or 2-D arrays to be stacked. Returns: cupy.ndarray: A new 2-D array of stacked columns. .. seealso:: :func:`numpy.column_stack` """ if any(not isinstance(a, cupy.ndarray) for a in tup): raise TypeError('Only cupy arrays can be column stacked') lst = list(tup) for i, a in enumerate(lst): if a.ndim == 1: a = a[:, cupy.newaxis] lst[i] = a elif a.ndim != 2: raise ValueError( 'Only 1 or 2 dimensional arrays can be column stacked') return concatenate(lst, axis=1)
[ "def", "column_stack", "(", "tup", ")", ":", "if", "any", "(", "not", "isinstance", "(", "a", ",", "cupy", ".", "ndarray", ")", "for", "a", "in", "tup", ")", ":", "raise", "TypeError", "(", "'Only cupy arrays can be column stacked'", ")", "lst", "=", "list", "(", "tup", ")", "for", "i", ",", "a", "in", "enumerate", "(", "lst", ")", ":", "if", "a", ".", "ndim", "==", "1", ":", "a", "=", "a", "[", ":", ",", "cupy", ".", "newaxis", "]", "lst", "[", "i", "]", "=", "a", "elif", "a", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "'Only 1 or 2 dimensional arrays can be column stacked'", ")", "return", "concatenate", "(", "lst", ",", "axis", "=", "1", ")" ]
https://github.com/cupy/cupy/blob/a47ad3105f0fe817a4957de87d98ddccb8c7491f/cupy/_manipulation/join.py#L5-L32
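cupy's column_stack mirrors numpy.column_stack, so the documented behaviour (1-D inputs become columns, 2-D inputs are concatenated along the second axis) can be shown with NumPy, which runs without a GPU:

import numpy as np

a = np.array([1, 2, 3])                    # 1-D -> treated as a column
b = np.array([[4, 5], [6, 7], [8, 9]])     # 2-D -> concatenated along axis 1
print(np.column_stack((a, b)))
# [[1 4 5]
#  [2 6 7]
#  [3 8 9]]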
microsoft/debugpy
be8dd607f6837244e0b565345e497aff7a0c08bf
src/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/sql.py
python
MemoryDTO.toMBI
(self, getMemoryDump = False)
return mbi
Returns a L{win32.MemoryBasicInformation} object using the data retrieved from the database. @type getMemoryDump: bool @param getMemoryDump: (Optional) If C{True} retrieve the memory dump. Defaults to C{False} since this may be a costly operation. @rtype: L{win32.MemoryBasicInformation} @return: Memory block information.
Returns a L{win32.MemoryBasicInformation} object using the data retrieved from the database.
[ "Returns", "a", "L", "{", "win32", ".", "MemoryBasicInformation", "}", "object", "using", "the", "data", "retrieved", "from", "the", "database", "." ]
def toMBI(self, getMemoryDump = False): """ Returns a L{win32.MemoryBasicInformation} object using the data retrieved from the database. @type getMemoryDump: bool @param getMemoryDump: (Optional) If C{True} retrieve the memory dump. Defaults to C{False} since this may be a costly operation. @rtype: L{win32.MemoryBasicInformation} @return: Memory block information. """ mbi = win32.MemoryBasicInformation() mbi.BaseAddress = self.address mbi.RegionSize = self.size mbi.State = self._parse_state(self.state) mbi.Protect = self._parse_access(self.access) mbi.Type = self._parse_type(self.type) if self.alloc_base is not None: mbi.AllocationBase = self.alloc_base else: mbi.AllocationBase = mbi.BaseAddress if self.alloc_access is not None: mbi.AllocationProtect = self._parse_access(self.alloc_access) else: mbi.AllocationProtect = mbi.Protect if self.filename is not None: mbi.filename = self.filename if getMemoryDump and self.content is not None: mbi.content = self.content return mbi
[ "def", "toMBI", "(", "self", ",", "getMemoryDump", "=", "False", ")", ":", "mbi", "=", "win32", ".", "MemoryBasicInformation", "(", ")", "mbi", ".", "BaseAddress", "=", "self", ".", "address", "mbi", ".", "RegionSize", "=", "self", ".", "size", "mbi", ".", "State", "=", "self", ".", "_parse_state", "(", "self", ".", "state", ")", "mbi", ".", "Protect", "=", "self", ".", "_parse_access", "(", "self", ".", "access", ")", "mbi", ".", "Type", "=", "self", ".", "_parse_type", "(", "self", ".", "type", ")", "if", "self", ".", "alloc_base", "is", "not", "None", ":", "mbi", ".", "AllocationBase", "=", "self", ".", "alloc_base", "else", ":", "mbi", ".", "AllocationBase", "=", "mbi", ".", "BaseAddress", "if", "self", ".", "alloc_access", "is", "not", "None", ":", "mbi", ".", "AllocationProtect", "=", "self", ".", "_parse_access", "(", "self", ".", "alloc_access", ")", "else", ":", "mbi", ".", "AllocationProtect", "=", "mbi", ".", "Protect", "if", "self", ".", "filename", "is", "not", "None", ":", "mbi", ".", "filename", "=", "self", ".", "filename", "if", "getMemoryDump", "and", "self", ".", "content", "is", "not", "None", ":", "mbi", ".", "content", "=", "self", ".", "content", "return", "mbi" ]
https://github.com/microsoft/debugpy/blob/be8dd607f6837244e0b565345e497aff7a0c08bf/src/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/sql.py#L472-L502
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/string.py
python
split
(s, sep=None, maxsplit=-1)
return s.split(sep, maxsplit)
split(s [,sep [,maxsplit]]) -> list of strings Return a list of the words in the string s, using sep as the delimiter string. If maxsplit is given, splits at no more than maxsplit places (resulting in at most maxsplit+1 words). If sep is not specified or is None, any whitespace string is a separator. (split and splitfields are synonymous)
split(s [,sep [,maxsplit]]) -> list of strings
[ "split", "(", "s", "[", "sep", "[", "maxsplit", "]]", ")", "-", ">", "list", "of", "strings" ]
def split(s, sep=None, maxsplit=-1): """split(s [,sep [,maxsplit]]) -> list of strings Return a list of the words in the string s, using sep as the delimiter string. If maxsplit is given, splits at no more than maxsplit places (resulting in at most maxsplit+1 words). If sep is not specified or is None, any whitespace string is a separator. (split and splitfields are synonymous) """ return s.split(sep, maxsplit)
[ "def", "split", "(", "s", ",", "sep", "=", "None", ",", "maxsplit", "=", "-", "1", ")", ":", "return", "s", ".", "split", "(", "sep", ",", "maxsplit", ")" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/string.py#L284-L295
Ttl/evolutionary-circuits
6a6000ecbfbd64c9fd9df79574aad8682957c954
evolutionary/chromosomes/common.py
python
multipliers
(x)
Convert values with si multipliers to numbers
Convert values with si multipliers to numbers
[ "Convert", "values", "with", "si", "multipliers", "to", "numbers" ]
def multipliers(x): """Convert values with si multipliers to numbers""" try: return float(x) except: pass try: a = x[-1] y = float(x[:-1]) endings = {'G':9,'Meg':6,'k':3,'m':-3,'u':-6,'n':-9,'p':-12,'s':0} return y*(10**endings[a]) except: raise ValueError("I don't know what {} means".format(x))
[ "def", "multipliers", "(", "x", ")", ":", "try", ":", "return", "float", "(", "x", ")", "except", ":", "pass", "try", ":", "a", "=", "x", "[", "-", "1", "]", "y", "=", "float", "(", "x", "[", ":", "-", "1", "]", ")", "endings", "=", "{", "'G'", ":", "9", ",", "'Meg'", ":", "6", ",", "'k'", ":", "3", ",", "'m'", ":", "-", "3", ",", "'u'", ":", "-", "6", ",", "'n'", ":", "-", "9", ",", "'p'", ":", "-", "12", ",", "'s'", ":", "0", "}", "return", "y", "*", "(", "10", "**", "endings", "[", "a", "]", ")", "except", ":", "raise", "ValueError", "(", "\"I don't know what {} means\"", ".", "format", "(", "x", ")", ")" ]
https://github.com/Ttl/evolutionary-circuits/blob/6a6000ecbfbd64c9fd9df79574aad8682957c954/evolutionary/chromosomes/common.py#L38-L50
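One detail worth noting in the record above: the suffix lookup uses only the final character (x[-1]), so single-character suffixes such as 'k', 'm' or 'u' work, while the multi-character 'Meg' entry in the table cannot match as written ('5Meg' falls through to the ValueError). Below is a hedged sketch of a parser that also accepts multi-character suffixes, offered as an illustration rather than the repository's code:

def parse_si(x):
    # Longest suffixes are tried first so 'Meg' wins over its trailing character.
    endings = {'G': 9, 'Meg': 6, 'k': 3, 'm': -3, 'u': -6, 'n': -9, 'p': -12, 's': 0}
    try:
        return float(x)
    except ValueError:
        pass
    for suffix in sorted(endings, key=len, reverse=True):
        if x.endswith(suffix):
            return float(x[:-len(suffix)]) * 10 ** endings[suffix]
    raise ValueError("I don't know what {} means".format(x))

print(parse_si('4.7k'))    # 4700.0
print(parse_si('10Meg'))   # 10000000.0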
theotherp/nzbhydra
4b03d7f769384b97dfc60dade4806c0fc987514e
libs/pycparser/_ast_gen.py
python
ASTCodeGenerator.generate
(self, file=None)
Generates the code into file, an open file buffer.
Generates the code into file, an open file buffer.
[ "Generates", "the", "code", "into", "file", "an", "open", "file", "buffer", "." ]
def generate(self, file=None): """ Generates the code into file, an open file buffer. """ src = Template(_PROLOGUE_COMMENT).substitute( cfg_filename=self.cfg_filename) src += _PROLOGUE_CODE for node_cfg in self.node_cfg: src += node_cfg.generate_source() + '\n\n' file.write(src)
[ "def", "generate", "(", "self", ",", "file", "=", "None", ")", ":", "src", "=", "Template", "(", "_PROLOGUE_COMMENT", ")", ".", "substitute", "(", "cfg_filename", "=", "self", ".", "cfg_filename", ")", "src", "+=", "_PROLOGUE_CODE", "for", "node_cfg", "in", "self", ".", "node_cfg", ":", "src", "+=", "node_cfg", ".", "generate_source", "(", ")", "+", "'\\n\\n'", "file", ".", "write", "(", "src", ")" ]
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/pycparser/_ast_gen.py#L26-L36
moggers87/salmon
1d89164836f88aa25e85932b08192e99ba8d21c3
salmon/_version.py
python
get_versions
()
return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None}
Get version information or return default if unable to do so.
Get version information or return default if unable to do so.
[ "Get", "version", "information", "or", "return", "default", "if", "unable", "to", "do", "so", "." ]
def get_versions(): # noqa: C901 """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None}
[ "def", "get_versions", "(", ")", ":", "# noqa: C901", "# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have", "# __file__, we can work backwards from there to the root. Some", "# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which", "# case we can only use expanded keywords.", "cfg", "=", "get_config", "(", ")", "verbose", "=", "cfg", ".", "verbose", "try", ":", "return", "git_versions_from_keywords", "(", "get_keywords", "(", ")", ",", "cfg", ".", "tag_prefix", ",", "verbose", ")", "except", "NotThisMethod", ":", "pass", "try", ":", "root", "=", "os", ".", "path", ".", "realpath", "(", "__file__", ")", "# versionfile_source is the relative path from the top of the source", "# tree (where the .git directory might live) to this file. Invert", "# this to find the root from __file__.", "for", "i", "in", "cfg", ".", "versionfile_source", ".", "split", "(", "'/'", ")", ":", "root", "=", "os", ".", "path", ".", "dirname", "(", "root", ")", "except", "NameError", ":", "return", "{", "\"version\"", ":", "\"0+unknown\"", ",", "\"full-revisionid\"", ":", "None", ",", "\"dirty\"", ":", "None", ",", "\"error\"", ":", "\"unable to find root of source tree\"", ",", "\"date\"", ":", "None", "}", "try", ":", "pieces", "=", "git_pieces_from_vcs", "(", "cfg", ".", "tag_prefix", ",", "root", ",", "verbose", ")", "return", "render", "(", "pieces", ",", "cfg", ".", "style", ")", "except", "NotThisMethod", ":", "pass", "try", ":", "if", "cfg", ".", "parentdir_prefix", ":", "return", "versions_from_parentdir", "(", "cfg", ".", "parentdir_prefix", ",", "root", ",", "verbose", ")", "except", "NotThisMethod", ":", "pass", "return", "{", "\"version\"", ":", "\"0+unknown\"", ",", "\"full-revisionid\"", ":", "None", ",", "\"dirty\"", ":", "None", ",", "\"error\"", ":", "\"unable to compute version\"", ",", "\"date\"", ":", "None", "}" ]
https://github.com/moggers87/salmon/blob/1d89164836f88aa25e85932b08192e99ba8d21c3/salmon/_version.py#L477-L520
wistbean/fxxkpython
88e16d79d8dd37236ba6ecd0d0ff11d63143968c
vip/qyxuan/projects/venv/lib/python3.6/site-packages/pygame/sysfont.py
python
initsysfonts_win32
()
return fonts
initialize fonts dictionary on Windows
initialize fonts dictionary on Windows
[ "initialize", "fonts", "dictionary", "on", "Windows" ]
def initsysfonts_win32(): """initialize fonts dictionary on Windows""" fontdir = join(os.environ.get('WINDIR', 'C:\\Windows'), 'Fonts') TrueType_suffix = '(TrueType)' mods = ('demibold', 'narrow', 'light', 'unicode', 'bt', 'mt') fonts = {} # add fonts entered in the registry # find valid registry keys containing font information. # http://docs.python.org/lib/module-sys.html # 0 (VER_PLATFORM_WIN32s) Win32s on Windows 3.1 # 1 (VER_PLATFORM_WIN32_WINDOWS) Windows 95/98/ME # 2 (VER_PLATFORM_WIN32_NT) Windows NT/2000/XP # 3 (VER_PLATFORM_WIN32_CE) Windows CE if sys.getwindowsversion()[0] == 1: key_name = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Fonts" else: key_name = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts" key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key_name) for i in xrange_(_winreg.QueryInfoKey(key)[1]): try: # name is the font's name e.g. Times New Roman (TrueType) # font is the font's filename e.g. times.ttf name, font = _winreg.EnumValue(key, i)[0:2] except EnvironmentError: break # try to handle windows unicode strings for file names with # international characters if PY_MAJOR_VERSION < 3: # here are two documents with some information about it: # http://www.python.org/peps/pep-0277.html # https://www.microsoft.com/technet/archive/interopmigration/linux/mvc/lintowin.mspx#ECAA try: font = str(font) except UnicodeEncodeError: # MBCS is the windows encoding for unicode file names. try: font = font.encode('MBCS') except: # no success with str or MBCS encoding... skip this font. continue if splitext(font)[1].lower() not in OpenType_extensions: continue if not dirname(font): font = join(fontdir, font) if name.endswith(TrueType_suffix): name = name.rstrip(TrueType_suffix).rstrip() name = name.lower().split() bold = italic = 0 for m in mods: if m in name: name.remove(m) if 'bold' in name: name.remove('bold') bold = 1 if 'italic' in name: name.remove('italic') italic = 1 name = ''.join(name) name = _simplename(name) _addfont(name, bold, italic, font, fonts) return fonts
[ "def", "initsysfonts_win32", "(", ")", ":", "fontdir", "=", "join", "(", "os", ".", "environ", ".", "get", "(", "'WINDIR'", ",", "'C:\\\\Windows'", ")", ",", "'Fonts'", ")", "TrueType_suffix", "=", "'(TrueType)'", "mods", "=", "(", "'demibold'", ",", "'narrow'", ",", "'light'", ",", "'unicode'", ",", "'bt'", ",", "'mt'", ")", "fonts", "=", "{", "}", "# add fonts entered in the registry", "# find valid registry keys containing font information.", "# http://docs.python.org/lib/module-sys.html", "# 0 (VER_PLATFORM_WIN32s) Win32s on Windows 3.1", "# 1 (VER_PLATFORM_WIN32_WINDOWS) Windows 95/98/ME", "# 2 (VER_PLATFORM_WIN32_NT) Windows NT/2000/XP", "# 3 (VER_PLATFORM_WIN32_CE) Windows CE", "if", "sys", ".", "getwindowsversion", "(", ")", "[", "0", "]", "==", "1", ":", "key_name", "=", "\"SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Fonts\"", "else", ":", "key_name", "=", "\"SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\Fonts\"", "key", "=", "_winreg", ".", "OpenKey", "(", "_winreg", ".", "HKEY_LOCAL_MACHINE", ",", "key_name", ")", "for", "i", "in", "xrange_", "(", "_winreg", ".", "QueryInfoKey", "(", "key", ")", "[", "1", "]", ")", ":", "try", ":", "# name is the font's name e.g. Times New Roman (TrueType)", "# font is the font's filename e.g. times.ttf", "name", ",", "font", "=", "_winreg", ".", "EnumValue", "(", "key", ",", "i", ")", "[", "0", ":", "2", "]", "except", "EnvironmentError", ":", "break", "# try to handle windows unicode strings for file names with", "# international characters", "if", "PY_MAJOR_VERSION", "<", "3", ":", "# here are two documents with some information about it:", "# http://www.python.org/peps/pep-0277.html", "# https://www.microsoft.com/technet/archive/interopmigration/linux/mvc/lintowin.mspx#ECAA", "try", ":", "font", "=", "str", "(", "font", ")", "except", "UnicodeEncodeError", ":", "# MBCS is the windows encoding for unicode file names.", "try", ":", "font", "=", "font", ".", "encode", "(", "'MBCS'", ")", "except", ":", "# no success with str or MBCS encoding... skip this font.", "continue", "if", "splitext", "(", "font", ")", "[", "1", "]", ".", "lower", "(", ")", "not", "in", "OpenType_extensions", ":", "continue", "if", "not", "dirname", "(", "font", ")", ":", "font", "=", "join", "(", "fontdir", ",", "font", ")", "if", "name", ".", "endswith", "(", "TrueType_suffix", ")", ":", "name", "=", "name", ".", "rstrip", "(", "TrueType_suffix", ")", ".", "rstrip", "(", ")", "name", "=", "name", ".", "lower", "(", ")", ".", "split", "(", ")", "bold", "=", "italic", "=", "0", "for", "m", "in", "mods", ":", "if", "m", "in", "name", ":", "name", ".", "remove", "(", "m", ")", "if", "'bold'", "in", "name", ":", "name", ".", "remove", "(", "'bold'", ")", "bold", "=", "1", "if", "'italic'", "in", "name", ":", "name", ".", "remove", "(", "'italic'", ")", "italic", "=", "1", "name", "=", "''", ".", "join", "(", "name", ")", "name", "=", "_simplename", "(", "name", ")", "_addfont", "(", "name", ",", "bold", ",", "italic", ",", "font", ",", "fonts", ")", "return", "fonts" ]
https://github.com/wistbean/fxxkpython/blob/88e16d79d8dd37236ba6ecd0d0ff11d63143968c/vip/qyxuan/projects/venv/lib/python3.6/site-packages/pygame/sysfont.py#L66-L139
google-research/language
61fa7260ac7d690d11ef72ca863e45a37c0bdc80
language/labs/drkit/wikidata/preprocessing/distantly_supervise.py
python
SlingExtractor.get_date_property
(self, prop, tail)
return None
Returns date if property accepts '/w/time' as target.
Returns date if property accepts '/w/time' as target.
[ "Returns", "date", "if", "property", "accepts", "/", "w", "/", "time", "as", "target", "." ]
def get_date_property(self, prop, tail): """Returns date if property accepts '/w/time' as target.""" if "target" not in prop: return None if prop.target.id != "/w/time": return None prop_id = self.get_frame_id(prop) if isinstance(tail, int): return (prop_id, tail) elif (isinstance(tail, sling.Frame) and "is" in tail and isinstance(tail["is"], int)): return (prop_id, tail["is"]) return None
[ "def", "get_date_property", "(", "self", ",", "prop", ",", "tail", ")", ":", "if", "\"target\"", "not", "in", "prop", ":", "return", "None", "if", "prop", ".", "target", ".", "id", "!=", "\"/w/time\"", ":", "return", "None", "prop_id", "=", "self", ".", "get_frame_id", "(", "prop", ")", "if", "isinstance", "(", "tail", ",", "int", ")", ":", "return", "(", "prop_id", ",", "tail", ")", "elif", "(", "isinstance", "(", "tail", ",", "sling", ".", "Frame", ")", "and", "\"is\"", "in", "tail", "and", "isinstance", "(", "tail", "[", "\"is\"", "]", ",", "int", ")", ")", ":", "return", "(", "prop_id", ",", "tail", "[", "\"is\"", "]", ")", "return", "None" ]
https://github.com/google-research/language/blob/61fa7260ac7d690d11ef72ca863e45a37c0bdc80/language/labs/drkit/wikidata/preprocessing/distantly_supervise.py#L137-L149
happinesslz/TANet
2d4b2ab99b8e57c03671b0f1531eab7dca8f3c1f
second.pytorch_with_TANet/second/pytorch/models/loss_utils.py
python
prepare_loss_weights
(labels, pos_cls_weight=1.0, neg_cls_weight=1.0, loss_norm_type=LossNormType.NormByNumPositives, dtype=torch.float32)
return cls_weights, reg_weights, cared
get cls_weights and reg_weights from labels.
get cls_weights and reg_weights from labels.
[ "get", "cls_weights", "and", "reg_weights", "from", "labels", "." ]
def prepare_loss_weights(labels, pos_cls_weight=1.0, neg_cls_weight=1.0, loss_norm_type=LossNormType.NormByNumPositives, dtype=torch.float32): """get cls_weights and reg_weights from labels. """ cared = labels >= 0 # cared: [N, num_anchors] positives = labels > 0 negatives = labels == 0 negative_cls_weights = negatives.type(dtype) * neg_cls_weight cls_weights = negative_cls_weights + pos_cls_weight * positives.type(dtype) reg_weights = positives.type(dtype) if loss_norm_type == LossNormType.NormByNumExamples: num_examples = cared.type(dtype).sum(1, keepdim=True) num_examples = torch.clamp(num_examples, min=1.0) cls_weights /= num_examples bbox_normalizer = positives.sum(1, keepdim=True).type(dtype) reg_weights /= torch.clamp(bbox_normalizer, min=1.0) elif loss_norm_type == LossNormType.NormByNumPositives: # for focal loss pos_normalizer = positives.sum(1, keepdim=True).type(dtype) reg_weights /= torch.clamp(pos_normalizer, min=1.0) cls_weights /= torch.clamp(pos_normalizer, min=1.0) elif loss_norm_type == LossNormType.NormByNumPosNeg: pos_neg = torch.stack([positives, negatives], dim=-1).type(dtype) normalizer = pos_neg.sum(1, keepdim=True) # [N, 1, 2] cls_normalizer = (pos_neg * normalizer).sum(-1) # [N, M] cls_normalizer = torch.clamp(cls_normalizer, min=1.0) # cls_normalizer will be pos_or_neg_weight/num_pos_or_neg normalizer = torch.clamp(normalizer, min=1.0) reg_weights /= normalizer[:, 0:1, 0] cls_weights /= cls_normalizer else: raise ValueError( f"unknown loss norm type. available: {list(LossNormType)}") return cls_weights, reg_weights, cared
[ "def", "prepare_loss_weights", "(", "labels", ",", "pos_cls_weight", "=", "1.0", ",", "neg_cls_weight", "=", "1.0", ",", "loss_norm_type", "=", "LossNormType", ".", "NormByNumPositives", ",", "dtype", "=", "torch", ".", "float32", ")", ":", "cared", "=", "labels", ">=", "0", "# cared: [N, num_anchors]", "positives", "=", "labels", ">", "0", "negatives", "=", "labels", "==", "0", "negative_cls_weights", "=", "negatives", ".", "type", "(", "dtype", ")", "*", "neg_cls_weight", "cls_weights", "=", "negative_cls_weights", "+", "pos_cls_weight", "*", "positives", ".", "type", "(", "dtype", ")", "reg_weights", "=", "positives", ".", "type", "(", "dtype", ")", "if", "loss_norm_type", "==", "LossNormType", ".", "NormByNumExamples", ":", "num_examples", "=", "cared", ".", "type", "(", "dtype", ")", ".", "sum", "(", "1", ",", "keepdim", "=", "True", ")", "num_examples", "=", "torch", ".", "clamp", "(", "num_examples", ",", "min", "=", "1.0", ")", "cls_weights", "/=", "num_examples", "bbox_normalizer", "=", "positives", ".", "sum", "(", "1", ",", "keepdim", "=", "True", ")", ".", "type", "(", "dtype", ")", "reg_weights", "/=", "torch", ".", "clamp", "(", "bbox_normalizer", ",", "min", "=", "1.0", ")", "elif", "loss_norm_type", "==", "LossNormType", ".", "NormByNumPositives", ":", "# for focal loss", "pos_normalizer", "=", "positives", ".", "sum", "(", "1", ",", "keepdim", "=", "True", ")", ".", "type", "(", "dtype", ")", "reg_weights", "/=", "torch", ".", "clamp", "(", "pos_normalizer", ",", "min", "=", "1.0", ")", "cls_weights", "/=", "torch", ".", "clamp", "(", "pos_normalizer", ",", "min", "=", "1.0", ")", "elif", "loss_norm_type", "==", "LossNormType", ".", "NormByNumPosNeg", ":", "pos_neg", "=", "torch", ".", "stack", "(", "[", "positives", ",", "negatives", "]", ",", "dim", "=", "-", "1", ")", ".", "type", "(", "dtype", ")", "normalizer", "=", "pos_neg", ".", "sum", "(", "1", ",", "keepdim", "=", "True", ")", "# [N, 1, 2]", "cls_normalizer", "=", "(", "pos_neg", "*", "normalizer", ")", ".", "sum", "(", "-", "1", ")", "# [N, M]", "cls_normalizer", "=", "torch", ".", "clamp", "(", "cls_normalizer", ",", "min", "=", "1.0", ")", "# cls_normalizer will be pos_or_neg_weight/num_pos_or_neg", "normalizer", "=", "torch", ".", "clamp", "(", "normalizer", ",", "min", "=", "1.0", ")", "reg_weights", "/=", "normalizer", "[", ":", ",", "0", ":", "1", ",", "0", "]", "cls_weights", "/=", "cls_normalizer", "else", ":", "raise", "ValueError", "(", "f\"unknown loss norm type. available: {list(LossNormType)}\"", ")", "return", "cls_weights", ",", "reg_weights", ",", "cared" ]
https://github.com/happinesslz/TANet/blob/2d4b2ab99b8e57c03671b0f1531eab7dca8f3c1f/second.pytorch_with_TANet/second/pytorch/models/loss_utils.py#L195-L231
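As a worked illustration of the NormByNumPositives branch above, here is the same arithmetic in plain NumPy (rather than the repository's torch code) for one row of four anchors, where label -1 means ignored, 0 negative and a positive id positive; the class and regression weights are each divided by the clamped count of positives.

import numpy as np

labels = np.array([[1, 0, 0, -1]])
positives = (labels > 0).astype(float)
negatives = (labels == 0).astype(float)
cls_weights = 1.0 * negatives + 1.0 * positives          # pos/neg class weights of 1.0
reg_weights = positives.copy()
pos_normalizer = np.clip(positives.sum(1, keepdims=True), 1.0, None)
print(cls_weights / pos_normalizer)   # [[1. 1. 1. 0.]]
print(reg_weights / pos_normalizer)   # [[1. 0. 0. 0.]]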
profusion/sgqlc
465a5e800f8b408ceafe25cde45ee0bde4912482
sgqlc/operation/__init__.py
python
GraphQLErrors.__init__
(self, errors)
[]
def __init__(self, errors): assert len(errors) > 0 msg = str(errors[0].get('message')) super(RuntimeError, self).__init__(msg) self.errors = errors
[ "def", "__init__", "(", "self", ",", "errors", ")", ":", "assert", "len", "(", "errors", ")", ">", "0", "msg", "=", "str", "(", "errors", "[", "0", "]", ".", "get", "(", "'message'", ")", ")", "super", "(", "RuntimeError", ",", "self", ")", ".", "__init__", "(", "msg", ")", "self", ".", "errors", "=", "errors" ]
https://github.com/profusion/sgqlc/blob/465a5e800f8b408ceafe25cde45ee0bde4912482/sgqlc/operation/__init__.py#L2244-L2248
xiepaup/dbatools
8549f2571aaee6a39f5c6f32179ac9c5d301a9aa
mysqlTools/mysql_utilities/mysql/utilities/common/database.py
python
Database.__build_exclude_patterns
(self, exclude_param)
return str
Return a string to add to where clause to exclude objects. This method will add the conditions to exclude objects based on name if there is a dot notation or by a search pattern as specified by the options. exclude_param[in] Name of column to check. Returns (string) String to add to where clause or ""
Return a string to add to where clause to exclude objects.
[ "Return", "a", "string", "to", "add", "to", "where", "clause", "to", "exclude", "objects", "." ]
def __build_exclude_patterns(self, exclude_param): """Return a string to add to where clause to exclude objects. This method will add the conditions to exclude objects based on name if there is a dot notation or by a search pattern as specified by the options. exclude_param[in] Name of column to check. Returns (string) String to add to where clause or "" """ from mysql.utilities.common.options import obj2sql oper = 'NOT REGEXP' if self.use_regexp else 'NOT LIKE' str = "" for pattern in self.exclude_patterns: value = None if pattern.find(".") > 0: db, name = pattern.split(".") if db == self.db_name: value = name else: value = pattern if value is not None: str += " AND {0} {1} {2}".format(exclude_param, oper, obj2sql(value)) return str
[ "def", "__build_exclude_patterns", "(", "self", ",", "exclude_param", ")", ":", "from", "mysql", ".", "utilities", ".", "common", ".", "options", "import", "obj2sql", "oper", "=", "'NOT REGEXP'", "if", "self", ".", "use_regexp", "else", "'NOT LIKE'", "str", "=", "\"\"", "for", "pattern", "in", "self", ".", "exclude_patterns", ":", "value", "=", "None", "if", "pattern", ".", "find", "(", "\".\"", ")", ">", "0", ":", "db", ",", "name", "=", "pattern", ".", "split", "(", "\".\"", ")", "if", "db", "==", "self", ".", "db_name", ":", "value", "=", "name", "else", ":", "value", "=", "pattern", "if", "value", "is", "not", "None", ":", "str", "+=", "\" AND {0} {1} {2}\"", ".", "format", "(", "exclude_param", ",", "oper", ",", "obj2sql", "(", "value", ")", ")", "return", "str" ]
https://github.com/xiepaup/dbatools/blob/8549f2571aaee6a39f5c6f32179ac9c5d301a9aa/mysqlTools/mysql_utilities/mysql/utilities/common/database.py#L704-L731
eea/odfpy
574f0fafad73a15a5b11b115d94821623274b4b0
odf/element.py
python
Text.toXml
(self,level,f)
Write XML in UTF-8
Write XML in UTF-8
[ "Write", "XML", "in", "UTF", "-", "8" ]
def toXml(self,level,f): """ Write XML in UTF-8 """ if self.data: f.write(_sanitize(unicode(self.data)))
[ "def", "toXml", "(", "self", ",", "level", ",", "f", ")", ":", "if", "self", ".", "data", ":", "f", ".", "write", "(", "_sanitize", "(", "unicode", "(", "self", ".", "data", ")", ")", ")" ]
https://github.com/eea/odfpy/blob/574f0fafad73a15a5b11b115d94821623274b4b0/odf/element.py#L317-L320
qiime2/qiime2
3906f67c70a1321e99e7fc59e79550c2432a8cee
qiime2/core/path.py
python
OutPath.__new__
(cls, dir=False, **kwargs)
return obj
Create a tempfile, return pathlib.Path reference to it.
Create a tempfile, return pathlib.Path reference to it.
[ "Create", "a", "tempfile", "return", "pathlib", ".", "Path", "reference", "to", "it", "." ]
def __new__(cls, dir=False, **kwargs): """ Create a tempfile, return pathlib.Path reference to it. """ if dir: name = tempfile.mkdtemp(**kwargs) else: fd, name = tempfile.mkstemp(**kwargs) # fd is now assigned to our process table, but we don't need to do # anything with the file. We will call `open` on the `name` later # producing a different file descriptor, so close this one to # prevent a resource leak. os.close(fd) obj = super().__new__(cls, name) obj._destructor = weakref.finalize(obj, cls._destruct, str(obj)) return obj
[ "def", "__new__", "(", "cls", ",", "dir", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "dir", ":", "name", "=", "tempfile", ".", "mkdtemp", "(", "*", "*", "kwargs", ")", "else", ":", "fd", ",", "name", "=", "tempfile", ".", "mkstemp", "(", "*", "*", "kwargs", ")", "# fd is now assigned to our process table, but we don't need to do", "# anything with the file. We will call `open` on the `name` later", "# producing a different file descriptor, so close this one to", "# prevent a resource leak.", "os", ".", "close", "(", "fd", ")", "obj", "=", "super", "(", ")", ".", "__new__", "(", "cls", ",", "name", ")", "obj", ".", "_destructor", "=", "weakref", ".", "finalize", "(", "obj", ",", "cls", ".", "_destruct", ",", "str", "(", "obj", ")", ")", "return", "obj" ]
https://github.com/qiime2/qiime2/blob/3906f67c70a1321e99e7fc59e79550c2432a8cee/qiime2/core/path.py#L86-L101
deepfakes/faceswap
09c7d8aca3c608d1afad941ea78e9fd9b64d9219
scripts/train.py
python
Train._monitor
(self, thread)
return err
Monitor the background :func:`_training` thread for key presses and errors. Returns ------- bool ``True`` if there has been an error in the background thread otherwise ``False``
Monitor the background :func:`_training` thread for key presses and errors.
[ "Monitor", "the", "background", ":", "func", ":", "_training", "thread", "for", "key", "presses", "and", "errors", "." ]
def _monitor(self, thread): """ Monitor the background :func:`_training` thread for key presses and errors. Returns ------- bool ``True`` if there has been an error in the background thread otherwise ``False`` """ logger.debug("Launching Monitor") logger.info("===================================================") logger.info(" Starting") if self._args.preview: logger.info(" Using live preview") logger.info(" Press '%s' to save and quit", "Stop" if self._args.redirect_gui or self._args.colab else "ENTER") if not self._args.redirect_gui and not self._args.colab: logger.info(" Press 'S' to save model weights immediately") logger.info("===================================================") keypress = KBHit(is_gui=self._args.redirect_gui) err = False while True: try: if self._args.preview: with self._lock: for name, image in self._preview_buffer.items(): cv2.imshow(name, image) # pylint: disable=no-member cv_key = cv2.waitKey(1000) # pylint: disable=no-member else: cv_key = None if thread.has_error: logger.debug("Thread error detected") err = True break if self._stop: logger.debug("Stop received") break # Preview Monitor if not self._preview_monitor(cv_key): break # Console Monitor if keypress.kbhit(): console_key = keypress.getch() if console_key in ("\n", "\r"): logger.debug("Exit requested") break if console_key in ("s", "S"): logger.info("Save requested") self._save_now = True # GUI Preview trigger update monitor self._process_gui_triggers() sleep(1) except KeyboardInterrupt: logger.debug("Keyboard Interrupt received") break keypress.set_normal_term() logger.debug("Closed Monitor") return err
[ "def", "_monitor", "(", "self", ",", "thread", ")", ":", "logger", ".", "debug", "(", "\"Launching Monitor\"", ")", "logger", ".", "info", "(", "\"===================================================\"", ")", "logger", ".", "info", "(", "\" Starting\"", ")", "if", "self", ".", "_args", ".", "preview", ":", "logger", ".", "info", "(", "\" Using live preview\"", ")", "logger", ".", "info", "(", "\" Press '%s' to save and quit\"", ",", "\"Stop\"", "if", "self", ".", "_args", ".", "redirect_gui", "or", "self", ".", "_args", ".", "colab", "else", "\"ENTER\"", ")", "if", "not", "self", ".", "_args", ".", "redirect_gui", "and", "not", "self", ".", "_args", ".", "colab", ":", "logger", ".", "info", "(", "\" Press 'S' to save model weights immediately\"", ")", "logger", ".", "info", "(", "\"===================================================\"", ")", "keypress", "=", "KBHit", "(", "is_gui", "=", "self", ".", "_args", ".", "redirect_gui", ")", "err", "=", "False", "while", "True", ":", "try", ":", "if", "self", ".", "_args", ".", "preview", ":", "with", "self", ".", "_lock", ":", "for", "name", ",", "image", "in", "self", ".", "_preview_buffer", ".", "items", "(", ")", ":", "cv2", ".", "imshow", "(", "name", ",", "image", ")", "# pylint: disable=no-member", "cv_key", "=", "cv2", ".", "waitKey", "(", "1000", ")", "# pylint: disable=no-member", "else", ":", "cv_key", "=", "None", "if", "thread", ".", "has_error", ":", "logger", ".", "debug", "(", "\"Thread error detected\"", ")", "err", "=", "True", "break", "if", "self", ".", "_stop", ":", "logger", ".", "debug", "(", "\"Stop received\"", ")", "break", "# Preview Monitor", "if", "not", "self", ".", "_preview_monitor", "(", "cv_key", ")", ":", "break", "# Console Monitor", "if", "keypress", ".", "kbhit", "(", ")", ":", "console_key", "=", "keypress", ".", "getch", "(", ")", "if", "console_key", "in", "(", "\"\\n\"", ",", "\"\\r\"", ")", ":", "logger", ".", "debug", "(", "\"Exit requested\"", ")", "break", "if", "console_key", "in", "(", "\"s\"", ",", "\"S\"", ")", ":", "logger", ".", "info", "(", "\"Save requested\"", ")", "self", ".", "_save_now", "=", "True", "# GUI Preview trigger update monitor", "self", ".", "_process_gui_triggers", "(", ")", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "logger", ".", "debug", "(", "\"Keyboard Interrupt received\"", ")", "break", "keypress", ".", "set_normal_term", "(", ")", "logger", ".", "debug", "(", "\"Closed Monitor\"", ")", "return", "err" ]
https://github.com/deepfakes/faceswap/blob/09c7d8aca3c608d1afad941ea78e9fd9b64d9219/scripts/train.py#L350-L412
ganglia/gmond_python_modules
2f7fcab3d27926ef4a2feb1b53c09af16a43e729
gpu/nvidia/nvidia-ml-py-3.295.00/build/lib/pynvml.py
python
nvmlDeviceGetCurrPcieLinkWidth
(handle)
return width.value
[]
def nvmlDeviceGetCurrPcieLinkWidth(handle): fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkWidth") width = c_uint() ret = fn(handle, byref(width)) _nvmlCheckReturn(ret) return width.value
[ "def", "nvmlDeviceGetCurrPcieLinkWidth", "(", "handle", ")", ":", "fn", "=", "_nvmlGetFunctionPointer", "(", "\"nvmlDeviceGetCurrPcieLinkWidth\"", ")", "width", "=", "c_uint", "(", ")", "ret", "=", "fn", "(", "handle", ",", "byref", "(", "width", ")", ")", "_nvmlCheckReturn", "(", "ret", ")", "return", "width", ".", "value" ]
https://github.com/ganglia/gmond_python_modules/blob/2f7fcab3d27926ef4a2feb1b53c09af16a43e729/gpu/nvidia/nvidia-ml-py-3.295.00/build/lib/pynvml.py#L887-L892
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/blobs/migrate_metadata.py
python
get_shared_domain
(doc)
return SHARED_DOMAIN
[]
def get_shared_domain(doc): return SHARED_DOMAIN
[ "def", "get_shared_domain", "(", "doc", ")", ":", "return", "SHARED_DOMAIN" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/blobs/migrate_metadata.py#L271-L272
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/craft_plugins/speed.py
python
getCraftedTextFromText
(gcodeText, repository=None)
return SpeedSkein().getCraftedGcode(gcodeText, repository)
Speed a gcode linear move text.
Speed a gcode linear move text.
[ "Speed", "a", "gcode", "linear", "move", "text", "." ]
def getCraftedTextFromText(gcodeText, repository=None): "Speed a gcode linear move text." if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'speed'): return gcodeText if repository == None: repository = settings.getReadRepository( SpeedRepository() ) if not repository.activateSpeed.value: return gcodeText return SpeedSkein().getCraftedGcode(gcodeText, repository)
[ "def", "getCraftedTextFromText", "(", "gcodeText", ",", "repository", "=", "None", ")", ":", "if", "gcodec", ".", "isProcedureDoneOrFileIsEmpty", "(", "gcodeText", ",", "'speed'", ")", ":", "return", "gcodeText", "if", "repository", "==", "None", ":", "repository", "=", "settings", ".", "getReadRepository", "(", "SpeedRepository", "(", ")", ")", "if", "not", "repository", ".", "activateSpeed", ".", "value", ":", "return", "gcodeText", "return", "SpeedSkein", "(", ")", ".", "getCraftedGcode", "(", "gcodeText", ",", "repository", ")" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/craft_plugins/speed.py#L133-L141
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/elmax/common.py
python
ElmaxCoordinator.panel_entry
(self)
return self._panel_entry
Return the panel entry.
Return the panel entry.
[ "Return", "the", "panel", "entry", "." ]
def panel_entry(self) -> PanelEntry | None: """Return the panel entry.""" return self._panel_entry
[ "def", "panel_entry", "(", "self", ")", "->", "PanelEntry", "|", "None", ":", "return", "self", ".", "_panel_entry" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/elmax/common.py#L58-L60
Netflix/dispatch
f734b7cb91cba0e3a95b4d0adaa7198bfc94552b
src/dispatch/report/scheduled.py
python
incident_report_reminders
(db_session: SessionLocal, project: Project)
Sends report reminders to incident commanders for active incidents.
Sends report reminders to incident commanders for active incidents.
[ "Sends", "report", "reminders", "to", "incident", "commanders", "for", "active", "incidents", "." ]
def incident_report_reminders(db_session: SessionLocal, project: Project): """Sends report reminders to incident commanders for active incidents.""" incidents = incident_service.get_all_by_status( db_session=db_session, project_id=project.id, status=IncidentStatus.active ) for incident in incidents: for report_type in ReportTypes: try: remind_after = incident.created_at if report_type == ReportTypes.tactical_report: notification_hour = incident.incident_priority.tactical_report_reminder if incident.last_tactical_report: remind_after = incident.last_tactical_report.created_at elif report_type == ReportTypes.executive_report: notification_hour = incident.incident_priority.executive_report_reminder if incident.last_executive_report: remind_after = incident.last_executive_report.created_at now = datetime.utcnow() - remind_after # we calculate the number of hours and seconds since last report was sent hours, seconds = divmod((now.days * 86400) + now.seconds, 3600) q, r = divmod(hours, notification_hour) if q >= 1 and r == 0: # it's time to send the reminder send_incident_report_reminder(incident, report_type, db_session) except Exception as e: # we shouldn't fail to send all reminders when one fails log.exception(e)
[ "def", "incident_report_reminders", "(", "db_session", ":", "SessionLocal", ",", "project", ":", "Project", ")", ":", "incidents", "=", "incident_service", ".", "get_all_by_status", "(", "db_session", "=", "db_session", ",", "project_id", "=", "project", ".", "id", ",", "status", "=", "IncidentStatus", ".", "active", ")", "for", "incident", "in", "incidents", ":", "for", "report_type", "in", "ReportTypes", ":", "try", ":", "remind_after", "=", "incident", ".", "created_at", "if", "report_type", "==", "ReportTypes", ".", "tactical_report", ":", "notification_hour", "=", "incident", ".", "incident_priority", ".", "tactical_report_reminder", "if", "incident", ".", "last_tactical_report", ":", "remind_after", "=", "incident", ".", "last_tactical_report", ".", "created_at", "elif", "report_type", "==", "ReportTypes", ".", "executive_report", ":", "notification_hour", "=", "incident", ".", "incident_priority", ".", "executive_report_reminder", "if", "incident", ".", "last_executive_report", ":", "remind_after", "=", "incident", ".", "last_executive_report", ".", "created_at", "now", "=", "datetime", ".", "utcnow", "(", ")", "-", "remind_after", "# we calculate the number of hours and seconds since last report was sent", "hours", ",", "seconds", "=", "divmod", "(", "(", "now", ".", "days", "*", "86400", ")", "+", "now", ".", "seconds", ",", "3600", ")", "q", ",", "r", "=", "divmod", "(", "hours", ",", "notification_hour", ")", "if", "q", ">=", "1", "and", "r", "==", "0", ":", "# it's time to send the reminder", "send_incident_report_reminder", "(", "incident", ",", "report_type", ",", "db_session", ")", "except", "Exception", "as", "e", ":", "# we shouldn't fail to send all reminders when one fails", "log", ".", "exception", "(", "e", ")" ]
https://github.com/Netflix/dispatch/blob/f734b7cb91cba0e3a95b4d0adaa7198bfc94552b/src/dispatch/report/scheduled.py#L21-L51
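The scheduling arithmetic in this reminder job is worth spelling out: it converts the time since the last report into whole hours, then fires only when that hour count is a positive exact multiple of the configured reminder interval. A standalone sketch with hypothetical numbers (6-hour interval, last report exactly 6 hours ago):

    from datetime import datetime, timedelta

    notification_hour = 6                                   # hypothetical interval
    remind_after = datetime.utcnow() - timedelta(hours=6)   # hypothetical last report

    now = datetime.utcnow() - remind_after
    hours, seconds = divmod((now.days * 86400) + now.seconds, 3600)
    q, r = divmod(hours, notification_hour)

    # The reminder is sent only when q >= 1 and r == 0, i.e. at 6, 12, 18, ... hours;
    # at 7 hours q == 1 but r == 1, so nothing is sent.
    print(hours, q, r, q >= 1 and r == 0)                   # 6 1 0 True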
CvvT/dumpDex
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
python/idaapi.py
python
channel_redir_t.__init__
(self, *args)
__init__(self) -> channel_redir_t
__init__(self) -> channel_redir_t
[ "__init__", "(", "self", ")", "-", ">", "channel_redir_t" ]
def __init__(self, *args): """ __init__(self) -> channel_redir_t """ this = _idaapi.new_channel_redir_t(*args) try: self.this.append(this) except: self.this = this
[ "def", "__init__", "(", "self", ",", "*", "args", ")", ":", "this", "=", "_idaapi", ".", "new_channel_redir_t", "(", "*", "args", ")", "try", ":", "self", ".", "this", ".", "append", "(", "this", ")", "except", ":", "self", ".", "this", "=", "this" ]
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L953-L959
user-cont/conu
0d8962560f6f7f17fe1be0d434a4809e2a0ea51d
conu/backend/buildah/image.py
python
BuildahImage.inspect
(self, refresh=True)
return self._inspect_data
provide metadata about the image; flip refresh=True if cached metadata are enough :param refresh: bool, update the metadata with up to date content :return: dict
provide metadata about the image; flip refresh=True if cached metadata are enough
[ "provide", "metadata", "about", "the", "image", ";", "flip", "refresh", "=", "True", "if", "cached", "metadata", "are", "enough" ]
def inspect(self, refresh=True): """ provide metadata about the image; flip refresh=True if cached metadata are enough :param refresh: bool, update the metadata with up to date content :return: dict """ if refresh or not self._inspect_data: identifier = self._id or self.get_full_name() if not identifier: raise ConuException("This image does not have a valid identifier.") self._inspect_data = self._inspect(identifier) return self._inspect_data
[ "def", "inspect", "(", "self", ",", "refresh", "=", "True", ")", ":", "if", "refresh", "or", "not", "self", ".", "_inspect_data", ":", "identifier", "=", "self", ".", "_id", "or", "self", ".", "get_full_name", "(", ")", "if", "not", "identifier", ":", "raise", "ConuException", "(", "\"This image does not have a valid identifier.\"", ")", "self", ".", "_inspect_data", "=", "self", ".", "_inspect", "(", "identifier", ")", "return", "self", ".", "_inspect_data" ]
https://github.com/user-cont/conu/blob/0d8962560f6f7f17fe1be0d434a4809e2a0ea51d/conu/backend/buildah/image.py#L167-L179
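A sketch of the caching behaviour of inspect(); the constructor arguments follow the usual conu Image(repository, tag=...) convention but are an assumption here, and it presumes buildah is installed with the named image available locally:

    from conu.backend.buildah.image import BuildahImage

    image = BuildahImage("registry.fedoraproject.org/fedora", tag="35")  # hypothetical reference

    meta = image.inspect()                 # queries buildah and caches the metadata
    cached = image.inspect(refresh=False)  # reuses the cached dict, no new call
    assert meta is cached
    fresh = image.inspect()                # refresh=True (default) re-queries buildah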
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/ext/mapreduce/handlers.py
python
MapperWorkerCallbackHandler._attempt_slice_recovery
(self, shard_state, tstate)
return self._TASK_DIRECTIVE.RECOVER_SLICE
Recover a slice. This is run when a slice had been previously attempted and output may have been written. If an output writer requires slice recovery, we run that logic to remove output duplicates. Otherwise we just retry the slice. If recovery is needed, then the entire slice will be dedicated to recovery logic. No data processing will take place. Thus we call the slice "recovery slice". This is needed for correctness: An output writer instance can be out of sync from its physical medium only when the slice dies after acquiring the shard lock but before committing shard state to db. The worst failure case is when shard state failed to commit after the NAMED task for the next slice was added. Thus, the recovery slice has special logic to increment current slice_id n to n+2. If the task for n+1 had been added, it will be dropped because it is behind shard state. Args: shard_state: an instance of Model.ShardState. tstate: an instance of Model.TransientShardState. Returns: _TASK_DIRECTIVE.PROCEED_TASK to continue with this retry. _TASK_DIRECTIVE.RECOVER_SLICE to recover this slice. The next slice will start at the same input as this slice but output to a new instance of output writer. Combining outputs from all writer instances is up to implementation.
Recover a slice.
[ "Recover", "a", "slice", "." ]
def _attempt_slice_recovery(self, shard_state, tstate): """Recover a slice. This is run when a slice had been previously attempted and output may have been written. If an output writer requires slice recovery, we run those logic to remove output duplicates. Otherwise we just retry the slice. If recovery is needed, then the entire slice will be dedicated to recovery logic. No data processing will take place. Thus we call the slice "recovery slice". This is needed for correctness: An output writer instance can be out of sync from its physical medium only when the slice dies after acquring the shard lock but before committing shard state to db. The worst failure case is when shard state failed to commit after the NAMED task for the next slice was added. Thus, recovery slice has a special logic to increment current slice_id n to n+2. If the task for n+1 had been added, it will be dropped because it is behind shard state. Args: shard_state: an instance of Model.ShardState. tstate: an instance of Model.TransientShardState. Returns: _TASK_DIRECTIVE.PROCEED_TASK to continue with this retry. _TASK_DIRECTIVE.RECOVER_SLICE to recover this slice. The next slice will start at the same input as this slice but output to a new instance of output writer. Combining outputs from all writer instances is up to implementation. """ mapper_spec = tstate.mapreduce_spec.mapper if not (tstate.output_writer and tstate.output_writer._supports_slice_recovery(mapper_spec)): return self._TASK_DIRECTIVE.PROCEED_TASK tstate.output_writer = tstate.output_writer._recover( tstate.mapreduce_spec, shard_state.shard_number, shard_state.retries + 1) return self._TASK_DIRECTIVE.RECOVER_SLICE
[ "def", "_attempt_slice_recovery", "(", "self", ",", "shard_state", ",", "tstate", ")", ":", "mapper_spec", "=", "tstate", ".", "mapreduce_spec", ".", "mapper", "if", "not", "(", "tstate", ".", "output_writer", "and", "tstate", ".", "output_writer", ".", "_supports_slice_recovery", "(", "mapper_spec", ")", ")", ":", "return", "self", ".", "_TASK_DIRECTIVE", ".", "PROCEED_TASK", "tstate", ".", "output_writer", "=", "tstate", ".", "output_writer", ".", "_recover", "(", "tstate", ".", "mapreduce_spec", ",", "shard_state", ".", "shard_number", ",", "shard_state", ".", "retries", "+", "1", ")", "return", "self", ".", "_TASK_DIRECTIVE", ".", "RECOVER_SLICE" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/ext/mapreduce/handlers.py#L838-L876
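Stripped of the mapreduce machinery, the method above reduces to one guard: recover only when there is an output writer and it reports slice-recovery support; otherwise the retry proceeds as a normal task. A toy illustration with stand-in objects (everything below is hypothetical, not App Engine API):

    class FakeWriter(object):
        # Stand-in for an output writer; the real one can also clone itself
        # via _recover() so that duplicate output is dropped.
        def _supports_slice_recovery(self, mapper_spec):
            return True

    def decide(output_writer, mapper_spec=None):
        # Mirrors the guard in _attempt_slice_recovery.
        if not (output_writer and output_writer._supports_slice_recovery(mapper_spec)):
            return "PROCEED_TASK"
        return "RECOVER_SLICE"

    print(decide(None))          # PROCEED_TASK
    print(decide(FakeWriter()))  # RECOVER_SLICE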
007gzs/dingtalk-sdk
7979da2e259fdbc571728cae2425a04dbc65850a
dingtalk/client/api/taobao.py
python
TbWuDaoKou.alibaba_wdk_fulfill_bill_return_warehouse_on_task_status_changed
( self, return_warehouse_result=None )
return self._top_request( "alibaba.wdk.fulfill.bill.return.warehouse.on.task.status.changed", { "return_warehouse_result": return_warehouse_result } )
Return-to-warehouse result callback. Reports back the result of returned goods being received into the warehouse. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=44157 :param return_warehouse_result: return-to-warehouse result
Return-to-warehouse result callback. Reports back the result of returned goods being received into the warehouse. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=44157
[ "退仓结果回传", "退货入仓结果回传", "文档地址:https", ":", "//", "open", "-", "doc", ".", "dingtalk", ".", "com", "/", "docs", "/", "api", ".", "htm?apiId", "=", "44157" ]
def alibaba_wdk_fulfill_bill_return_warehouse_on_task_status_changed( self, return_warehouse_result=None ): """ 退仓结果回传 退货入仓结果回传 文档地址:https://open-doc.dingtalk.com/docs/api.htm?apiId=44157 :param return_warehouse_result: 退仓结果 """ return self._top_request( "alibaba.wdk.fulfill.bill.return.warehouse.on.task.status.changed", { "return_warehouse_result": return_warehouse_result } )
[ "def", "alibaba_wdk_fulfill_bill_return_warehouse_on_task_status_changed", "(", "self", ",", "return_warehouse_result", "=", "None", ")", ":", "return", "self", ".", "_top_request", "(", "\"alibaba.wdk.fulfill.bill.return.warehouse.on.task.status.changed\"", ",", "{", "\"return_warehouse_result\"", ":", "return_warehouse_result", "}", ")" ]
https://github.com/007gzs/dingtalk-sdk/blob/7979da2e259fdbc571728cae2425a04dbc65850a/dingtalk/client/api/taobao.py#L65303-L65319
quodlibet/quodlibet
e3099c89f7aa6524380795d325cc14630031886c
quodlibet/packages/raven/versioning.py
python
fetch_git_sha
(path, head=None)
>>> fetch_git_sha(os.path.dirname(__file__))
>>> fetch_git_sha(os.path.dirname(__file__))
[ ">>>", "fetch_git_sha", "(", "os", ".", "path", ".", "dirname", "(", "__file__", "))" ]
def fetch_git_sha(path, head=None): """ >>> fetch_git_sha(os.path.dirname(__file__)) """ if not head: head_path = os.path.join(path, '.git', 'HEAD') if not os.path.exists(head_path): raise InvalidGitRepository( 'Cannot identify HEAD for git repository at %s' % (path,)) with open(head_path, 'r') as fp: head = text_type(fp.read()).strip() if head.startswith('ref: '): head = head[5:] revision_file = os.path.join( path, '.git', *head.split('/') ) else: return head else: revision_file = os.path.join(path, '.git', 'refs', 'heads', head) if not os.path.exists(revision_file): if not os.path.exists(os.path.join(path, '.git')): raise InvalidGitRepository( '%s does not seem to be the root of a git repository' % (path,)) # Check for our .git/packed-refs' file since a `git gc` may have run # https://git-scm.com/book/en/v2/Git-Internals-Maintenance-and-Data-Recovery packed_file = os.path.join(path, '.git', 'packed-refs') if os.path.exists(packed_file): with open(packed_file) as fh: for line in fh: line = line.rstrip() if line and line[:1] not in ('#', '^'): try: revision, ref = line.split(' ', 1) except ValueError: continue if ref == head: return text_type(revision) raise InvalidGitRepository( 'Unable to find ref to head "%s" in repository' % (head,)) with open(revision_file) as fh: return text_type(fh.read()).strip()
[ "def", "fetch_git_sha", "(", "path", ",", "head", "=", "None", ")", ":", "if", "not", "head", ":", "head_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'.git'", ",", "'HEAD'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "head_path", ")", ":", "raise", "InvalidGitRepository", "(", "'Cannot identify HEAD for git repository at %s'", "%", "(", "path", ",", ")", ")", "with", "open", "(", "head_path", ",", "'r'", ")", "as", "fp", ":", "head", "=", "text_type", "(", "fp", ".", "read", "(", ")", ")", ".", "strip", "(", ")", "if", "head", ".", "startswith", "(", "'ref: '", ")", ":", "head", "=", "head", "[", "5", ":", "]", "revision_file", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'.git'", ",", "*", "head", ".", "split", "(", "'/'", ")", ")", "else", ":", "return", "head", "else", ":", "revision_file", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'.git'", ",", "'refs'", ",", "'heads'", ",", "head", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "revision_file", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'.git'", ")", ")", ":", "raise", "InvalidGitRepository", "(", "'%s does not seem to be the root of a git repository'", "%", "(", "path", ",", ")", ")", "# Check for our .git/packed-refs' file since a `git gc` may have run", "# https://git-scm.com/book/en/v2/Git-Internals-Maintenance-and-Data-Recovery", "packed_file", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'.git'", ",", "'packed-refs'", ")", "if", "os", ".", "path", ".", "exists", "(", "packed_file", ")", ":", "with", "open", "(", "packed_file", ")", "as", "fh", ":", "for", "line", "in", "fh", ":", "line", "=", "line", ".", "rstrip", "(", ")", "if", "line", "and", "line", "[", ":", "1", "]", "not", "in", "(", "'#'", ",", "'^'", ")", ":", "try", ":", "revision", ",", "ref", "=", "line", ".", "split", "(", "' '", ",", "1", ")", "except", "ValueError", ":", "continue", "if", "ref", "==", "head", ":", "return", "text_type", "(", "revision", ")", "raise", "InvalidGitRepository", "(", "'Unable to find ref to head \"%s\" in repository'", "%", "(", "head", ",", ")", ")", "with", "open", "(", "revision_file", ")", "as", "fh", ":", "return", "text_type", "(", "fh", ".", "read", "(", ")", ")", ".", "strip", "(", ")" ]
https://github.com/quodlibet/quodlibet/blob/e3099c89f7aa6524380795d325cc14630031886c/quodlibet/packages/raven/versioning.py#L17-L64
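A small usage sketch, e.g. for tagging a Sentry release with the current commit; it assumes the bundled raven package (or a pip-installed one) is importable as raven, and the fallback string is an assumption for deployments without a .git directory:

    import os
    from raven.versioning import fetch_git_sha, InvalidGitRepository

    try:
        # Resolve a path inside the checkout; fetch_git_sha walks .git/HEAD,
        # the loose ref file, or .git/packed-refs to find the commit hash.
        release = fetch_git_sha(os.path.dirname(os.path.abspath(__file__)))
    except InvalidGitRepository:
        release = "unknown"      # e.g. running from a tarball without .git
    print("release:", release)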
SeldonIO/alibi
ce961caf995d22648a8338857822c90428af4765
alibi/explainers/ale.py
python
ALE.explain
(self, X: np.ndarray, features: Optional[List[int]] = None, min_bin_points: int = 4)
return self.build_explanation( ale_values=ale_values, ale0=ale0, constant_value=constant_value, feature_values=feature_values, feature_deciles=feature_deciles, feature_names=feature_names )
Calculate the ALE curves for each feature with respect to the dataset `X`. Parameters ---------- X An NxF tabular dataset used to calculate the ALE curves. This is typically the training dataset or a representative sample. features: Features for which to calculate ALE. min_bin_points Minimum number of points each discretized interval should contain to ensure more precise ALE estimation. Returns ------- An `Explanation` object containing the data and the metadata of the calculated ALE curves.
Calculate the ALE curves for each feature with respect to the dataset `X`.
[ "Calculate", "the", "ALE", "curves", "for", "each", "feature", "with", "respect", "to", "the", "dataset", "X", "." ]
def explain(self, X: np.ndarray, features: Optional[List[int]] = None, min_bin_points: int = 4) -> Explanation: """ Calculate the ALE curves for each feature with respect to the dataset `X`. Parameters ---------- X An NxF tabular dataset used to calculate the ALE curves. This is typically the training dataset or a representative sample. features: Features for which to calculate ALE. min_bin_points Minimum number of points each discretized interval should contain to ensure more precise ALE estimation. Returns ------- An `Explanation` object containing the data and the metadata of the calculated ALE curves. """ self.meta['params'].update(min_bin_points=min_bin_points) if X.ndim != 2: raise ValueError('The array X must be 2-dimensional') n_features = X.shape[1] # set feature and target names, this is done here as we don't know n_features at init time if self.feature_names is None: self.feature_names = [f'f_{i}' for i in range(n_features)] if self.target_names is None: pred = np.atleast_2d(self.predictor(X[0].reshape(1, -1))) n_targets = pred.shape[1] self.target_names = [f'c_{i}' for i in range(n_targets)] self.feature_names = np.array(self.feature_names) # type: ignore self.target_names = np.array(self.target_names) # type: ignore # only calculate ALE for the specified features and return the explanation for this subset if features: feature_names = self.feature_names[features] # type: ignore else: feature_names = self.feature_names features = list(range(n_features)) feature_values = [] ale_values = [] ale0 = [] feature_deciles = [] # TODO: use joblib to paralelise? for feature in features: q, ale, a0 = ale_num( self.predictor, X=X, feature=feature, min_bin_points=min_bin_points, check_feature_resolution=self.check_feature_resolution, low_resolution_threshold=self.low_resolution_threshold, extrapolate_constant=self.extrapolate_constant, extrapolate_constant_perc=self.extrapolate_constant_perc, extrapolate_constant_min=self.extrapolate_constant_min ) deciles = get_quantiles(X[:, feature], num_quantiles=11) feature_values.append(q) ale_values.append(ale) ale0.append(a0) feature_deciles.append(deciles) constant_value = self.predictor(X).mean() # TODO: an ALE plot ideally requires a rugplot to gauge density of instances in the feature space. # I've replaced this with feature deciles which is coarser but has constant space complexity # as opposed to a rugplot. Alternatively, could consider subsampling to produce a rug with some # maximum number of points. return self.build_explanation( ale_values=ale_values, ale0=ale0, constant_value=constant_value, feature_values=feature_values, feature_deciles=feature_deciles, feature_names=feature_names )
[ "def", "explain", "(", "self", ",", "X", ":", "np", ".", "ndarray", ",", "features", ":", "Optional", "[", "List", "[", "int", "]", "]", "=", "None", ",", "min_bin_points", ":", "int", "=", "4", ")", "->", "Explanation", ":", "self", ".", "meta", "[", "'params'", "]", ".", "update", "(", "min_bin_points", "=", "min_bin_points", ")", "if", "X", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "'The array X must be 2-dimensional'", ")", "n_features", "=", "X", ".", "shape", "[", "1", "]", "# set feature and target names, this is done here as we don't know n_features at init time", "if", "self", ".", "feature_names", "is", "None", ":", "self", ".", "feature_names", "=", "[", "f'f_{i}'", "for", "i", "in", "range", "(", "n_features", ")", "]", "if", "self", ".", "target_names", "is", "None", ":", "pred", "=", "np", ".", "atleast_2d", "(", "self", ".", "predictor", "(", "X", "[", "0", "]", ".", "reshape", "(", "1", ",", "-", "1", ")", ")", ")", "n_targets", "=", "pred", ".", "shape", "[", "1", "]", "self", ".", "target_names", "=", "[", "f'c_{i}'", "for", "i", "in", "range", "(", "n_targets", ")", "]", "self", ".", "feature_names", "=", "np", ".", "array", "(", "self", ".", "feature_names", ")", "# type: ignore", "self", ".", "target_names", "=", "np", ".", "array", "(", "self", ".", "target_names", ")", "# type: ignore", "# only calculate ALE for the specified features and return the explanation for this subset", "if", "features", ":", "feature_names", "=", "self", ".", "feature_names", "[", "features", "]", "# type: ignore", "else", ":", "feature_names", "=", "self", ".", "feature_names", "features", "=", "list", "(", "range", "(", "n_features", ")", ")", "feature_values", "=", "[", "]", "ale_values", "=", "[", "]", "ale0", "=", "[", "]", "feature_deciles", "=", "[", "]", "# TODO: use joblib to paralelise?", "for", "feature", "in", "features", ":", "q", ",", "ale", ",", "a0", "=", "ale_num", "(", "self", ".", "predictor", ",", "X", "=", "X", ",", "feature", "=", "feature", ",", "min_bin_points", "=", "min_bin_points", ",", "check_feature_resolution", "=", "self", ".", "check_feature_resolution", ",", "low_resolution_threshold", "=", "self", ".", "low_resolution_threshold", ",", "extrapolate_constant", "=", "self", ".", "extrapolate_constant", ",", "extrapolate_constant_perc", "=", "self", ".", "extrapolate_constant_perc", ",", "extrapolate_constant_min", "=", "self", ".", "extrapolate_constant_min", ")", "deciles", "=", "get_quantiles", "(", "X", "[", ":", ",", "feature", "]", ",", "num_quantiles", "=", "11", ")", "feature_values", ".", "append", "(", "q", ")", "ale_values", ".", "append", "(", "ale", ")", "ale0", ".", "append", "(", "a0", ")", "feature_deciles", ".", "append", "(", "deciles", ")", "constant_value", "=", "self", ".", "predictor", "(", "X", ")", ".", "mean", "(", ")", "# TODO: an ALE plot ideally requires a rugplot to gauge density of instances in the feature space.", "# I've replaced this with feature deciles which is coarser but has constant space complexity", "# as opposed to a rugplot. Alternatively, could consider subsampling to produce a rug with some", "# maximum number of points.", "return", "self", ".", "build_explanation", "(", "ale_values", "=", "ale_values", ",", "ale0", "=", "ale0", ",", "constant_value", "=", "constant_value", ",", "feature_values", "=", "feature_values", ",", "feature_deciles", "=", "feature_deciles", ",", "feature_names", "=", "feature_names", ")" ]
https://github.com/SeldonIO/alibi/blob/ce961caf995d22648a8338857822c90428af4765/alibi/explainers/ale.py#L92-L172
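A short end-to-end sketch of the explainer above, assuming alibi and scikit-learn are installed; the model and data are placeholders:

    import numpy as np
    from sklearn.linear_model import LinearRegression
    from alibi.explainers import ALE

    # Toy tabular data: 200 rows, 3 features (placeholder values).
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 3))
    y = 2 * X[:, 0] - X[:, 1] + rng.normal(scale=0.1, size=200)

    model = LinearRegression().fit(X, y)

    # ALE wraps a plain predict function; names are optional and default to
    # f_0, f_1, ... and c_0 exactly as in the explain() code above.
    ale = ALE(model.predict, feature_names=["a", "b", "c"])
    exp = ale.explain(X, min_bin_points=4)

    print(exp.meta["params"]["min_bin_points"])   # 4
    print(exp.data["ale_values"][0].shape)        # ALE curve for feature "a"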