Dataset preview columns: text (string, lengths 89 to 104k), code_tokens (sequence), avg_line_len (float64, 7.91 to 980), score (float64, 0 to 630).
def vcs_rbridge_context_input_rbridge_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    vcs_rbridge_context = ET.Element("vcs_rbridge_context")
    config = vcs_rbridge_context
    input = ET.SubElement(vcs_rbridge_context, "input")
    rbridge_id = ET.SubElement(input, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "vcs_rbridge_context_input_rbridge_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "vcs_rbridge_context", "=", "ET", ".", "Element", "(", "\"vcs_rbridge_context\"", ")", "config", "=", "vcs_rbridge_context", "input", "=", "ET", ".", "SubElement", "(", "vcs_rbridge_context", ",", "\"input\"", ")", "rbridge_id", "=", "ET", ".", "SubElement", "(", "input", ",", "\"rbridge-id\"", ")", "rbridge_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
40.5
13.083333
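A minimal usage sketch, hedged: `device` is a hypothetical instance of the generated API class, and the callback simply renders the config element instead of dispatching it to a switch.

import xml.etree.ElementTree as ET

def debug_callback(config):
    # render the generated element instead of sending it anywhere
    return ET.tostring(config)

# device is a hypothetical instance exposing the method above
print(device.vcs_rbridge_context_input_rbridge_id(
    rbridge_id='1', callback=debug_callback))
# b'<vcs_rbridge_context><input><rbridge-id>1</rbridge-id></input></vcs_rbridge_context>'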
def only_owner(func):
    """
    Only owner decorator
    Restricts access to a view to the profile owner only
    """
    def decorated(*_, **kwargs):
        id = kwargs['id']
        if not current_user.is_authenticated:
            abort(401)
        elif current_user.id != id:
            abort(403)
        return func(**kwargs)

    return decorated
[ "def", "only_owner", "(", "func", ")", ":", "def", "decorated", "(", "*", "_", ",", "*", "*", "kwargs", ")", ":", "id", "=", "kwargs", "[", "'id'", "]", "if", "not", "current_user", ".", "is_authenticated", ":", "abort", "(", "401", ")", "elif", "current_user", ".", "id", "!=", "id", ":", "abort", "(", "403", ")", "return", "func", "(", "*", "*", "kwargs", ")", "return", "decorated" ]
24.142857
13.142857
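A sketch of applying the decorator to a Flask view; it assumes Flask provides `abort` and Flask-Login provides `current_user`, and the app and route here are hypothetical:

from flask import Flask, abort
from flask_login import current_user

app = Flask(__name__)

@app.route('/profile/<int:id>/edit')
@only_owner
def edit_profile(id):
    # reached only when the logged-in user owns profile `id`
    return 'editing profile {}'.format(id)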
def run_samblaster(job, sam):
    """
    Marks reads as PCR duplicates using SAMBLASTER

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str sam: FileStoreID for SAM file
    :return: FileStoreID for deduped SAM file
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(sam, os.path.join(work_dir, 'input.sam'))
    command = ['/usr/local/bin/samblaster',
               '-i', '/data/input.sam',
               '-o', '/data/output.sam',
               '--ignoreUnmated']
    start_time = time.time()
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/biocontainers/samblaster:0.1.24--0')
    end_time = time.time()
    _log_runtime(job, start_time, end_time, "SAMBLASTER")
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.sam'))
[ "def", "run_samblaster", "(", "job", ",", "sam", ")", ":", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "job", ".", "fileStore", ".", "readGlobalFile", "(", "sam", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'input.sam'", ")", ")", "command", "=", "[", "'/usr/local/bin/samblaster'", ",", "'-i'", ",", "'/data/input.sam'", ",", "'-o'", ",", "'/data/output.sam'", ",", "'--ignoreUnmated'", "]", "start_time", "=", "time", ".", "time", "(", ")", "dockerCall", "(", "job", "=", "job", ",", "workDir", "=", "work_dir", ",", "parameters", "=", "command", ",", "tool", "=", "'quay.io/biocontainers/samblaster:0.1.24--0'", ")", "end_time", "=", "time", ".", "time", "(", ")", "_log_runtime", "(", "job", ",", "start_time", ",", "end_time", ",", "\"SAMBLASTER\"", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'output.sam'", ")", ")" ]
37.73913
14.173913
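A hedged sketch of driving this job from a Toil workflow; the job store path and input file are placeholders:

from toil.common import Toil
from toil.job import Job

if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./jobstore')
    with Toil(options) as workflow:
        sam_id = workflow.importFile('file:///tmp/input.sam')  # placeholder path
        root = Job.wrapJobFn(run_samblaster, sam_id)
        deduped_id = workflow.start(root)  # FileStoreID of the deduped SAM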
def _init_multicast_socket(self):
    """ Init multicast socket

    :rtype: None
    """
    self.debug("()")

    # Create a UDP socket
    self._multicast_socket = socket.socket(
        socket.AF_INET, socket.SOCK_DGRAM
    )

    # Allow reuse of addresses
    self._multicast_socket.setsockopt(
        socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
    )

    # Set multicast interface to local_ip
    self._multicast_socket.setsockopt(
        socket.IPPROTO_IP,
        socket.IP_MULTICAST_IF,
        socket.inet_aton(self._multicast_ip)
    )

    # Set multicast time-to-live
    # Should keep our multicast packets from escaping the local network
    self._multicast_socket.setsockopt(
        socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, self._multicast_ttl
    )

    self._add_membership_multicast_socket()

    # Bind socket
    if platform.system().lower() == "darwin":
        self._multicast_socket.bind(("0.0.0.0", self._multicast_bind_port))
    else:
        self._multicast_socket.bind(
            (self._multicast_ip, self._multicast_bind_port)
        )

    self._listening.append(self._multicast_socket)
[ "def", "_init_multicast_socket", "(", "self", ")", ":", "self", ".", "debug", "(", "\"()\"", ")", "# Create a UDP socket", "self", ".", "_multicast_socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "# Allow reuse of addresses", "self", ".", "_multicast_socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "# Set multicast interface to local_ip", "self", ".", "_multicast_socket", ".", "setsockopt", "(", "socket", ".", "IPPROTO_IP", ",", "socket", ".", "IP_MULTICAST_IF", ",", "socket", ".", "inet_aton", "(", "self", ".", "_multicast_ip", ")", ")", "# Set multicast time-to-live", "# Should keep our multicast packets from escaping the local network", "self", ".", "_multicast_socket", ".", "setsockopt", "(", "socket", ".", "IPPROTO_IP", ",", "socket", ".", "IP_MULTICAST_TTL", ",", "self", ".", "_multicast_ttl", ")", "self", ".", "_add_membership_multicast_socket", "(", ")", "# Bind socket", "if", "platform", ".", "system", "(", ")", ".", "lower", "(", ")", "==", "\"darwin\"", ":", "self", ".", "_multicast_socket", ".", "bind", "(", "(", "\"0.0.0.0\"", ",", "self", ".", "_multicast_bind_port", ")", ")", "else", ":", "self", ".", "_multicast_socket", ".", "bind", "(", "(", "self", ".", "_multicast_ip", ",", "self", ".", "_multicast_bind_port", ")", ")", "self", ".", "_listening", ".", "append", "(", "self", ".", "_multicast_socket", ")" ]
29
16.636364
def generic_visit(self, node):
    """Called if no explicit visitor function exists for a node."""
    for _, value in ast.iter_fields(node):
        if isinstance(value, list):
            self._handle_ast_list(value)
            for item in value:
                if isinstance(item, ast.AST):
                    self.visit(item)
        elif isinstance(value, ast.AST):
            self.visit(value)
[ "def", "generic_visit", "(", "self", ",", "node", ")", ":", "for", "_", ",", "value", "in", "ast", ".", "iter_fields", "(", "node", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "self", ".", "_handle_ast_list", "(", "value", ")", "for", "item", "in", "value", ":", "if", "isinstance", "(", "item", ",", "ast", ".", "AST", ")", ":", "self", ".", "visit", "(", "item", ")", "elif", "isinstance", "(", "value", ",", "ast", ".", "AST", ")", ":", "self", ".", "visit", "(", "value", ")" ]
43
4.7
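This mirrors `ast.NodeVisitor.generic_visit`, adding an `_handle_ast_list` hook that sees each list-valued field before its children are visited. A small visitor built on the stock `ast.NodeVisitor` shows the depth-first traversal order such a method produces:

import ast

class NameCollector(ast.NodeVisitor):
    def __init__(self):
        self.names = []

    def visit_Name(self, node):
        self.names.append(node.id)
        self.generic_visit(node)

collector = NameCollector()
collector.visit(ast.parse('x = y + z'))
print(collector.names)  # ['x', 'y', 'z']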
def parse_theme(self, xml):
    """ Parses a theme from XML returned by Kuler.

    Gets the theme's id, label and swatches.
    All of the swatches are converted to RGB.
    If we have a full description for a theme id in cache,
    parse that to get tags associated with the theme.
    """
    kt = KulerTheme()
    kt.author = xml.getElementsByTagName("author")[0]
    kt.author = kt.author.childNodes[1].childNodes[0].nodeValue
    kt.id = int(self.parse_tag(xml, "id"))
    kt.label = self.parse_tag(xml, "label")
    mode = self.parse_tag(xml, "mode")
    for swatch in xml.getElementsByTagName("swatch"):
        c1 = float(self.parse_tag(swatch, "c1"))
        c2 = float(self.parse_tag(swatch, "c2"))
        c3 = float(self.parse_tag(swatch, "c3"))
        c4 = float(self.parse_tag(swatch, "c4"))
        if mode == "rgb":
            kt.append((c1, c2, c3))
        if mode == "cmyk":
            kt.append(cmyk_to_rgb(c1, c2, c3, c4))
        if mode == "hsv":
            kt.append(colorsys.hsv_to_rgb(c1, c2, c3))
        if mode == "hex":
            kt.append(hex_to_rgb(c1))
        if mode == "lab":
            kt.append(lab_to_rgb(c1, c2, c3))
    # If we have the full theme in cache,
    # parse tags from it.
    if self._cache.exists(self.id_string + str(kt.id)):
        xml = self._cache.read(self.id_string + str(kt.id))
        xml = minidom.parseString(xml)
        for tags in xml.getElementsByTagName("tag"):
            tags = self.parse_tag(tags, "label")
            tags = tags.split(" ")
            kt.tags.extend(tags)
    return kt
[ "def", "parse_theme", "(", "self", ",", "xml", ")", ":", "kt", "=", "KulerTheme", "(", ")", "kt", ".", "author", "=", "xml", ".", "getElementsByTagName", "(", "\"author\"", ")", "[", "0", "]", "kt", ".", "author", "=", "kt", ".", "author", ".", "childNodes", "[", "1", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", "kt", ".", "id", "=", "int", "(", "self", ".", "parse_tag", "(", "xml", ",", "\"id\"", ")", ")", "kt", ".", "label", "=", "self", ".", "parse_tag", "(", "xml", ",", "\"label\"", ")", "mode", "=", "self", ".", "parse_tag", "(", "xml", ",", "\"mode\"", ")", "for", "swatch", "in", "xml", ".", "getElementsByTagName", "(", "\"swatch\"", ")", ":", "c1", "=", "float", "(", "self", ".", "parse_tag", "(", "swatch", ",", "\"c1\"", ")", ")", "c2", "=", "float", "(", "self", ".", "parse_tag", "(", "swatch", ",", "\"c2\"", ")", ")", "c3", "=", "float", "(", "self", ".", "parse_tag", "(", "swatch", ",", "\"c3\"", ")", ")", "c4", "=", "float", "(", "self", ".", "parse_tag", "(", "swatch", ",", "\"c4\"", ")", ")", "if", "mode", "==", "\"rgb\"", ":", "kt", ".", "append", "(", "(", "c1", ",", "c2", ",", "c3", ")", ")", "if", "mode", "==", "\"cmyk\"", ":", "kt", ".", "append", "(", "cmyk_to_rgb", "(", "c1", ",", "c2", ",", "c3", ",", "c4", ")", ")", "if", "mode", "==", "\"hsv\"", ":", "kt", ".", "append", "(", "colorsys", ".", "hsv_to_rgb", "(", "c1", ",", "c2", ",", "c3", ")", ")", "if", "mode", "==", "\"hex\"", ":", "kt", ".", "append", "(", "hex_to_rgb", "(", "c1", ")", ")", "if", "mode", "==", "\"lab\"", ":", "kt", ".", "append", "(", "lab_to_rgb", "(", "c1", ",", "c2", ",", "c3", ")", ")", "# If we have the full theme in cache,", "# parse tags from it.", "if", "self", ".", "_cache", ".", "exists", "(", "self", ".", "id_string", "+", "str", "(", "kt", ".", "id", ")", ")", ":", "xml", "=", "self", ".", "_cache", ".", "read", "(", "self", ".", "id_string", "+", "str", "(", "kt", ".", "id", ")", ")", "xml", "=", "minidom", ".", "parseString", "(", "xml", ")", "for", "tags", "in", "xml", ".", "getElementsByTagName", "(", "\"tag\"", ")", ":", "tags", "=", "self", ".", "parse_tag", "(", "tags", ",", "\"label\"", ")", "tags", "=", "tags", ".", "split", "(", "\" \"", ")", "kt", ".", "tags", ".", "extend", "(", "tags", ")", "return", "kt" ]
37
14.765957
def _expand(self):
    """ Expand the free pool, if possible.

    If out of capacity w.r.t. the defined ID value range, ValueError is
    raised.
    """
    assert not self._free  # free pool is empty
    expand_end = self._expand_start + self._expand_len
    if expand_end > self._range_end:
        # This happens if the size of the value range is not a multiple
        # of the expansion chunk size.
        expand_end = self._range_end
    if self._expand_start == expand_end:
        raise ValueError("Out of capacity in ID pool")
    self._free = set(range(self._expand_start, expand_end))
    self._expand_start = expand_end
[ "def", "_expand", "(", "self", ")", ":", "assert", "not", "self", ".", "_free", "# free pool is empty", "expand_end", "=", "self", ".", "_expand_start", "+", "self", ".", "_expand_len", "if", "expand_end", ">", "self", ".", "_range_end", ":", "# This happens if the size of the value range is not a multiple", "# of the expansion chunk size.", "expand_end", "=", "self", ".", "_range_end", "if", "self", ".", "_expand_start", "==", "expand_end", ":", "raise", "ValueError", "(", "\"Out of capacity in ID pool\"", ")", "self", ".", "_free", "=", "set", "(", "range", "(", "self", ".", "_expand_start", ",", "expand_end", ")", ")", "self", ".", "_expand_start", "=", "expand_end" ]
40.117647
13.882353
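A sketch of the pool class this helper implies; the attribute names come from the method itself, while the class name and `alloc` are hypothetical:

class IdPool:
    def __init__(self, range_start, range_end, expand_len=16):
        self._expand_start = range_start  # next value not yet pooled
        self._range_end = range_end       # exclusive end of the ID range
        self._expand_len = expand_len     # IDs added per expansion
        self._free = set()

    # _expand() as defined above goes here

    def alloc(self):
        if not self._free:
            self._expand()  # raises ValueError once the range is exhausted
        return self._free.pop()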
def item(self, infohash, prefetch=None, cache=False):
    """ Fetch a single item by its info hash.
    """
    return next(self.items(infohash, prefetch, cache))
[ "def", "item", "(", "self", ",", "infohash", ",", "prefetch", "=", "None", ",", "cache", "=", "False", ")", ":", "return", "next", "(", "self", ".", "items", "(", "infohash", ",", "prefetch", ",", "cache", ")", ")" ]
42.75
7.75
def html_serialize(self, attributes, max_length=None):
    """Returns concatenated HTML code with SPAN tag.

    Args:
      attributes (dict): A map of name-value pairs for attributes of
          output SPAN tags.
      max_length (:obj:`int`, optional): Maximum length of span enclosed
          chunk.

    Returns:
      The organized HTML code. (str)
    """
    doc = ET.Element('span')
    for chunk in self:
        if (chunk.has_cjk() and
                not (max_length and len(chunk.word) > max_length)):
            ele = ET.Element('span')
            ele.text = chunk.word
            for key, val in attributes.items():
                ele.attrib[key] = val
            doc.append(ele)
        else:
            # add word without span tag for non-CJK text (e.g. English)
            # by appending it after the last element
            # (note: Element.getchildren() was removed in Python 3.9;
            # list(doc) is the modern equivalent)
            if doc.getchildren():
                if doc.getchildren()[-1].tail is None:
                    doc.getchildren()[-1].tail = chunk.word
                else:
                    doc.getchildren()[-1].tail += chunk.word
            else:
                if doc.text is None:
                    doc.text = chunk.word
                else:
                    doc.text += chunk.word
    result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
    result = html5lib.serialize(
        html5lib.parseFragment(result), sanitize=True,
        quote_attr_values='always')
    return result
[ "def", "html_serialize", "(", "self", ",", "attributes", ",", "max_length", "=", "None", ")", ":", "doc", "=", "ET", ".", "Element", "(", "'span'", ")", "for", "chunk", "in", "self", ":", "if", "(", "chunk", ".", "has_cjk", "(", ")", "and", "not", "(", "max_length", "and", "len", "(", "chunk", ".", "word", ")", ">", "max_length", ")", ")", ":", "ele", "=", "ET", ".", "Element", "(", "'span'", ")", "ele", ".", "text", "=", "chunk", ".", "word", "for", "key", ",", "val", "in", "attributes", ".", "items", "(", ")", ":", "ele", ".", "attrib", "[", "key", "]", "=", "val", "doc", ".", "append", "(", "ele", ")", "else", ":", "# add word without span tag for non-CJK text (e.g. English)", "# by appending it after the last element", "if", "doc", ".", "getchildren", "(", ")", ":", "if", "doc", ".", "getchildren", "(", ")", "[", "-", "1", "]", ".", "tail", "is", "None", ":", "doc", ".", "getchildren", "(", ")", "[", "-", "1", "]", ".", "tail", "=", "chunk", ".", "word", "else", ":", "doc", ".", "getchildren", "(", ")", "[", "-", "1", "]", ".", "tail", "+=", "chunk", ".", "word", "else", ":", "if", "doc", ".", "text", "is", "None", ":", "doc", ".", "text", "=", "chunk", ".", "word", "else", ":", "doc", ".", "text", "+=", "chunk", ".", "word", "result", "=", "ET", ".", "tostring", "(", "doc", ",", "encoding", "=", "'utf-8'", ")", ".", "decode", "(", "'utf-8'", ")", "result", "=", "html5lib", ".", "serialize", "(", "html5lib", ".", "parseFragment", "(", "result", ")", ",", "sanitize", "=", "True", ",", "quote_attr_values", "=", "'always'", ")", "return", "result" ]
33.921053
16.842105
def SingleModeCombine(pupils, modeDiameter=None):
    """ Return the instantaneous coherent fluxes and photometric fluxes
    for a multiway single-mode fibre combiner
    """
    if modeDiameter is None:
        modeDiameter = 0.9 * pupils.shape[-1]
    amplitudes = FibreCouple(pupils, modeDiameter)
    cc = np.conj(amplitudes)
    fluxes = (amplitudes * cc).real
    coherentFluxes = [amplitudes[i] * cc[j]
                      for i in range(1, len(amplitudes))
                      for j in range(i)]
    return fluxes, coherentFluxes
[ "def", "SingleModeCombine", "(", "pupils", ",", "modeDiameter", "=", "None", ")", ":", "if", "modeDiameter", "is", "None", ":", "modeDiameter", "=", "0.9", "*", "pupils", ".", "shape", "[", "-", "1", "]", "amplitudes", "=", "FibreCouple", "(", "pupils", ",", "modeDiameter", ")", "cc", "=", "np", ".", "conj", "(", "amplitudes", ")", "fluxes", "=", "(", "amplitudes", "*", "cc", ")", ".", "real", "coherentFluxes", "=", "[", "amplitudes", "[", "i", "]", "*", "cc", "[", "j", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "amplitudes", ")", ")", "for", "j", "in", "range", "(", "i", ")", "]", "return", "fluxes", ",", "coherentFluxes" ]
36.357143
7.785714
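For N inputs this yields N photometric fluxes and N(N-1)/2 coherent fluxes, one per baseline. A self-contained restatement of the pairwise arithmetic, with made-up amplitudes standing in for the output of FibreCouple:

import numpy as np

amplitudes = np.array([1 + 0j, 0.5 + 0.5j, 0.2 - 0.1j])
cc = np.conj(amplitudes)
fluxes = (amplitudes * cc).real          # 3 photometric fluxes
coherent = [amplitudes[i] * cc[j]
            for i in range(1, len(amplitudes))
            for j in range(i)]           # 3 pairs = N*(N-1)/2 baselines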
def dot(self, vec):
    """Dot product with another vector"""
    if not isinstance(vec, self.__class__):
        raise TypeError('Dot product operand must be a VectorArray')
    if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:
        raise ValueError('Dot product operands must have the same '
                         'number of elements.')
    return np.sum((getattr(self, d)*getattr(vec, d) for d in self.dims), 1)
[ "def", "dot", "(", "self", ",", "vec", ")", ":", "if", "not", "isinstance", "(", "vec", ",", "self", ".", "__class__", ")", ":", "raise", "TypeError", "(", "'Dot product operand must be a VectorArray'", ")", "if", "self", ".", "nV", "!=", "1", "and", "vec", ".", "nV", "!=", "1", "and", "self", ".", "nV", "!=", "vec", ".", "nV", ":", "raise", "ValueError", "(", "'Dot product operands must have the same '", "'number of elements.'", ")", "return", "np", ".", "sum", "(", "(", "getattr", "(", "self", ",", "d", ")", "*", "getattr", "(", "vec", ",", "d", ")", "for", "d", "in", "self", ".", "dims", ")", ",", "1", ")" ]
55.75
20.375
def crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, varNumVol, varTr, varTmpOvsmpl,
                   switchHrfSet, tplPngSize, varPar, dctPrm=None,
                   lgcPrint=True):
    """Create all spatial x feature prf time courses.

    Parameters
    ----------
    aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond]
        Responses of 2D Gauss models to spatial conditions
    aryTmpExpInf : 2d numpy array, shape [unknown, 4]
        Temporal information about conditions
    varNumVol : float, positive
        Number of volumes of the (fMRI) data.
    varTr : float, positive
        Time to repeat (TR) of the (fMRI) experiment.
    varTmpOvsmpl : int, positive
        Factor by which the data has been temporally upsampled.
    switchHrfSet : int, (1, 2, 3)
        Switch to determine which hrf basis functions are used
    tplPngSize : tuple
        Pixel dimensions of the visual space (width, height).
    varPar : int, positive
        Number of processes to run in parallel.
    dctPrm : dictionary, default None
        Dictionary with customized hrf parameters. If this is None, default
        hrf parameters will be used.
    lgcPrint : boolean, default True
        Should print messages be sent to user?

    Returns
    -------
    aryNrlTcConv : 3d numpy array,
                   shape [nr of models, nr of unique features, varNumVol]
        Prf time course models
    """
    # Identify number of unique features
    vecFeat = np.unique(aryTmpExpInf[:, 3])
    vecFeat = vecFeat[np.nonzero(vecFeat)[0]]

    # Preallocate the output array
    aryPrfTc = np.zeros((aryMdlRsp.shape[0], 0, varNumVol), dtype=np.float32)

    # Loop over unique features
    for indFtr, ftr in enumerate(vecFeat):
        if lgcPrint:
            print('---------Create prf time course model for feature ' +
                  str(ftr))
        # Derive spatial conditions, onsets and durations for this specific
        # feature
        aryTmpCnd = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 0]
        aryTmpOns = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 1]
        aryTmpDrt = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 2]
        # Create temporally upsampled neural time courses.
        aryNrlTcTmp = crt_nrl_tc(aryMdlRsp, aryTmpCnd, aryTmpOns, aryTmpDrt,
                                 varTr, varNumVol, varTmpOvsmpl,
                                 lgcPrint=lgcPrint)
        # Convolve with hrf to create model pRF time courses.
        aryPrfTcTmp = crt_prf_tc(aryNrlTcTmp, varNumVol, varTr, varTmpOvsmpl,
                                 switchHrfSet, tplPngSize, varPar,
                                 dctPrm=dctPrm, lgcPrint=lgcPrint)
        # Add temporal time course to time course that will be returned
        aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcTmp), axis=1)

    return aryPrfTc
[ "def", "crt_prf_ftr_tc", "(", "aryMdlRsp", ",", "aryTmpExpInf", ",", "varNumVol", ",", "varTr", ",", "varTmpOvsmpl", ",", "switchHrfSet", ",", "tplPngSize", ",", "varPar", ",", "dctPrm", "=", "None", ",", "lgcPrint", "=", "True", ")", ":", "# Identify number of unique features", "vecFeat", "=", "np", ".", "unique", "(", "aryTmpExpInf", "[", ":", ",", "3", "]", ")", "vecFeat", "=", "vecFeat", "[", "np", ".", "nonzero", "(", "vecFeat", ")", "[", "0", "]", "]", "# Preallocate the output array", "aryPrfTc", "=", "np", ".", "zeros", "(", "(", "aryMdlRsp", ".", "shape", "[", "0", "]", ",", "0", ",", "varNumVol", ")", ",", "dtype", "=", "np", ".", "float32", ")", "# Loop over unique features", "for", "indFtr", ",", "ftr", "in", "enumerate", "(", "vecFeat", ")", ":", "if", "lgcPrint", ":", "print", "(", "'---------Create prf time course model for feature '", "+", "str", "(", "ftr", ")", ")", "# Derive sptial conditions, onsets and durations for this specific", "# feature", "aryTmpCnd", "=", "aryTmpExpInf", "[", "aryTmpExpInf", "[", ":", ",", "3", "]", "==", "ftr", ",", "0", "]", "aryTmpOns", "=", "aryTmpExpInf", "[", "aryTmpExpInf", "[", ":", ",", "3", "]", "==", "ftr", ",", "1", "]", "aryTmpDrt", "=", "aryTmpExpInf", "[", "aryTmpExpInf", "[", ":", ",", "3", "]", "==", "ftr", ",", "2", "]", "# Create temporally upsampled neural time courses.", "aryNrlTcTmp", "=", "crt_nrl_tc", "(", "aryMdlRsp", ",", "aryTmpCnd", ",", "aryTmpOns", ",", "aryTmpDrt", ",", "varTr", ",", "varNumVol", ",", "varTmpOvsmpl", ",", "lgcPrint", "=", "lgcPrint", ")", "# Convolve with hrf to create model pRF time courses.", "aryPrfTcTmp", "=", "crt_prf_tc", "(", "aryNrlTcTmp", ",", "varNumVol", ",", "varTr", ",", "varTmpOvsmpl", ",", "switchHrfSet", ",", "tplPngSize", ",", "varPar", ",", "dctPrm", "=", "dctPrm", ",", "lgcPrint", "=", "lgcPrint", ")", "# Add temporal time course to time course that will be returned", "aryPrfTc", "=", "np", ".", "concatenate", "(", "(", "aryPrfTc", ",", "aryPrfTcTmp", ")", ",", "axis", "=", "1", ")", "return", "aryPrfTc" ]
40.130435
19.956522
def _get_format_name_loader_mapping(self):
    """
    :return: Mappings of format-name and loader class.
    :rtype: dict
    """
    loader_table = self._get_common_loader_mapping()
    loader_table.update(
        {
            "excel": ExcelTableFileLoader,
            "json_lines": JsonLinesTableTextLoader,
            "markdown": MarkdownTableTextLoader,
            "mediawiki": MediaWikiTableTextLoader,
            "ssv": CsvTableFileLoader,
        }
    )

    return loader_table
[ "def", "_get_format_name_loader_mapping", "(", "self", ")", ":", "loader_table", "=", "self", ".", "_get_common_loader_mapping", "(", ")", "loader_table", ".", "update", "(", "{", "\"excel\"", ":", "ExcelTableFileLoader", ",", "\"json_lines\"", ":", "JsonLinesTableTextLoader", ",", "\"markdown\"", ":", "MarkdownTableTextLoader", ",", "\"mediawiki\"", ":", "MediaWikiTableTextLoader", ",", "\"ssv\"", ":", "CsvTableFileLoader", ",", "}", ")", "return", "loader_table" ]
29.833333
16.388889
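The method is a simple registry: format names key loader classes, so callers can dispatch without conditionals. A hedged sketch, where `factory` is a hypothetical instance of the owning class and the loader constructor argument is assumed:

mapping = factory._get_format_name_loader_mapping()
loader_cls = mapping['excel']        # -> ExcelTableFileLoader
loader = loader_cls('tables.xlsx')   # constructor signature is assumed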
async def jsk_vc_resume(self, ctx: commands.Context):
    """
    Resumes a running audio source, if there is one.
    """
    voice = ctx.guild.voice_client

    if not voice.is_paused():
        return await ctx.send("Audio is not paused.")

    voice.resume()
    await ctx.send(f"Resumed audio in {voice.channel.name}.")
[ "async", "def", "jsk_vc_resume", "(", "self", ",", "ctx", ":", "commands", ".", "Context", ")", ":", "voice", "=", "ctx", ".", "guild", ".", "voice_client", "if", "not", "voice", ".", "is_paused", "(", ")", ":", "return", "await", "ctx", ".", "send", "(", "\"Audio is not paused.\"", ")", "voice", ".", "resume", "(", ")", "await", "ctx", ".", "send", "(", "f\"Resumed audio in {voice.channel.name}.\"", ")" ]
28.833333
18.166667
def _create_decoding_layers(self):
    """Create the decoding layers for reconstruction finetuning.

    :return: None. The reconstruction of the input is stored in
        self.reconstruction.
    """
    next_decode = self.encode

    for l, layer in reversed(list(enumerate(self.layers))):
        with tf.name_scope("decode-{}".format(l)):

            # Create decoding variables
            if self.tied_weights:
                dec_w = tf.transpose(self.encoding_w_[l])
            else:
                dec_w = tf.Variable(tf.transpose(
                    self.encoding_w_[l].initialized_value()))
            dec_b = tf.Variable(tf.constant(
                0.1, shape=[dec_w.get_shape().dims[1].value]))
            self.decoding_w.append(dec_w)
            self.decoding_b.append(dec_b)

            y_act = tf.add(
                tf.matmul(next_decode, dec_w),
                dec_b
            )

            if self.finetune_dec_act_func[l] is not None:
                layer_y = self.finetune_dec_act_func[l](y_act)
            else:
                layer_y = y_act  # linear output; None would break dropout

            # the input to the next layer is the output of this layer
            next_decode = tf.nn.dropout(layer_y, self.keep_prob)
            self.layer_nodes.append(next_decode)

    self.reconstruction = next_decode
[ "def", "_create_decoding_layers", "(", "self", ")", ":", "next_decode", "=", "self", ".", "encode", "for", "l", ",", "layer", "in", "reversed", "(", "list", "(", "enumerate", "(", "self", ".", "layers", ")", ")", ")", ":", "with", "tf", ".", "name_scope", "(", "\"decode-{}\"", ".", "format", "(", "l", ")", ")", ":", "# Create decoding variables", "if", "self", ".", "tied_weights", ":", "dec_w", "=", "tf", ".", "transpose", "(", "self", ".", "encoding_w_", "[", "l", "]", ")", "else", ":", "dec_w", "=", "tf", ".", "Variable", "(", "tf", ".", "transpose", "(", "self", ".", "encoding_w_", "[", "l", "]", ".", "initialized_value", "(", ")", ")", ")", "dec_b", "=", "tf", ".", "Variable", "(", "tf", ".", "constant", "(", "0.1", ",", "shape", "=", "[", "dec_w", ".", "get_shape", "(", ")", ".", "dims", "[", "1", "]", ".", "value", "]", ")", ")", "self", ".", "decoding_w", ".", "append", "(", "dec_w", ")", "self", ".", "decoding_b", ".", "append", "(", "dec_b", ")", "y_act", "=", "tf", ".", "add", "(", "tf", ".", "matmul", "(", "next_decode", ",", "dec_w", ")", ",", "dec_b", ")", "if", "self", ".", "finetune_dec_act_func", "[", "l", "]", "is", "not", "None", ":", "layer_y", "=", "self", ".", "finetune_dec_act_func", "[", "l", "]", "(", "y_act", ")", "else", ":", "layer_y", "=", "None", "# the input to the next layer is the output of this layer", "next_decode", "=", "tf", ".", "nn", ".", "dropout", "(", "layer_y", ",", "self", ".", "keep_prob", ")", "self", ".", "layer_nodes", ".", "append", "(", "next_decode", ")", "self", ".", "reconstruction", "=", "next_decode" ]
33.35
20.725
def get_port_from_port_server(portserver_address, pid=None):
    """Request a free port from a system-wide portserver.

    This follows a very simple portserver protocol:
    The request consists of our pid (in ASCII) followed by a newline.
    The response is a port number and a newline, 0 on failure.

    This function is an implementation detail of pick_unused_port().
    It should not normally be called by code outside of this module.

    Args:
      portserver_address: The address (path) of a unix domain socket
        with which to connect to the portserver.  A leading '@'
        character indicates an address in the "abstract namespace."  On
        systems without socket.AF_UNIX, this is an AF_INET address.
      pid: The PID to tell the portserver to associate the reservation with.
        If None, the current process's PID is used.

    Returns:
      The port number on success or None on failure.
    """
    if not portserver_address:
        return None

    # An AF_UNIX address may start with a zero byte, in which case it is in
    # the "abstract namespace", and doesn't have any filesystem
    # representation.  See 'man 7 unix' for details.
    # The convention is to write '@' in the address to represent this
    # zero byte.
    if portserver_address[0] == '@':
        portserver_address = '\0' + portserver_address[1:]

    if pid is None:
        pid = os.getpid()

    try:
        # Create socket.
        if hasattr(socket, 'AF_UNIX'):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        else:
            # fallback to AF_INET if this is not unix
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Connect to portserver.
            sock.connect(portserver_address)

            # Write request.
            sock.sendall(('%d\n' % pid).encode('ascii'))

            # Read response.
            # 1K should be ample buffer space.
            buf = sock.recv(1024)
        finally:
            sock.close()
    except socket.error as e:
        print('Socket error when connecting to portserver:', e,
              file=sys.stderr)
        return None

    try:
        port = int(buf.split(b'\n')[0])
    except ValueError:
        print('Portserver failed to find a port.', file=sys.stderr)
        return None
    _owned_ports.add(port)
    return port
[ "def", "get_port_from_port_server", "(", "portserver_address", ",", "pid", "=", "None", ")", ":", "if", "not", "portserver_address", ":", "return", "None", "# An AF_UNIX address may start with a zero byte, in which case it is in the", "# \"abstract namespace\", and doesn't have any filesystem representation.", "# See 'man 7 unix' for details.", "# The convention is to write '@' in the address to represent this zero byte.", "if", "portserver_address", "[", "0", "]", "==", "'@'", ":", "portserver_address", "=", "'\\0'", "+", "portserver_address", "[", "1", ":", "]", "if", "pid", "is", "None", ":", "pid", "=", "os", ".", "getpid", "(", ")", "try", ":", "# Create socket.", "if", "hasattr", "(", "socket", ",", "'AF_UNIX'", ")", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", ".", "SOCK_STREAM", ")", "else", ":", "# fallback to AF_INET if this is not unix", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "try", ":", "# Connect to portserver.", "sock", ".", "connect", "(", "portserver_address", ")", "# Write request.", "sock", ".", "sendall", "(", "(", "'%d\\n'", "%", "pid", ")", ".", "encode", "(", "'ascii'", ")", ")", "# Read response.", "# 1K should be ample buffer space.", "buf", "=", "sock", ".", "recv", "(", "1024", ")", "finally", ":", "sock", ".", "close", "(", ")", "except", "socket", ".", "error", "as", "e", ":", "print", "(", "'Socket error when connecting to portserver:'", ",", "e", ",", "file", "=", "sys", ".", "stderr", ")", "return", "None", "try", ":", "port", "=", "int", "(", "buf", ".", "split", "(", "b'\\n'", ")", "[", "0", "]", ")", "except", "ValueError", ":", "print", "(", "'Portserver failed to find a port.'", ",", "file", "=", "sys", ".", "stderr", ")", "return", "None", "_owned_ports", ".", "add", "(", "port", ")", "return", "port" ]
35.9375
22.03125
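In the portpicker library the portserver address conventionally comes from the PORTSERVER_ADDRESS environment variable; a hedged usage sketch:

import os

addr = os.environ.get('PORTSERVER_ADDRESS')  # e.g. '@unittest-portserver'
if addr:
    port = get_port_from_port_server(addr)
    if port is not None:
        print('reserved port', port)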
def format_currency(
        number, currency, format=None, locale=LC_NUMERIC,
        currency_digits=True, format_type='standard',
        decimal_quantization=True):
    """Return formatted currency value.

    >>> format_currency(1099.98, 'USD', locale='en_US')
    u'$1,099.98'
    >>> format_currency(1099.98, 'USD', locale='es_CO')
    u'US$\\xa01.099,98'
    >>> format_currency(1099.98, 'EUR', locale='de_DE')
    u'1.099,98\\xa0\\u20ac'

    The format can also be specified explicitly.  The currency is
    placed with the '¤' sign.  As the sign gets repeated the format
    expands (¤ being the symbol, ¤¤ is the currency abbreviation and
    ¤¤¤ is the full name of the currency):

    >>> format_currency(1099.98, 'EUR', u'\xa4\xa4 #,##0.00', locale='en_US')
    u'EUR 1,099.98'
    >>> format_currency(1099.98, 'EUR', u'#,##0.00 \xa4\xa4\xa4',
    ...                 locale='en_US')
    u'1,099.98 euros'

    Currencies usually have a specific number of decimal digits. This function
    favours that information over the given format:

    >>> format_currency(1099.98, 'JPY', locale='en_US')
    u'\\xa51,100'
    >>> format_currency(1099.98, 'COP', u'#,##0.00', locale='es_ES')
    u'1.100'

    However, the number of decimal digits can be overridden from the currency
    information, by setting the last parameter to ``False``:

    >>> format_currency(1099.98, 'JPY', locale='en_US', currency_digits=False)
    u'\\xa51,099.98'
    >>> format_currency(1099.98, 'COP', u'#,##0.00', locale='es_ES',
    ...                 currency_digits=False)
    u'1.099,98'

    If a format is not specified the type of currency format to use from the
    locale can be specified:

    >>> format_currency(1099.98, 'EUR', locale='en_US', format_type='standard')
    u'\\u20ac1,099.98'

    When the given currency format type is not available, an exception is
    raised:

    >>> format_currency('1099.98', 'EUR', locale='root', format_type='unknown')
    Traceback (most recent call last):
        ...
    UnknownCurrencyFormatError: "'unknown' is not a known currency format type"

    By default the locale is allowed to truncate and round a high-precision
    number by forcing its format pattern onto the decimal part. You can bypass
    this behavior with the `decimal_quantization` parameter:

    >>> format_currency(1099.9876, 'USD', locale='en_US')
    u'$1,099.99'
    >>> format_currency(1099.9876, 'USD', locale='en_US',
    ...                 decimal_quantization=False)
    u'$1,099.9876'

    :param number: the number to format
    :param currency: the currency code
    :param format: the format string to use
    :param locale: the `Locale` object or locale identifier
    :param currency_digits: use the currency's natural number of decimal
                            digits
    :param format_type: the currency format type to use
    :param decimal_quantization: Truncate and round high-precision numbers to
                                 the format pattern. Defaults to `True`.
    """
    locale = Locale.parse(locale)
    if format:
        pattern = parse_pattern(format)
    else:
        try:
            p = locale.currency_formats[format_type]
            pattern = NumberPattern(
                p.pattern, p.prefix, p.suffix, p.grouping,
                p.int_prec, p.frac_prec, p.exp_prec, p.exp_plus)
        except KeyError:
            raise UnknownCurrencyFormatError(
                "%r is not a known currency format type" % format_type)

    return pattern.apply(
        number, locale, currency=currency, currency_digits=currency_digits,
        decimal_quantization=decimal_quantization)
[ "def", "format_currency", "(", "number", ",", "currency", ",", "format", "=", "None", ",", "locale", "=", "LC_NUMERIC", ",", "currency_digits", "=", "True", ",", "format_type", "=", "'standard'", ",", "decimal_quantization", "=", "True", ")", ":", "locale", "=", "Locale", ".", "parse", "(", "locale", ")", "if", "format", ":", "pattern", "=", "parse_pattern", "(", "format", ")", "else", ":", "try", ":", "p", "=", "locale", ".", "currency_formats", "[", "format_type", "]", "pattern", "=", "NumberPattern", "(", "p", ".", "pattern", ",", "p", ".", "prefix", ",", "p", ".", "suffix", ",", "p", ".", "grouping", ",", "p", ".", "int_prec", ",", "p", ".", "frac_prec", ",", "p", ".", "exp_prec", ",", "p", ".", "exp_plus", ")", "except", "KeyError", ":", "raise", "UnknownCurrencyFormatError", "(", "\"%r is not a known currency format type\"", "%", "format_type", ")", "return", "pattern", ".", "apply", "(", "number", ",", "locale", ",", "currency", "=", "currency", ",", "currency_digits", "=", "currency_digits", ",", "decimal_quantization", "=", "decimal_quantization", ")" ]
38.923077
24.10989
def data_url_scheme(self):
    """Get svg in Data URL Scheme format.
    """
    # TODO: move to web.app or make it function
    # remove #svg from dataframe
    encoded = base64.b64encode(self.contents().encode())
    return "data:image/svg+xml;base64," + encoded.decode()
[ "def", "data_url_scheme", "(", "self", ")", ":", "# TODO: move to web.app or make it function", "# remove #svg from dataframe", "encoded", "=", "base64", ".", "b64encode", "(", "self", ".", "contents", "(", ")", ".", "encode", "(", ")", ")", "return", "\"data:image/svg+xml;base64,\"", "+", "encoded", ".", "decode", "(", ")" ]
41.571429
10.142857
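A one-line consumption sketch, assuming `svg` is an instance of the owning class: the returned string can be dropped straight into an img tag.

html = '<img alt="figure" src="{}"/>'.format(svg.data_url_scheme())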
def name(self, name):
    """Set the member name.

    Note that a member name cannot appear in other enums, or generally
    anywhere else in the IDB.
    """
    success = idaapi.set_enum_member_name(self.cid, name)
    if not success:
        raise exceptions.CantRenameEnumMember(
            "Failed renaming {!r} to {!r}. Does the name exist "
            "somewhere else?".format(self.name, name))
[ "def", "name", "(", "self", ",", "name", ")", ":", "success", "=", "idaapi", ".", "set_enum_member_name", "(", "self", ".", "cid", ",", "name", ")", "if", "not", "success", ":", "raise", "exceptions", ".", "CantRenameEnumMember", "(", "\"Failed renaming {!r} to {!r}. Does the name exist somewhere else?\"", ".", "format", "(", "self", ".", "name", ",", "name", ")", ")" ]
41.2
21.6
def boll(self, n, dev, array=False):
    """Bollinger Bands"""
    mid = self.sma(n, array)
    std = self.std(n, array)

    up = mid + std * dev
    down = mid - std * dev

    return up, down
[ "def", "boll", "(", "self", ",", "n", ",", "dev", ",", "array", "=", "False", ")", ":", "mid", "=", "self", ".", "sma", "(", "n", ",", "array", ")", "std", "=", "self", ".", "std", "(", "n", ",", "array", ")", "up", "=", "mid", "+", "std", "*", "dev", "down", "=", "mid", "-", "std", "*", "dev", "return", "up", ",", "down" ]
22.111111
15.444444
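The bands are the n-period simple moving average plus/minus `dev` standard deviations. A standalone numpy restatement of the same arithmetic over a made-up price series:

import numpy as np

closes = np.array([10.0, 10.2, 10.1, 10.4, 10.3, 10.6, 10.5, 10.8])
n, dev = 5, 2
window = closes[-n:]
mid = window.mean()   # sma(n)
std = window.std()    # std(n)
up, down = mid + std * dev, mid - std * dev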
def create_proforma_invoice(sender, instance, created, **kwargs):
    """
    For every new Order with billing data defined, creates a proforma
    invoice, which serves as an order confirmation document
    """
    if created:
        Invoice.create(instance, Invoice.INVOICE_TYPES['PROFORMA'])
[ "def", "create_proforma_invoice", "(", "sender", ",", "instance", ",", "created", ",", "*", "*", "kwargs", ")", ":", "if", "created", ":", "Invoice", ".", "create", "(", "instance", ",", "Invoice", ".", "INVOICE_TYPES", "[", "'PROFORMA'", "]", ")" ]
40.428571
17
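The `(sender, instance, created, **kwargs)` signature is that of a Django `post_save` receiver; a sketch of wiring it up, assuming the app defines an `Order` model:

from django.db.models.signals import post_save
from myapp.models import Order  # hypothetical app model

# fire on every Order save; the receiver itself filters on `created`
post_save.connect(create_proforma_invoice, sender=Order)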
def _check_cron_env(user, name, value=None):
    '''
    Return the environment changes
    '''
    if value is None:
        value = ""  # Matching value set in salt.modules.cron._render_tab
    lst = __salt__['cron.list_tab'](user)
    for env in lst['env']:
        if name == env['name']:
            if value != env['value']:
                return 'update'
            return 'present'
    return 'absent'
[ "def", "_check_cron_env", "(", "user", ",", "name", ",", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "value", "=", "\"\"", "# Matching value set in salt.modules.cron._render_tab", "lst", "=", "__salt__", "[", "'cron.list_tab'", "]", "(", "user", ")", "for", "env", "in", "lst", "[", "'env'", "]", ":", "if", "name", "==", "env", "[", "'name'", "]", ":", "if", "value", "!=", "env", "[", "'value'", "]", ":", "return", "'update'", "return", "'present'", "return", "'absent'" ]
29.133333
15.4
def p_literal_list(self, p):
    """literal_list : literal_list LITERAL
                    | LITERAL"""
    # [1:-1] strips the surrounding quote characters from the LITERAL token
    if len(p) == 3:
        p[0] = p[1] + [p[2][1:-1]]
    else:
        p[0] = [p[1][1:-1]]
[ "def", "p_literal_list", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "2", "]", "[", "1", ":", "-", "1", "]", "]", "else", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "[", "1", ":", "-", "1", "]", "]" ]
26.875
13.375
def get(self, agentml, user=None, key=None):
    """
    Evaluate and return the current active topic

    :param user: The active user object
    :type  user: agentml.User or None

    :param agentml: The active AgentML instance
    :type  agentml: AgentML

    :param key: The user id (defaults to the current user if None)
    :type  key: str

    :return: Active topic of the user
    :rtype : str or None
    """
    user = agentml.get_user(key) if key else user
    if not user:
        return

    return user.id
[ "def", "get", "(", "self", ",", "agentml", ",", "user", "=", "None", ",", "key", "=", "None", ")", ":", "user", "=", "agentml", ".", "get_user", "(", "key", ")", "if", "key", "else", "user", "if", "not", "user", ":", "return", "return", "user", ".", "id" ]
27.95
16.65
def wrap_io_os_err(e):
    '''Formats IO and OS error messages for wrapping in FSQExceptions'''
    msg = ''
    if e.strerror:
        msg = e.strerror
    if e.message:  # note: exception .message exists on Python 2 only
        msg = ' '.join([e.message, msg])
    if e.filename:
        msg = ': '.join([msg, e.filename])
    return msg
[ "def", "wrap_io_os_err", "(", "e", ")", ":", "msg", "=", "''", "if", "e", ".", "strerror", ":", "msg", "=", "e", ".", "strerror", "if", "e", ".", "message", ":", "msg", "=", "' '", ".", "join", "(", "[", "e", ".", "message", ",", "msg", "]", ")", "if", "e", ".", "filename", ":", "msg", "=", "': '", ".", "join", "(", "[", "msg", ",", "e", ".", "filename", "]", ")", "return", "msg" ]
27.9
18.9
def list_cameras():
    """ List all attached USB cameras that are supported by libgphoto2.

    :return:    All recognized cameras
    :rtype:     list of :py:class:`Camera`
    """
    ctx = lib.gp_context_new()
    camlist_p = new_gp_object("CameraList")
    port_list_p = new_gp_object("GPPortInfoList")
    lib.gp_port_info_list_load(port_list_p)
    abilities_list_p = new_gp_object("CameraAbilitiesList")
    lib.gp_abilities_list_load(abilities_list_p, ctx)
    lib.gp_abilities_list_detect(abilities_list_p, port_list_p,
                                 camlist_p, ctx)
    out = []
    for idx in range(lib.gp_list_count(camlist_p)):
        name = get_string(lib.gp_list_get_name, camlist_p, idx)
        value = get_string(lib.gp_list_get_value, camlist_p, idx)
        # Skip iteration if no matches
        matches = re.match(r"usb:(\d+),(\d+)", value)
        if not matches:
            continue
        bus_no, device_no = (int(x) for x in matches.groups())
        abilities = ffi.new("CameraAbilities*")
        ability_idx = lib.gp_abilities_list_lookup_model(
            abilities_list_p, name.encode())
        lib.gp_abilities_list_get_abilities(abilities_list_p, ability_idx,
                                            abilities)
        if abilities.device_type == lib.GP_DEVICE_STILL_CAMERA:
            out.append(Camera(bus_no, device_no, lazy=True,
                              _abilities=abilities))
    lib.gp_list_free(camlist_p)
    lib.gp_port_info_list_free(port_list_p)
    lib.gp_abilities_list_free(abilities_list_p)
    return out
[ "def", "list_cameras", "(", ")", ":", "ctx", "=", "lib", ".", "gp_context_new", "(", ")", "camlist_p", "=", "new_gp_object", "(", "\"CameraList\"", ")", "port_list_p", "=", "new_gp_object", "(", "\"GPPortInfoList\"", ")", "lib", ".", "gp_port_info_list_load", "(", "port_list_p", ")", "abilities_list_p", "=", "new_gp_object", "(", "\"CameraAbilitiesList\"", ")", "lib", ".", "gp_abilities_list_load", "(", "abilities_list_p", ",", "ctx", ")", "lib", ".", "gp_abilities_list_detect", "(", "abilities_list_p", ",", "port_list_p", ",", "camlist_p", ",", "ctx", ")", "out", "=", "[", "]", "for", "idx", "in", "range", "(", "lib", ".", "gp_list_count", "(", "camlist_p", ")", ")", ":", "name", "=", "get_string", "(", "lib", ".", "gp_list_get_name", ",", "camlist_p", ",", "idx", ")", "value", "=", "get_string", "(", "lib", ".", "gp_list_get_value", ",", "camlist_p", ",", "idx", ")", "# Skip iteration if no matches", "matches", "=", "re", ".", "match", "(", "r\"usb:(\\d+),(\\d+)\"", ",", "value", ")", "if", "not", "matches", ":", "continue", "bus_no", ",", "device_no", "=", "(", "int", "(", "x", ")", "for", "x", "in", "matches", ".", "groups", "(", ")", ")", "abilities", "=", "ffi", ".", "new", "(", "\"CameraAbilities*\"", ")", "ability_idx", "=", "lib", ".", "gp_abilities_list_lookup_model", "(", "abilities_list_p", ",", "name", ".", "encode", "(", ")", ")", "lib", ".", "gp_abilities_list_get_abilities", "(", "abilities_list_p", ",", "ability_idx", ",", "abilities", ")", "if", "abilities", ".", "device_type", "==", "lib", ".", "GP_DEVICE_STILL_CAMERA", ":", "out", ".", "append", "(", "Camera", "(", "bus_no", ",", "device_no", ",", "lazy", "=", "True", ",", "_abilities", "=", "abilities", ")", ")", "lib", ".", "gp_list_free", "(", "camlist_p", ")", "lib", ".", "gp_port_info_list_free", "(", "port_list_p", ")", "lib", ".", "gp_abilities_list_free", "(", "abilities_list_p", ")", "return", "out" ]
41.567568
15.405405
def hairball_files(self, paths, extensions):
    """Yield filepath to files with the proper extension within paths."""
    def add_file(filename):
        return os.path.splitext(filename)[1] in extensions

    while paths:
        arg_path = paths.pop(0)
        if os.path.isdir(arg_path):
            found = False
            for path, dirs, files in os.walk(arg_path):
                dirs.sort()  # Traverse in sorted order
                for filename in sorted(files):
                    if add_file(filename):
                        yield os.path.join(path, filename)
                        found = True
            if not found:
                if not self.options.quiet:
                    print('No files found in {}'.format(arg_path))
        elif add_file(arg_path):
            yield arg_path
        elif not self.options.quiet:
            print('Invalid file {}'.format(arg_path))
            print('Did you forget to load a Kurt plugin (-k)?')
[ "def", "hairball_files", "(", "self", ",", "paths", ",", "extensions", ")", ":", "def", "add_file", "(", "filename", ")", ":", "return", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "in", "extensions", "while", "paths", ":", "arg_path", "=", "paths", ".", "pop", "(", "0", ")", "if", "os", ".", "path", ".", "isdir", "(", "arg_path", ")", ":", "found", "=", "False", "for", "path", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "arg_path", ")", ":", "dirs", ".", "sort", "(", ")", "# Traverse in sorted order", "for", "filename", "in", "sorted", "(", "files", ")", ":", "if", "add_file", "(", "filename", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "found", "=", "True", "if", "not", "found", ":", "if", "not", "self", ".", "options", ".", "quiet", ":", "print", "(", "'No files found in {}'", ".", "format", "(", "arg_path", ")", ")", "elif", "add_file", "(", "arg_path", ")", ":", "yield", "arg_path", "elif", "not", "self", ".", "options", ".", "quiet", ":", "print", "(", "'Invalid file {}'", ".", "format", "(", "arg_path", ")", ")", "print", "(", "'Did you forget to load a Kurt plugin (-k)?'", ")" ]
44.695652
12.73913
def build(self, search, raw_query):
    """Build query.

    :param search: Search query instance
    :param raw_query: Raw query arguments dictionary
    """
    unmatched_items = {}
    for expression, value in raw_query.items():
        # Parse query expression into tokens.
        tokens = expression.split(TOKEN_SEPARATOR)
        field = tokens[0]
        tail = tokens[1:]

        if field not in self.fields:
            unmatched_items[expression] = value
            continue

        # Map field alias to final field.
        field = self.fields_map.get(field, field)

        # Parse lookup expression. Currently only no token or a single
        # token is allowed.
        if tail:
            if len(tail) > 1:
                raise NotImplementedError(
                    "Nested lookup expressions are not supported")

            lookup = self.get_lookup(tail[0])
            search = lookup.apply(search, field, value)
        else:
            # Default lookup.
            custom_filter = getattr(
                self.custom_filter_object,
                'custom_filter_{}'.format(field), None)
            if custom_filter is not None:
                search = custom_filter(value, search)
            elif isinstance(value, list):
                # Default is 'should' between matches. If you need anything
                # else, a custom filter for this field should be implemented.
                filters = [Q('match', **{field: item}) for item in value]
                search = search.query('bool', should=filters)
            else:
                search = search.query(
                    'match',
                    **{field: {'query': value, 'operator': 'and'}})

    return (search, unmatched_items)
[ "def", "build", "(", "self", ",", "search", ",", "raw_query", ")", ":", "unmatched_items", "=", "{", "}", "for", "expression", ",", "value", "in", "raw_query", ".", "items", "(", ")", ":", "# Parse query expression into tokens.", "tokens", "=", "expression", ".", "split", "(", "TOKEN_SEPARATOR", ")", "field", "=", "tokens", "[", "0", "]", "tail", "=", "tokens", "[", "1", ":", "]", "if", "field", "not", "in", "self", ".", "fields", ":", "unmatched_items", "[", "expression", "]", "=", "value", "continue", "# Map field alias to final field.", "field", "=", "self", ".", "fields_map", ".", "get", "(", "field", ",", "field", ")", "# Parse lookup expression. Currently only no token or a single token is allowed.", "if", "tail", ":", "if", "len", "(", "tail", ")", ">", "1", ":", "raise", "NotImplementedError", "(", "\"Nested lookup expressions are not supported\"", ")", "lookup", "=", "self", ".", "get_lookup", "(", "tail", "[", "0", "]", ")", "search", "=", "lookup", ".", "apply", "(", "search", ",", "field", ",", "value", ")", "else", ":", "# Default lookup.", "custom_filter", "=", "getattr", "(", "self", ".", "custom_filter_object", ",", "'custom_filter_{}'", ".", "format", "(", "field", ")", ",", "None", ")", "if", "custom_filter", "is", "not", "None", ":", "search", "=", "custom_filter", "(", "value", ",", "search", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "# Default is 'should' between matches. If you need anything else,", "# a custom filter for this field should be implemented.", "filters", "=", "[", "Q", "(", "'match'", ",", "*", "*", "{", "field", ":", "item", "}", ")", "for", "item", "in", "value", "]", "search", "=", "search", ".", "query", "(", "'bool'", ",", "should", "=", "filters", ")", "else", ":", "search", "=", "search", ".", "query", "(", "'match'", ",", "*", "*", "{", "field", ":", "{", "'query'", ":", "value", ",", "'operator'", ":", "'and'", "}", "}", ")", "return", "(", "search", ",", "unmatched_items", ")" ]
41.214286
21.880952
def subscribe(self, topic, callback, ordered=True):
    """Subscribe to future messages in the given topic

    The contents of topic should be in the format created by self.publish,
    with a sequence number and message type encoded as a json string.

    Wildcard topics containing + and # are allowed and are matched
    against the topics of incoming messages.

    Args:
        topic (string): The MQTT topic to subscribe to
        callback (callable): The callback to call when a new message is
            received.  The signature of callback should be
            callback(sequence, topic, type, message)
        ordered (bool): Whether messages on this topic have a sequence
            number that must be checked and queued to ensure that packets
            are received in order
    """
    if '+' in topic or '#' in topic:
        regex = re.compile(topic.replace('+', '[^/]+').replace('#', '.*'))
        self.wildcard_queues.append((topic, regex, callback, ordered))
    else:
        self.queues[topic] = PacketQueue(0, callback, ordered)

    try:
        self.client.subscribe(topic, 1, self._on_receive)
    except operationError as exc:
        raise InternalError("Could not subscribe to topic", topic=topic,
                            message=exc.message)
[ "def", "subscribe", "(", "self", ",", "topic", ",", "callback", ",", "ordered", "=", "True", ")", ":", "if", "'+'", "in", "topic", "or", "'#'", "in", "topic", ":", "regex", "=", "re", ".", "compile", "(", "topic", ".", "replace", "(", "'+'", ",", "'[^/]+'", ")", ".", "replace", "(", "'#'", ",", "'.*'", ")", ")", "self", ".", "wildcard_queues", ".", "append", "(", "(", "topic", ",", "regex", ",", "callback", ",", "ordered", ")", ")", "else", ":", "self", ".", "queues", "[", "topic", "]", "=", "PacketQueue", "(", "0", ",", "callback", ",", "ordered", ")", "try", ":", "self", ".", "client", ".", "subscribe", "(", "topic", ",", "1", ",", "self", ".", "_on_receive", ")", "except", "operationError", "as", "exc", ":", "raise", "InternalError", "(", "\"Could not subscribe to topic\"", ",", "topic", "=", "topic", ",", "message", "=", "exc", ".", "message", ")" ]
47.076923
29.423077
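The wildcard branch translates MQTT patterns into regular expressions: `+` becomes `[^/]+` (exactly one topic level) and `#` becomes `.*` (any suffix). A standalone check of that translation:

import re

pattern = 'devices/+/telemetry/#'
regex = re.compile(pattern.replace('+', '[^/]+').replace('#', '.*'))
print(bool(regex.match('devices/abc123/telemetry/temp')))  # True
print(bool(regex.match('devices/a/b/telemetry/temp')))     # False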
def _setup_resources():
    """Attempt to increase resource limits up to hard limits.

    This allows us to avoid out of file handle limits where we can
    move beyond the soft limit up to the hard limit.
    """
    target_procs = 10240
    cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
    target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
    resource.setrlimit(resource.RLIMIT_NPROC,
                       (max(cur_proc, target_proc), max_proc))
    cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
    target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
    resource.setrlimit(resource.RLIMIT_NOFILE,
                       (max(cur_hdls, target_hdls), max_hdls))
[ "def", "_setup_resources", "(", ")", ":", "target_procs", "=", "10240", "cur_proc", ",", "max_proc", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NPROC", ")", "target_proc", "=", "min", "(", "max_proc", ",", "target_procs", ")", "if", "max_proc", ">", "0", "else", "target_procs", "resource", ".", "setrlimit", "(", "resource", ".", "RLIMIT_NPROC", ",", "(", "max", "(", "cur_proc", ",", "target_proc", ")", ",", "max_proc", ")", ")", "cur_hdls", ",", "max_hdls", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NOFILE", ")", "target_hdls", "=", "min", "(", "max_hdls", ",", "target_procs", ")", "if", "max_hdls", ">", "0", "else", "target_procs", "resource", ".", "setrlimit", "(", "resource", ".", "RLIMIT_NOFILE", ",", "(", "max", "(", "cur_hdls", ",", "target_hdls", ")", ",", "max_hdls", ")", ")" ]
53.461538
25.615385
def matches(self, desc):
    """Determines if a given label descriptor matches this enum instance

    Args:
      desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
        the instance to test

    Return:
      `True` if desc is supported, otherwise `False`
    """
    desc_value_type = desc.valueType or ValueType.STRING  # default not parsed
    return (self.label_name == desc.key and
            self.value_type == desc_value_type)
[ "def", "matches", "(", "self", ",", "desc", ")", ":", "desc_value_type", "=", "desc", ".", "valueType", "or", "ValueType", ".", "STRING", "# default not parsed", "return", "(", "self", ".", "label_name", "==", "desc", ".", "key", "and", "self", ".", "value_type", "==", "desc_value_type", ")" ]
36.285714
23.5
def regenerate_models(self, propnames=None, exclude=[], deep=False):
    r"""
    Re-runs the specified model or models.

    Parameters
    ----------
    propnames : string or list of strings
        The list of property names to be regenerated.  If None are given
        then ALL models are re-run (except for those whose ``regen_mode``
        is 'constant').
    exclude : list of strings
        Since the default behavior is to run ALL models, this can be used
        to exclude specific models.  It may be more convenient to supply
        a list of 2 models to exclude than to specify 8 models to include.
    deep : boolean
        Specifies whether or not to regenerate models on all associated
        objects.  For instance, if ``True``, then all Physics models will
        be regenerated when method is called on the corresponding Phase.
        The default is ``False``.  The method does not work in reverse,
        so regenerating models on a Physics will not update a Phase.

    """
    # If empty list of propnames was given, do nothing and return
    if type(propnames) is list and len(propnames) == 0:
        return
    if type(propnames) is str:  # Convert string to list if necessary
        propnames = [propnames]
    if propnames is None:  # If no props given, then regenerate them all
        propnames = self.models.dependency_list()
    # If some props are to be excluded, remove them from list
    if len(exclude) > 0:
        propnames = [i for i in propnames if i not in exclude]
    # Re-order given propnames according to dependency tree
    self_models = self.models.dependency_list()
    propnames = [i for i in self_models if i in propnames]
    if deep:
        other_models = None  # Will trigger regen of ALL models
    else:
        # Make list of given propnames that are not in self
        other_models = list(set(propnames).difference(set(self_models)))
    # The following has some redundant lines, but is easier to understand
    if self._isa('phase'):
        # Start by regenerating models on self
        for item in propnames:
            self._regen(item)
        # Then regen models on associated objects, if any in other_models
        for phys in self.project.find_physics(phase=self):
            phys.regenerate_models(propnames=other_models, deep=False)
    elif self._isa('network'):  # Repeat for other object types
        for item in propnames:
            self._regen(item)
        for geom in self.project.geometries().values():
            geom.regenerate_models(propnames=other_models, deep=False)
    else:
        for item in propnames:
            self._regen(item)
[ "def", "regenerate_models", "(", "self", ",", "propnames", "=", "None", ",", "exclude", "=", "[", "]", ",", "deep", "=", "False", ")", ":", "# If empty list of propnames was given, do nothing and return", "if", "type", "(", "propnames", ")", "is", "list", "and", "len", "(", "propnames", ")", "==", "0", ":", "return", "if", "type", "(", "propnames", ")", "is", "str", ":", "# Convert string to list if necessary", "propnames", "=", "[", "propnames", "]", "if", "propnames", "is", "None", ":", "# If no props given, then regenerate them all", "propnames", "=", "self", ".", "models", ".", "dependency_list", "(", ")", "# If some props are to be excluded, remove them from list", "if", "len", "(", "exclude", ")", ">", "0", ":", "propnames", "=", "[", "i", "for", "i", "in", "propnames", "if", "i", "not", "in", "exclude", "]", "# Re-order given propnames according to dependency tree", "self_models", "=", "self", ".", "models", ".", "dependency_list", "(", ")", "propnames", "=", "[", "i", "for", "i", "in", "self_models", "if", "i", "in", "propnames", "]", "if", "deep", ":", "other_models", "=", "None", "# Will trigger regen of ALL models", "else", ":", "# Make list of given propnames that are not in self", "other_models", "=", "list", "(", "set", "(", "propnames", ")", ".", "difference", "(", "set", "(", "self_models", ")", ")", ")", "# The following has some redundant lines, but is easier to understand", "if", "self", ".", "_isa", "(", "'phase'", ")", ":", "# Start be regenerating models on self", "for", "item", "in", "propnames", ":", "self", ".", "_regen", "(", "item", ")", "# Then regen models on associated objects, if any in other_models", "for", "phys", "in", "self", ".", "project", ".", "find_physics", "(", "phase", "=", "self", ")", ":", "phys", ".", "regenerate_models", "(", "propnames", "=", "other_models", ",", "deep", "=", "False", ")", "elif", "self", ".", "_isa", "(", "'network'", ")", ":", "# Repeat for other object types", "for", "item", "in", "propnames", ":", "self", ".", "_regen", "(", "item", ")", "for", "geom", "in", "self", ".", "project", ".", "geometries", "(", ")", ".", "values", "(", ")", ":", "geom", ".", "regenerate_models", "(", "propnames", "=", "other_models", ",", "deep", "=", "False", ")", "else", ":", "for", "item", "in", "propnames", ":", "self", ".", "_regen", "(", "item", ")" ]
47.610169
23.627119
def _solve(self, x0, A, l, u, xmin, xmax):
    """ Solves using Python Interior Point Solver (PIPS).
    """
    s = pips(self._costfcn, x0, A, l, u, xmin, xmax,
             self._consfcn, self._hessfcn, self.opt)
    return s
[ "def", "_solve", "(", "self", ",", "x0", ",", "A", ",", "l", ",", "u", ",", "xmin", ",", "xmax", ")", ":", "s", "=", "pips", "(", "self", ".", "_costfcn", ",", "x0", ",", "A", ",", "l", ",", "u", ",", "xmin", ",", "xmax", ",", "self", ".", "_consfcn", ",", "self", ".", "_hessfcn", ",", "self", ".", "opt", ")", "return", "s" ]
40.333333
9.666667
def write_config_file(self, f, comments):
    """This method writes a sample config file, with attributes,
    descriptions, sample values and required flags, using the
    configuration object properties.
    """
    if comments:
        f.write("#####################################\n")
        f.write("# Section : ")
        f.write("#".join(self.get_representation()) + "\n")
        f.write("#####################################\n")
    f.write("[" + self._name + "]\n")
    if self._desc and comments:
        f.write("# Description : ")
        for i in self._desc.split('\n'):
            f.write("# ")
            f.write(i)
            f.write("\n")
    f.write("\n")
[ "def", "write_config_file", "(", "self", ",", "f", ",", "comments", ")", ":", "if", "comments", ":", "f", ".", "write", "(", "\"#####################################\\n\"", ")", "f", ".", "write", "(", "\"# Section : \"", ")", "f", ".", "write", "(", "\"#\"", ".", "join", "(", "self", ".", "get_representation", "(", ")", ")", "+", "\"\\n\"", ")", "f", ".", "write", "(", "\"#####################################\\n\"", ")", "f", ".", "write", "(", "\"[\"", "+", "self", ".", "_name", "+", "\"]\\n\"", ")", "if", "self", ".", "_desc", "and", "comments", ":", "f", ".", "write", "(", "\"# Description : \"", ")", "for", "i", "in", "self", ".", "_desc", ".", "split", "(", "'\\n'", ")", ":", "f", ".", "write", "(", "\"# \"", ")", "f", ".", "write", "(", "i", ")", "f", ".", "write", "(", "\"\\n\"", ")", "f", ".", "write", "(", "\"\\n\"", ")" ]
40.222222
11.388889
def monitor_experiment(args): '''monitor the experiment''' if args.time <= 0: print_error('please input a positive integer as time interval, the unit is second.') exit(1) while True: try: os.system('clear') update_experiment() show_experiment_info() time.sleep(args.time) except KeyboardInterrupt: exit(0) except Exception as exception: print_error(exception) exit(1)
[ "def", "monitor_experiment", "(", "args", ")", ":", "if", "args", ".", "time", "<=", "0", ":", "print_error", "(", "'please input a positive integer as time interval, the unit is second.'", ")", "exit", "(", "1", ")", "while", "True", ":", "try", ":", "os", ".", "system", "(", "'clear'", ")", "update_experiment", "(", ")", "show_experiment_info", "(", ")", "time", ".", "sleep", "(", "args", ".", "time", ")", "except", "KeyboardInterrupt", ":", "exit", "(", "0", ")", "except", "Exception", "as", "exception", ":", "print_error", "(", "exception", ")", "exit", "(", "1", ")" ]
30.5
16
def _muck_w_date(record): """muck with the date because EPW starts counting from 1 and goes to 24.""" # minute 60 is actually minute 0? temp_d = datetime.datetime(int(record['Year']), int(record['Month']), int(record['Day']), int(record['Hour']) % 24, int(record['Minute']) % 60) d_off = int(record['Hour'])//24 # hour 24 is actually hour 0 if d_off > 0: temp_d += datetime.timedelta(days=d_off) return temp_d
[ "def", "_muck_w_date", "(", "record", ")", ":", "# minute 60 is actually minute 0?", "temp_d", "=", "datetime", ".", "datetime", "(", "int", "(", "record", "[", "'Year'", "]", ")", ",", "int", "(", "record", "[", "'Month'", "]", ")", ",", "int", "(", "record", "[", "'Day'", "]", ")", ",", "int", "(", "record", "[", "'Hour'", "]", ")", "%", "24", ",", "int", "(", "record", "[", "'Minute'", "]", ")", "%", "60", ")", "d_off", "=", "int", "(", "record", "[", "'Hour'", "]", ")", "//", "24", "# hour 24 is actually hour 0", "if", "d_off", ">", "0", ":", "temp_d", "+=", "datetime", ".", "timedelta", "(", "days", "=", "d_off", ")", "return", "temp_d" ]
49.6
18.5
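A short worked example of the hour wrap-around handled above. EPW hours run from 1 to 24 rather than 0 to 23, so hour 24 of one day is really hour 0 of the next; the record dict below is a hypothetical EPW-style row used only for illustration.

    import datetime

    # Hypothetical EPW-style row: hour 24 of Jan 1 is hour 0 of Jan 2.
    record = {'Year': '2019', 'Month': '1', 'Day': '1', 'Hour': '24', 'Minute': '60'}

    temp_d = datetime.datetime(int(record['Year']), int(record['Month']),
                               int(record['Day']), int(record['Hour']) % 24,
                               int(record['Minute']) % 60)
    temp_d += datetime.timedelta(days=int(record['Hour']) // 24)
    print(temp_d)  # 2019-01-02 00:00:00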
def _unpack_body(self): """Unpack `body` and replace it with the result.""" obj = self._get_body_instance() obj.unpack(self.body.value) self.body = obj
[ "def", "_unpack_body", "(", "self", ")", ":", "obj", "=", "self", ".", "_get_body_instance", "(", ")", "obj", ".", "unpack", "(", "self", ".", "body", ".", "value", ")", "self", ".", "body", "=", "obj" ]
34.6
8
def console_print_rect_ex( con: tcod.console.Console, x: int, y: int, w: int, h: int, flag: int, alignment: int, fmt: str, ) -> int: """Print a string constrained to a rectangle with blend and alignment. Returns: int: The number of lines of text once word-wrapped. .. deprecated:: 8.5 Use :any:`Console.print_rect` instead. """ return int( lib.TCOD_console_printf_rect_ex( _console(con), x, y, w, h, flag, alignment, _fmt(fmt) ) )
[ "def", "console_print_rect_ex", "(", "con", ":", "tcod", ".", "console", ".", "Console", ",", "x", ":", "int", ",", "y", ":", "int", ",", "w", ":", "int", ",", "h", ":", "int", ",", "flag", ":", "int", ",", "alignment", ":", "int", ",", "fmt", ":", "str", ",", ")", "->", "int", ":", "return", "int", "(", "lib", ".", "TCOD_console_printf_rect_ex", "(", "_console", "(", "con", ")", ",", "x", ",", "y", ",", "w", ",", "h", ",", "flag", ",", "alignment", ",", "_fmt", "(", "fmt", ")", ")", ")" ]
22.173913
22.217391
def setupTable_VORG(self): """ Make the VORG table. **This should not be called externally.** Subclasses may override or supplement this method to handle the table creation in a different way if desired. """ if "VORG" not in self.tables: return self.otf["VORG"] = vorg = newTable("VORG") vorg.majorVersion = 1 vorg.minorVersion = 0 vorg.VOriginRecords = {} # Find the most frequent verticalOrigin vorg_count = Counter(_getVerticalOrigin(self.otf, glyph) for glyph in self.allGlyphs.values()) vorg.defaultVertOriginY = vorg_count.most_common(1)[0][0] if len(vorg_count) > 1: for glyphName, glyph in self.allGlyphs.items(): vorg.VOriginRecords[glyphName] = _getVerticalOrigin( self.otf, glyph) vorg.numVertOriginYMetrics = len(vorg.VOriginRecords)
[ "def", "setupTable_VORG", "(", "self", ")", ":", "if", "\"VORG\"", "not", "in", "self", ".", "tables", ":", "return", "self", ".", "otf", "[", "\"VORG\"", "]", "=", "vorg", "=", "newTable", "(", "\"VORG\"", ")", "vorg", ".", "majorVersion", "=", "1", "vorg", ".", "minorVersion", "=", "0", "vorg", ".", "VOriginRecords", "=", "{", "}", "# Find the most frequent verticalOrigin", "vorg_count", "=", "Counter", "(", "_getVerticalOrigin", "(", "self", ".", "otf", ",", "glyph", ")", "for", "glyph", "in", "self", ".", "allGlyphs", ".", "values", "(", ")", ")", "vorg", ".", "defaultVertOriginY", "=", "vorg_count", ".", "most_common", "(", "1", ")", "[", "0", "]", "[", "0", "]", "if", "len", "(", "vorg_count", ")", ">", "1", ":", "for", "glyphName", ",", "glyph", "in", "self", ".", "allGlyphs", ".", "items", "(", ")", ":", "vorg", ".", "VOriginRecords", "[", "glyphName", "]", "=", "_getVerticalOrigin", "(", "self", ".", "otf", ",", "glyph", ")", "vorg", ".", "numVertOriginYMetrics", "=", "len", "(", "vorg", ".", "VOriginRecords", ")" ]
39.208333
16.125
def validate_rmq_cluster_running_nodes(self, sentry_units): """Check that all rmq unit hostnames are represented in the cluster_status output of all units. :param host_names: dict of juju unit names to host names :param units: list of sentry unit pointers (all rmq units) :returns: None if successful, otherwise return error message """ host_names = self.get_unit_hostnames(sentry_units) errors = [] # Query every unit for cluster_status running nodes for query_unit in sentry_units: query_unit_name = query_unit.info['unit_name'] running_nodes = self.get_rmq_cluster_running_nodes(query_unit) # Confirm that every unit is represented in the queried unit's # cluster_status running nodes output. for validate_unit in sentry_units: val_host_name = host_names[validate_unit.info['unit_name']] val_node_name = 'rabbit@{}'.format(val_host_name) if val_node_name not in running_nodes: errors.append('Cluster member check failed on {}: {} not ' 'in {}\n'.format(query_unit_name, val_node_name, running_nodes)) if errors: return ''.join(errors)
[ "def", "validate_rmq_cluster_running_nodes", "(", "self", ",", "sentry_units", ")", ":", "host_names", "=", "self", ".", "get_unit_hostnames", "(", "sentry_units", ")", "errors", "=", "[", "]", "# Query every unit for cluster_status running nodes", "for", "query_unit", "in", "sentry_units", ":", "query_unit_name", "=", "query_unit", ".", "info", "[", "'unit_name'", "]", "running_nodes", "=", "self", ".", "get_rmq_cluster_running_nodes", "(", "query_unit", ")", "# Confirm that every unit is represented in the queried unit's", "# cluster_status running nodes output.", "for", "validate_unit", "in", "sentry_units", ":", "val_host_name", "=", "host_names", "[", "validate_unit", ".", "info", "[", "'unit_name'", "]", "]", "val_node_name", "=", "'rabbit@{}'", ".", "format", "(", "val_host_name", ")", "if", "val_node_name", "not", "in", "running_nodes", ":", "errors", ".", "append", "(", "'Cluster member check failed on {}: {} not '", "'in {}\\n'", ".", "format", "(", "query_unit_name", ",", "val_node_name", ",", "running_nodes", ")", ")", "if", "errors", ":", "return", "''", ".", "join", "(", "errors", ")" ]
47.482759
22.034483
def teardown(self): ''' Clean up the monitor data and stop the monitor thread. ''' self.monitor_thread.stop() self.monitor_thread = None super(BaseMonitor, self).teardown()
[ "def", "teardown", "(", "self", ")", ":", "self", ".", "monitor_thread", ".", "stop", "(", ")", "self", ".", "monitor_thread", "=", "None", "super", "(", "BaseMonitor", ",", "self", ")", ".", "teardown", "(", ")" ]
26.857143
14
def main(): """Entrypoint for ``lander`` executable.""" args = parse_args() config_logger(args) logger = structlog.get_logger(__name__) if args.show_version: # only print the version print_version() sys.exit(0) version = pkg_resources.get_distribution('lander').version logger.info('Lander version {0}'.format(version)) config = Configuration(args=args) # disable any build confirmed to be a PR with Travis if config['is_travis_pull_request']: logger.info('Skipping build from PR.') sys.exit(0) lander = Lander(config) lander.build_site() logger.info('Build complete') if config['upload']: lander.upload_site() logger.info('Upload complete') logger.info('Lander complete')
[ "def", "main", "(", ")", ":", "args", "=", "parse_args", "(", ")", "config_logger", "(", "args", ")", "logger", "=", "structlog", ".", "get_logger", "(", "__name__", ")", "if", "args", ".", "show_version", ":", "# only print the version", "print_version", "(", ")", "sys", ".", "exit", "(", "0", ")", "version", "=", "pkg_resources", ".", "get_distribution", "(", "'lander'", ")", ".", "version", "logger", ".", "info", "(", "'Lander version {0}'", ".", "format", "(", "version", ")", ")", "config", "=", "Configuration", "(", "args", "=", "args", ")", "# disable any build confirmed to be a PR with Travis", "if", "config", "[", "'is_travis_pull_request'", "]", ":", "logger", ".", "info", "(", "'Skipping build from PR.'", ")", "sys", ".", "exit", "(", "0", ")", "lander", "=", "Lander", "(", "config", ")", "lander", ".", "build_site", "(", ")", "logger", ".", "info", "(", "'Build complete'", ")", "if", "config", "[", "'upload'", "]", ":", "lander", ".", "upload_site", "(", ")", "logger", ".", "info", "(", "'Upload complete'", ")", "logger", ".", "info", "(", "'Lander complete'", ")" ]
25.533333
18.7
def link_marked_atoms(self, atoms): """ Take a list of marked "interesting" atoms (heteroatoms, special carbons) and attempt to connect them, returning a list of disjoint groups of special atoms (and their connected hydrogens). :param atoms: set of marked "interesting" atoms, presumably identified using other functions in this class. :return: list of sets of ints, representing groups of connected atoms """ # We will add hydrogens to functional groups hydrogens = {n for n in self.molgraph.graph.nodes if str(self.species[n]) == "H"} # Graph representation of only marked atoms subgraph = self.molgraph.graph.subgraph(list(atoms)).to_undirected() func_grps_no_h = [x for x in nx.connected_components(subgraph)] func_grps = [] for func_grp in func_grps_no_h: grp_hs = set() for node in func_grp: neighbors = self.molgraph.graph[node] for neighbor in neighbors.keys(): # Add all associated hydrogens into the functional group if neighbor in hydrogens: grp_hs.add(neighbor) func_grp = func_grp.union(grp_hs) func_grps.append(func_grp) return func_grps
[ "def", "link_marked_atoms", "(", "self", ",", "atoms", ")", ":", "# We will add hydrogens to functional groups", "hydrogens", "=", "{", "n", "for", "n", "in", "self", ".", "molgraph", ".", "graph", ".", "nodes", "if", "str", "(", "self", ".", "species", "[", "n", "]", ")", "==", "\"H\"", "}", "# Graph representation of only marked atoms", "subgraph", "=", "self", ".", "molgraph", ".", "graph", ".", "subgraph", "(", "list", "(", "atoms", ")", ")", ".", "to_undirected", "(", ")", "func_grps_no_h", "=", "[", "x", "for", "x", "in", "nx", ".", "connected_components", "(", "subgraph", ")", "]", "func_grps", "=", "[", "]", "for", "func_grp", "in", "func_grps_no_h", ":", "grp_hs", "=", "set", "(", ")", "for", "node", "in", "func_grp", ":", "neighbors", "=", "self", ".", "molgraph", ".", "graph", "[", "node", "]", "for", "neighbor", "in", "neighbors", ".", "keys", "(", ")", ":", "# Add all associated hydrogens into the functional group", "if", "neighbor", "in", "hydrogens", ":", "grp_hs", ".", "add", "(", "neighbor", ")", "func_grp", "=", "func_grp", ".", "union", "(", "grp_hs", ")", "func_grps", ".", "append", "(", "func_grp", ")", "return", "func_grps" ]
38.882353
20.764706
def get(url, **kwargs): """ Wrapper for `requests.get` function to set params. """ headers = kwargs.get('headers', {}) headers['User-Agent'] = config.USER_AGENT # overwrite kwargs['headers'] = headers timeout = kwargs.get('timeout', config.TIMEOUT) kwargs['timeout'] = timeout kwargs['verify'] = False # no SSLError logger.debug("Getting: %s", url) return requests.get(url, **kwargs)
[ "def", "get", "(", "url", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "kwargs", ".", "get", "(", "'headers'", ",", "{", "}", ")", "headers", "[", "'User-Agent'", "]", "=", "config", ".", "USER_AGENT", "# overwrite", "kwargs", "[", "'headers'", "]", "=", "headers", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ",", "config", ".", "TIMEOUT", ")", "kwargs", "[", "'timeout'", "]", "=", "timeout", "kwargs", "[", "'verify'", "]", "=", "False", "# no SSLError", "logger", ".", "debug", "(", "\"Getting: %s\"", ",", "url", ")", "return", "requests", ".", "get", "(", "url", ",", "*", "*", "kwargs", ")" ]
27.666667
13.666667
def insert(self, index: int, obj: Any) -> None: """ Inserts an item to the list as long as it is not None """ if obj is not None: super().insert(index, obj)
[ "def", "insert", "(", "self", ",", "index", ":", "int", ",", "obj", ":", "Any", ")", "->", "None", ":", "if", "obj", "is", "not", "None", ":", "super", "(", ")", ".", "insert", "(", "index", ",", "obj", ")" ]
45.25
5.5
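A minimal, self-contained sketch of how this override behaves when attached to a list subclass; the class name NoneSkippingList is hypothetical, since the containing class is not shown here.

    from typing import Any

    class NoneSkippingList(list):  # hypothetical name for the containing class
        def insert(self, index: int, obj: Any) -> None:
            # Delegate to list.insert only when the object is not None.
            if obj is not None:
                super().insert(index, obj)

    lst = NoneSkippingList([1, 2])
    lst.insert(1, None)  # silently dropped
    lst.insert(1, 99)
    print(lst)  # [1, 99, 2]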
def perform_initialization(self): '''Initialize the harvesting for a given job''' log.debug('Initializing backend') factory = HarvestJob if self.dryrun else HarvestJob.objects.create self.job = factory(status='initializing', started=datetime.now(), source=self.source) before_harvest_job.send(self) try: self.initialize() self.job.status = 'initialized' if not self.dryrun: self.job.save() except HarvestValidationError as e: log.info('Initialization failed for "%s" (%s)', safe_unicode(self.source.name), self.source.backend) error = HarvestError(message=safe_unicode(e)) self.job.errors.append(error) self.job.status = 'failed' self.end() return except Exception as e: self.job.status = 'failed' error = HarvestError(message=safe_unicode(e)) self.job.errors.append(error) self.end() msg = 'Initialization failed for "{0.name}" ({0.backend})' log.exception(msg.format(self.source)) return if self.max_items: self.job.items = self.job.items[:self.max_items] if self.job.items: log.debug('Queued %s items', len(self.job.items)) return len(self.job.items)
[ "def", "perform_initialization", "(", "self", ")", ":", "log", ".", "debug", "(", "'Initializing backend'", ")", "factory", "=", "HarvestJob", "if", "self", ".", "dryrun", "else", "HarvestJob", ".", "objects", ".", "create", "self", ".", "job", "=", "factory", "(", "status", "=", "'initializing'", ",", "started", "=", "datetime", ".", "now", "(", ")", ",", "source", "=", "self", ".", "source", ")", "before_harvest_job", ".", "send", "(", "self", ")", "try", ":", "self", ".", "initialize", "(", ")", "self", ".", "job", ".", "status", "=", "'initialized'", "if", "not", "self", ".", "dryrun", ":", "self", ".", "job", ".", "save", "(", ")", "except", "HarvestValidationError", "as", "e", ":", "log", ".", "info", "(", "'Initialization failed for \"%s\" (%s)'", ",", "safe_unicode", "(", "self", ".", "source", ".", "name", ")", ",", "self", ".", "source", ".", "backend", ")", "error", "=", "HarvestError", "(", "message", "=", "safe_unicode", "(", "e", ")", ")", "self", ".", "job", ".", "errors", ".", "append", "(", "error", ")", "self", ".", "job", ".", "status", "=", "'failed'", "self", ".", "end", "(", ")", "return", "except", "Exception", "as", "e", ":", "self", ".", "job", ".", "status", "=", "'failed'", "error", "=", "HarvestError", "(", "message", "=", "safe_unicode", "(", "e", ")", ")", "self", ".", "job", ".", "errors", ".", "append", "(", "error", ")", "self", ".", "end", "(", ")", "msg", "=", "'Initialization failed for \"{0.name}\" ({0.backend})'", "log", ".", "exception", "(", "msg", ".", "format", "(", "self", ".", "source", ")", ")", "return", "if", "self", ".", "max_items", ":", "self", ".", "job", ".", "items", "=", "self", ".", "job", ".", "items", "[", ":", "self", ".", "max_items", "]", "if", "self", ".", "job", ".", "items", ":", "log", ".", "debug", "(", "'Queued %s items'", ",", "len", "(", "self", ".", "job", ".", "items", ")", ")", "return", "len", "(", "self", ".", "job", ".", "items", ")" ]
36.282051
16.538462
def simplify(self, options=None): """ returns a dict describing a simple snapshot of this change, and its children if any. """ simple = { "class": type(self).__name__, "is_change": self.is_change(), "description": self.get_description(), "label": self.label, } if options: simple["is_ignored"] = self.is_ignored(options) if isinstance(self, Addition): simple["is_addition"] = True if isinstance(self, Removal): simple["is_removal"] = True if self.entry: simple["entry"] = self.entry return simple
[ "def", "simplify", "(", "self", ",", "options", "=", "None", ")", ":", "simple", "=", "{", "\"class\"", ":", "type", "(", "self", ")", ".", "__name__", ",", "\"is_change\"", ":", "self", ".", "is_change", "(", ")", ",", "\"description\"", ":", "self", ".", "get_description", "(", ")", ",", "\"label\"", ":", "self", ".", "label", ",", "}", "if", "options", ":", "simple", "[", "\"is_ignored\"", "]", "=", "self", ".", "is_ignored", "(", "options", ")", "if", "isinstance", "(", "self", ",", "Addition", ")", ":", "simple", "[", "\"is_addition\"", "]", "=", "True", "if", "isinstance", "(", "self", ",", "Removal", ")", ":", "simple", "[", "\"is_removal\"", "]", "=", "True", "if", "self", ".", "entry", ":", "simple", "[", "\"entry\"", "]", "=", "self", ".", "entry", "return", "simple" ]
25.423077
17.192308
def index_complement(index_list, len_=None): """ Returns the other indices in a list of length ``len_`` """ mask1 = index_to_boolmask(index_list, len_) mask2 = not_list(mask1) index_list_bar = list_where(mask2) return index_list_bar
[ "def", "index_complement", "(", "index_list", ",", "len_", "=", "None", ")", ":", "mask1", "=", "index_to_boolmask", "(", "index_list", ",", "len_", ")", "mask2", "=", "not_list", "(", "mask1", ")", "index_list_bar", "=", "list_where", "(", "mask2", ")", "return", "index_list_bar" ]
31.75
7.5
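The helpers index_to_boolmask, not_list, and list_where come from the same utility module; a plain-Python equivalent of the whole operation, shown here for illustration only:

    def index_complement(index_list, len_):
        # Mark the given indices, then collect every index left unmarked.
        mask = [False] * len_
        for idx in index_list:
            mask[idx] = True
        return [i for i, flag in enumerate(mask) if not flag]

    print(index_complement([0, 3], 5))  # [1, 2, 4]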
def list_connections(self, limit=500, offset=0): """List Points to which this Thing is subscribed. I.e. list all the Points this Thing is following and controls it's attached to Returns subscription list e.g. #!python { "<Subscription GUID 1>": { "id": "<Control GUID>", "entityId": "<Control's Thing GUID>", "type": 3 # R_CONTROL from IoticAgent.Core.Const }, "<Subscription GUID 2>": { "id": "<Feed GUID>", "entityId": "<Feed's Thing GUID>", "type": 2 # R_FEED from IoticAgent.Core.Const } } Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure Note: For Things following a Point see [list_followers](./Point.m.html#IoticAgent.IOT.Point.Point.list_followers) """ evt = self._client._request_sub_list(self.__lid, limit=limit, offset=offset) self._client._wait_and_except_if_failed(evt) return evt.payload['subs']
[ "def", "list_connections", "(", "self", ",", "limit", "=", "500", ",", "offset", "=", "0", ")", ":", "evt", "=", "self", ".", "_client", ".", "_request_sub_list", "(", "self", ".", "__lid", ",", "limit", "=", "limit", ",", "offset", "=", "offset", ")", "self", ".", "_client", ".", "_wait_and_except_if_failed", "(", "evt", ")", "return", "evt", ".", "payload", "[", "'subs'", "]" ]
42.125
23.96875
def is_python_script(filename): '''Checks a file to see if it's a python script of some sort. ''' if filename.lower().endswith('.py'): return True if not os.path.isfile(filename): return False try: with open(filename, 'rb') as fp: if fp.read(2) != b'#!': return False return re.match(r'.*python', str_(fp.readline())) except IOError: pass return False
[ "def", "is_python_script", "(", "filename", ")", ":", "if", "filename", ".", "lower", "(", ")", ".", "endswith", "(", "'.py'", ")", ":", "return", "True", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "False", "try", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "fp", ":", "if", "fp", ".", "read", "(", "2", ")", "!=", "b'#!'", ":", "return", "False", "return", "re", ".", "match", "(", "r'.*python'", ",", "str_", "(", "fp", ".", "readline", "(", ")", ")", ")", "except", "IOError", ":", "pass", "return", "False" ]
24.277778
20.833333
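A small usage sketch, assuming the function above is in scope; the shebang file is created on the fly so the example is self-contained, and bool() is used because the shebang path returns a regex match object rather than True.

    import os
    import tempfile

    fd, path = tempfile.mkstemp()  # no .py extension
    with os.fdopen(fd, 'wb') as fp:
        fp.write(b'#!/usr/bin/env python\nprint("hi")\n')

    print(bool(is_python_script(path)))    # True (python shebang)
    print(bool(is_python_script('x.py')))  # True (extension alone suffices)
    os.remove(path)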
def calcMzFromMass(mass, charge): """Calculate the mz value of a peptide from its mass and charge. :param mass: float, exact non protonated mass :param charge: int, charge state :returns: mass to charge ratio of the specified charge state """ mz = (mass + (maspy.constants.atomicMassProton * charge)) / charge return mz
[ "def", "calcMzFromMass", "(", "mass", ",", "charge", ")", ":", "mz", "=", "(", "mass", "+", "(", "maspy", ".", "constants", ".", "atomicMassProton", "*", "charge", ")", ")", "/", "charge", "return", "mz" ]
34
18.1
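A worked example of the formula; the proton mass is written out as a literal (approximating maspy.constants.atomicMassProton) so the snippet stands alone.

    PROTON_MASS = 1.00727646677  # stand-in for maspy.constants.atomicMassProton

    def calc_mz(mass, charge):
        # mz = (M + n * mass_of_proton) / n for charge state n
        return (mass + PROTON_MASS * charge) / charge

    print(calc_mz(1000.0, 2))  # 501.00727646677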
def is_extension_type(arr): """ Check whether an array-like is of a pandas extension class instance. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external to it like scipy sparse matrices), and datetime-like arrays. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is of a pandas extension class instance. Examples -------- >>> is_extension_type([1, 2, 3]) False >>> is_extension_type(np.array([1, 2, 3])) False >>> >>> cat = pd.Categorical([1, 2, 3]) >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) True >>> is_extension_type(pd.SparseArray([1, 2, 3])) True >>> is_extension_type(pd.SparseSeries([1, 2, 3])) True >>> >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True """ if is_categorical(arr): return True elif is_sparse(arr): return True elif is_datetime64tz_dtype(arr): return True return False
[ "def", "is_extension_type", "(", "arr", ")", ":", "if", "is_categorical", "(", "arr", ")", ":", "return", "True", "elif", "is_sparse", "(", "arr", ")", ":", "return", "True", "elif", "is_datetime64tz_dtype", "(", "arr", ")", ":", "return", "True", "return", "False" ]
25.245614
22.649123
def delete_session(sid_s): """Delete entries in the data- and kvsessionstore with the given sid_s. On a successful deletion, the flask-kvsession store returns 1 while the sqlalchemy datastore returns None. :param sid_s: The session ID. :returns: ``1`` if deletion was successful. """ # Remove entries from sessionstore _sessionstore.delete(sid_s) # Find and remove the corresponding SessionActivity entry with db.session.begin_nested(): SessionActivity.query.filter_by(sid_s=sid_s).delete() return 1
[ "def", "delete_session", "(", "sid_s", ")", ":", "# Remove entries from sessionstore", "_sessionstore", ".", "delete", "(", "sid_s", ")", "# Find and remove the corresponding SessionActivity entry", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "SessionActivity", ".", "query", ".", "filter_by", "(", "sid_s", "=", "sid_s", ")", ".", "delete", "(", ")", "return", "1" ]
35.933333
15.4
def get_fieldset_index(fieldsets, index_or_name): """ Return the index of a fieldset in the ``fieldsets`` list. Args: fieldsets (list): The original ``fieldsets`` list. index_or_name (int or str): The value of the reference element, or directly its numeric index. Returns: (int) The index of the fieldset in the ``fieldsets`` list. """ if isinstance(index_or_name, six.integer_types): return index_or_name for key, value in enumerate(fieldsets): if value[0] == index_or_name: return key raise KeyError("Key not found: '{}'.".format(index_or_name))
[ "def", "get_fieldset_index", "(", "fieldsets", ",", "index_or_name", ")", ":", "if", "isinstance", "(", "index_or_name", ",", "six", ".", "integer_types", ")", ":", "return", "index_or_name", "for", "key", ",", "value", "in", "enumerate", "(", "fieldsets", ")", ":", "if", "value", "[", "0", "]", "==", "index_or_name", ":", "return", "key", "raise", "KeyError", "(", "\"Key not found: '{}'.\"", ".", "format", "(", "index_or_name", ")", ")" ]
32.473684
22.473684
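A self-contained sketch of the lookup on a Django-admin-style fieldsets list; six.integer_types reduces to int on Python 3, which is assumed below.

    def get_fieldset_index(fieldsets, index_or_name):
        if isinstance(index_or_name, int):
            return index_or_name
        for key, value in enumerate(fieldsets):
            if value[0] == index_or_name:
                return key
        raise KeyError("Key not found: '{}'.".format(index_or_name))

    fieldsets = [('General', {'fields': ['name']}),
                 ('Advanced', {'fields': ['slug']})]
    print(get_fieldset_index(fieldsets, 'Advanced'))  # 1
    print(get_fieldset_index(fieldsets, 0))           # 0 (index passed through)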
def _ProcessImage(self, tag, wall_time, step, image): """Processes an image by adding it to accumulated state.""" event = ImageEvent(wall_time=wall_time, step=step, encoded_image_string=image.encoded_image_string, width=image.width, height=image.height) self.images.AddItem(tag, event)
[ "def", "_ProcessImage", "(", "self", ",", "tag", ",", "wall_time", ",", "step", ",", "image", ")", ":", "event", "=", "ImageEvent", "(", "wall_time", "=", "wall_time", ",", "step", "=", "step", ",", "encoded_image_string", "=", "image", ".", "encoded_image_string", ",", "width", "=", "image", ".", "width", ",", "height", "=", "image", ".", "height", ")", "self", ".", "images", ".", "AddItem", "(", "tag", ",", "event", ")" ]
47.75
7.875
def _read_para_echo_request_unsigned(self, code, cbit, clen, *, desc, length, version): """Read HIP ECHO_REQUEST_UNSIGNED parameter. Structure of HIP ECHO_REQUEST_UNSIGNED parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Opaque data (variable length) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 echo_request_unsigned.type Parameter Type 1 15 echo_request_unsigned.critical Critical Bit 2 16 echo_request_unsigned.length Length of Contents 4 32 echo_request_unsigned.data Opaque Data """ _data = self._read_fileng(clen) echo_request_unsigned = dict( type=desc, critical=cbit, length=clen, data=_data, ) _plen = length - clen if _plen: self._read_fileng(_plen) return echo_request_unsigned
[ "def", "_read_para_echo_request_unsigned", "(", "self", ",", "code", ",", "cbit", ",", "clen", ",", "*", ",", "desc", ",", "length", ",", "version", ")", ":", "_data", "=", "self", ".", "_read_fileng", "(", "clen", ")", "echo_request_unsigned", "=", "dict", "(", "type", "=", "desc", ",", "critical", "=", "cbit", ",", "length", "=", "clen", ",", "data", "=", "_data", ",", ")", "_plen", "=", "length", "-", "clen", "if", "_plen", ":", "self", ".", "_read_fileng", "(", "_plen", ")", "return", "echo_request_unsigned" ]
43.666667
27.333333
def get_best_electronegativity_anonymous_mapping(self, struct1, struct2): """ Performs an anonymous fitting, which allows distinct species in one structure to map to another. E.g., to compare if the Li2O and Na2O structures are similar. If multiple substitutions are within tolerance this will return the one which minimizes the difference in electronegativity between the matched species. Args: struct1 (Structure): 1st structure struct2 (Structure): 2nd structure Returns: min_mapping (Dict): Mapping of struct1 species to struct2 species """ struct1, struct2 = self._process_species([struct1, struct2]) struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2) matches = self._anonymous_match(struct1, struct2, fu, s1_supercell, use_rms=True, break_on_match=True) if matches: min_X_diff = np.inf for m in matches: X_diff = 0 for k, v in m[0].items(): X_diff += struct1.composition[k] * (k.X - v.X) ** 2 if X_diff < min_X_diff: min_X_diff = X_diff best = m[0] return best
[ "def", "get_best_electronegativity_anonymous_mapping", "(", "self", ",", "struct1", ",", "struct2", ")", ":", "struct1", ",", "struct2", "=", "self", ".", "_process_species", "(", "[", "struct1", ",", "struct2", "]", ")", "struct1", ",", "struct2", ",", "fu", ",", "s1_supercell", "=", "self", ".", "_preprocess", "(", "struct1", ",", "struct2", ")", "matches", "=", "self", ".", "_anonymous_match", "(", "struct1", ",", "struct2", ",", "fu", ",", "s1_supercell", ",", "use_rms", "=", "True", ",", "break_on_match", "=", "True", ")", "if", "matches", ":", "min_X_diff", "=", "np", ".", "inf", "for", "m", "in", "matches", ":", "X_diff", "=", "0", "for", "k", ",", "v", "in", "m", "[", "0", "]", ".", "items", "(", ")", ":", "X_diff", "+=", "struct1", ".", "composition", "[", "k", "]", "*", "(", "k", ".", "X", "-", "v", ".", "X", ")", "**", "2", "if", "X_diff", "<", "min_X_diff", ":", "min_X_diff", "=", "X_diff", "best", "=", "m", "[", "0", "]", "return", "best" ]
41.451613
22.290323
def imbalance_metrics(data): """ Computes imbalance metric for a given dataset. Imbalance metric is equal to 0 when a dataset is perfectly balanced (i.e. the number of examples in each class is equal). :param data : pandas.DataFrame A dataset in a pandas data frame :returns int A value of imbalance metric, where zero means that the dataset is perfectly balanced and the higher the value, the more imbalanced the dataset. """ if not data: return 0 #imb - shows measure of imbalance within a dataset imb = 0 num_classes=float(len(Counter(data))) for x in Counter(data).values(): p_x = float(x)/len(data) if p_x > 0: imb += (p_x - 1/num_classes)*(p_x - 1/num_classes) #worst case scenario: all but 1 exemplars in 1st class, the remaining one in 2nd class worst_case=(num_classes-1)*pow(1/num_classes,2) + pow(1-1/num_classes,2) return (num_classes,imb/worst_case)
[ "def", "imbalance_metrics", "(", "data", ")", ":", "if", "not", "data", ":", "return", "0", "#imb - shows measure of imbalance within a dataset", "imb", "=", "0", "num_classes", "=", "float", "(", "len", "(", "Counter", "(", "data", ")", ")", ")", "for", "x", "in", "Counter", "(", "data", ")", ".", "values", "(", ")", ":", "p_x", "=", "float", "(", "x", ")", "/", "len", "(", "data", ")", "if", "p_x", ">", "0", ":", "imb", "+=", "(", "p_x", "-", "1", "/", "num_classes", ")", "*", "(", "p_x", "-", "1", "/", "num_classes", ")", "#worst case scenario: all but 1 exemplars in 1st class, the remaining one in 2nd class", "worst_case", "=", "(", "num_classes", "-", "1", ")", "*", "pow", "(", "1", "/", "num_classes", ",", "2", ")", "+", "pow", "(", "1", "-", "1", "/", "num_classes", ",", "2", ")", "return", "(", "num_classes", ",", "imb", "/", "worst_case", ")" ]
46.95
22.85
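A worked example of the metric in plain Python 3. For ['a', 'a', 'a', 'b'] the class frequencies are (0.75, 0.25), so imb = 0.0625 + 0.0625 = 0.125 and worst_case = 0.25 + 0.25 = 0.5, giving 0.25.

    from collections import Counter

    def imbalance(data):
        num_classes = float(len(Counter(data)))
        imb = sum((n / len(data) - 1 / num_classes) ** 2
                  for n in Counter(data).values())
        worst = (num_classes - 1) * (1 / num_classes) ** 2 + (1 - 1 / num_classes) ** 2
        return imb / worst

    print(imbalance(['a', 'a', 'a', 'b']))  # 0.25
    print(imbalance(['a', 'a', 'b', 'b']))  # 0.0 (perfectly balanced)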
def address_from_digest(digest): # type: (Digest) -> Address """ Generates an address from a private key digest. """ address_trits = [0] * (Address.LEN * TRITS_PER_TRYTE) # type: List[int] sponge = Kerl() sponge.absorb(digest.as_trits()) sponge.squeeze(address_trits) return Address.from_trits( trits=address_trits, key_index=digest.key_index, security_level=digest.security_level, )
[ "def", "address_from_digest", "(", "digest", ")", ":", "# type: (Digest) -> Address", "address_trits", "=", "[", "0", "]", "*", "(", "Address", ".", "LEN", "*", "TRITS_PER_TRYTE", ")", "# type: List[int]", "sponge", "=", "Kerl", "(", ")", "sponge", ".", "absorb", "(", "digest", ".", "as_trits", "(", ")", ")", "sponge", ".", "squeeze", "(", "address_trits", ")", "return", "Address", ".", "from_trits", "(", "trits", "=", "address_trits", ",", "key_index", "=", "digest", ".", "key_index", ",", "security_level", "=", "digest", ".", "security_level", ",", ")" ]
28.647059
15.470588
def wait_and_ignore(condition, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5): ''' Waits wrapper that'll wait for the condition to become true, but will not error if the condition isn't met. Args: condition (lambda) - Lambda expression to wait for to evaluate to True. Kwargs: timeout (number) : Maximum number of seconds to wait. sleep (number) : Sleep time to wait between iterations. Example:: wait_and_ignore(lambda: driver.find_element_by_id("success").is_displayed(), timeout=30, sleep=0.5) is equivalent to:: end_time = datetime.now() + timedelta(seconds=30) while datetime.now() < end_time: try: if driver.find_element_by_id("success").is_displayed(): break; except: pass time.sleep(0.5) ''' try: return wait_until(condition, timeout, sleep) except: pass
[ "def", "wait_and_ignore", "(", "condition", ",", "timeout", "=", "WTF_TIMEOUT_MANAGER", ".", "NORMAL", ",", "sleep", "=", "0.5", ")", ":", "try", ":", "return", "wait_until", "(", "condition", ",", "timeout", ",", "sleep", ")", "except", ":", "pass" ]
29.575758
26.242424
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([scipy.stats.norm.rvs(loc=m, scale=s, size=size) for s, m in zip(self.sigma, self.mu)])
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "norm", ".", "rvs", "(", "loc", "=", "m", ",", "scale", "=", "s", ",", "size", "=", "size", ")", "for", "s", ",", "m", "in", "zip", "(", "self", ".", "sigma", ",", "self", ".", "mu", ")", "]", ")" ]
42
20.5
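A usage sketch with assumed mu and sigma attributes; note that scipy.asarray has been removed from recent SciPy releases, so numpy.asarray is used in its place.

    import numpy as np
    import scipy.stats

    # Hypothetical prior with two independent normal hyperparameters.
    mu, sigma = [0.0, 5.0], [1.0, 2.0]
    draws = np.asarray([scipy.stats.norm.rvs(loc=m, scale=s, size=3)
                        for s, m in zip(sigma, mu)])
    print(draws.shape)  # (2, 3): one row of samples per hyperparameter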
def get_output_jsonpath_with_name(self, sub_output=None): """If ExtractorProcessor has a name defined, return a JSONPath that has a filter on that name""" if self.name is None: return None output_jsonpath_field = self.get_output_jsonpath_field(sub_output) extractor_filter = "name='{}'".format(self.name) output_jsonpath = "{}[?{}].(result[*][value])".format( output_jsonpath_field, extractor_filter) return output_jsonpath
[ "def", "get_output_jsonpath_with_name", "(", "self", ",", "sub_output", "=", "None", ")", ":", "if", "self", ".", "name", "is", "None", ":", "return", "None", "output_jsonpath_field", "=", "self", ".", "get_output_jsonpath_field", "(", "sub_output", ")", "extractor_filter", "=", "\"name='{}'\"", ".", "format", "(", "self", ".", "name", ")", "output_jsonpath", "=", "\"{}[?{}].(result[*][value])\"", ".", "format", "(", "output_jsonpath_field", ",", "extractor_filter", ")", "return", "output_jsonpath" ]
41.166667
18.25
def parse_argv(self, argv=None, location='Command line.'): """Parse command line arguments. argv <list str> or None: The argument list to parse. None means use a copy of sys.argv. argv[0] is ignored. location = '' <str>: A user friendly string describing where the parser got this data from. '' means use "Command line." if argv == None, and "Builtin default." otherwise. """ if argv is None: argv = list(sys.argv) argv.pop(0) self._parse_options(argv, location) self._parse_positional_arguments(argv)
[ "def", "parse_argv", "(", "self", ",", "argv", "=", "None", ",", "location", "=", "'Command line.'", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "list", "(", "sys", ".", "argv", ")", "argv", ".", "pop", "(", "0", ")", "self", ".", "_parse_options", "(", "argv", ",", "location", ")", "self", ".", "_parse_positional_arguments", "(", "argv", ")" ]
37.823529
16.470588
def orthographic_u_umlaut(sound: str) -> str: """ >>> OldNorsePhonology.orthographic_u_umlaut("a") 'ö' >>> OldNorsePhonology.orthographic_u_umlaut("e") 'e' :param sound: :return: """ if sound in OldNorsePhonology.U_UMLAUT: return OldNorsePhonology.U_UMLAUT[sound] else: return sound
[ "def", "orthographic_u_umlaut", "(", "sound", ":", "str", ")", "->", "str", ":", "if", "sound", "in", "OldNorsePhonology", ".", "U_UMLAUT", ":", "return", "OldNorsePhonology", ".", "U_UMLAUT", "[", "sound", "]", "else", ":", "return", "sound" ]
26.714286
17.142857
def change_first_point_by_coords(self, x, y, max_distance=1e-4, raise_if_too_far_away=True): """ Set the first point of the exterior to the given point based on its coordinates. If multiple points are found, the closest one will be picked. If no matching points are found, an exception is raised. Note: This method does *not* work in-place. Parameters ---------- x : number X-coordinate of the point. y : number Y-coordinate of the point. max_distance : None or number, optional Maximum distance past which possible matches are ignored. If ``None`` the distance limit is deactivated. raise_if_too_far_away : bool, optional Whether to raise an exception if the closest found point is too far away (``True``) or simply return an unchanged copy of this object (``False``). Returns ------- imgaug.Polygon Copy of this polygon with the new point order. """ if len(self.exterior) == 0: raise Exception("Cannot reorder polygon points, because it contains no points.") closest_idx, closest_dist = self.find_closest_point_index(x=x, y=y, return_distance=True) if max_distance is not None and closest_dist > max_distance: if not raise_if_too_far_away: return self.deepcopy() closest_point = self.exterior[closest_idx, :] raise Exception( "Closest found point (%.9f, %.9f) exceeds max_distance of %.9f" % ( closest_point[0], closest_point[1], closest_dist) ) return self.change_first_point_by_index(closest_idx)
[ "def", "change_first_point_by_coords", "(", "self", ",", "x", ",", "y", ",", "max_distance", "=", "1e-4", ",", "raise_if_too_far_away", "=", "True", ")", ":", "if", "len", "(", "self", ".", "exterior", ")", "==", "0", ":", "raise", "Exception", "(", "\"Cannot reorder polygon points, because it contains no points.\"", ")", "closest_idx", ",", "closest_dist", "=", "self", ".", "find_closest_point_index", "(", "x", "=", "x", ",", "y", "=", "y", ",", "return_distance", "=", "True", ")", "if", "max_distance", "is", "not", "None", "and", "closest_dist", ">", "max_distance", ":", "if", "not", "raise_if_too_far_away", ":", "return", "self", ".", "deepcopy", "(", ")", "closest_point", "=", "self", ".", "exterior", "[", "closest_idx", ",", ":", "]", "raise", "Exception", "(", "\"Closest found point (%.9f, %.9f) exceeds max_distance of %.9f\"", "%", "(", "closest_point", "[", "0", "]", ",", "closest_point", "[", "1", "]", ",", "closest_dist", ")", ")", "return", "self", ".", "change_first_point_by_index", "(", "closest_idx", ")" ]
37.659574
25.06383
def getSelectedTab(tabs, forURL): """ Returns the tab that should be selected when the current resource lives at C{forURL}. Call me after L{setTabURLs} @param tabs: sequence of L{Tab} instances @param forURL: L{nevow.url.URL} @return: L{Tab} instance """ flatTabs = [] def flatten(tabs): for t in tabs: flatTabs.append(t) flatten(t.children) flatten(tabs) forURL = '/' + forURL.path for t in flatTabs: if forURL == t.linkURL: return t flatTabs.sort(key=lambda t: len(t.linkURL), reverse=True) for t in flatTabs: if not t.linkURL.endswith('/'): linkURL = t.linkURL + '/' else: linkURL = t.linkURL if forURL.startswith(linkURL): return t
[ "def", "getSelectedTab", "(", "tabs", ",", "forURL", ")", ":", "flatTabs", "=", "[", "]", "def", "flatten", "(", "tabs", ")", ":", "for", "t", "in", "tabs", ":", "flatTabs", ".", "append", "(", "t", ")", "flatten", "(", "t", ".", "children", ")", "flatten", "(", "tabs", ")", "forURL", "=", "'/'", "+", "forURL", ".", "path", "for", "t", "in", "flatTabs", ":", "if", "forURL", "==", "t", ".", "linkURL", ":", "return", "t", "flatTabs", ".", "sort", "(", "key", "=", "lambda", "t", ":", "len", "(", "t", ".", "linkURL", ")", ",", "reverse", "=", "True", ")", "for", "t", "in", "flatTabs", ":", "if", "not", "t", ".", "linkURL", ".", "endswith", "(", "'/'", ")", ":", "linkURL", "=", "t", ".", "linkURL", "+", "'/'", "else", ":", "linkURL", "=", "t", ".", "linkURL", "if", "forURL", ".", "startswith", "(", "linkURL", ")", ":", "return", "t" ]
22.257143
19.685714
def __calculate_links(self, cluster1, cluster2): """! @brief Returns number of links between two clusters. @details Link between objects (points) exists only if the distance between them is less than the connectivity radius. @param[in] cluster1 (list): The first cluster. @param[in] cluster2 (list): The second cluster. @return (uint) Number of links between two clusters. """ number_links = 0 for index1 in cluster1: for index2 in cluster2: number_links += self.__adjacency_matrix[index1][index2] return number_links
[ "def", "__calculate_links", "(", "self", ",", "cluster1", ",", "cluster2", ")", ":", "number_links", "=", "0", "for", "index1", "in", "cluster1", ":", "for", "index2", "in", "cluster2", ":", "number_links", "+=", "self", ".", "__adjacency_matrix", "[", "index1", "]", "[", "index2", "]", "return", "number_links" ]
35.736842
21.368421
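A standalone illustration with a symmetric 0/1 adjacency matrix, where a 1 means the two points lie within the connectivity radius of each other:

    adjacency = [
        [0, 1, 1, 0],
        [1, 0, 0, 1],
        [1, 0, 0, 1],
        [0, 1, 1, 0],
    ]

    def calculate_links(adjacency_matrix, cluster1, cluster2):
        # Count adjacent (point, point) pairs across the two clusters.
        return sum(adjacency_matrix[i][j] for i in cluster1 for j in cluster2)

    print(calculate_links(adjacency, [0, 1], [2, 3]))  # 2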
def partition_dict(items, key): """ Given an ordered dictionary of items and a key in that dict, return an ordered dict of items before, the keyed item, and an ordered dict of items after. >>> od = collections.OrderedDict(zip(range(5), 'abcde')) >>> before, item, after = partition_dict(od, 3) >>> before OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')]) >>> item 'd' >>> after OrderedDict([(4, 'e')]) Like string.partition, if the key is not found in the items, the before will contain all items, item will be None, and after will be an empty iterable. >>> before, item, after = partition_dict(od, -1) >>> before OrderedDict([(0, 'a'), ..., (4, 'e')]) >>> item >>> list(after) [] """ def unmatched(pair): test_key, item, = pair return test_key != key items_iter = iter(items.items()) item = items.get(key) left = collections.OrderedDict(itertools.takewhile(unmatched, items_iter)) right = collections.OrderedDict(items_iter) return left, item, right
[ "def", "partition_dict", "(", "items", ",", "key", ")", ":", "def", "unmatched", "(", "pair", ")", ":", "test_key", ",", "item", ",", "=", "pair", "return", "test_key", "!=", "key", "items_iter", "=", "iter", "(", "items", ".", "items", "(", ")", ")", "item", "=", "items", ".", "get", "(", "key", ")", "left", "=", "collections", ".", "OrderedDict", "(", "itertools", ".", "takewhile", "(", "unmatched", ",", "items_iter", ")", ")", "right", "=", "collections", ".", "OrderedDict", "(", "items_iter", ")", "return", "left", ",", "item", ",", "right" ]
27.028571
19.885714
def _convert_nest_to_flat(self, params, _result=None, _prefix=None): """ Convert a data structure that looks like:: {"foo": {"bar": "baz", "shimmy": "sham"}} to:: {"foo.bar": "baz", "foo.shimmy": "sham"} This is the inverse of L{_convert_flat_to_nest}. """ if _result is None: _result = {} for k, v in params.iteritems(): if _prefix is None: path = k else: path = _prefix + '.' + k if isinstance(v, dict): self._convert_nest_to_flat(v, _result=_result, _prefix=path) else: _result[path] = v return _result
[ "def", "_convert_nest_to_flat", "(", "self", ",", "params", ",", "_result", "=", "None", ",", "_prefix", "=", "None", ")", ":", "if", "_result", "is", "None", ":", "_result", "=", "{", "}", "for", "k", ",", "v", "in", "params", ".", "iteritems", "(", ")", ":", "if", "_prefix", "is", "None", ":", "path", "=", "k", "else", ":", "path", "=", "_prefix", "+", "'.'", "+", "k", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "self", ".", "_convert_nest_to_flat", "(", "v", ",", "_result", "=", "_result", ",", "_prefix", "=", "path", ")", "else", ":", "_result", "[", "path", "]", "=", "v", "return", "_result" ]
28.4
17.52
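A Python 3 port of the same recursion (the original relies on the Python 2 dict.iteritems), run on the docstring's own example:

    def convert_nest_to_flat(params, _result=None, _prefix=None):
        if _result is None:
            _result = {}
        for k, v in params.items():  # iteritems() in the Python 2 original
            path = k if _prefix is None else _prefix + '.' + k
            if isinstance(v, dict):
                convert_nest_to_flat(v, _result=_result, _prefix=path)
            else:
                _result[path] = v
        return _result

    print(convert_nest_to_flat({"foo": {"bar": "baz", "shimmy": "sham"}}))
    # {'foo.bar': 'baz', 'foo.shimmy': 'sham'}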
def set_value(self, treeiter, column, value): """ {{ all }} `value` can also be a Python value and will be converted to a :obj:`GObject.Value` using the corresponding column type (See :obj:`Gtk.ListStore.set_column_types`\\()). """ value = self._convert_value(column, value) Gtk.ListStore.set_value(self, treeiter, column, value)
[ "def", "set_value", "(", "self", ",", "treeiter", ",", "column", ",", "value", ")", ":", "value", "=", "self", ".", "_convert_value", "(", "column", ",", "value", ")", "Gtk", ".", "ListStore", ".", "set_value", "(", "self", ",", "treeiter", ",", "column", ",", "value", ")" ]
35
19
def new_cells_from_excel( self, book, range_, sheet=None, names_row=None, param_cols=None, param_order=None, transpose=False, names_col=None, param_rows=None, ): """Create multiple cells from an Excel range. This method reads values from a range in an Excel file, create cells and populate them with the values in the range. To use this method, ``openpyxl`` package must be installed. The Excel file to read data from is specified by ``book`` parameters. The ``range_`` can be a range address, such as "$G4:$K10", or a named range. In case a range address is given, ``sheet`` must also be given. By default, cells data are interpreted as being laid out side-by-side. ``names_row`` is a row index (starting from 0) to specify the row that contains the names of cells and parameters. Cells and parameter names must be contained in a single row. ``param_cols`` accepts a sequence (such as list or tuple) of column indexes (starting from 0) that indicate columns that contain cells arguments. **2-dimensional cells definitions** The optional ``names_col`` and ``param_rows`` parameters are used, when data for one cells spans more than one column. In such cases, the cells data is 2-dimensional, and there must be parameter row(s) across the columns that contain arguments of the parameters. A sequence of row indexes that indicate parameter rows is passed to ``param_rows``. The names of those parameters must be contained in the same rows as parameter values (arguments), and ``names_col`` is to indicate the column position at which the parameter names are defined. **Horizontal arrangement** By default, cells data are interpreted as being placed side-by-side, regardless of whether one cells corresponds to a single column or multiple columns. ``transpose`` parameter is used to alter this orientation, and if it is set to ``True``, cells values are interpreted as being placed one above the other. "row(s)" and "col(s)" in the parameter names are interpreted inversely, i.e. all indexes passed to "row(s)" parameters are interpreted as column indexes, and all indexes passed to "col(s)" parameters as row indexes. Args: book (str): Path to an Excel file. range_ (str): Range expression, such as "A1", "$G4:$K10", or named range "NamedRange1". sheet (str): Sheet name (case ignored). names_row (optional): an index number indicating what row contains the names of cells and parameters. Defaults to the top row (0). param_cols (optional): a sequence of index numbers indicating parameter columns. Defaults to only the leftmost column ([0]). names_col (optional): an index number, starting from 0, indicating what column contains additional parameters. param_rows (optional): a sequence of index numbers, starting from 0, indicating rows of additional parameters, in case cells are defined in two dimensions. transpose (optional): Defaults to ``False``. If set to ``True``, "row(s)" and "col(s)" in the parameter names are interpreted inversely, i.e. all indexes passed to "row(s)" parameters are interpreted as column indexes, and all indexes passed to "col(s)" parameters as row indexes. param_order (optional): a sequence to reorder the parameters. The elements of the sequence are the indexes of ``param_cols`` elements, and optionally the index of ``param_rows`` elements shifted by the length of ``param_cols``. """ return self._impl.new_cells_from_excel( book, range_, sheet, names_row, param_cols, param_order, transpose, names_col, param_rows, )
[ "def", "new_cells_from_excel", "(", "self", ",", "book", ",", "range_", ",", "sheet", "=", "None", ",", "names_row", "=", "None", ",", "param_cols", "=", "None", ",", "param_order", "=", "None", ",", "transpose", "=", "False", ",", "names_col", "=", "None", ",", "param_rows", "=", "None", ",", ")", ":", "return", "self", ".", "_impl", ".", "new_cells_from_excel", "(", "book", ",", "range_", ",", "sheet", ",", "names_row", ",", "param_cols", ",", "param_order", ",", "transpose", ",", "names_col", ",", "param_rows", ",", ")" ]
43.520408
21.540816
def get_targets(conn=None): """ Yield target documents from the Brain.Targets table. :return: <generator> yields dict objects """ targets = RBT results = targets.run(conn) for item in results: yield item
[ "def", "get_targets", "(", "conn", "=", "None", ")", ":", "targets", "=", "RBT", "results", "=", "targets", ".", "run", "(", "conn", ")", "for", "item", "in", "results", ":", "yield", "item" ]
23.1
14.3
def create_project_connection(self, create_connection_inputs, project): """CreateProjectConnection. [Preview API] Creates a new Pipeline connection between the provider installation and the specified project. Returns the PipelineConnection object created. :param :class:`<CreatePipelineConnectionInputs> <azure.devops.v5_1.cix.models.CreatePipelineConnectionInputs>` create_connection_inputs: :param str project: :rtype: :class:`<PipelineConnection> <azure.devops.v5_1.cix.models.PipelineConnection>` """ query_parameters = {} if project is not None: query_parameters['project'] = self._serialize.query('project', project, 'str') content = self._serialize.body(create_connection_inputs, 'CreatePipelineConnectionInputs') response = self._send(http_method='POST', location_id='00df4879-9216-45d5-b38d-4a487b626b2c', version='5.1-preview.1', query_parameters=query_parameters, content=content) return self._deserialize('PipelineConnection', response)
[ "def", "create_project_connection", "(", "self", ",", "create_connection_inputs", ",", "project", ")", ":", "query_parameters", "=", "{", "}", "if", "project", "is", "not", "None", ":", "query_parameters", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'project'", ",", "project", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "create_connection_inputs", ",", "'CreatePipelineConnectionInputs'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'00df4879-9216-45d5-b38d-4a487b626b2c'", ",", "version", "=", "'5.1-preview.1'", ",", "query_parameters", "=", "query_parameters", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'PipelineConnection'", ",", "response", ")" ]
67.764706
33.647059
def main(): """The main function. These are the steps performed for the data clean up: 1. Prints the version number. 2. Reads the configuration file (:py:func:`read_config_file`). 3. Creates a new directory with ``data_clean_up`` as prefix and the date and time as suffix. 4. Checks the input file type (``bfile``, ``tfile`` or ``file``). 5. Creates an intermediate directory with the section as prefix and the script name as suffix (inside the previous directory). 6. Runs the required script in order (according to the configuration file section). .. note:: The main function is not responsible to check if the required files exist. This should be done in the ``run`` functions. """ # Getting and checking the options args = parse_args() check_args(args) # The directory name dirname = "data_clean_up." dirname += datetime.datetime.today().strftime("%Y-%m-%d_%H.%M.%S") while os.path.isdir(dirname): time.sleep(1) dirname = "data_clean_up." dirname += datetime.datetime.today().strftime("%Y-%m-%d_%H.%M.%S") # Creating the output directory os.mkdir(dirname) # Configuring the root logger add_file_handler_to_root(os.path.join(dirname, "pyGenClean.log")) logger.info("pyGenClean version {}".format(__version__)) plink_version = get_plink_version() logger.info("Using Plink version {}".format(plink_version)) # Reading the configuration file logger.info("Reading configuration file [ {} ]".format(args.conf)) order, conf = read_config_file(args.conf) # Executing the data clean up current_in = None current_in_type = None suffixes = None if args.tfile is not None: current_in = args.tfile current_in_type = "tfile" suffixes = (".tped", ".tfam") elif args.bfile is not None: current_in = args.bfile current_in_type = "bfile" suffixes = (".bed", ".bim", ".fam") else: current_in = args.file current_in_type = "file" suffixes = (".ped", ".map") # Creating the excluded files try: with open(os.path.join(dirname, "excluded_markers.txt"), "w") as o_f: pass with open(os.path.join(dirname, "excluded_samples.txt"), "w") as o_f: pass with open(os.path.join(dirname, "initial_files.txt"), "w") as o_file: for s in suffixes: print >>o_file, current_in + s except IOError: msg = "{}: cannot write summary".format(dirname) raise ProgramError(msg) # Counting the number of markers and samples in the datafile logger.info("Counting initial number of samples and markers") nb_markers, nb_samples = count_markers_samples(current_in, current_in_type) logger.info(" - {:,d} samples".format(nb_samples)) logger.info(" - {:,d} markers".format(nb_markers)) # Creating the result summary file containing the initial numbers try: with open(os.path.join(dirname, "results_summary.txt"), "w") as o_file: print >>o_file, "# initial" print >>o_file, ("Initial number of markers\t" "{:,d}".format(nb_markers)) print >>o_file, ("Initial number of samples\t" "{:,d}".format(nb_samples)) print >>o_file, "---" except IOError: msg = "{}: cannot write summary".format(dirname) raise ProgramError(msg) latex_summaries = [] steps = [] descriptions = [] long_descriptions = [] graphic_paths = set() for number in order: # Getting the script name and its options script_name, options = conf[number] # Getting the output prefix output_prefix = os.path.join(dirname, "{}_{}".format(number, script_name)) # Getting the function to use function_to_use = available_functions[script_name] # Executing the function logger.info("Running {} {}".format(number, script_name)) logger.info(" - Using {} as prefix for input " "files".format(current_in)) logger.info(" - Results will be in [ {} ]".format(output_prefix)) # Executing the function step_results = function_to_use( in_prefix=current_in, in_type=current_in_type, out_prefix=output_prefix, base_dir=dirname, options=options, ) # Updating the input files and input file types current_in = step_results.next_file current_in_type = step_results.next_file_type # Saving what's necessary for the LaTeX report latex_summaries.append(step_results.latex_summary) steps.append(script_name) descriptions.append(step_results.description) long_descriptions.append(step_results.long_description) if step_results.graph_path is not None: graphic_paths.update(step_results.graph_path) # Counting the final number of samples and markers logger.info("Counting final number of samples and markers") nb_markers, nb_samples = count_markers_samples(current_in, current_in_type) logger.info(" - {:,d} samples".format(nb_samples)) logger.info(" - {:,d} markers".format(nb_markers)) # Getting the final suffixes suffixes = None if current_in_type == "tfile": suffixes = ((".tped", nb_markers), (".tfam", nb_samples)) elif current_in_type == "bfile": suffixes = ((".bed", None), (".bim", nb_markers), (".fam", nb_samples)) else: suffixes = ((".ped", nb_samples), (".map", nb_markers)) with open(os.path.join(dirname, "final_files.txt"), "w") as o_file: for s, nb in suffixes: if nb: print >>o_file, current_in + s + "\t{:,d}".format(nb) else: print >>o_file, current_in + s # Generating the graphics paths file graphic_paths_fn = None if len(graphic_paths) > 0: try: graphic_paths_fn = os.path.join(dirname, "graphic_paths.txt") with open(graphic_paths_fn, "w") as o_file: for path in sorted(graphic_paths): print >>o_file, path except IOError: msg = "{}: cannot write summary".format(dirname) raise ProgramError(msg) # We create the automatic report logger.info("Generating automatic report") report_name = os.path.join(dirname, "automatic_report.tex") auto_report.create_report( dirname, report_name, project_name=args.report_number, steps=steps, descriptions=descriptions, graphic_paths_fn=graphic_paths_fn, long_descriptions=long_descriptions, summaries=latex_summaries, background=args.report_background, summary_fn=os.path.join(dirname, "results_summary.txt"), report_title=args.report_title, report_author=args.report_author, initial_files=os.path.join(dirname, "initial_files.txt"), final_files=os.path.join(dirname, "final_files.txt"), final_nb_markers="{:,d}".format(nb_markers), final_nb_samples="{:,d}".format(nb_samples), plink_version=plink_version, )
[ "def", "main", "(", ")", ":", "# Getting and checking the options", "args", "=", "parse_args", "(", ")", "check_args", "(", "args", ")", "# The directory name", "dirname", "=", "\"data_clean_up.\"", "dirname", "+=", "datetime", ".", "datetime", ".", "today", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d_%H.%M.%S\"", ")", "while", "os", ".", "path", ".", "isdir", "(", "dirname", ")", ":", "time", ".", "sleep", "(", "1", ")", "dirname", "=", "\"data_clean_up.\"", "dirname", "+=", "datetime", ".", "datetime", ".", "today", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d_%H.%M.%S\"", ")", "# Creating the output directory", "os", ".", "mkdir", "(", "dirname", ")", "# Configuring the root logger", "add_file_handler_to_root", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"pyGenClean.log\"", ")", ")", "logger", ".", "info", "(", "\"pyGenClean version {}\"", ".", "format", "(", "__version__", ")", ")", "plink_version", "=", "get_plink_version", "(", ")", "logger", ".", "info", "(", "\"Using Plink version {}\"", ".", "format", "(", "plink_version", ")", ")", "# Reading the configuration file", "logger", ".", "info", "(", "\"Reading configuration file [ {} ]\"", ".", "format", "(", "args", ".", "conf", ")", ")", "order", ",", "conf", "=", "read_config_file", "(", "args", ".", "conf", ")", "# Executing the data clean up", "current_in", "=", "None", "current_in_type", "=", "None", "suffixes", "=", "None", "if", "args", ".", "tfile", "is", "not", "None", ":", "current_in", "=", "args", ".", "tfile", "current_in_type", "=", "\"tfile\"", "suffixes", "=", "(", "\".tped\"", ",", "\".tfam\"", ")", "elif", "args", ".", "bfile", "is", "not", "None", ":", "current_in", "=", "args", ".", "bfile", "current_in_type", "=", "\"bfile\"", "suffixes", "=", "(", "\".bed\"", ",", "\".bim\"", ",", "\".fam\"", ")", "else", ":", "current_in", "=", "args", ".", "file", "current_in_type", "=", "\"file\"", "suffixes", "=", "(", "\".ped\"", ",", "\".map\"", ")", "# Creating the excluded files", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"excluded_markers.txt\"", ")", ",", "\"w\"", ")", "as", "o_f", ":", "pass", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"excluded_samples.txt\"", ")", ",", "\"w\"", ")", "as", "o_f", ":", "pass", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"initial_files.txt\"", ")", ",", "\"w\"", ")", "as", "o_file", ":", "for", "s", "in", "suffixes", ":", "print", ">>", "o_file", ",", "current_in", "+", "s", "except", "IOError", ":", "msg", "=", "\"{}: cannot write summary\"", ".", "format", "(", "dirname", ")", "raise", "ProgramError", "(", "msg", ")", "# Counting the number of markers and samples in the datafile", "logger", ".", "info", "(", "\"Counting initial number of samples and markers\"", ")", "nb_markers", ",", "nb_samples", "=", "count_markers_samples", "(", "current_in", ",", "current_in_type", ")", "logger", ".", "info", "(", "\" - {:,d} samples\"", ".", "format", "(", "nb_samples", ")", ")", "logger", ".", "info", "(", "\" - {:,d} markers\"", ".", "format", "(", "nb_markers", ")", ")", "# Creating the result summary file containing the initial numbers", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"results_summary.txt\"", ")", ",", "\"w\"", ")", "as", "o_file", ":", "print", ">>", "o_file", ",", "\"# initial\"", "print", ">>", "o_file", ",", "(", "\"Initial number of markers\\t\"", "\"{:,d}\"", ".", "format", "(", "nb_markers", ")", ")", "print", ">>", "o_file", ",", "(", "\"Initial number of samples\\t\"", "\"{:,d}\"", ".", "format", "(", "nb_samples", ")", ")", "print", ">>", "o_file", ",", "\"---\"", "except", "IOError", ":", "msg", "=", "\"{}: cannot write summary\"", ".", "format", "(", "dirname", ")", "raise", "ProgramError", "(", "msg", ")", "latex_summaries", "=", "[", "]", "steps", "=", "[", "]", "descriptions", "=", "[", "]", "long_descriptions", "=", "[", "]", "graphic_paths", "=", "set", "(", ")", "for", "number", "in", "order", ":", "# Getting the script name and its options", "script_name", ",", "options", "=", "conf", "[", "number", "]", "# Getting the output prefix", "output_prefix", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"{}_{}\"", ".", "format", "(", "number", ",", "script_name", ")", ")", "# Getting the function to use", "function_to_use", "=", "available_functions", "[", "script_name", "]", "# Executing the function", "logger", ".", "info", "(", "\"Running {} {}\"", ".", "format", "(", "number", ",", "script_name", ")", ")", "logger", ".", "info", "(", "\" - Using {} as prefix for input \"", "\"files\"", ".", "format", "(", "current_in", ")", ")", "logger", ".", "info", "(", "\" - Results will be in [ {} ]\"", ".", "format", "(", "output_prefix", ")", ")", "# Executing the function", "step_results", "=", "function_to_use", "(", "in_prefix", "=", "current_in", ",", "in_type", "=", "current_in_type", ",", "out_prefix", "=", "output_prefix", ",", "base_dir", "=", "dirname", ",", "options", "=", "options", ",", ")", "# Updating the input files and input file types", "current_in", "=", "step_results", ".", "next_file", "current_in_type", "=", "step_results", ".", "next_file_type", "# Saving what's necessary for the LaTeX report", "latex_summaries", ".", "append", "(", "step_results", ".", "latex_summary", ")", "steps", ".", "append", "(", "script_name", ")", "descriptions", ".", "append", "(", "step_results", ".", "description", ")", "long_descriptions", ".", "append", "(", "step_results", ".", "long_description", ")", "if", "step_results", ".", "graph_path", "is", "not", "None", ":", "graphic_paths", ".", "update", "(", "step_results", ".", "graph_path", ")", "# Counting the final number of samples and markers", "logger", ".", "info", "(", "\"Counting final number of samples and markers\"", ")", "nb_markers", ",", "nb_samples", "=", "count_markers_samples", "(", "current_in", ",", "current_in_type", ")", "logger", ".", "info", "(", "\" - {:,d} samples\"", ".", "format", "(", "nb_samples", ")", ")", "logger", ".", "info", "(", "\" - {:,d} markers\"", ".", "format", "(", "nb_markers", ")", ")", "# Getting the final suffixes", "suffixes", "=", "None", "if", "current_in_type", "==", "\"tfile\"", ":", "suffixes", "=", "(", "(", "\".tped\"", ",", "nb_markers", ")", ",", "(", "\".tfam\"", ",", "nb_samples", ")", ")", "elif", "current_in_type", "==", "\"bfile\"", ":", "suffixes", "=", "(", "(", "\".bed\"", ",", "None", ")", ",", "(", "\".bim\"", ",", "nb_markers", ")", ",", "(", "\".fam\"", ",", "nb_samples", ")", ")", "else", ":", "suffixes", "=", "(", "(", "\".ped\"", ",", "nb_samples", ")", ",", "(", "\".map\"", ",", "nb_markers", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"final_files.txt\"", ")", ",", "\"w\"", ")", "as", "o_file", ":", "for", "s", ",", "nb", "in", "suffixes", ":", "if", "nb", ":", "print", ">>", "o_file", ",", "current_in", "+", "s", "+", "\"\\t{:,d}\"", ".", "format", "(", "nb", ")", "else", ":", "print", ">>", "o_file", ",", "current_in", "+", "s", "# Generating the graphics paths file", "graphic_paths_fn", "=", "None", "if", "len", "(", "graphic_paths", ")", ">", "0", ":", "try", ":", "graphic_paths_fn", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"graphic_paths.txt\"", ")", "with", "open", "(", "graphic_paths_fn", ",", "\"w\"", ")", "as", "o_file", ":", "for", "path", "in", "sorted", "(", "graphic_paths", ")", ":", "print", ">>", "o_file", ",", "path", "except", "IOError", ":", "msg", "=", "\"{}: cannot write summary\"", ".", "format", "(", "dirname", ")", "raise", "ProgramError", "(", "msg", ")", "# We create the automatic report", "logger", ".", "info", "(", "\"Generating automatic report\"", ")", "report_name", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"automatic_report.tex\"", ")", "auto_report", ".", "create_report", "(", "dirname", ",", "report_name", ",", "project_name", "=", "args", ".", "report_number", ",", "steps", "=", "steps", ",", "descriptions", "=", "descriptions", ",", "graphic_paths_fn", "=", "graphic_paths_fn", ",", "long_descriptions", "=", "long_descriptions", ",", "summaries", "=", "latex_summaries", ",", "background", "=", "args", ".", "report_background", ",", "summary_fn", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"results_summary.txt\"", ")", ",", "report_title", "=", "args", ".", "report_title", ",", "report_author", "=", "args", ".", "report_author", ",", "initial_files", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"initial_files.txt\"", ")", ",", "final_files", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"final_files.txt\"", ")", ",", "final_nb_markers", "=", "\"{:,d}\"", ".", "format", "(", "nb_markers", ")", ",", "final_nb_samples", "=", "\"{:,d}\"", ".", "format", "(", "nb_samples", ")", ",", "plink_version", "=", "plink_version", ",", ")" ]
36.80203
19.106599
def calc_n_coarse_chan(self, chan_bw=None): """ This makes an attempt to calculate the number of coarse channels in a given file. Note: This is unlikely to work on non-Breakthrough Listen data, as a-priori knowledge of the digitizer system is required. """ nchans = int(self.header[b'nchans']) # Do we have a file with enough channels that it has coarse channelization? if chan_bw is not None: bandwidth = abs(self.f_stop - self.f_start) n_coarse_chan = int(bandwidth / chan_bw) return n_coarse_chan elif nchans >= 2**20: # Does the common FFT length of 2^20 divide through without a remainder? # This should work for most GBT and all Parkes hires data if nchans % 2**20 == 0: n_coarse_chan = nchans // 2**20 return n_coarse_chan # Early GBT data has non-2^N FFT length, check if it is GBT data elif self.header[b'telescope_id'] == 6: coarse_chan_bw = 2.9296875 bandwidth = abs(self.f_stop - self.f_start) n_coarse_chan = int(bandwidth / coarse_chan_bw) return n_coarse_chan else: logger.warning("Couldn't figure out n_coarse_chan") elif self.header[b'telescope_id'] == 6 and nchans < 2**20: #For GBT non-hires data coarse_chan_bw = 2.9296875 bandwidth = abs(self.f_stop - self.f_start) n_coarse_chan = int(bandwidth / coarse_chan_bw) return n_coarse_chan else: logger.warning("This function currently only works for hires BL Parkes or GBT data.")
[ "def", "calc_n_coarse_chan", "(", "self", ",", "chan_bw", "=", "None", ")", ":", "nchans", "=", "int", "(", "self", ".", "header", "[", "b'nchans'", "]", ")", "# Do we have a file with enough channels that it has coarse channelization?", "if", "chan_bw", "is", "not", "None", ":", "bandwidth", "=", "abs", "(", "self", ".", "f_stop", "-", "self", ".", "f_start", ")", "n_coarse_chan", "=", "int", "(", "bandwidth", "/", "chan_bw", ")", "return", "n_coarse_chan", "elif", "nchans", ">=", "2", "**", "20", ":", "# Does the common FFT length of 2^20 divide through without a remainder?", "# This should work for most GBT and all Parkes hires data", "if", "nchans", "%", "2", "**", "20", "==", "0", ":", "n_coarse_chan", "=", "nchans", "//", "2", "**", "20", "return", "n_coarse_chan", "# Early GBT data has non-2^N FFT length, check if it is GBT data", "elif", "self", ".", "header", "[", "b'telescope_id'", "]", "==", "6", ":", "coarse_chan_bw", "=", "2.9296875", "bandwidth", "=", "abs", "(", "self", ".", "f_stop", "-", "self", ".", "f_start", ")", "n_coarse_chan", "=", "int", "(", "bandwidth", "/", "coarse_chan_bw", ")", "return", "n_coarse_chan", "else", ":", "logger", ".", "warning", "(", "\"Couldn't figure out n_coarse_chan\"", ")", "elif", "self", ".", "header", "[", "b'telescope_id'", "]", "==", "6", "and", "nchans", "<", "2", "**", "20", ":", "#For GBT non-hires data", "coarse_chan_bw", "=", "2.9296875", "bandwidth", "=", "abs", "(", "self", ".", "f_stop", "-", "self", ".", "f_start", ")", "n_coarse_chan", "=", "int", "(", "bandwidth", "/", "coarse_chan_bw", ")", "return", "n_coarse_chan", "else", ":", "logger", ".", "warning", "(", "\"This function currently only works for hires BL Parkes or GBT data.\"", ")" ]
47.611111
18.555556
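A minimal standalone sketch of the same branch logic, handy for checking it without a filterbank file. The header values below are hypothetical; the 2.9296875 MHz coarse-channel width is taken from the function above.

def n_coarse_chan_sketch(nchans, f_start, f_stop, telescope_id, chan_bw=None):
    # free-function mirror of calc_n_coarse_chan's decision tree
    if chan_bw is not None:
        return int(abs(f_stop - f_start) / chan_bw)
    if nchans >= 2**20 and nchans % 2**20 == 0:
        return nchans // 2**20               # common 2^20-point FFT length
    if telescope_id == 6:                    # GBT: fixed coarse-channel width
        return int(abs(f_stop - f_start) / 2.9296875)
    raise ValueError("cannot infer n_coarse_chan")

# 187.5 MHz of hypothetical GBT band splits into 64 coarse channels
assert n_coarse_chan_sketch(64 * 2**20, 1024.0, 1211.5, telescope_id=6) == 64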
def do_alarm_definition_patch(mc, args): '''Patch the alarm definition.''' fields = {} fields['alarm_id'] = args.id if args.name: fields['name'] = args.name if args.description: fields['description'] = args.description if args.expression: fields['expression'] = args.expression if args.alarm_actions: fields['alarm_actions'] = _arg_split_patch_update(args.alarm_actions, patch=True) if args.ok_actions: fields['ok_actions'] = _arg_split_patch_update(args.ok_actions, patch=True) if args.undetermined_actions: fields['undetermined_actions'] = _arg_split_patch_update(args.undetermined_actions, patch=True) if args.actions_enabled: if args.actions_enabled not in enabled_types: errmsg = ('Invalid value, not one of [' + ', '.join(enabled_types) + ']') print(errmsg) return fields['actions_enabled'] = args.actions_enabled in ['true', 'True'] if args.severity: if not _validate_severity(args.severity): return fields['severity'] = args.severity try: alarm = mc.alarm_definitions.patch(**fields) except (osc_exc.ClientException, k_exc.HttpError) as he: raise osc_exc.CommandError('%s\n%s' % (he.message, he.details)) else: print(jsonutils.dumps(alarm, indent=2))
[ "def", "do_alarm_definition_patch", "(", "mc", ",", "args", ")", ":", "fields", "=", "{", "}", "fields", "[", "'alarm_id'", "]", "=", "args", ".", "id", "if", "args", ".", "name", ":", "fields", "[", "'name'", "]", "=", "args", ".", "name", "if", "args", ".", "description", ":", "fields", "[", "'description'", "]", "=", "args", ".", "description", "if", "args", ".", "expression", ":", "fields", "[", "'expression'", "]", "=", "args", ".", "expression", "if", "args", ".", "alarm_actions", ":", "fields", "[", "'alarm_actions'", "]", "=", "_arg_split_patch_update", "(", "args", ".", "alarm_actions", ",", "patch", "=", "True", ")", "if", "args", ".", "ok_actions", ":", "fields", "[", "'ok_actions'", "]", "=", "_arg_split_patch_update", "(", "args", ".", "ok_actions", ",", "patch", "=", "True", ")", "if", "args", ".", "undetermined_actions", ":", "fields", "[", "'undetermined_actions'", "]", "=", "_arg_split_patch_update", "(", "args", ".", "undetermined_actions", ",", "patch", "=", "True", ")", "if", "args", ".", "actions_enabled", ":", "if", "args", ".", "actions_enabled", "not", "in", "enabled_types", ":", "errmsg", "=", "(", "'Invalid value, not one of ['", "+", "', '", ".", "join", "(", "enabled_types", ")", "+", "']'", ")", "print", "(", "errmsg", ")", "return", "fields", "[", "'actions_enabled'", "]", "=", "args", ".", "actions_enabled", "in", "[", "'true'", ",", "'True'", "]", "if", "args", ".", "severity", ":", "if", "not", "_validate_severity", "(", "args", ".", "severity", ")", ":", "return", "fields", "[", "'severity'", "]", "=", "args", ".", "severity", "try", ":", "alarm", "=", "mc", ".", "alarm_definitions", ".", "patch", "(", "*", "*", "fields", ")", "except", "(", "osc_exc", ".", "ClientException", ",", "k_exc", ".", "HttpError", ")", "as", "he", ":", "raise", "osc_exc", ".", "CommandError", "(", "'%s\\n%s'", "%", "(", "he", ".", "message", ",", "he", ".", "details", ")", ")", "else", ":", "print", "(", "jsonutils", ".", "dumps", "(", "alarm", ",", "indent", "=", "2", ")", ")" ]
41.764706
18.764706
def _pid_to_id(self, pid): """Converts a pid to a URI that can be used as an OAI-ORE identifier.""" return d1_common.url.joinPathElements( self._base_url, self._version_tag, "resolve", d1_common.url.encodePathElement(pid), )
[ "def", "_pid_to_id", "(", "self", ",", "pid", ")", ":", "return", "d1_common", ".", "url", ".", "joinPathElements", "(", "self", ".", "_base_url", ",", "self", ".", "_version_tag", ",", "\"resolve\"", ",", "d1_common", ".", "url", ".", "encodePathElement", "(", "pid", ")", ",", ")" ]
36.125
12.625
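Assuming d1_common.url.joinPathElements percent-encodes each element and joins with "/", the same identifier can be built with only the standard library; the base URL below is made up.

from urllib.parse import quote

def pid_to_id_sketch(base_url, version_tag, pid):
    return "/".join([base_url.rstrip("/"), version_tag, "resolve",
                     quote(pid, safe="")])

print(pid_to_id_sketch("https://gmn.example.org/mn", "v2", "doi:10.1000/x 1"))
# https://gmn.example.org/mn/v2/resolve/doi%3A10.1000%2Fx%201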
async def import_aggregated(self, async_client, pid):
    """Import the SciObj at {pid}.

    If the SciObj is a Resource Map, also recursively import the aggregated
    objects.
    """
    self._logger.info('Importing: {}'.format(pid))
    task_set = set()
    object_info_pyxb = d1_common.types.dataoneTypes.ObjectInfo()
    object_info_pyxb.identifier = pid
    task_set.add(self.import_object(async_client, object_info_pyxb))
    result_set, task_set = await asyncio.wait(task_set)
    assert len(result_set) == 1
    assert not task_set
    sysmeta_pyxb = result_set.pop().result()
    if not sysmeta_pyxb:
        # Import was skipped
        return
    assert d1_common.xml.get_req_val(sysmeta_pyxb.identifier) == pid
    if d1_gmn.app.did.is_resource_map_db(pid):
        for member_pid in d1_gmn.app.resource_map.get_resource_map_members_by_map(
            pid
        ):
            self.progress_logger.event("Importing aggregated SciObj")
            # log the member being imported, not the map itself
            self._logger.info(
                'Importing aggregated SciObj. pid="{}"'.format(member_pid)
            )
            await self.import_aggregated(async_client, member_pid)
[ "async", "def", "import_aggregated", "(", "self", ",", "async_client", ",", "pid", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Importing: {}'", ".", "format", "(", "pid", ")", ")", "task_set", "=", "set", "(", ")", "object_info_pyxb", "=", "d1_common", ".", "types", ".", "dataoneTypes", ".", "ObjectInfo", "(", ")", "object_info_pyxb", ".", "identifier", "=", "pid", "task_set", ".", "add", "(", "self", ".", "import_object", "(", "async_client", ",", "object_info_pyxb", ")", ")", "result_set", ",", "task_set", "=", "await", "asyncio", ".", "wait", "(", "task_set", ")", "assert", "len", "(", "result_set", ")", "==", "1", "assert", "not", "task_set", "sysmeta_pyxb", "=", "result_set", ".", "pop", "(", ")", ".", "result", "(", ")", "if", "not", "sysmeta_pyxb", ":", "# Import was skipped", "return", "assert", "d1_common", ".", "xml", ".", "get_req_val", "(", "sysmeta_pyxb", ".", "identifier", ")", "==", "pid", "if", "d1_gmn", ".", "app", ".", "did", ".", "is_resource_map_db", "(", "pid", ")", ":", "for", "member_pid", "in", "d1_gmn", ".", "app", ".", "resource_map", ".", "get_resource_map_members_by_map", "(", "pid", ")", ":", "self", ".", "progress_logger", ".", "event", "(", "\"Importing aggregated SciObj\"", ")", "self", ".", "_logger", ".", "info", "(", "'Importing aggregated SciObj. pid=\"{}\"'", ".", "format", "(", "pid", ")", ")", "await", "self", ".", "import_aggregated", "(", "async_client", ",", "member_pid", ")" ]
34.294118
25.970588
def generate_pseudo(strain_states, order=3):
    """
    Generates the pseudoinverse for a given set of strains.

    Args:
        strain_states (Nx6 array like): a list of voigt-notation
            "strain-states", i.e. perturbed indices of the strain as a
            function of the smallest strain, e.g. (0, 1, 0, 0, 1, 0)
        order (int): order of pseudoinverse to calculate

    Returns:
        mis: pseudoinverses for each order tensor, these can be
            multiplied by the central difference derivative of the stress
            with respect to the strain state
        absent_syms: symbols of the tensor absent from the PI expression
    """
    s = sp.Symbol('s')
    nstates = len(strain_states)
    ni = np.array(strain_states)*s
    mis, absent_syms = [], []
    for degree in range(2, order + 1):
        cvec, carr = get_symbol_list(degree)
        sarr = np.zeros((nstates, 6), dtype=object)
        for n, strain_v in enumerate(ni):
            # Get expressions
            exps = carr.copy()
            for i in range(degree - 1):
                exps = np.dot(exps, strain_v)
            exps /= np.math.factorial(degree - 1)
            sarr[n] = [sp.diff(exp, s, degree - 1) for exp in exps]
        svec = sarr.ravel()
        present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])
        absent_syms += [set(cvec) - present_syms]
        m = np.zeros((6*nstates, len(cvec)))
        for n, c in enumerate(cvec):
            m[:, n] = v_diff(svec, c)
        mis.append(np.linalg.pinv(m))
    return mis, absent_syms
[ "def", "generate_pseudo", "(", "strain_states", ",", "order", "=", "3", ")", ":", "s", "=", "sp", ".", "Symbol", "(", "'s'", ")", "nstates", "=", "len", "(", "strain_states", ")", "ni", "=", "np", ".", "array", "(", "strain_states", ")", "*", "s", "mis", ",", "absent_syms", "=", "[", "]", ",", "[", "]", "for", "degree", "in", "range", "(", "2", ",", "order", "+", "1", ")", ":", "cvec", ",", "carr", "=", "get_symbol_list", "(", "degree", ")", "sarr", "=", "np", ".", "zeros", "(", "(", "nstates", ",", "6", ")", ",", "dtype", "=", "object", ")", "for", "n", ",", "strain_v", "in", "enumerate", "(", "ni", ")", ":", "# Get expressions", "exps", "=", "carr", ".", "copy", "(", ")", "for", "i", "in", "range", "(", "degree", "-", "1", ")", ":", "exps", "=", "np", ".", "dot", "(", "exps", ",", "strain_v", ")", "exps", "/=", "np", ".", "math", ".", "factorial", "(", "degree", "-", "1", ")", "sarr", "[", "n", "]", "=", "[", "sp", ".", "diff", "(", "exp", ",", "s", ",", "degree", "-", "1", ")", "for", "exp", "in", "exps", "]", "svec", "=", "sarr", ".", "ravel", "(", ")", "present_syms", "=", "set", ".", "union", "(", "*", "[", "exp", ".", "atoms", "(", "sp", ".", "Symbol", ")", "for", "exp", "in", "svec", "]", ")", "absent_syms", "+=", "[", "set", "(", "cvec", ")", "-", "present_syms", "]", "m", "=", "np", ".", "zeros", "(", "(", "6", "*", "nstates", ",", "len", "(", "cvec", ")", ")", ")", "for", "n", ",", "c", "in", "enumerate", "(", "cvec", ")", ":", "m", "[", ":", ",", "n", "]", "=", "v_diff", "(", "svec", ",", "c", ")", "mis", ".", "append", "(", "np", ".", "linalg", ".", "pinv", "(", "m", ")", ")", "return", "mis", ",", "absent_syms" ]
39.512821
14.538462
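The numerical core of the function above is np.linalg.pinv. A toy check of what the pseudoinverse buys, with arbitrary values unrelated to elasticity: for an overdetermined linear system, pinv recovers the coefficients in the least-squares sense.

import numpy as np

m = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # 4 equations, 2 unknowns
c_true = np.array([2.0, -0.5])
rhs = m @ c_true                      # exact data, so least squares is exact
assert np.allclose(np.linalg.pinv(m) @ rhs, c_true)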
def _pirls(self, X, Y, weights):
    """
    Performs stable PIRLS iterations to estimate GAM coefficients

    Parameters
    ----------
    X : array-like of shape (n_samples, m_features)
        containing input data
    Y : array-like of shape (n,)
        containing target data
    weights : array-like of shape (n,)
        containing sample weights

    Returns
    -------
    None
    """
    modelmat = self._modelmat(X)  # build a basis matrix for the GLM
    n, m = modelmat.shape

    # initialize GLM coefficients if model is not yet fitted
    if (not self._is_fitted
            or len(self.coef_) != self.terms.n_coefs
            or not np.isfinite(self.coef_).all()):
        # initialize the model
        self.coef_ = self._initial_estimate(Y, modelmat)

    assert np.isfinite(self.coef_).all(), "coefficients should be well-behaved, but found: {}".format(self.coef_)

    P = self._P()
    S = sp.sparse.diags(np.ones(m) * np.sqrt(EPS))  # improve condition
    # S += self._H  # add any user-chosen minimum penalty to the diagonal

    # if we don't have any constraints, then do cholesky now
    if not self.terms.hasconstraint:
        E = self._cholesky(S + P, sparse=False, verbose=self.verbose)

    min_n_m = np.min([m, n])
    Dinv = np.zeros((min_n_m + m, m)).T

    for _ in range(self.max_iter):

        # recompute cholesky if needed
        if self.terms.hasconstraint:
            P = self._P()
            C = self._C()
            E = self._cholesky(S + P + C, sparse=False, verbose=self.verbose)

        # forward pass
        y = deepcopy(Y)  # for simplicity
        lp = self._linear_predictor(modelmat=modelmat)
        mu = self.link.mu(lp, self.distribution)
        W = self._W(mu, weights, y)  # create pirls weight matrix

        # check for weights == 0, nan, and update
        mask = self._mask(W.diagonal())
        y = y[mask]  # update
        lp = lp[mask]  # update
        mu = mu[mask]  # update
        W = sp.sparse.diags(W.diagonal()[mask])  # update

        # PIRLS Wood pg 183
        pseudo_data = W.dot(self._pseudo_data(y, lp, mu))

        # log on-loop-start stats
        self._on_loop_start(vars())

        WB = W.dot(modelmat[mask, :])  # common matrix product
        Q, R = np.linalg.qr(WB.A)

        if not np.isfinite(Q).all() or not np.isfinite(R).all():
            raise ValueError('QR decomposition produced NaN or Inf. '
                             'Check X data.')

        # need to recompute the number of singular values
        min_n_m = np.min([m, n, mask.sum()])
        Dinv = np.zeros((m, min_n_m))

        # SVD
        U, d, Vt = np.linalg.svd(np.vstack([R, E]))

        svd_mask = d <= (d.max() * np.sqrt(EPS))  # flag small singular values (note: mask is never applied)

        np.fill_diagonal(Dinv, d**-1)  # invert the singular values
        U1 = U[:min_n_m, :min_n_m]  # keep only top corner of U

        # update coefficients
        B = Vt.T.dot(Dinv).dot(U1.T).dot(Q.T)
        coef_new = B.dot(pseudo_data).flatten()
        diff = np.linalg.norm(self.coef_ - coef_new) / np.linalg.norm(coef_new)
        self.coef_ = coef_new  # update

        # log on-loop-end stats
        self._on_loop_end(vars())

        # check convergence
        if diff < self.tol:
            break

    # estimate statistics even if not converged
    self._estimate_model_statistics(Y, modelmat, inner=None, BW=WB.T, B=B,
                                    weights=weights, U1=U1)

    if diff < self.tol:
        return

    print('did not converge')
    return
[ "def", "_pirls", "(", "self", ",", "X", ",", "Y", ",", "weights", ")", ":", "modelmat", "=", "self", ".", "_modelmat", "(", "X", ")", "# build a basis matrix for the GLM", "n", ",", "m", "=", "modelmat", ".", "shape", "# initialize GLM coefficients if model is not yet fitted", "if", "(", "not", "self", ".", "_is_fitted", "or", "len", "(", "self", ".", "coef_", ")", "!=", "self", ".", "terms", ".", "n_coefs", "or", "not", "np", ".", "isfinite", "(", "self", ".", "coef_", ")", ".", "all", "(", ")", ")", ":", "# initialize the model", "self", ".", "coef_", "=", "self", ".", "_initial_estimate", "(", "Y", ",", "modelmat", ")", "assert", "np", ".", "isfinite", "(", "self", ".", "coef_", ")", ".", "all", "(", ")", ",", "\"coefficients should be well-behaved, but found: {}\"", ".", "format", "(", "self", ".", "coef_", ")", "P", "=", "self", ".", "_P", "(", ")", "S", "=", "sp", ".", "sparse", ".", "diags", "(", "np", ".", "ones", "(", "m", ")", "*", "np", ".", "sqrt", "(", "EPS", ")", ")", "# improve condition", "# S += self._H # add any user-chosen minumum penalty to the diagonal", "# if we dont have any constraints, then do cholesky now", "if", "not", "self", ".", "terms", ".", "hasconstraint", ":", "E", "=", "self", ".", "_cholesky", "(", "S", "+", "P", ",", "sparse", "=", "False", ",", "verbose", "=", "self", ".", "verbose", ")", "min_n_m", "=", "np", ".", "min", "(", "[", "m", ",", "n", "]", ")", "Dinv", "=", "np", ".", "zeros", "(", "(", "min_n_m", "+", "m", ",", "m", ")", ")", ".", "T", "for", "_", "in", "range", "(", "self", ".", "max_iter", ")", ":", "# recompute cholesky if needed", "if", "self", ".", "terms", ".", "hasconstraint", ":", "P", "=", "self", ".", "_P", "(", ")", "C", "=", "self", ".", "_C", "(", ")", "E", "=", "self", ".", "_cholesky", "(", "S", "+", "P", "+", "C", ",", "sparse", "=", "False", ",", "verbose", "=", "self", ".", "verbose", ")", "# forward pass", "y", "=", "deepcopy", "(", "Y", ")", "# for simplicity", "lp", "=", "self", ".", "_linear_predictor", "(", "modelmat", "=", "modelmat", ")", "mu", "=", "self", ".", "link", ".", "mu", "(", "lp", ",", "self", ".", "distribution", ")", "W", "=", "self", ".", "_W", "(", "mu", ",", "weights", ",", "y", ")", "# create pirls weight matrix", "# check for weghts == 0, nan, and update", "mask", "=", "self", ".", "_mask", "(", "W", ".", "diagonal", "(", ")", ")", "y", "=", "y", "[", "mask", "]", "# update", "lp", "=", "lp", "[", "mask", "]", "# update", "mu", "=", "mu", "[", "mask", "]", "# update", "W", "=", "sp", ".", "sparse", ".", "diags", "(", "W", ".", "diagonal", "(", ")", "[", "mask", "]", ")", "# update", "# PIRLS Wood pg 183", "pseudo_data", "=", "W", ".", "dot", "(", "self", ".", "_pseudo_data", "(", "y", ",", "lp", ",", "mu", ")", ")", "# log on-loop-start stats", "self", ".", "_on_loop_start", "(", "vars", "(", ")", ")", "WB", "=", "W", ".", "dot", "(", "modelmat", "[", "mask", ",", ":", "]", ")", "# common matrix product", "Q", ",", "R", "=", "np", ".", "linalg", ".", "qr", "(", "WB", ".", "A", ")", "if", "not", "np", ".", "isfinite", "(", "Q", ")", ".", "all", "(", ")", "or", "not", "np", ".", "isfinite", "(", "R", ")", ".", "all", "(", ")", ":", "raise", "ValueError", "(", "'QR decomposition produced NaN or Inf. 
'", "'Check X data.'", ")", "# need to recompute the number of singular values", "min_n_m", "=", "np", ".", "min", "(", "[", "m", ",", "n", ",", "mask", ".", "sum", "(", ")", "]", ")", "Dinv", "=", "np", ".", "zeros", "(", "(", "m", ",", "min_n_m", ")", ")", "# SVD", "U", ",", "d", ",", "Vt", "=", "np", ".", "linalg", ".", "svd", "(", "np", ".", "vstack", "(", "[", "R", ",", "E", "]", ")", ")", "svd_mask", "=", "d", "<=", "(", "d", ".", "max", "(", ")", "*", "np", ".", "sqrt", "(", "EPS", ")", ")", "# mask out small singular values", "np", ".", "fill_diagonal", "(", "Dinv", ",", "d", "**", "-", "1", ")", "# invert the singular values", "U1", "=", "U", "[", ":", "min_n_m", ",", ":", "min_n_m", "]", "# keep only top corner of U", "# update coefficients", "B", "=", "Vt", ".", "T", ".", "dot", "(", "Dinv", ")", ".", "dot", "(", "U1", ".", "T", ")", ".", "dot", "(", "Q", ".", "T", ")", "coef_new", "=", "B", ".", "dot", "(", "pseudo_data", ")", ".", "flatten", "(", ")", "diff", "=", "np", ".", "linalg", ".", "norm", "(", "self", ".", "coef_", "-", "coef_new", ")", "/", "np", ".", "linalg", ".", "norm", "(", "coef_new", ")", "self", ".", "coef_", "=", "coef_new", "# update", "# log on-loop-end stats", "self", ".", "_on_loop_end", "(", "vars", "(", ")", ")", "# check convergence", "if", "diff", "<", "self", ".", "tol", ":", "break", "# estimate statistics even if not converged", "self", ".", "_estimate_model_statistics", "(", "Y", ",", "modelmat", ",", "inner", "=", "None", ",", "BW", "=", "WB", ".", "T", ",", "B", "=", "B", ",", "weights", "=", "weights", ",", "U1", "=", "U1", ")", "if", "diff", "<", "self", ".", "tol", ":", "return", "print", "(", "'did not converge'", ")", "return" ]
34.831776
21.224299
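The update pattern above (weights from the current mean, pseudo-data, weighted least squares) is easier to see without the penalty and the QR/SVD stabilization. A bare-bones, unpenalized IRLS for logistic regression, as an illustration only and not the stabilized route the function takes:

import numpy as np

def irls_logistic(X, y, max_iter=100, tol=1e-8):
    beta = np.zeros(X.shape[1])
    for _ in range(max_iter):
        lp = X @ beta                      # linear predictor
        mu = 1.0 / (1.0 + np.exp(-lp))     # inverse logit link
        w = mu * (1.0 - mu)                # IRLS weights
        z = lp + (y - mu) / w              # pseudo-data (working response)
        beta_new = np.linalg.solve(X.T @ (X * w[:, None]), X.T @ (w * z))
        if np.linalg.norm(beta_new - beta) < tol * (1 + np.linalg.norm(beta_new)):
            return beta_new
        beta = beta_new
    return beta

rng = np.random.default_rng(0)
X = np.column_stack([np.ones(200), rng.normal(size=200)])
y = (rng.random(200) < 1 / (1 + np.exp(-(0.5 + 1.5 * X[:, 1])))).astype(float)
print(irls_logistic(X, y))  # roughly [0.5, 1.5]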
def create_key_ring( self, parent, key_ring_id, key_ring, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Create a new ``KeyRing`` in a given Project and Location. Example: >>> from google.cloud import kms_v1 >>> >>> client = kms_v1.KeyManagementServiceClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `key_ring_id`: >>> key_ring_id = '' >>> >>> # TODO: Initialize `key_ring`: >>> key_ring = {} >>> >>> response = client.create_key_ring(parent, key_ring_id, key_ring) Args: parent (str): Required. The resource name of the location associated with the ``KeyRings``, in the format ``projects/*/locations/*``. key_ring_id (str): Required. It must be unique within a location and match the regular expression ``[a-zA-Z0-9_-]{1,63}`` key_ring (Union[dict, ~google.cloud.kms_v1.types.KeyRing]): A ``KeyRing`` with initial field values. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.kms_v1.types.KeyRing` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.kms_v1.types.KeyRing` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "create_key_ring" not in self._inner_api_calls: self._inner_api_calls[ "create_key_ring" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_key_ring, default_retry=self._method_configs["CreateKeyRing"].retry, default_timeout=self._method_configs["CreateKeyRing"].timeout, client_info=self._client_info, ) request = service_pb2.CreateKeyRingRequest( parent=parent, key_ring_id=key_ring_id, key_ring=key_ring ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["create_key_ring"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "create_key_ring", "(", "self", ",", "parent", ",", "key_ring_id", ",", "key_ring", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"create_key_ring\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"create_key_ring\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "create_key_ring", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"CreateKeyRing\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"CreateKeyRing\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "service_pb2", ".", "CreateKeyRingRequest", "(", "parent", "=", "parent", ",", "key_ring_id", "=", "key_ring_id", ",", "key_ring", "=", "key_ring", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"parent\"", ",", "parent", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"create_key_ring\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
41.105882
24.517647
def _extract_functions(resources): """ Extracts and returns function information from the given dictionary of SAM/CloudFormation resources. This method supports functions defined with AWS::Serverless::Function and AWS::Lambda::Function :param dict resources: Dictionary of SAM/CloudFormation resources :return dict(string : samcli.commands.local.lib.provider.Function): Dictionary of function LogicalId to the Function configuration object """ result = {} for name, resource in resources.items(): resource_type = resource.get("Type") resource_properties = resource.get("Properties", {}) if resource_type == SamFunctionProvider._SERVERLESS_FUNCTION: layers = SamFunctionProvider._parse_layer_info(resource_properties.get("Layers", []), resources) result[name] = SamFunctionProvider._convert_sam_function_resource(name, resource_properties, layers) elif resource_type == SamFunctionProvider._LAMBDA_FUNCTION: layers = SamFunctionProvider._parse_layer_info(resource_properties.get("Layers", []), resources) result[name] = SamFunctionProvider._convert_lambda_function_resource(name, resource_properties, layers) # We don't care about other resource types. Just ignore them return result
[ "def", "_extract_functions", "(", "resources", ")", ":", "result", "=", "{", "}", "for", "name", ",", "resource", "in", "resources", ".", "items", "(", ")", ":", "resource_type", "=", "resource", ".", "get", "(", "\"Type\"", ")", "resource_properties", "=", "resource", ".", "get", "(", "\"Properties\"", ",", "{", "}", ")", "if", "resource_type", "==", "SamFunctionProvider", ".", "_SERVERLESS_FUNCTION", ":", "layers", "=", "SamFunctionProvider", ".", "_parse_layer_info", "(", "resource_properties", ".", "get", "(", "\"Layers\"", ",", "[", "]", ")", ",", "resources", ")", "result", "[", "name", "]", "=", "SamFunctionProvider", ".", "_convert_sam_function_resource", "(", "name", ",", "resource_properties", ",", "layers", ")", "elif", "resource_type", "==", "SamFunctionProvider", ".", "_LAMBDA_FUNCTION", ":", "layers", "=", "SamFunctionProvider", ".", "_parse_layer_info", "(", "resource_properties", ".", "get", "(", "\"Layers\"", ",", "[", "]", ")", ",", "resources", ")", "result", "[", "name", "]", "=", "SamFunctionProvider", ".", "_convert_lambda_function_resource", "(", "name", ",", "resource_properties", ",", "layers", ")", "# We don't care about other resource types. Just ignore them", "return", "result" ]
48.964286
37.178571
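What the loop receives is just the parsed "Resources" section of a template. A hypothetical, stripped-down example of the dispatch-over-Type pattern:

resources = {
    "HelloWorldFunction": {
        "Type": "AWS::Serverless::Function",
        "Properties": {"Runtime": "python3.9", "Handler": "app.handler"},
    },
    "MyTable": {"Type": "AWS::DynamoDB::Table", "Properties": {}},
}

for name, resource in resources.items():
    if resource.get("Type") == "AWS::Serverless::Function":
        # only function resources are converted; everything else is skipped
        print(name, resource.get("Properties", {}).get("Handler"))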
def check_overlap(current, hit, overlap=200):
    """
    Determine whether the sequence has already hit the same part of the
    model, which would indicate that this hit is for another 16S rRNA gene.
    """
    for prev in current:
        p_coords = prev[2:4]
        coords = hit[2:4]
        if get_overlap(coords, p_coords) >= overlap:
            return True
    return False
[ "def", "check_overlap", "(", "current", ",", "hit", ",", "overlap", "=", "200", ")", ":", "for", "prev", "in", "current", ":", "p_coords", "=", "prev", "[", "2", ":", "4", "]", "coords", "=", "hit", "[", "2", ":", "4", "]", "if", "get_overlap", "(", "coords", ",", "p_coords", ")", ">=", "overlap", ":", "return", "True", "return", "False" ]
32.272727
13.545455
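get_overlap is not shown here; a common definition, assumed below, is the length of the intersection of two coordinate ranges.

def get_overlap(a, b):
    # number of positions shared by ranges a and b (0 if disjoint)
    return max(0, min(a[1], b[1]) - max(a[0], b[0]))

prev_hit = ("seq1", "model", 10, 600)   # fields 2:4 are model coordinates
new_hit = ("seq1", "model", 350, 900)
print(get_overlap(new_hit[2:4], prev_hit[2:4]))  # 250, so >= 200 -> same region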
def _get_repo_file(self, repo_path):
    """
    Lazy load RepoFile objects on demand.

    :param repo_path: path of the repo file to load
    :return: the cached RepoFile instance for ``repo_path``
    """
    if repo_path not in self._repo_files:
        self._repo_files[repo_path] = RepoFile(repo_path)
    return self._repo_files[repo_path]
[ "def", "_get_repo_file", "(", "self", ",", "repo_path", ")", ":", "if", "repo_path", "not", "in", "self", ".", "_repo_files", ":", "self", ".", "_repo_files", "[", "repo_path", "]", "=", "RepoFile", "(", "repo_path", ")", "return", "self", ".", "_repo_files", "[", "repo_path", "]" ]
32.444444
8.444444
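The same lazy-construction-with-cache pattern in isolation, with a stand-in for RepoFile; repeated lookups return the identical object.

class RepoFileStub:
    def __init__(self, path):
        self.path = path        # a real RepoFile would parse the file here

_cache = {}

def get_repo_file(path):
    if path not in _cache:
        _cache[path] = RepoFileStub(path)   # constructed once, then reused
    return _cache[path]

a = get_repo_file("/etc/yum.repos.d/base.repo")
assert a is get_repo_file("/etc/yum.repos.d/base.repo")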
def write_languages(f, l):
    """Write the language mapping ``l`` to file object ``f`` as a Python
    ``Languages`` dict literal."""
    f.write("Languages = {%s" % os.linesep)
    for lang in sorted(l):
        f.write("    %r: %r,%s" % (lang, l[lang], os.linesep))
    f.write("}%s" % os.linesep)
[ "def", "write_languages", "(", "f", ",", "l", ")", ":", "f", ".", "write", "(", "\"Languages = {%s\"", "%", "os", ".", "linesep", ")", "for", "lang", "in", "sorted", "(", "l", ")", ":", "f", ".", "write", "(", "\" %r: %r,%s\"", "%", "(", "lang", ",", "l", "[", "lang", "]", ",", "os", ".", "linesep", ")", ")", "f", ".", "write", "(", "\"}%s\"", "%", "os", ".", "linesep", ")" ]
37.5
10.333333
def get_icohp_dict_of_site(self, site, minsummedicohp=None, maxsummedicohp=None, minbondlength=0.0, maxbondlength=8.0, only_bonds_to=None): """ get a dict of IcohpValue for a certain site (indicated by integer) Args: site: integer describing the site of interest, order as in Icohplist.lobster/Icooplist.lobster, starts at 0 minsummedicohp: float, minimal icohp/icoop of the bonds that are considered. It is the summed ICOHP value from both spin channels for spin polarized cases maxsummedicohp: float, maximal icohp/icoop of the bonds that are considered. It is the summed ICOHP value from both spin channels for spin polarized cases minbondlength: float, defines the minimum of the bond lengths of the bonds maxbondlength: float, defines the maximum of the bond lengths of the bonds only_bonds_to: list of strings describing the bonding partners that are allowed, e.g. ['O'] Returns: dict of IcohpValues, the keys correspond to the values from the initial list_labels """ newicohp_dict = {} for key, value in self._icohplist.items(): atomnumber1 = int(re.split(r'(\d+)', value._atom1)[1]) - 1 atomnumber2 = int(re.split(r'(\d+)', value._atom2)[1]) - 1 if site == atomnumber1 or site == atomnumber2: # manipulate order of atoms so that searched one is always atom1 if site == atomnumber2: save = value._atom1 value._atom1 = value._atom2 value._atom2 = save if only_bonds_to is None: second_test = True else: second_test = (re.split(r'(\d+)', value._atom2)[0] in only_bonds_to) if value._length >= minbondlength and value._length <= maxbondlength and second_test: if minsummedicohp is not None: if value.summed_icohp >= minsummedicohp: if maxsummedicohp is not None: if value.summed_icohp <= maxsummedicohp: newicohp_dict[key] = value else: newicohp_dict[key] = value else: if maxsummedicohp is not None: if value.summed_icohp <= maxsummedicohp: newicohp_dict[key] = value else: newicohp_dict[key] = value return newicohp_dict
[ "def", "get_icohp_dict_of_site", "(", "self", ",", "site", ",", "minsummedicohp", "=", "None", ",", "maxsummedicohp", "=", "None", ",", "minbondlength", "=", "0.0", ",", "maxbondlength", "=", "8.0", ",", "only_bonds_to", "=", "None", ")", ":", "newicohp_dict", "=", "{", "}", "for", "key", ",", "value", "in", "self", ".", "_icohplist", ".", "items", "(", ")", ":", "atomnumber1", "=", "int", "(", "re", ".", "split", "(", "r'(\\d+)'", ",", "value", ".", "_atom1", ")", "[", "1", "]", ")", "-", "1", "atomnumber2", "=", "int", "(", "re", ".", "split", "(", "r'(\\d+)'", ",", "value", ".", "_atom2", ")", "[", "1", "]", ")", "-", "1", "if", "site", "==", "atomnumber1", "or", "site", "==", "atomnumber2", ":", "# manipulate order of atoms so that searched one is always atom1", "if", "site", "==", "atomnumber2", ":", "save", "=", "value", ".", "_atom1", "value", ".", "_atom1", "=", "value", ".", "_atom2", "value", ".", "_atom2", "=", "save", "if", "only_bonds_to", "is", "None", ":", "second_test", "=", "True", "else", ":", "second_test", "=", "(", "re", ".", "split", "(", "r'(\\d+)'", ",", "value", ".", "_atom2", ")", "[", "0", "]", "in", "only_bonds_to", ")", "if", "value", ".", "_length", ">=", "minbondlength", "and", "value", ".", "_length", "<=", "maxbondlength", "and", "second_test", ":", "if", "minsummedicohp", "is", "not", "None", ":", "if", "value", ".", "summed_icohp", ">=", "minsummedicohp", ":", "if", "maxsummedicohp", "is", "not", "None", ":", "if", "value", ".", "summed_icohp", "<=", "maxsummedicohp", ":", "newicohp_dict", "[", "key", "]", "=", "value", "else", ":", "newicohp_dict", "[", "key", "]", "=", "value", "else", ":", "if", "maxsummedicohp", "is", "not", "None", ":", "if", "value", ".", "summed_icohp", "<=", "maxsummedicohp", ":", "newicohp_dict", "[", "key", "]", "=", "value", "else", ":", "newicohp_dict", "[", "key", "]", "=", "value", "return", "newicohp_dict" ]
57.304348
29.608696
def RSA_encrypt(public_key, message):
    '''Encrypt a string with RSA.

    public_key - the public key
    message - the message to encrypt, a UTF-8 encoded string
    @return - a base64 encoded string
    '''
    # If the RSA module failed to import, just return an empty string.
    if not globals().get('RSA'):
        return ''
    rsakey = RSA.importKey(public_key)
    rsakey = PKCS1_v1_5.new(rsakey)
    encrypted = rsakey.encrypt(message.encode())
    return base64.encodestring(encrypted).decode().replace('\n', '')
[ "def", "RSA_encrypt", "(", "public_key", ",", "message", ")", ":", "# 如果没能成功导入RSA模块, 就直接返回空白字符串.", "if", "not", "globals", "(", ")", ".", "get", "(", "'RSA'", ")", ":", "return", "''", "rsakey", "=", "RSA", ".", "importKey", "(", "public_key", ")", "rsakey", "=", "PKCS1_v1_5", ".", "new", "(", "rsakey", ")", "encrypted", "=", "rsakey", ".", "encrypt", "(", "message", ".", "encode", "(", ")", ")", "return", "base64", ".", "encodestring", "(", "encrypted", ")", ".", "decode", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")" ]
29.928571
15.214286
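A hedged round-trip sketch, assuming PyCrypto/pycryptodome is importable as Crypto and that RSA_encrypt above is in scope. Note that base64.encodestring was removed in Python 3.9; base64.encodebytes is the modern spelling.

import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

key = RSA.generate(2048)                              # throwaway test key
token = RSA_encrypt(key.publickey().exportKey(), 'secret')
print(PKCS1_v1_5.new(key).decrypt(base64.b64decode(token), None))  # b'secret'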
def admin_tools_render_dashboard(context, location='index', dashboard=None): """ Template tag that renders the dashboard, it takes two optional arguments: ``location`` The location of the dashboard, it can be 'index' (for the admin index dashboard) or 'app_index' (for the app index dashboard), the default value is 'index'. ``dashboard`` An instance of ``Dashboard``, if not given, the dashboard is retrieved with the ``get_index_dashboard`` or ``get_app_index_dashboard`` functions, depending on the ``location`` argument. """ if dashboard is None: dashboard = get_dashboard(context, location) dashboard.init_with_context(context) dashboard._prepare_children() try: preferences = DashboardPreferences.objects.get( user=context['request'].user, dashboard_id=dashboard.get_id() ).data except DashboardPreferences.DoesNotExist: preferences = '{}' try: DashboardPreferences( user=context['request'].user, dashboard_id=dashboard.get_id(), data=preferences ).save() except IntegrityError: # dashboard already was saved for that (user, dashboard) pass context.update({ 'template': dashboard.template, 'dashboard': dashboard, 'dashboard_preferences': preferences, 'split_at': math.ceil( float(len(dashboard.children)) / float(dashboard.columns) ), 'has_disabled_modules': len( [m for m in dashboard.children if not m.enabled] ) > 0, 'admin_url': reverse('%s:index' % get_admin_site_name(context)), }) return context
[ "def", "admin_tools_render_dashboard", "(", "context", ",", "location", "=", "'index'", ",", "dashboard", "=", "None", ")", ":", "if", "dashboard", "is", "None", ":", "dashboard", "=", "get_dashboard", "(", "context", ",", "location", ")", "dashboard", ".", "init_with_context", "(", "context", ")", "dashboard", ".", "_prepare_children", "(", ")", "try", ":", "preferences", "=", "DashboardPreferences", ".", "objects", ".", "get", "(", "user", "=", "context", "[", "'request'", "]", ".", "user", ",", "dashboard_id", "=", "dashboard", ".", "get_id", "(", ")", ")", ".", "data", "except", "DashboardPreferences", ".", "DoesNotExist", ":", "preferences", "=", "'{}'", "try", ":", "DashboardPreferences", "(", "user", "=", "context", "[", "'request'", "]", ".", "user", ",", "dashboard_id", "=", "dashboard", ".", "get_id", "(", ")", ",", "data", "=", "preferences", ")", ".", "save", "(", ")", "except", "IntegrityError", ":", "# dashboard already was saved for that (user, dashboard)", "pass", "context", ".", "update", "(", "{", "'template'", ":", "dashboard", ".", "template", ",", "'dashboard'", ":", "dashboard", ",", "'dashboard_preferences'", ":", "preferences", ",", "'split_at'", ":", "math", ".", "ceil", "(", "float", "(", "len", "(", "dashboard", ".", "children", ")", ")", "/", "float", "(", "dashboard", ".", "columns", ")", ")", ",", "'has_disabled_modules'", ":", "len", "(", "[", "m", "for", "m", "in", "dashboard", ".", "children", "if", "not", "m", ".", "enabled", "]", ")", ">", "0", ",", "'admin_url'", ":", "reverse", "(", "'%s:index'", "%", "get_admin_site_name", "(", "context", ")", ")", ",", "}", ")", "return", "context" ]
34.42
20.1
def _write_to_error(self, s, truncate=False):
    """Writes the given output to the error file, appending unless `truncate` is True."""
    # if truncate is True, set write mode to truncate
    with open(self._errorfile, 'w' if truncate else 'a') as fp:
        fp.write(to_text(s))
[ "def", "_write_to_error", "(", "self", ",", "s", ",", "truncate", "=", "False", ")", ":", "# if truncate is True, set write mode to truncate", "with", "open", "(", "self", ".", "_errorfile", ",", "'w'", "if", "truncate", "else", "'a'", ")", "as", "fp", ":", "fp", ".", "writelines", "(", "(", "to_text", "(", "s", ")", ")", ",", ")" ]
60.6
10
def working_yesterday(self, date_from=None, date_format=None):
    """
    Return yesterday's date, from now or from a supplied date,
    counting only working days. Thus Monday becomes Saturday and
    Saturday becomes Friday.

    :param date_from: reference date
    :return: datetime
    """
    # yesterday's date, only on working days
    return self.delta(days=-1, date_from=date_from, date_format=date_format,
                      days_range=[1, 2, 3, 4, 5, 6])
[ "def", "working_yesterday", "(", "self", ",", "date_from", "=", "None", ",", "date_format", "=", "None", ")", ":", "# date d'hier que sur les jours de week-end", "return", "self", ".", "delta", "(", "days", "=", "-", "1", ",", "date_from", "=", "date_from", ",", "date_format", "=", "date_format", ",", "days_range", "=", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", "]", ")" ]
39.909091
18.636364
def from_ros_pose_msg(pose_msg, from_frame='unassigned', to_frame='world'):
    """Creates a RigidTransform from a ROS pose msg.

    Parameters
    ----------
    pose_msg : :obj:`geometry_msgs.msg.Pose`
        ROS pose message
    from_frame : :obj:`str`
        name of the frame the transform maps from
    to_frame : :obj:`str`
        name of the frame the transform maps to
    """
    quaternion = np.array([pose_msg.orientation.w, pose_msg.orientation.x,
                           pose_msg.orientation.y, pose_msg.orientation.z])
    position = np.array([pose_msg.position.x, pose_msg.position.y,
                         pose_msg.position.z])
    pose = RigidTransform(rotation=quaternion, translation=position,
                          from_frame=from_frame, to_frame=to_frame)
    return pose
[ "def", "from_ros_pose_msg", "(", "pose_msg", ",", "from_frame", "=", "'unassigned'", ",", "to_frame", "=", "'world'", ")", ":", "quaternion", "=", "np", ".", "array", "(", "[", "pose_msg", ".", "orientation", ".", "w", ",", "pose_msg", ".", "orientation", ".", "x", ",", "pose_msg", ".", "orientation", ".", "y", ",", "pose_msg", ".", "orientation", ".", "z", "]", ")", "position", "=", "np", ".", "array", "(", "[", "pose_msg", ".", "position", ".", "x", ",", "pose_msg", ".", "position", ".", "y", ",", "pose_msg", ".", "position", ".", "z", "]", ")", "pose", "=", "RigidTransform", "(", "rotation", "=", "quaternion", ",", "translation", "=", "position", ",", "from_frame", "=", "from_frame", ",", "to_frame", "=", "to_frame", ")", "return", "pose" ]
40.818182
12.090909
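Outside a ROS environment, any object with the same attribute shape works; SimpleNamespace stands in for geometry_msgs.msg.Pose below. This assumes, as the function above does, that RigidTransform accepts a wxyz quaternion for its rotation argument.

from types import SimpleNamespace

pose_msg = SimpleNamespace(
    orientation=SimpleNamespace(w=1.0, x=0.0, y=0.0, z=0.0),  # identity rotation
    position=SimpleNamespace(x=0.1, y=0.2, z=0.3),
)
T = from_ros_pose_msg(pose_msg, from_frame='gripper', to_frame='world')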
def get_rtr_name(self, router_id):
    """Retrieve the router name. Incomplete. """
    try:
        body = {}
        router = self.neutronclient.show_router(router_id, body=body)
        return router.get('router').get('name')
    except Exception as exc:
        LOG.error("Failed to show router %(id)s "
                  "Exc %(exc)s", {'id': router_id, 'exc': str(exc)})
[ "def", "get_rtr_name", "(", "self", ",", "router_id", ")", ":", "try", ":", "body", "=", "{", "}", "router", "=", "self", ".", "neutronclient", ".", "show_router", "(", "router_id", ",", "body", "=", "body", ")", "return", "router", ".", "get", "(", "'router'", ")", ".", "get", "(", "'name'", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "error", "(", "\"Failed to show router interface %(id)s \"", "\"Exc %(exc)s\"", ",", "{", "'id'", ":", "router_id", ",", "'exc'", ":", "str", "(", "exc", ")", "}", ")" ]
45.555556
17.777778
def isLoopback (self, ifname): """Check whether interface is a loopback device. @param ifname: interface name @type ifname: string """ # since not all systems have IFF_LOOPBACK as a flag defined, # the ifname is tested first if ifname.startswith('lo'): return True return (self.getFlags(ifname) & self.IFF_LOOPBACK) != 0
[ "def", "isLoopback", "(", "self", ",", "ifname", ")", ":", "# since not all systems have IFF_LOOPBACK as a flag defined,", "# the ifname is tested first", "if", "ifname", ".", "startswith", "(", "'lo'", ")", ":", "return", "True", "return", "(", "self", ".", "getFlags", "(", "ifname", ")", "&", "self", ".", "IFF_LOOPBACK", ")", "!=", "0" ]
38.7
10.2
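The flag test is a plain bitwise AND. On Linux, IFF_LOOPBACK is 0x8 in the SIOCGIFFLAGS result; the flags value below is made up for illustration.

IFF_LOOPBACK = 0x8
flags = 0x49    # e.g. IFF_UP (0x1) | IFF_LOOPBACK (0x8) | IFF_RUNNING (0x40)
print((flags & IFF_LOOPBACK) != 0)   # True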
def inject(self, request, response): """ Inject the debug toolbar iframe into an HTML response. """ # called in host app if not isinstance(response, Response): return settings = request.app[APP_KEY]['settings'] response_html = response.body route = request.app.router['debugtoolbar.request'] toolbar_url = route.url_for(request_id=request['id']) button_style = settings['button_style'] css_path = request.app.router[STATIC_ROUTE_NAME].url_for( filename='css/toolbar_button.css') toolbar_css = toolbar_css_template % {'css_path': css_path} toolbar_html = toolbar_html_template % { 'button_style': button_style, 'css_path': css_path, 'toolbar_url': toolbar_url} toolbar_html = toolbar_html.encode(response.charset or 'utf-8') toolbar_css = toolbar_css.encode(response.charset or 'utf-8') response_html = replace_insensitive( response_html, b'</head>', toolbar_css + b'</head>') response.body = replace_insensitive( response_html, b'</body>', toolbar_html + b'</body>')
[ "def", "inject", "(", "self", ",", "request", ",", "response", ")", ":", "# called in host app", "if", "not", "isinstance", "(", "response", ",", "Response", ")", ":", "return", "settings", "=", "request", ".", "app", "[", "APP_KEY", "]", "[", "'settings'", "]", "response_html", "=", "response", ".", "body", "route", "=", "request", ".", "app", ".", "router", "[", "'debugtoolbar.request'", "]", "toolbar_url", "=", "route", ".", "url_for", "(", "request_id", "=", "request", "[", "'id'", "]", ")", "button_style", "=", "settings", "[", "'button_style'", "]", "css_path", "=", "request", ".", "app", ".", "router", "[", "STATIC_ROUTE_NAME", "]", ".", "url_for", "(", "filename", "=", "'css/toolbar_button.css'", ")", "toolbar_css", "=", "toolbar_css_template", "%", "{", "'css_path'", ":", "css_path", "}", "toolbar_html", "=", "toolbar_html_template", "%", "{", "'button_style'", ":", "button_style", ",", "'css_path'", ":", "css_path", ",", "'toolbar_url'", ":", "toolbar_url", "}", "toolbar_html", "=", "toolbar_html", ".", "encode", "(", "response", ".", "charset", "or", "'utf-8'", ")", "toolbar_css", "=", "toolbar_css", ".", "encode", "(", "response", ".", "charset", "or", "'utf-8'", ")", "response_html", "=", "replace_insensitive", "(", "response_html", ",", "b'</head>'", ",", "toolbar_css", "+", "b'</head>'", ")", "response", ".", "body", "=", "replace_insensitive", "(", "response_html", ",", "b'</body>'", ",", "toolbar_html", "+", "b'</body>'", ")" ]
39.1
15.233333
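replace_insensitive is a project helper that is not shown here; a minimal case-insensitive single replacement over bytes, assumed to behave like it, can be written as:

def replace_insensitive_sketch(haystack, needle, replacement):
    # replace the first case-insensitive match of needle in haystack
    idx = haystack.lower().find(needle.lower())
    if idx == -1:
        return haystack
    return haystack[:idx] + replacement + haystack[idx + len(needle):]

html = b"<html><body>hi</BODY></html>"
print(replace_insensitive_sketch(html, b"</body>", b"<script/></body>"))
# b'<html><body>hi<script/></body></html>'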
def parse_oxide(self): """ Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide. Returns: oxide_type (str): Type of oxide ozonide/peroxide/superoxide/hydroxide/None. nbonds (int): Number of peroxide/superoxide/hydroxide bonds in structure. """ structure = self.structure relative_cutoff = self.relative_cutoff o_sites_frac_coords = [] h_sites_frac_coords = [] lattice = structure.lattice if isinstance(structure.composition.elements[0], Element): comp = structure.composition elif isinstance(structure.composition.elements[0], Specie): elmap = collections.defaultdict(float) for site in structure: for species, occu in site.species.items(): elmap[species.element] += occu comp = Composition(elmap) if Element("O") not in comp or comp.is_element: return "None", 0 for site in structure: syms = [sp.symbol for sp in site.species.keys()] if "O" in syms: o_sites_frac_coords.append(site.frac_coords) if "H" in syms: h_sites_frac_coords.append(site.frac_coords) if h_sites_frac_coords: dist_matrix = lattice.get_all_distances(o_sites_frac_coords, h_sites_frac_coords) if np.any(dist_matrix < relative_cutoff * 0.93): return "hydroxide", len( np.where(dist_matrix < relative_cutoff * 0.93)[0]) / 2.0 dist_matrix = lattice.get_all_distances(o_sites_frac_coords, o_sites_frac_coords) np.fill_diagonal(dist_matrix, 1000) is_superoxide = False is_peroxide = False is_ozonide = False if np.any(dist_matrix < relative_cutoff * 1.35): bond_atoms = np.where(dist_matrix < relative_cutoff * 1.35)[0] is_superoxide = True elif np.any(dist_matrix < relative_cutoff * 1.49): is_peroxide = True bond_atoms = np.where(dist_matrix < relative_cutoff * 1.49)[0] if is_superoxide: if len(bond_atoms) > len(set(bond_atoms)): is_superoxide = False is_ozonide = True try: nbonds = len(set(bond_atoms)) except UnboundLocalError: nbonds = 0.0 if is_ozonide: str_oxide = "ozonide" elif is_superoxide: str_oxide = "superoxide" elif is_peroxide: str_oxide = "peroxide" else: str_oxide = "oxide" if str_oxide == "oxide": nbonds = comp["O"] return str_oxide, nbonds
[ "def", "parse_oxide", "(", "self", ")", ":", "structure", "=", "self", ".", "structure", "relative_cutoff", "=", "self", ".", "relative_cutoff", "o_sites_frac_coords", "=", "[", "]", "h_sites_frac_coords", "=", "[", "]", "lattice", "=", "structure", ".", "lattice", "if", "isinstance", "(", "structure", ".", "composition", ".", "elements", "[", "0", "]", ",", "Element", ")", ":", "comp", "=", "structure", ".", "composition", "elif", "isinstance", "(", "structure", ".", "composition", ".", "elements", "[", "0", "]", ",", "Specie", ")", ":", "elmap", "=", "collections", ".", "defaultdict", "(", "float", ")", "for", "site", "in", "structure", ":", "for", "species", ",", "occu", "in", "site", ".", "species", ".", "items", "(", ")", ":", "elmap", "[", "species", ".", "element", "]", "+=", "occu", "comp", "=", "Composition", "(", "elmap", ")", "if", "Element", "(", "\"O\"", ")", "not", "in", "comp", "or", "comp", ".", "is_element", ":", "return", "\"None\"", ",", "0", "for", "site", "in", "structure", ":", "syms", "=", "[", "sp", ".", "symbol", "for", "sp", "in", "site", ".", "species", ".", "keys", "(", ")", "]", "if", "\"O\"", "in", "syms", ":", "o_sites_frac_coords", ".", "append", "(", "site", ".", "frac_coords", ")", "if", "\"H\"", "in", "syms", ":", "h_sites_frac_coords", ".", "append", "(", "site", ".", "frac_coords", ")", "if", "h_sites_frac_coords", ":", "dist_matrix", "=", "lattice", ".", "get_all_distances", "(", "o_sites_frac_coords", ",", "h_sites_frac_coords", ")", "if", "np", ".", "any", "(", "dist_matrix", "<", "relative_cutoff", "*", "0.93", ")", ":", "return", "\"hydroxide\"", ",", "len", "(", "np", ".", "where", "(", "dist_matrix", "<", "relative_cutoff", "*", "0.93", ")", "[", "0", "]", ")", "/", "2.0", "dist_matrix", "=", "lattice", ".", "get_all_distances", "(", "o_sites_frac_coords", ",", "o_sites_frac_coords", ")", "np", ".", "fill_diagonal", "(", "dist_matrix", ",", "1000", ")", "is_superoxide", "=", "False", "is_peroxide", "=", "False", "is_ozonide", "=", "False", "if", "np", ".", "any", "(", "dist_matrix", "<", "relative_cutoff", "*", "1.35", ")", ":", "bond_atoms", "=", "np", ".", "where", "(", "dist_matrix", "<", "relative_cutoff", "*", "1.35", ")", "[", "0", "]", "is_superoxide", "=", "True", "elif", "np", ".", "any", "(", "dist_matrix", "<", "relative_cutoff", "*", "1.49", ")", ":", "is_peroxide", "=", "True", "bond_atoms", "=", "np", ".", "where", "(", "dist_matrix", "<", "relative_cutoff", "*", "1.49", ")", "[", "0", "]", "if", "is_superoxide", ":", "if", "len", "(", "bond_atoms", ")", ">", "len", "(", "set", "(", "bond_atoms", ")", ")", ":", "is_superoxide", "=", "False", "is_ozonide", "=", "True", "try", ":", "nbonds", "=", "len", "(", "set", "(", "bond_atoms", ")", ")", "except", "UnboundLocalError", ":", "nbonds", "=", "0.0", "if", "is_ozonide", ":", "str_oxide", "=", "\"ozonide\"", "elif", "is_superoxide", ":", "str_oxide", "=", "\"superoxide\"", "elif", "is_peroxide", ":", "str_oxide", "=", "\"peroxide\"", "else", ":", "str_oxide", "=", "\"oxide\"", "if", "str_oxide", "==", "\"oxide\"", ":", "nbonds", "=", "comp", "[", "\"O\"", "]", "return", "str_oxide", ",", "nbonds" ]
39
15.873239
def get_extensions_assigned_to_user(self, user_id): """GetExtensionsAssignedToUser. [Preview API] Returns extensions that are currently assigned to the user in the account :param str user_id: The user's identity id. :rtype: {LicensingSource} """ route_values = {} if user_id is not None: route_values['userId'] = self._serialize.url('user_id', user_id, 'str') response = self._send(http_method='GET', location_id='8cec75ea-044f-4245-ab0d-a82dafcc85ea', version='5.0-preview.1', route_values=route_values) return self._deserialize('{LicensingSource}', self._unwrap_collection(response))
[ "def", "get_extensions_assigned_to_user", "(", "self", ",", "user_id", ")", ":", "route_values", "=", "{", "}", "if", "user_id", "is", "not", "None", ":", "route_values", "[", "'userId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'user_id'", ",", "user_id", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'8cec75ea-044f-4245-ab0d-a82dafcc85ea'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ")", "return", "self", ".", "_deserialize", "(", "'{LicensingSource}'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
53.285714
19.857143
def get_unverified_claims(token):
    """Returns the decoded claims without verification of any kind.

    Args:
        token (str): A signed JWT to decode the headers from.

    Returns:
        dict: The dict representation of the token claims.

    Raises:
        JWTError: If there is an exception decoding the token.
    """
    try:
        claims = jws.get_unverified_claims(token)
    except Exception:
        raise JWTError('Error decoding token claims.')

    try:
        claims = json.loads(claims.decode('utf-8'))
    except ValueError as e:
        raise JWTError('Invalid claims string: %s' % e)

    if not isinstance(claims, Mapping):
        raise JWTError('Invalid claims string: must be a json object')

    return claims
[ "def", "get_unverified_claims", "(", "token", ")", ":", "try", ":", "claims", "=", "jws", ".", "get_unverified_claims", "(", "token", ")", "except", ":", "raise", "JWTError", "(", "'Error decoding token claims.'", ")", "try", ":", "claims", "=", "json", ".", "loads", "(", "claims", ".", "decode", "(", "'utf-8'", ")", ")", "except", "ValueError", "as", "e", ":", "raise", "JWTError", "(", "'Invalid claims string: %s'", "%", "e", ")", "if", "not", "isinstance", "(", "claims", ",", "Mapping", ")", ":", "raise", "JWTError", "(", "'Invalid claims string: must be a json object'", ")", "return", "claims" ]
27.269231
23.269231
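"Unverified" is literal: the payload segment of a JWT is just base64url-encoded JSON. A standard-library-only sketch of the same idea (the token below is hand-built, with a bogus signature):

import base64, json

def peek_claims(token):
    payload = token.split('.')[1]
    payload += '=' * (-len(payload) % 4)      # restore stripped padding
    return json.loads(base64.urlsafe_b64decode(payload))

tok = ('eyJhbGciOiJIUzI1NiJ9.'                # header {"alg":"HS256"}
       'eyJzdWIiOiJhbGljZSJ9.'                # payload {"sub":"alice"}
       'bogus-signature')
print(peek_claims(tok))                       # {'sub': 'alice'}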
def _chooseBestSegmentPerCell(cls, connections, cells, allMatchingSegments, potentialOverlaps): """ For each specified cell, choose its matching segment with largest number of active potential synapses. When there's a tie, the first segment wins. @param connections (SparseMatrixConnections) @param cells (numpy array) @param allMatchingSegments (numpy array) @param potentialOverlaps (numpy array) @return (numpy array) One segment per cell """ candidateSegments = connections.filterSegmentsByCell(allMatchingSegments, cells) # Narrow it down to one pair per cell. onePerCellFilter = np2.argmaxMulti(potentialOverlaps[candidateSegments], connections.mapSegmentsToCells( candidateSegments)) learningSegments = candidateSegments[onePerCellFilter] return learningSegments
[ "def", "_chooseBestSegmentPerCell", "(", "cls", ",", "connections", ",", "cells", ",", "allMatchingSegments", ",", "potentialOverlaps", ")", ":", "candidateSegments", "=", "connections", ".", "filterSegmentsByCell", "(", "allMatchingSegments", ",", "cells", ")", "# Narrow it down to one pair per cell.", "onePerCellFilter", "=", "np2", ".", "argmaxMulti", "(", "potentialOverlaps", "[", "candidateSegments", "]", ",", "connections", ".", "mapSegmentsToCells", "(", "candidateSegments", ")", ")", "learningSegments", "=", "candidateSegments", "[", "onePerCellFilter", "]", "return", "learningSegments" ]
38.285714
19.357143
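np2.argmaxMulti is a nupic helper. A plain-NumPy sketch of "argmax within each group", assuming the group array arrives sorted (as segments grouped by cell would be) and resolving ties to the first occurrence, per the docstring above:

import numpy as np

def argmax_multi(values, groups):
    values = np.asarray(values)
    groups = np.asarray(groups)
    starts = np.flatnonzero(np.r_[True, groups[1:] != groups[:-1]])
    ends = np.r_[starts[1:], len(groups)]
    return np.array([s + np.argmax(values[s:e]) for s, e in zip(starts, ends)])

overlaps = np.array([3, 7, 7, 2, 9])
cells = np.array([0, 0, 0, 4, 4])        # one entry per candidate segment
print(argmax_multi(overlaps, cells))     # [1 4] -> first max wins the tie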
def render_template(template_name_or_list, **context): """Renders a template from the template folder with the given context. :param template_name_or_list: the name of the template to be rendered, or an iterable with template names the first one existing will be rendered :param context: the variables that should be available in the context of the template. """ ctx = _app_ctx_stack.top ctx.app.update_template_context(context) return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list), context, ctx.app)
[ "def", "render_template", "(", "template_name_or_list", ",", "*", "*", "context", ")", ":", "ctx", "=", "_app_ctx_stack", ".", "top", "ctx", ".", "app", ".", "update_template_context", "(", "context", ")", "return", "_render", "(", "ctx", ".", "app", ".", "jinja_env", ".", "get_or_select_template", "(", "template_name_or_list", ")", ",", "context", ",", "ctx", ".", "app", ")" ]
46.642857
19.214286
def set_title(self, table=None, title=None, verbose=None):
    """
    Changes the visible identifier of a single table.

    :param table (string, optional): Specifies a table by table name. If the
        prefix SUID: is used, the table corresponding to the SUID will be
        returned.
    :param title (string, optional): The name of the table used in the current
        network
    """
    PARAMS = set_param(['table', 'title'], [table, title])
    response = api(url=self.__url + "/set title", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
[ "def", "set_title", "(", "self", ",", "table", "=", "None", ",", "title", "=", "None", ",", "verbose", "=", "None", ")", ":", "PARAMS", "=", "set_param", "(", "[", "'table'", ",", "'title'", "]", ",", "[", "table", ",", "title", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/set title\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"POST\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
45.230769
25.692308