Dataset schema (one Python function per row):

- id: int32 (0 to 252k)
- repo: string (7 to 55 chars)
- path: string (4 to 127 chars)
- func_name: string (1 to 88 chars)
- original_string: string (75 to 19.8k chars)
- language: 1 class (value: python)
- code: string (75 to 19.8k chars)
- code_tokens: sequence
- docstring: string (3 to 17.3k chars)
- docstring_tokens: sequence
- sha: string (40 chars)
- url: string (87 to 242 chars)

id: 251600 | repo: openvax/sercol | path: sercol/collection.py | func_name: Collection.clone_with_new_elements | language: python
sha: e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | url: https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L74-L96

```python
def clone_with_new_elements(
        self,
        new_elements,
        drop_keywords=set([]),
        rename_dict={},
        extra_kwargs={}):
    """
    Create another Collection of the same class and with the same state but
    possibly different entries. Extra parameters to control which keyword
    arguments get passed to the initializer are necessary since derived
    classes have different constructors than the base class.
    """
    kwargs = dict(
        elements=new_elements,
        distinct=self.distinct,
        sort_key=self.sort_key,
        sources=self.sources)
    for name in drop_keywords:
        kwargs.pop(name)
    for old_name, new_name in rename_dict.items():
        kwargs[new_name] = kwargs.pop(old_name)
    kwargs.update(extra_kwargs)
    return self.__class__(**kwargs)
```
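
A minimal, self-contained sketch of the kwarg-rewriting pattern this method enables; the `Base` and `Derived` classes here are hypothetical stand-ins, not sercol's real classes:

```python
class Base(object):
    def __init__(self, elements, distinct=False):
        self.elements, self.distinct = elements, distinct

    def clone_with_new_elements(self, new_elements, drop_keywords=(),
                                rename_dict=None, extra_kwargs=None):
        kwargs = dict(elements=new_elements, distinct=self.distinct)
        for name in drop_keywords:          # drop kwargs the subclass lacks
            kwargs.pop(name)
        for old, new in (rename_dict or {}).items():
            kwargs[new] = kwargs.pop(old)   # rename kwargs the subclass renamed
        kwargs.update(extra_kwargs or {})   # add subclass-only kwargs
        return self.__class__(**kwargs)

class Derived(Base):
    # constructor differs from Base: `elements` is renamed to `items`
    # and there is no `distinct` parameter
    def __init__(self, items, label=None):
        super(Derived, self).__init__(items)
        self.label = label

d2 = Derived([1, 2, 3]).clone_with_new_elements(
    [4, 5],
    drop_keywords={'distinct'},
    rename_dict={'elements': 'items'},
    extra_kwargs={'label': 'filtered'})
print(d2.elements, d2.label)  # [4, 5] filtered
```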

id: 251601 | repo: openvax/sercol | path: sercol/collection.py | func_name: Collection.source | language: python
sha: e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | url: https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L99-L108

```python
def source(self):
    """
    Returns the single source name for a variant collection if it is unique,
    otherwise raises an error.
    """
    if len(self.sources) == 0:
        raise ValueError("No source associated with %s" % self.__class__.__name__)
    elif len(self.sources) > 1:
        raise ValueError("Multiple sources for %s" % self.__class__.__name__)
    return list(self.sources)[0]
```
"""
Returns the single source name for a variant collection if it is unique,
otherwise raises an error.
"""
if len(self.sources) == 0:
raise ValueError("No source associated with %s" % self.__class__.__name__)
elif len(self.sources) > 1:
raise ValueError("Multiple sources for %s" % self.__class__.__name__)
return list(self.sources)[0] | [
"def",
"source",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"sources",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"No source associated with %s\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")",
"elif",
"len",
"(",
"self",
".",
"sources",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Multiple sources for %s\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")",
"return",
"list",
"(",
"self",
".",
"sources",
")",
"[",
"0",
"]"
] | Returns the single source name for a variant collection if it is unique,
otherwise raises an error. | [
"Returns",
"the",
"single",
"source",
"name",
"for",
"a",
"variant",
"collection",
"if",
"it",
"is",
"unique",
"otherwise",
"raises",
"an",
"error",
"."
] | e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L99-L108 |

id: 251602 | repo: openvax/sercol | path: sercol/collection.py | func_name: Collection.filenames | language: python
sha: e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | url: https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L119-L124

```python
def filenames(self):
    """
    Assuming sources are paths to VCF or MAF files, trim their directory
    path and return just the file names.
    """
    return [os.path.basename(source) for source in self.sources if source]
```
"""
Assuming sources are paths to VCF or MAF files, trim their directory
path and return just the file names.
"""
return [os.path.basename(source) for source in self.sources if source] | [
"def",
"filenames",
"(",
"self",
")",
":",
"return",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"source",
")",
"for",
"source",
"in",
"self",
".",
"sources",
"if",
"source",
"]"
] | Assuming sources are paths to VCF or MAF files, trim their directory
path and return just the file names. | [
"Assuming",
"sources",
"are",
"paths",
"to",
"VCF",
"or",
"MAF",
"files",
"trim",
"their",
"directory",
"path",
"and",
"return",
"just",
"the",
"file",
"names",
"."
] | e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L119-L124 |

id: 251603 | repo: openvax/sercol | path: sercol/collection.py | func_name: Collection.short_string | language: python
sha: e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | url: https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L134-L145

```python
def short_string(self):
    """
    Compact string representation which doesn't print any of the
    collection elements.
    """
    source_str = ""
    if self.sources:
        source_str = " from '%s'" % ",".join(self.sources)
    return "<%s%s with %d elements>" % (
        self.__class__.__name__,
        source_str,
        len(self))
```
"""
Compact string representation which doesn't print any of the
collection elements.
"""
source_str = ""
if self.sources:
source_str = " from '%s'" % ",".join(self.sources)
return "<%s%s with %d elements>" % (
self.__class__.__name__,
source_str,
len(self)) | [
"def",
"short_string",
"(",
"self",
")",
":",
"source_str",
"=",
"\"\"",
"if",
"self",
".",
"sources",
":",
"source_str",
"=",
"\" from '%s'\"",
"%",
"\",\"",
".",
"join",
"(",
"self",
".",
"sources",
")",
"return",
"\"<%s%s with %d elements>\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"source_str",
",",
"len",
"(",
"self",
")",
")"
] | Compact string representation which doesn't print any of the
collection elements. | [
"Compact",
"string",
"representation",
"which",
"doesn",
"t",
"print",
"any",
"of",
"the",
"collection",
"elements",
"."
] | e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L134-L145 |

id: 251604 | repo: openvax/sercol | path: sercol/collection.py | func_name: Collection.multi_groupby | language: python
sha: e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | url: https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L214-L230

```python
def multi_groupby(self, key_fn):
    """
    Like a groupby, but expects the key_fn to return multiple keys for
    each element.
    """
    result_dict = defaultdict(list)
    for x in self:
        for key in key_fn(x):
            result_dict[key].append(x)
    # convert result lists into same Collection type as this one
    return {
        k: self.clone_with_new_elements(elements)
        for (k, elements)
        in result_dict.items()
    }
```
"""
Like a groupby but expect the key_fn to return multiple keys for
each element.
"""
result_dict = defaultdict(list)
for x in self:
for key in key_fn(x):
result_dict[key].append(x)
# convert result lists into same Collection type as this one
return {
k: self.clone_with_new_elements(elements)
for (k, elements)
in result_dict.items()
} | [
"def",
"multi_groupby",
"(",
"self",
",",
"key_fn",
")",
":",
"result_dict",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"x",
"in",
"self",
":",
"for",
"key",
"in",
"key_fn",
"(",
"x",
")",
":",
"result_dict",
"[",
"key",
"]",
".",
"append",
"(",
"x",
")",
"# convert result lists into same Collection type as this one",
"return",
"{",
"k",
":",
"self",
".",
"clone_with_new_elements",
"(",
"elements",
")",
"for",
"(",
"k",
",",
"elements",
")",
"in",
"result_dict",
".",
"items",
"(",
")",
"}"
] | Like a groupby but expect the key_fn to return multiple keys for
each element. | [
"Like",
"a",
"groupby",
"but",
"expect",
"the",
"key_fn",
"to",
"return",
"multiple",
"keys",
"for",
"each",
"element",
"."
] | e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L214-L230 |
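
For intuition, here is the grouping pattern on plain data, outside the Collection machinery: one element can land in several groups because the key function yields several keys. The records below are made up for this sketch:

```python
from collections import defaultdict

records = [
    {'name': 'BRCA1', 'tags': ['cancer', 'dna-repair']},
    {'name': 'TP53', 'tags': ['cancer']},
]
groups = defaultdict(list)
for rec in records:
    for tag in rec['tags']:        # like key_fn(x) returning multiple keys
        groups[tag].append(rec)

print(sorted(groups))                         # ['cancer', 'dna-repair']
print([r['name'] for r in groups['cancer']])  # ['BRCA1', 'TP53']
```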

id: 251605 | repo: openvax/sercol | path: sercol/collection.py | func_name: Collection.filter_above_threshold | language: python
sha: e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | url: https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L232-L262

```python
def filter_above_threshold(
        self,
        key_fn,
        value_dict,
        threshold,
        default_value=0.0):
    """The code for filtering by gene or transcript expression was pretty
    much identical aside from which identifier you pull off an effect.
    So, the common operations for filtering an effect collection were
    factored out into this helper method.

    Parameters
    ----------
    key_fn : callable
        Given an element of this collection, returns a key into `value_dict`

    value_dict : dict
        Dict from keys returned by `key_fn` to float values

    threshold : float
        Only keep elements whose value in `value_dict` is above this
        threshold.

    default_value : float
        Value to use for elements whose key is not in `value_dict`
    """
    def filter_fn(x):
        key = key_fn(x)
        value = value_dict.get(key, default_value)
        return value > threshold
    return self.filter(filter_fn)
```
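
Stripped of the Collection machinery, the keep/drop decision is just a dict lookup with a default; the expression values below are made up:

```python
# Keep items whose looked-up value clears the threshold; absent keys fall
# back to default_value (0.0 here), so they are dropped for any threshold > 0.
expression = {'ENSG01': 5.2, 'ENSG02': 0.1}   # made-up expression values
gene_ids = ['ENSG01', 'ENSG02', 'ENSG03']     # ENSG03 is absent -> 0.0

kept = [g for g in gene_ids if expression.get(g, 0.0) > 1.0]
print(kept)  # ['ENSG01']
```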

id: 251606 | repo: openvax/sercol | path: sercol/collection.py | func_name: Collection.filter_any_above_threshold | language: python
sha: e66a8e8c3c0b21e53eb8f73be4d23409fab311ae | url: https://github.com/openvax/sercol/blob/e66a8e8c3c0b21e53eb8f73be4d23409fab311ae/sercol/collection.py#L264-L296

```python
def filter_any_above_threshold(
        self,
        multi_key_fn,
        value_dict,
        threshold,
        default_value=0.0):
    """Like filter_above_threshold, but `multi_key_fn` returns multiple
    keys and the element is kept if any of them have a value above
    the given threshold.

    Parameters
    ----------
    multi_key_fn : callable
        Given an element of this collection, returns multiple keys
        into `value_dict`

    value_dict : dict
        Dict from keys returned by `multi_key_fn` to float values

    threshold : float
        Only keep elements whose value in `value_dict` is above this
        threshold.

    default_value : float
        Value to use for elements whose key is not in `value_dict`
    """
    def filter_fn(x):
        for key in multi_key_fn(x):
            value = value_dict.get(key, default_value)
            if value > threshold:
                return True
        return False
    return self.filter(filter_fn)
```
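
The "any" variant keeps an element as soon as one of its keys clears the threshold; the keys and values below are made up:

```python
expression = {'TX1': 0.2, 'TX2': 3.4}            # made-up per-transcript values
gene_to_transcripts = {'geneA': ['TX1', 'TX2'],  # TX2 clears the threshold
                       'geneB': ['TX1']}         # nothing clears it

kept = [g for g, txs in gene_to_transcripts.items()
        if any(expression.get(t, 0.0) > 1.0 for t in txs)]
print(sorted(kept))  # ['geneA']
```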

id: 251607 | repo: hapylestat/apputils | path: apputils/net/curl.py | func_name: curl | language: python
sha: 5d185616feda27e6e21273307161471ef11a3518 | url: https://github.com/hapylestat/apputils/blob/5d185616feda27e6e21273307161471ef11a3518/apputils/net/curl.py#L201-L279

```python
def curl(url, params=None, auth=None, req_type='GET', data=None,
         headers=None, timeout=None, use_gzip=True, use_stream=False):
    """
    Make a request to a web resource

    :param url: Url to endpoint
    :param params: dict of params appended after "?"
    :param auth: authorization tokens
    :param req_type: type of the request
    :param data: data which need to be posted
    :param headers: headers which would be posted with request
    :param timeout: Request timeout
    :param use_gzip: Accept gzip and deflate response from the server
    :param use_stream: Do not parse content of response and stream it via raw property
    :return Response object

    :type url str
    :type params dict
    :type auth CURLAuth
    :type req_type str
    :type headers dict
    :type timeout int
    :type use_gzip bool
    :type use_stream bool
    :rtype CURLResponse
    """
    post_req = ["POST", "PUT"]
    get_req = ["GET", "DELETE"]

    if params is not None:
        url += "?" + urlencode(params)

    if req_type not in post_req + get_req:
        raise IOError("Wrong request type \"%s\" passed" % req_type)

    _headers = {}
    handler_chain = []
    req_args = {
        "headers": _headers
    }

    # process content
    if req_type in post_req and data is not None:
        _data, __header = __parse_content(data)
        _headers.update(__header)
        _headers["Content-Length"] = len(_data)
        req_args["data"] = _data

    # process gzip and deflate
    if use_gzip:
        if "Accept-Encoding" in _headers:
            if "gzip" not in _headers["Accept-Encoding"]:
                _headers["Accept-Encoding"] += ", gzip, x-gzip, deflate"
        else:
            _headers["Accept-Encoding"] = "gzip, x-gzip, deflate"

    if auth is not None and auth.force is False:
        manager = HTTPPasswordMgrWithDefaultRealm()
        manager.add_password(None, url, auth.user, auth.password)
        handler_chain.append(HTTPBasicAuthHandler(manager))

    if auth is not None and auth.force:
        _headers.update(auth.headers)

    if headers is not None:
        _headers.update(headers)

    director = build_opener(*handler_chain)
    req = Request(url, **req_args)
    req.get_method = lambda: req_type

    try:
        if timeout is not None:
            return CURLResponse(director.open(req, timeout=timeout), is_stream=use_stream)
        return CURLResponse(director.open(req), is_stream=use_stream)
    except URLError as e:
        if isinstance(e, HTTPError):
            raise e
        raise TimeoutError
```
"""
Make request to web resource
:param url: Url to endpoint
:param params: list of params after "?"
:param auth: authorization tokens
:param req_type: column_type of the request
:param data: data which need to be posted
:param headers: headers which would be posted with request
:param timeout: Request timeout
:param use_gzip: Accept gzip and deflate response from the server
:param use_stream: Do not parse content of response ans stream it via raw property
:return Response object
:type url str
:type params dict
:type auth CURLAuth
:type req_type str
:type headers dict
:type timeout int
:type use_gzip bool
:type use_stream bool
:rtype CURLResponse
"""
post_req = ["POST", "PUT"]
get_req = ["GET", "DELETE"]
if params is not None:
url += "?" + urlencode(params)
if req_type not in post_req + get_req:
raise IOError("Wrong request column_type \"%s\" passed" % req_type)
_headers = {}
handler_chain = []
req_args = {
"headers": _headers
}
# process content
if req_type in post_req and data is not None:
_data, __header = __parse_content(data)
_headers.update(__header)
_headers["Content-Length"] = len(_data)
req_args["data"] = _data
# process gzip and deflate
if use_gzip:
if "Accept-Encoding" in _headers:
if "gzip" not in _headers["Accept-Encoding"]:
_headers["Accept-Encoding"] += ", gzip, x-gzip, deflate"
else:
_headers["Accept-Encoding"] = "gzip, x-gzip, deflate"
if auth is not None and auth.force is False:
manager = HTTPPasswordMgrWithDefaultRealm()
manager.add_password(None, url, auth.user, auth.password)
handler_chain.append(HTTPBasicAuthHandler(manager))
if auth is not None and auth.force:
_headers.update(auth.headers)
if headers is not None:
_headers.update(headers)
director = build_opener(*handler_chain)
req = Request(url, **req_args)
req.get_method = lambda: req_type
try:
if timeout is not None:
return CURLResponse(director.open(req, timeout=timeout), is_stream=use_stream)
else:
return CURLResponse(director.open(req), is_stream=use_stream)
except URLError as e:
if isinstance(e, HTTPError):
raise e
else:
raise TimeoutError | [
"def",
"curl",
"(",
"url",
",",
"params",
"=",
"None",
",",
"auth",
"=",
"None",
",",
"req_type",
"=",
"'GET'",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"use_gzip",
"=",
"True",
",",
"use_stream",
"=",
"False",
")",
":",
"post_req",
"=",
"[",
"\"POST\"",
",",
"\"PUT\"",
"]",
"get_req",
"=",
"[",
"\"GET\"",
",",
"\"DELETE\"",
"]",
"if",
"params",
"is",
"not",
"None",
":",
"url",
"+=",
"\"?\"",
"+",
"urlencode",
"(",
"params",
")",
"if",
"req_type",
"not",
"in",
"post_req",
"+",
"get_req",
":",
"raise",
"IOError",
"(",
"\"Wrong request column_type \\\"%s\\\" passed\"",
"%",
"req_type",
")",
"_headers",
"=",
"{",
"}",
"handler_chain",
"=",
"[",
"]",
"req_args",
"=",
"{",
"\"headers\"",
":",
"_headers",
"}",
"# process content",
"if",
"req_type",
"in",
"post_req",
"and",
"data",
"is",
"not",
"None",
":",
"_data",
",",
"__header",
"=",
"__parse_content",
"(",
"data",
")",
"_headers",
".",
"update",
"(",
"__header",
")",
"_headers",
"[",
"\"Content-Length\"",
"]",
"=",
"len",
"(",
"_data",
")",
"req_args",
"[",
"\"data\"",
"]",
"=",
"_data",
"# process gzip and deflate",
"if",
"use_gzip",
":",
"if",
"\"Accept-Encoding\"",
"in",
"_headers",
":",
"if",
"\"gzip\"",
"not",
"in",
"_headers",
"[",
"\"Accept-Encoding\"",
"]",
":",
"_headers",
"[",
"\"Accept-Encoding\"",
"]",
"+=",
"\", gzip, x-gzip, deflate\"",
"else",
":",
"_headers",
"[",
"\"Accept-Encoding\"",
"]",
"=",
"\"gzip, x-gzip, deflate\"",
"if",
"auth",
"is",
"not",
"None",
"and",
"auth",
".",
"force",
"is",
"False",
":",
"manager",
"=",
"HTTPPasswordMgrWithDefaultRealm",
"(",
")",
"manager",
".",
"add_password",
"(",
"None",
",",
"url",
",",
"auth",
".",
"user",
",",
"auth",
".",
"password",
")",
"handler_chain",
".",
"append",
"(",
"HTTPBasicAuthHandler",
"(",
"manager",
")",
")",
"if",
"auth",
"is",
"not",
"None",
"and",
"auth",
".",
"force",
":",
"_headers",
".",
"update",
"(",
"auth",
".",
"headers",
")",
"if",
"headers",
"is",
"not",
"None",
":",
"_headers",
".",
"update",
"(",
"headers",
")",
"director",
"=",
"build_opener",
"(",
"*",
"handler_chain",
")",
"req",
"=",
"Request",
"(",
"url",
",",
"*",
"*",
"req_args",
")",
"req",
".",
"get_method",
"=",
"lambda",
":",
"req_type",
"try",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"return",
"CURLResponse",
"(",
"director",
".",
"open",
"(",
"req",
",",
"timeout",
"=",
"timeout",
")",
",",
"is_stream",
"=",
"use_stream",
")",
"else",
":",
"return",
"CURLResponse",
"(",
"director",
".",
"open",
"(",
"req",
")",
",",
"is_stream",
"=",
"use_stream",
")",
"except",
"URLError",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"HTTPError",
")",
":",
"raise",
"e",
"else",
":",
"raise",
"TimeoutError"
] | Make request to web resource
:param url: Url to endpoint
:param params: list of params after "?"
:param auth: authorization tokens
:param req_type: column_type of the request
:param data: data which need to be posted
:param headers: headers which would be posted with request
:param timeout: Request timeout
:param use_gzip: Accept gzip and deflate response from the server
:param use_stream: Do not parse content of response ans stream it via raw property
:return Response object
:type url str
:type params dict
:type auth CURLAuth
:type req_type str
:type headers dict
:type timeout int
:type use_gzip bool
:type use_stream bool
:rtype CURLResponse | [
"Make",
"request",
"to",
"web",
"resource"
] | 5d185616feda27e6e21273307161471ef11a3518 | https://github.com/hapylestat/apputils/blob/5d185616feda27e6e21273307161471ef11a3518/apputils/net/curl.py#L201-L279 |
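
A hedged usage sketch: the import path follows the repo layout in the row header, the endpoint URL is a placeholder, and `CURLAuth`'s constructor arguments are a guess inferred from the `auth.user` / `auth.password` attributes the function reads:

```python
from apputils.net.curl import curl, CURLAuth  # CURLAuth constructor is assumed below

# Plain GET with query parameters and a timeout.
resp = curl('https://api.example.com/items', params={'page': 1}, timeout=10)

# POST with basic auth; CURLAuth(user, password) is an assumption about the
# signature, based only on the attributes curl() accesses above.
resp = curl('https://api.example.com/items',
            req_type='POST',
            data={'name': 'widget'},
            auth=CURLAuth('user', 'secret'),
            timeout=10)
```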

id: 251608 | repo: msuozzo/Aduro | path: aduro/events.py | func_name: AddEvent.from_str | language: python
sha: 338eeb1deeff30c198e721b660ae4daca3660911 | url: https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/events.py#L69-L76

```python
def from_str(string):
    """Generate an `AddEvent` object from a string
    """
    match = re.match(r'^ADD (\w+)$', string)
    if match:
        return AddEvent(match.group(1))
    else:
        raise EventParseError
```
"""Generate a `AddEvent` object from a string
"""
match = re.match(r'^ADD (\w+)$', string)
if match:
return AddEvent(match.group(1))
else:
raise EventParseError | [
"def",
"from_str",
"(",
"string",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^ADD (\\w+)$'",
",",
"string",
")",
"if",
"match",
":",
"return",
"AddEvent",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"else",
":",
"raise",
"EventParseError"
] | Generate a `AddEvent` object from a string | [
"Generate",
"a",
"AddEvent",
"object",
"from",
"a",
"string"
] | 338eeb1deeff30c198e721b660ae4daca3660911 | https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/events.py#L69-L76 |
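
All three `from_str` parsers in this file (this one and the two below) follow the same shape: match the serialized form with a regex, construct the event on success, raise `EventParseError` otherwise. A standalone sketch of that round trip, with a made-up event string:

```python
import re

class EventParseError(Exception):
    """Stand-in for aduro.events.EventParseError in this sketch."""

def parse_add(string):
    # match the serialized form; group(1) is the book identifier
    match = re.match(r'^ADD (\w+)$', string)
    if match:
        return ('ADD', match.group(1))
    raise EventParseError(string)

print(parse_add('ADD B00EXAMPLE'))  # ('ADD', 'B00EXAMPLE')
```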

id: 251609 | repo: msuozzo/Aduro | path: aduro/events.py | func_name: SetReadingEvent.from_str | language: python
sha: 338eeb1deeff30c198e721b660ae4daca3660911 | url: https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/events.py#L94-L101

```python
def from_str(string):
    """Generate a `SetReadingEvent` object from a string
    """
    match = re.match(r'^START READING (\w+) FROM \w+ (\d+)$', string)
    if match:
        return SetReadingEvent(match.group(1), int(match.group(2)))
    else:
        raise EventParseError
```
"""Generate a `SetReadingEvent` object from a string
"""
match = re.match(r'^START READING (\w+) FROM \w+ (\d+)$', string)
if match:
return SetReadingEvent(match.group(1), int(match.group(2)))
else:
raise EventParseError | [
"def",
"from_str",
"(",
"string",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^START READING (\\w+) FROM \\w+ (\\d+)$'",
",",
"string",
")",
"if",
"match",
":",
"return",
"SetReadingEvent",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"else",
":",
"raise",
"EventParseError"
] | Generate a `SetReadingEvent` object from a string | [
"Generate",
"a",
"SetReadingEvent",
"object",
"from",
"a",
"string"
] | 338eeb1deeff30c198e721b660ae4daca3660911 | https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/events.py#L94-L101 |

id: 251610 | repo: msuozzo/Aduro | path: aduro/events.py | func_name: ReadEvent.from_str | language: python
sha: 338eeb1deeff30c198e721b660ae4daca3660911 | url: https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/events.py#L121-L128

```python
def from_str(string):
    """Generate a `ReadEvent` object from a string
    """
    match = re.match(r'^READ (\w+) FOR (\d+) \w+S$', string)
    if match:
        return ReadEvent(match.group(1), int(match.group(2)))
    else:
        raise EventParseError
```
"""Generate a `ReadEvent` object from a string
"""
match = re.match(r'^READ (\w+) FOR (\d+) \w+S$', string)
if match:
return ReadEvent(match.group(1), int(match.group(2)))
else:
raise EventParseError | [
"def",
"from_str",
"(",
"string",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^READ (\\w+) FOR (\\d+) \\w+S$'",
",",
"string",
")",
"if",
"match",
":",
"return",
"ReadEvent",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"else",
":",
"raise",
"EventParseError"
] | Generate a `ReadEvent` object from a string | [
"Generate",
"a",
"ReadEvent",
"object",
"from",
"a",
"string"
] | 338eeb1deeff30c198e721b660ae4daca3660911 | https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/events.py#L121-L128 |

id: 251611 | repo: abe-winter/pg13-py | path: pg13/table.py | func_name: field_default | language: python
sha: c78806f99f35541a8756987e86edca3438aa97f5 | url: https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/table.py#L31-L37

```python
def field_default(colx, table_name, tables_dict):
    "takes sqparse2.ColX, Table"
    if colx.coltp.type.lower() == 'serial':
        x = sqparse2.parse('select coalesce(max(%s),-1)+1 from %s' % (colx.name, table_name))
        return sqex.run_select(x, tables_dict, Table)[0]
    elif colx.not_null:
        raise NotImplementedError('todo: not_null error')
    else:
        return toliteral(colx.default)
```
"takes sqparse2.ColX, Table"
if colx.coltp.type.lower() == 'serial':
x = sqparse2.parse('select coalesce(max(%s),-1)+1 from %s' % (colx.name, table_name))
return sqex.run_select(x, tables_dict, Table)[0]
elif colx.not_null: raise NotImplementedError('todo: not_null error')
else: return toliteral(colx.default) | [
"def",
"field_default",
"(",
"colx",
",",
"table_name",
",",
"tables_dict",
")",
":",
"if",
"colx",
".",
"coltp",
".",
"type",
".",
"lower",
"(",
")",
"==",
"'serial'",
":",
"x",
"=",
"sqparse2",
".",
"parse",
"(",
"'select coalesce(max(%s),-1)+1 from %s'",
"%",
"(",
"colx",
".",
"name",
",",
"table_name",
")",
")",
"return",
"sqex",
".",
"run_select",
"(",
"x",
",",
"tables_dict",
",",
"Table",
")",
"[",
"0",
"]",
"elif",
"colx",
".",
"not_null",
":",
"raise",
"NotImplementedError",
"(",
"'todo: not_null error'",
")",
"else",
":",
"return",
"toliteral",
"(",
"colx",
".",
"default",
")"
] | takes sqparse2.ColX, Table | [
"takes",
"sqparse2",
".",
"ColX",
"Table"
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/table.py#L31-L37 |

id: 251612 | repo: abe-winter/pg13-py | path: pg13/table.py | func_name: Table.apply_defaults | language: python
sha: c78806f99f35541a8756987e86edca3438aa97f5 | url: https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/table.py#L72-L77

```python
def apply_defaults(self, row, tables_dict):
    "apply defaults to missing cols for a row that's being inserted"
    return [
        emergency_cast(colx, field_default(colx, self.name, tables_dict) if v is Missing else v)
        for colx, v in zip(self.fields, row)
    ]
```
"apply defaults to missing cols for a row that's being inserted"
return [
emergency_cast(colx, field_default(colx, self.name, tables_dict) if v is Missing else v)
for colx,v in zip(self.fields,row)
] | [
"def",
"apply_defaults",
"(",
"self",
",",
"row",
",",
"tables_dict",
")",
":",
"return",
"[",
"emergency_cast",
"(",
"colx",
",",
"field_default",
"(",
"colx",
",",
"self",
".",
"name",
",",
"tables_dict",
")",
"if",
"v",
"is",
"Missing",
"else",
"v",
")",
"for",
"colx",
",",
"v",
"in",
"zip",
"(",
"self",
".",
"fields",
",",
"row",
")",
"]"
] | apply defaults to missing cols for a row that's being inserted | [
"apply",
"defaults",
"to",
"missing",
"cols",
"for",
"a",
"row",
"that",
"s",
"being",
"inserted"
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/table.py#L72-L77 |

id: 251613 | repo: JHowell45/helium-cli | path: helium/helium_functions/convert_youtube_url.py | func_name: convert_youtube_url | language: python
sha: 8decc2f410a17314440eeed411a4b19dd4b4e780 | url: https://github.com/JHowell45/helium-cli/blob/8decc2f410a17314440eeed411a4b19dd4b4e780/helium/helium_functions/convert_youtube_url.py#L9-L37

```python
def convert_youtube_url(youtube_url, no_controls, autoplay):
    """Use this function to convert the youtube URL.

    This function is used for converting the youtube URL so that it can be
    used correctly with Helium. It means that Helium will know the next
    video in the playlist.

    :param youtube_url: the URL of the youtube playlist video.
    :type youtube_url: str
    :param no_controls: whether or not to show controls in the Helium app.
    :type no_controls: bool
    :param autoplay: whether or not to play the next video in the playlist
        after the current video finishes.
    :type autoplay: bool
    :return: the new correct youtube URL.
    :rtype: str
    """
    for section in youtube_url.split('&'):
        if 'list' in section:
            playlist_id = section.split('list=')[1]
            break
    return (
        'https://www.youtube.com/embed/videoseries?{0}&{1}&'
        'loop=1&html5=1&showinfo=0&listType=playlist&list={2}'.format(
            '' if autoplay else 'autoplay=1',
            'controls=0' if no_controls else '',
            str(playlist_id)
        )
    )
```
"""Use this function to convert the youtube URL.
This function is used for converting the youtube URL so that it can be
used correctly with Helium. It means that Helium will know the next
video in the playlist.
:param youtube_url: the URL of the youtube playlist video.
:type youtube_url: str
:param no_controls: whether or not to show controls in the Helium app.
:type no_controls: bool
:param autoplay: whether or not to play the next video in the playlist
after the current video finishes.
:type autoplay: bool
:return: the new correct youtube URL.
:rtype: str
"""
for section in youtube_url.split('&'):
if 'list' in section:
playlist_id = section.split('list=')[1]
break
return (
'https://www.youtube.com/embed/videoseries?{0}&{1}&'
'loop=1&html5=1&showinfo=0&listType=playlist&list={2}'.format(
'' if autoplay else 'autoplay=1',
'controls=0' if no_controls else '',
str(playlist_id)
)
) | [
"def",
"convert_youtube_url",
"(",
"youtube_url",
",",
"no_controls",
",",
"autoplay",
")",
":",
"for",
"section",
"in",
"youtube_url",
".",
"split",
"(",
"'&'",
")",
":",
"if",
"'list'",
"in",
"section",
":",
"playlist_id",
"=",
"section",
".",
"split",
"(",
"'list='",
")",
"[",
"1",
"]",
"break",
"return",
"(",
"'https://www.youtube.com/embed/videoseries?{0}&{1}&'",
"'loop=1&html5=1&showinfo=0&listType=playlist&list={2}'",
".",
"format",
"(",
"''",
"if",
"autoplay",
"else",
"'autoplay=1'",
",",
"'controls=0'",
"if",
"no_controls",
"else",
"''",
",",
"str",
"(",
"playlist_id",
")",
")",
")"
] | Use this function to convert the youtube URL.
This function is used for converting the youtube URL so that it can be
used correctly with Helium. It means that Helium will know the next
video in the playlist.
:param youtube_url: the URL of the youtube playlist video.
:type youtube_url: str
:param no_controls: whether or not to show controls in the Helium app.
:type no_controls: bool
:param autoplay: whether or not to play the next video in the playlist
after the current video finishes.
:type autoplay: bool
:return: the new correct youtube URL.
:rtype: str | [
"Use",
"this",
"function",
"to",
"convert",
"the",
"youtube",
"URL",
"."
] | 8decc2f410a17314440eeed411a4b19dd4b4e780 | https://github.com/JHowell45/helium-cli/blob/8decc2f410a17314440eeed411a4b19dd4b4e780/helium/helium_functions/convert_youtube_url.py#L9-L37 |
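
A quick call with a made-up playlist URL shows the shape of the result (note that, as the conditional is written, passing autoplay=False is what produces the autoplay=1 flag):

```python
url = 'https://www.youtube.com/watch?v=abc123&list=PLexample456&index=1'
print(convert_youtube_url(url, no_controls=True, autoplay=False))
# https://www.youtube.com/embed/videoseries?autoplay=1&controls=0&loop=1&html5=1&showinfo=0&listType=playlist&list=PLexample456
```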

id: 251614 | repo: thomasvandoren/bugzscout-py | path: bugzscout/client.py | func_name: BugzScout.submit_error | language: python
sha: 514528e958a97e0e7b36870037c5c69661511824 | url: https://github.com/thomasvandoren/bugzscout-py/blob/514528e958a97e0e7b36870037c5c69661511824/bugzscout/client.py#L43-L88

```python
def submit_error(self, description, extra=None, default_message=None):
    """Send an error to bugzscout.

    Sends a request to the fogbugz URL for this instance. If a case exists
    with the **same** description, a new occurrence will be added to that
    case. It is advisable to remove personal info from the description for
    that reason. Account ids, emails, request ids, etc, will make the
    occurrence counting builtin to bugzscout less useful. Those values
    should go in the extra parameter, though, so the developer
    investigating the case has access to them.

    When extra is not specified, bugzscout will increase the number of
    occurrences for the case with the given description, but it will not
    include an entry for it (unless it is a new case).

    :param description: string description for error
    :param extra: string details for error
    :param default_message: string default message to return in responses
    """
    req_data = {'ScoutUserName': self.user,
                'ScoutProject': self.project,
                'ScoutArea': self.area,
                # When this matches, cases are grouped together.
                'Description': description,
                'Extra': extra,
                # 1 forces a new bug to be created.
                'ForceNewBug': 0,
                'ScoutDefaultMessage': default_message,
                # 0 sends XML response, 1 sends HTML response.
                'FriendlyResponse': 0,
                }

    LOG.debug('Making bugzscout request to {0} with body {1}'.format(
        self.url, req_data))
    resp = requests.post(self.url, data=req_data)
    LOG.debug('Response from bugzscout request: {0} body:\n{1}'.format(
        resp, resp.content))

    if resp.ok:
        LOG.info('Successfully submitted error to bugzscout.')
    else:
        LOG.warn('Failed to submit error to bugzscout: {0}'.format(
            resp.reason))
```
"""Send an error to bugzscout.
Sends a request to the fogbugz URL for this instance. If a case exists
with the **same** description, a new occurrence will be added to that
case. It is advisable to remove personal info from the description for
that reason. Account ids, emails, request ids, etc, will make the
occurrence counting builtin to bugzscout less useful. Those values
should go in the extra parameter, though, so the developer
investigating the case has access to them.
When extra is not specified, bugzscout will increase the number of
occurrences for the case with the given description, but it will not
include an entry for it (unless it is a new case).
:param description: string description for error
:param extra: string details for error
:param default_message: string default message to return in responses
"""
req_data = {'ScoutUserName': self.user,
'ScoutProject': self.project,
'ScoutArea': self.area,
# When this matches, cases are grouped together.
'Description': description,
'Extra': extra,
# 1 forces a new bug to be created.
'ForceNewBug': 0,
'ScoutDefaultMessage': default_message,
# 0 sends XML response, 1 sends HTML response.
'FriendlyResponse': 0,
}
LOG.debug('Making bugzscout request to {0} with body {1}'.format(
self.url, req_data))
resp = requests.post(self.url, data=req_data)
LOG.debug('Response from bugzscout request: {0} body:\n{1}'.format(
resp, resp.content))
if resp.ok:
LOG.info('Successfully submitted error to bugzscout.')
else:
LOG.warn('Failed to submit error to bugzscout: {0}'.format(
resp.reason)) | [
"def",
"submit_error",
"(",
"self",
",",
"description",
",",
"extra",
"=",
"None",
",",
"default_message",
"=",
"None",
")",
":",
"req_data",
"=",
"{",
"'ScoutUserName'",
":",
"self",
".",
"user",
",",
"'ScoutProject'",
":",
"self",
".",
"project",
",",
"'ScoutArea'",
":",
"self",
".",
"area",
",",
"# When this matches, cases are grouped together.",
"'Description'",
":",
"description",
",",
"'Extra'",
":",
"extra",
",",
"# 1 forces a new bug to be created.",
"'ForceNewBug'",
":",
"0",
",",
"'ScoutDefaultMessage'",
":",
"default_message",
",",
"# 0 sends XML response, 1 sends HTML response.",
"'FriendlyResponse'",
":",
"0",
",",
"}",
"LOG",
".",
"debug",
"(",
"'Making bugzscout request to {0} with body {1}'",
".",
"format",
"(",
"self",
".",
"url",
",",
"req_data",
")",
")",
"resp",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"url",
",",
"data",
"=",
"req_data",
")",
"LOG",
".",
"debug",
"(",
"'Response from bugzscout request: {0} body:\\n{1}'",
".",
"format",
"(",
"resp",
",",
"resp",
".",
"content",
")",
")",
"if",
"resp",
".",
"ok",
":",
"LOG",
".",
"info",
"(",
"'Successfully submitted error to bugzscout.'",
")",
"else",
":",
"LOG",
".",
"warn",
"(",
"'Failed to submit error to bugzscout: {0}'",
".",
"format",
"(",
"resp",
".",
"reason",
")",
")"
] | Send an error to bugzscout.
Sends a request to the fogbugz URL for this instance. If a case exists
with the **same** description, a new occurrence will be added to that
case. It is advisable to remove personal info from the description for
that reason. Account ids, emails, request ids, etc, will make the
occurrence counting builtin to bugzscout less useful. Those values
should go in the extra parameter, though, so the developer
investigating the case has access to them.
When extra is not specified, bugzscout will increase the number of
occurrences for the case with the given description, but it will not
include an entry for it (unless it is a new case).
:param description: string description for error
:param extra: string details for error
:param default_message: string default message to return in responses | [
"Send",
"an",
"error",
"to",
"bugzscout",
"."
] | 514528e958a97e0e7b36870037c5c69661511824 | https://github.com/thomasvandoren/bugzscout-py/blob/514528e958a97e0e7b36870037c5c69661511824/bugzscout/client.py#L43-L88 |
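
A hedged usage sketch. The constructor argument names and order are an assumption inferred only from the `self.url` / `self.user` / `self.project` / `self.area` attributes the method reads; the fogbugz URL is a placeholder:

```python
from bugzscout.client import BugzScout  # module path taken from the row header

# BugzScout(url, user, project, area) is assumed, not confirmed by this dump.
scout = BugzScout(
    'https://example.fogbugz.com/scoutSubmit.asp',
    'error-bot', 'MyProject', 'Backend')

scout.submit_error(
    description='ValueError in payment processor',    # stable, PII-free grouping key
    extra='account_id=1234 request_id=abcd traceback=...',
    default_message='Thanks, we are on it.')
```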

id: 251615 | repo: ajk8/microcache | path: microcache/__init__.py | func_name: init_logging | language: python
sha: 24876c2c5f8959a806e2701adb7efbf70a87a1ae | url: https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L50-L71

```python
def init_logging(stream=sys.stderr, filepath=None,
                 format='%(asctime).19s [%(levelname)s] %(name)s: %(message)s'):
    """
    Setup logging for the microcache module, but only do it once!

    :param stream: stream to log to (defaults to sys.stderr)
    :param filepath: path to a file to log to as well (defaults to None)
    :param format: override the default format with whatever you like
    """
    if not (len(logger.handlers) == 1 and isinstance(logger.handlers[0], logging.NullHandler)):
        logger.warn('logging has already been initialized, refusing to do it again')
        return
    formatter = logging.Formatter(format)
    if stream is not None:
        handler = logging.StreamHandler(stream=stream)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    if filepath is not None:
        handler = logging.FileHandler(filename=filepath)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.info('successfully initialized logger')
```
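
A usage sketch, assuming the function is importable at the package level (it lives in microcache/__init__.py per the row header); the log file path is a placeholder:

```python
import sys
from microcache import init_logging

# Log to stderr and to a file; the default format adds a timestamp and level.
init_logging(stream=sys.stderr, filepath='/tmp/microcache.log')

# A second call is refused: the guard sees the handlers added above.
init_logging()  # logs "logging has already been initialized, ..."
```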

id: 251616 | repo: ajk8/microcache | path: microcache/__init__.py | func_name: this | language: python
sha: 24876c2c5f8959a806e2701adb7efbf70a87a1ae | url: https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L354-L383

```python
def this(func, cache_obj=CACHE_OBJ, key=None, ttl=None, *args, **kwargs):
    """
    Store the output from the decorated function in the cache and pull it
    from the cache on future invocations without rerunning.

    Normally, the value will be stored under a key which takes into account
    all of the parameters that are passed into it, thereby caching different
    invocations separately. If you specify a key, all invocations will be
    cached under that key, and different invocations will return the same
    value, which may be unexpected. So, be careful!

    If the cache is disabled, the decorated function will just run normally.

    Unlike the other functions in this module, you must pass a custom cache_obj
    to this() in order to operate on the non-global cache. This is because of
    wonky behavior when using decorator.decorator from a class method.

    :param func: (expensive?) function to decorate
    :param cache_obj: cache to a specific object (for use from the cache object itself)
    :param key: optional key to store the value under
    :param ttl: optional expiry to apply to the cached value
    :param *args: arg tuple to pass to the decorated function
    :param **kwargs: kwarg dict to pass to the decorated function
    """
    key = key or (func.__name__ + str(args) + str(kwargs))
    if cache_obj.has(key):
        return cache_obj.get(key)
    value = func(*args, **kwargs)
    cache_obj.upsert(key, value, ttl)
    return value
```
"""
Store the output from the decorated function in the cache and pull it
from the cache on future invocations without rerunning.
Normally, the value will be stored under a key which takes into account
all of the parameters that are passed into it, thereby caching different
invocations separately. If you specify a key, all invocations will be
cached under that key, and different invocations will return the same
value, which may be unexpected. So, be careful!
If the cache is disabled, the decorated function will just run normally.
Unlike the other functions in this module, you must pass a custom cache_obj
to this() in order to operate on the non-global cache. This is because of
wonky behavior when using decorator.decorator from a class method.
:param func: (expensive?) function to decorate
:param cache_obj: cache to a specific object (for use from the cache object itself)
:param key: optional key to store the value under
:param ttl: optional expiry to apply to the cached value
:param *args: arg tuple to pass to the decorated function
:param **kwargs: kwarg dict to pass to the decorated function
"""
key = key or (func.__name__ + str(args) + str(kwargs))
if cache_obj.has(key):
return cache_obj.get(key)
value = func(*args, **kwargs)
cache_obj.upsert(key, value, ttl)
return value | [
"def",
"this",
"(",
"func",
",",
"cache_obj",
"=",
"CACHE_OBJ",
",",
"key",
"=",
"None",
",",
"ttl",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"key",
"=",
"key",
"or",
"(",
"func",
".",
"__name__",
"+",
"str",
"(",
"args",
")",
"+",
"str",
"(",
"kwargs",
")",
")",
"if",
"cache_obj",
".",
"has",
"(",
"key",
")",
":",
"return",
"cache_obj",
".",
"get",
"(",
"key",
")",
"value",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"cache_obj",
".",
"upsert",
"(",
"key",
",",
"value",
",",
"ttl",
")",
"return",
"value"
] | Store the output from the decorated function in the cache and pull it
from the cache on future invocations without rerunning.
Normally, the value will be stored under a key which takes into account
all of the parameters that are passed into it, thereby caching different
invocations separately. If you specify a key, all invocations will be
cached under that key, and different invocations will return the same
value, which may be unexpected. So, be careful!
If the cache is disabled, the decorated function will just run normally.
Unlike the other functions in this module, you must pass a custom cache_obj
to this() in order to operate on the non-global cache. This is because of
wonky behavior when using decorator.decorator from a class method.
:param func: (expensive?) function to decorate
:param cache_obj: cache to a specific object (for use from the cache object itself)
:param key: optional key to store the value under
:param ttl: optional expiry to apply to the cached value
:param *args: arg tuple to pass to the decorated function
:param **kwargs: kwarg dict to pass to the decorated function | [
"Store",
"the",
"output",
"from",
"the",
"decorated",
"function",
"in",
"the",
"cache",
"and",
"pull",
"it",
"from",
"the",
"cache",
"on",
"future",
"invocations",
"without",
"rerunning",
"."
] | 24876c2c5f8959a806e2701adb7efbf70a87a1ae | https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L354-L383 |
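
Although the docstring frames this() as a decorator body (applied via decorator.decorator), it can also be exercised by calling it directly. A sketch with a made-up expensive function, using the default global cache object and assuming the cache starts out enabled:

```python
import time
from microcache import this

def slow_square(n):
    time.sleep(0.1)  # stand-in for expensive work
    return n * n

print(this(slow_square, n=4))  # runs slow_square; cached under "slow_square(){'n': 4}"
print(this(slow_square, n=4))  # second call is served from the cache, no sleep
```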

id: 251617 | repo: ajk8/microcache | path: microcache/__init__.py | func_name: Microcache.has | language: python
sha: 24876c2c5f8959a806e2701adb7efbf70a87a1ae | url: https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L127-L139

```python
def has(self, key):
    """
    See if a key is in the cache

    Returns CACHE_DISABLED if the cache is disabled

    :param key: key to search for
    """
    if not self.options.enabled:
        return CACHE_DISABLED
    ret = key in self._dict.keys() and not self._dict[key].is_expired()
    logger.debug('has({}) == {}'.format(repr(key), ret))
    return ret
```
"""
See if a key is in the cache
Returns CACHE_DISABLED if the cache is disabled
:param key: key to search for
"""
if not self.options.enabled:
return CACHE_DISABLED
ret = key in self._dict.keys() and not self._dict[key].is_expired()
logger.debug('has({}) == {}'.format(repr(key), ret))
return ret | [
"def",
"has",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"self",
".",
"options",
".",
"enabled",
":",
"return",
"CACHE_DISABLED",
"ret",
"=",
"key",
"in",
"self",
".",
"_dict",
".",
"keys",
"(",
")",
"and",
"not",
"self",
".",
"_dict",
"[",
"key",
"]",
".",
"is_expired",
"(",
")",
"logger",
".",
"debug",
"(",
"'has({}) == {}'",
".",
"format",
"(",
"repr",
"(",
"key",
")",
",",
"ret",
")",
")",
"return",
"ret"
] | See if a key is in the cache
Returns CACHE_DISABLED if the cache is disabled
:param key: key to search for | [
"See",
"if",
"a",
"key",
"is",
"in",
"the",
"cache"
] | 24876c2c5f8959a806e2701adb7efbf70a87a1ae | https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L127-L139 |

id: 251618 | repo: ajk8/microcache | path: microcache/__init__.py | func_name: Microcache.upsert | language: python
sha: 24876c2c5f8959a806e2701adb7efbf70a87a1ae | url: https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L141-L156

```python
def upsert(self, key, value, ttl=None):
    """
    Perform an upsert on the cache

    Returns CACHE_DISABLED if the cache is disabled
    Returns True on successful operation

    :param key: key to store the value under
    :param value: value to cache
    :param ttl: optional expiry in seconds (defaults to None)
    """
    if not self.options.enabled:
        return CACHE_DISABLED
    logger.debug('upsert({}, {}, ttl={})'.format(repr(key), repr(value), ttl))
    self._dict[key] = MicrocacheItem(value, ttl)
    return True
```
"""
Perform an upsert on the cache
Returns CACHE_DISABLED if the cache is disabled
Returns True on successful operation
:param key: key to store the value under
:param value: value to cache
:param ttl: optional expiry in seconds (defaults to None)
"""
if not self.options.enabled:
return CACHE_DISABLED
logger.debug('upsert({}, {}, ttl={})'.format(repr(key), repr(value), ttl))
self._dict[key] = MicrocacheItem(value, ttl)
return True | [
"def",
"upsert",
"(",
"self",
",",
"key",
",",
"value",
",",
"ttl",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"options",
".",
"enabled",
":",
"return",
"CACHE_DISABLED",
"logger",
".",
"debug",
"(",
"'upsert({}, {}, ttl={})'",
".",
"format",
"(",
"repr",
"(",
"key",
")",
",",
"repr",
"(",
"value",
")",
",",
"ttl",
")",
")",
"self",
".",
"_dict",
"[",
"key",
"]",
"=",
"MicrocacheItem",
"(",
"value",
",",
"ttl",
")",
"return",
"True"
] | Perform an upsert on the cache
Returns CACHE_DISABLED if the cache is disabled
Returns True on successful operation
:param key: key to store the value under
:param value: value to cache
:param ttl: optional expiry in seconds (defaults to None) | [
"Perform",
"an",
"upsert",
"on",
"the",
"cache"
] | 24876c2c5f8959a806e2701adb7efbf70a87a1ae | https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L141-L156 |

id: 251619 | repo: ajk8/microcache | path: microcache/__init__.py | func_name: Microcache.get | language: python
sha: 24876c2c5f8959a806e2701adb7efbf70a87a1ae | url: https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L158-L173

```python
def get(self, key, default=CACHE_MISS):
    """
    Get a value out of the cache

    Returns CACHE_DISABLED if the cache is disabled

    :param key: key to search for
    :param default: value to return if the key is not found (defaults to CACHE_MISS)
    """
    if not self.options.enabled:
        return CACHE_DISABLED
    ret = default
    if self.has(key):
        ret = self._dict[key].value
    logger.debug('get({}, default={}) == {}'.format(repr(key), repr(default), repr(ret)))
    return ret
```
"""
Get a value out of the cache
Returns CACHE_DISABLED if the cache is disabled
:param key: key to search for
:param default: value to return if the key is not found (defaults to CACHE_MISS)
"""
if not self.options.enabled:
return CACHE_DISABLED
ret = default
if self.has(key):
ret = self._dict[key].value
logger.debug('get({}, default={}) == {}'.format(repr(key), repr(default), repr(ret)))
return ret | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"CACHE_MISS",
")",
":",
"if",
"not",
"self",
".",
"options",
".",
"enabled",
":",
"return",
"CACHE_DISABLED",
"ret",
"=",
"default",
"if",
"self",
".",
"has",
"(",
"key",
")",
":",
"ret",
"=",
"self",
".",
"_dict",
"[",
"key",
"]",
".",
"value",
"logger",
".",
"debug",
"(",
"'get({}, default={}) == {}'",
".",
"format",
"(",
"repr",
"(",
"key",
")",
",",
"repr",
"(",
"default",
")",
",",
"repr",
"(",
"ret",
")",
")",
")",
"return",
"ret"
] | Get a value out of the cache
Returns CACHE_DISABLED if the cache is disabled
:param key: key to search for
:param default: value to return if the key is not found (defaults to CACHE_MISS) | [
"Get",
"a",
"value",
"out",
"of",
"the",
"cache"
] | 24876c2c5f8959a806e2701adb7efbf70a87a1ae | https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L158-L173 |
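
Pulling the has/upsert/get pieces together; this sketch assumes Microcache() can be constructed with no arguments, since its __init__ is not shown in this dump:

```python
from microcache import Microcache, CACHE_MISS

cache = Microcache()  # no-argument construction is an assumption here

cache.upsert('greeting', 'hello', ttl=60)  # entry expires after 60 seconds
print(cache.has('greeting'))               # True
print(cache.get('greeting'))               # 'hello'
print(cache.get('missing') is CACHE_MISS)  # True: a sentinel object, not None
print(cache.get('missing', default=None))  # None
```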

id: 251620 | repo: ajk8/microcache | path: microcache/__init__.py | func_name: Microcache.clear | language: python
sha: 24876c2c5f8959a806e2701adb7efbf70a87a1ae | url: https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L175-L194

```python
def clear(self, key=None):
    """
    Clear a cache entry, or the entire cache if no key is given

    Returns CACHE_DISABLED if the cache is disabled
    Returns True on successful operation

    :param key: optional key to limit the clear operation to (defaults to None)
    """
    if not self.options.enabled:
        return CACHE_DISABLED
    logger.debug('clear(key={})'.format(repr(key)))
    if key is not None and key in self._dict.keys():
        del self._dict[key]
        logger.info('cache cleared for key: ' + repr(key))
    elif not key:
        for cached_key in [k for k in self._dict.keys()]:
            del self._dict[cached_key]
        logger.info('cache cleared for ALL keys')
    return True
```
"""
Clear a cache entry, or the entire cache if no key is given
Returns CACHE_DISABLED if the cache is disabled
Returns True on successful operation
:param key: optional key to limit the clear operation to (defaults to None)
"""
if not self.options.enabled:
return CACHE_DISABLED
logger.debug('clear(key={})'.format(repr(key)))
if key is not None and key in self._dict.keys():
del self._dict[key]
logger.info('cache cleared for key: ' + repr(key))
elif not key:
for cached_key in [k for k in self._dict.keys()]:
del self._dict[cached_key]
logger.info('cache cleared for ALL keys')
return True | [
"def",
"clear",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"options",
".",
"enabled",
":",
"return",
"CACHE_DISABLED",
"logger",
".",
"debug",
"(",
"'clear(key={})'",
".",
"format",
"(",
"repr",
"(",
"key",
")",
")",
")",
"if",
"key",
"is",
"not",
"None",
"and",
"key",
"in",
"self",
".",
"_dict",
".",
"keys",
"(",
")",
":",
"del",
"self",
".",
"_dict",
"[",
"key",
"]",
"logger",
".",
"info",
"(",
"'cache cleared for key: '",
"+",
"repr",
"(",
"key",
")",
")",
"elif",
"not",
"key",
":",
"for",
"cached_key",
"in",
"[",
"k",
"for",
"k",
"in",
"self",
".",
"_dict",
".",
"keys",
"(",
")",
"]",
":",
"del",
"self",
".",
"_dict",
"[",
"cached_key",
"]",
"logger",
".",
"info",
"(",
"'cache cleared for ALL keys'",
")",
"return",
"True"
] | Clear a cache entry, or the entire cache if no key is given
Returns CACHE_DISABLED if the cache is disabled
Returns True on successful operation
:param key: optional key to limit the clear operation to (defaults to None) | [
"Clear",
"a",
"cache",
"entry",
"or",
"the",
"entire",
"cache",
"if",
"no",
"key",
"is",
"given"
] | 24876c2c5f8959a806e2701adb7efbf70a87a1ae | https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L175-L194 |

id: 251621 | repo: ajk8/microcache | path: microcache/__init__.py | func_name: Microcache.disable | language: python
sha: 24876c2c5f8959a806e2701adb7efbf70a87a1ae | url: https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L196-L206

```python
def disable(self, clear_cache=True):
    """
    Disable the cache and clear its contents

    :param clear_cache: clear the cache contents as well as disabling (defaults to True)
    """
    logger.debug('disable(clear_cache={})'.format(clear_cache))
    if clear_cache:
        self.clear()
    self.options.enabled = False
    logger.info('cache disabled')
```
"""
Disable the cache and clear its contents
:param clear_cache: clear the cache contents as well as disabling (defaults to True)
"""
logger.debug('disable(clear_cache={})'.format(clear_cache))
if clear_cache:
self.clear()
self.options.enabled = False
logger.info('cache disabled') | [
"def",
"disable",
"(",
"self",
",",
"clear_cache",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"'disable(clear_cache={})'",
".",
"format",
"(",
"clear_cache",
")",
")",
"if",
"clear_cache",
":",
"self",
".",
"clear",
"(",
")",
"self",
".",
"options",
".",
"enabled",
"=",
"False",
"logger",
".",
"info",
"(",
"'cache disabled'",
")"
] | Disable the cache and clear its contents
:param clear_cache: clear the cache contents as well as disabling (defaults to True) | [
"Disable",
"the",
"cache",
"and",
"clear",
"its",
"contents"
] | 24876c2c5f8959a806e2701adb7efbf70a87a1ae | https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L196-L206 |
251,622 | cogniteev/docido-python-sdk | docido_sdk/toolbox/collections_ext.py | flatten_dict | def flatten_dict(d, prefix='', sep='.'):
"""In place dict flattening.
"""
def apply_and_resolve_conflicts(dest, item, prefix):
for k, v in flatten_dict(item, prefix=prefix, sep=sep).items():
new_key = k
i = 2
while new_key in d:
new_key = '{key}{sep}{index}'.format(key=k, sep=sep, index=i)
i += 1
dest[new_key] = v
for key in list(d.keys()):
if any(unicode(prefix)):
new_key = u'{p}{sep}{key}'.format(p=prefix, key=key, sep=sep)
else:
new_key = key
if isinstance(d[key], (dict, collections.Mapping)):
apply_and_resolve_conflicts(d, d.pop(key), new_key)
elif isinstance(d[key], six.string_types):
d[new_key] = d.pop(key)
elif isinstance(d[key], (list, collections.Mapping)):
array = d.pop(key)
            for i in range(len(array)):
                # bump the numeric suffix until the key is free
                j = i
                index_key = '{key}{sep}{i}'.format(key=key, sep=sep, i=j)
                while index_key in d:
                    j += 1
                    index_key = '{key}{sep}{i}'.format(key=key, sep=sep, i=j)
                apply_and_resolve_conflicts(d, array[i], index_key)
else:
d[new_key] = d.pop(key)
return d | python | def flatten_dict(d, prefix='', sep='.'):
"""In place dict flattening.
"""
def apply_and_resolve_conflicts(dest, item, prefix):
for k, v in flatten_dict(item, prefix=prefix, sep=sep).items():
new_key = k
i = 2
while new_key in d:
new_key = '{key}{sep}{index}'.format(key=k, sep=sep, index=i)
i += 1
dest[new_key] = v
for key in list(d.keys()):
if any(unicode(prefix)):
new_key = u'{p}{sep}{key}'.format(p=prefix, key=key, sep=sep)
else:
new_key = key
if isinstance(d[key], (dict, collections.Mapping)):
apply_and_resolve_conflicts(d, d.pop(key), new_key)
elif isinstance(d[key], six.string_types):
d[new_key] = d.pop(key)
elif isinstance(d[key], (list, collections.Mapping)):
array = d.pop(key)
            for i in range(len(array)):
                # bump the numeric suffix until the key is free
                j = i
                index_key = '{key}{sep}{i}'.format(key=key, sep=sep, i=j)
                while index_key in d:
                    j += 1
                    index_key = '{key}{sep}{i}'.format(key=key, sep=sep, i=j)
                apply_and_resolve_conflicts(d, array[i], index_key)
else:
d[new_key] = d.pop(key)
return d | [
"def",
"flatten_dict",
"(",
"d",
",",
"prefix",
"=",
"''",
",",
"sep",
"=",
"'.'",
")",
":",
"def",
"apply_and_resolve_conflicts",
"(",
"dest",
",",
"item",
",",
"prefix",
")",
":",
"for",
"k",
",",
"v",
"in",
"flatten_dict",
"(",
"item",
",",
"prefix",
"=",
"prefix",
",",
"sep",
"=",
"sep",
")",
".",
"items",
"(",
")",
":",
"new_key",
"=",
"k",
"i",
"=",
"2",
"while",
"new_key",
"in",
"d",
":",
"new_key",
"=",
"'{key}{sep}{index}'",
".",
"format",
"(",
"key",
"=",
"k",
",",
"sep",
"=",
"sep",
",",
"index",
"=",
"i",
")",
"i",
"+=",
"1",
"dest",
"[",
"new_key",
"]",
"=",
"v",
"for",
"key",
"in",
"list",
"(",
"d",
".",
"keys",
"(",
")",
")",
":",
"if",
"any",
"(",
"unicode",
"(",
"prefix",
")",
")",
":",
"new_key",
"=",
"u'{p}{sep}{key}'",
".",
"format",
"(",
"p",
"=",
"prefix",
",",
"key",
"=",
"key",
",",
"sep",
"=",
"sep",
")",
"else",
":",
"new_key",
"=",
"key",
"if",
"isinstance",
"(",
"d",
"[",
"key",
"]",
",",
"(",
"dict",
",",
"collections",
".",
"Mapping",
")",
")",
":",
"apply_and_resolve_conflicts",
"(",
"d",
",",
"d",
".",
"pop",
"(",
"key",
")",
",",
"new_key",
")",
"elif",
"isinstance",
"(",
"d",
"[",
"key",
"]",
",",
"six",
".",
"string_types",
")",
":",
"d",
"[",
"new_key",
"]",
"=",
"d",
".",
"pop",
"(",
"key",
")",
"elif",
"isinstance",
"(",
"d",
"[",
"key",
"]",
",",
"(",
"list",
",",
"collections",
".",
"Mapping",
")",
")",
":",
"array",
"=",
"d",
".",
"pop",
"(",
"key",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"array",
")",
")",
":",
"index_key",
"=",
"'{key}{sep}{i}'",
".",
"format",
"(",
"key",
"=",
"key",
",",
"sep",
"=",
"sep",
",",
"i",
"=",
"i",
")",
"while",
"index_key",
"in",
"d",
":",
"i",
"+=",
"1",
"apply_and_resolve_conflicts",
"(",
"d",
",",
"array",
"[",
"i",
"]",
",",
"index_key",
")",
"else",
":",
"d",
"[",
"new_key",
"]",
"=",
"d",
".",
"pop",
"(",
"key",
")",
"return",
"d"
] | In place dict flattening. | [
"In",
"place",
"dict",
"flattening",
"."
] | 58ecb6c6f5757fd40c0601657ab18368da7ddf33 | https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/collections_ext.py#L133-L163 |
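A hand-traced sketch of `flatten_dict` on the plain-dict path; the function targets Python 2 as written (it relies on the `unicode` builtin), and the expected result below is traced by hand from the source rather than taken from the library's docs:

from docido_sdk.toolbox.collections_ext import flatten_dict

d = {'a': {'b': 1, 'c': 'x'}, 'n': 2}
flatten_dict(d)
# nested keys are joined with the separator, in place:
# d == {'a.b': 1, 'a.c': 'x', 'n': 2}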
251,623 | relekang/rmoq | rmoq/backends.py | RmoqStorageBackend.clean_url | def clean_url(url, replacement='_'):
"""
Cleans the url for protocol prefix and trailing slash and replaces special characters
with the given replacement.
:param url: The url of the request.
:param replacement: A string that is used to replace special characters.
"""
cleaned = re.sub(r'/$', '', re.sub(r'https?://', '', url))
for character in '/ _ ? & : ; %'.split():
cleaned = cleaned.replace(character, replacement)
return cleaned | python | def clean_url(url, replacement='_'):
"""
Cleans the url for protocol prefix and trailing slash and replaces special characters
with the given replacement.
:param url: The url of the request.
:param replacement: A string that is used to replace special characters.
"""
cleaned = re.sub(r'/$', '', re.sub(r'https?://', '', url))
for character in '/ _ ? & : ; %'.split():
cleaned = cleaned.replace(character, replacement)
return cleaned | [
"def",
"clean_url",
"(",
"url",
",",
"replacement",
"=",
"'_'",
")",
":",
"cleaned",
"=",
"re",
".",
"sub",
"(",
"r'/$'",
",",
"''",
",",
"re",
".",
"sub",
"(",
"r'https?://'",
",",
"''",
",",
"url",
")",
")",
"for",
"character",
"in",
"'/ _ ? & : ; %'",
".",
"split",
"(",
")",
":",
"cleaned",
"=",
"cleaned",
".",
"replace",
"(",
"character",
",",
"replacement",
")",
"return",
"cleaned"
] | Cleans the url for protocol prefix and trailing slash and replaces special characters
with the given replacement.
:param url: The url of the request.
:param replacement: A string that is used to replace special characters. | [
"Cleans",
"the",
"url",
"for",
"protocol",
"prefix",
"and",
"trailing",
"slash",
"and",
"replaces",
"special",
"characters",
"with",
"the",
"given",
"replacement",
"."
] | 61fd2a221e247b7aca87492f10c3bc3894536260 | https://github.com/relekang/rmoq/blob/61fd2a221e247b7aca87492f10c3bc3894536260/rmoq/backends.py#L42-L53 |
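The record above is self-contained enough to exercise standalone; this runnable sketch copies the function body and checks one hand-traced value (note that `=` is not in the replacement set, so it survives):

import re

def clean_url(url, replacement='_'):   # copied verbatim from the record above
    cleaned = re.sub(r'/$', '', re.sub(r'https?://', '', url))
    for character in '/ _ ? & : ; %'.split():
        cleaned = cleaned.replace(character, replacement)
    return cleaned

assert clean_url('https://example.com/api/v1?q=1') == 'example.com_api_v1_q=1'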
251,624 | openpermissions/perch | perch/cli.py | create | def create(resource, previous=None, migrations_path=None):
"""Create an empty migration for a resource"""
if migrations_path:
file_path = migrate.create(resource, previous_version=previous, package=migrations_path)
else:
file_path = migrate.create(resource, previous_version=previous)
click.secho('Created migration file: ' + file_path, fg='green') | python | def create(resource, previous=None, migrations_path=None):
"""Create an empty migration for a resource"""
if migrations_path:
file_path = migrate.create(resource, previous_version=previous, package=migrations_path)
else:
file_path = migrate.create(resource, previous_version=previous)
click.secho('Created migration file: ' + file_path, fg='green') | [
"def",
"create",
"(",
"resource",
",",
"previous",
"=",
"None",
",",
"migrations_path",
"=",
"None",
")",
":",
"if",
"migrations_path",
":",
"file_path",
"=",
"migrate",
".",
"create",
"(",
"resource",
",",
"previous_version",
"=",
"previous",
",",
"package",
"=",
"migrations_path",
")",
"else",
":",
"file_path",
"=",
"migrate",
".",
"create",
"(",
"resource",
",",
"previous_version",
"=",
"previous",
")",
"click",
".",
"secho",
"(",
"'Created migration file: '",
"+",
"file_path",
",",
"fg",
"=",
"'green'",
")"
] | Create an empty migration for a resource | [
"Create",
"an",
"empty",
"migration",
"for",
"a",
"resource"
] | 36d78994133918f3c52c187f19e50132960a0156 | https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/cli.py#L38-L45 |
251,625 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.handle_address_save | def handle_address_save(self, sender, instance, **kwargs):
""" Custom handler for address save """
objects = self.find_associated_with_address(instance)
for obj in objects:
self.handle_save(obj.__class__, obj) | python | def handle_address_save(self, sender, instance, **kwargs):
""" Custom handler for address save """
objects = self.find_associated_with_address(instance)
for obj in objects:
self.handle_save(obj.__class__, obj) | [
"def",
"handle_address_save",
"(",
"self",
",",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"objects",
"=",
"self",
".",
"find_associated_with_address",
"(",
"instance",
")",
"for",
"obj",
"in",
"objects",
":",
"self",
".",
"handle_save",
"(",
"obj",
".",
"__class__",
",",
"obj",
")"
] | Custom handler for address save | [
"Custom",
"handler",
"for",
"address",
"save"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L63-L67 |
251,626 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.handle_address_delete | def handle_address_delete(self, sender, instance, **kwargs):
""" Custom handler for address delete """
objects = self.find_associated_with_address(instance)
# this is not called as django will delete associated project/address
# triggering handle_delete
for obj in objects: # pragma: no cover
self.handle_delete(obj.__class__, obj) | python | def handle_address_delete(self, sender, instance, **kwargs):
""" Custom handler for address delete """
objects = self.find_associated_with_address(instance)
# this is not called as django will delete associated project/address
# triggering handle_delete
for obj in objects: # pragma: no cover
self.handle_delete(obj.__class__, obj) | [
"def",
"handle_address_delete",
"(",
"self",
",",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"objects",
"=",
"self",
".",
"find_associated_with_address",
"(",
"instance",
")",
"# this is not called as django will delete associated project/address",
"# triggering handle_delete",
"for",
"obj",
"in",
"objects",
":",
"# pragma: no cover",
"self",
".",
"handle_delete",
"(",
"obj",
".",
"__class__",
",",
"obj",
")"
] | Custom handler for address delete | [
"Custom",
"handler",
"for",
"address",
"delete"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L70-L77 |
251,627 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.handle_job_and_work_save | def handle_job_and_work_save(self, sender, instance, **kwargs):
""" Custom handler for job and work save """
self.handle_save(instance.project.__class__, instance.project) | python | def handle_job_and_work_save(self, sender, instance, **kwargs):
""" Custom handler for job and work save """
self.handle_save(instance.project.__class__, instance.project) | [
"def",
"handle_job_and_work_save",
"(",
"self",
",",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"handle_save",
"(",
"instance",
".",
"project",
".",
"__class__",
",",
"instance",
".",
"project",
")"
] | Custom handler for job and work save | [
"Custom",
"handler",
"for",
"job",
"and",
"work",
"save"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L79-L81 |
251,628 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.handle_job_and_work_delete | def handle_job_and_work_delete(self, sender, instance, **kwargs):
""" Custom handler for job and work delete """
self.handle_delete(instance.project.__class__, instance.project) | python | def handle_job_and_work_delete(self, sender, instance, **kwargs):
""" Custom handler for job and work delete """
self.handle_delete(instance.project.__class__, instance.project) | [
"def",
"handle_job_and_work_delete",
"(",
"self",
",",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"handle_delete",
"(",
"instance",
".",
"project",
".",
"__class__",
",",
"instance",
".",
"project",
")"
] | Custom handler for job and work delete | [
"Custom",
"handler",
"for",
"job",
"and",
"work",
"delete"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L83-L85 |
251,629 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.handle_profile_save | def handle_profile_save(self, sender, instance, **kwargs):
""" Custom handler for user profile save """
self.handle_save(instance.user.__class__, instance.user) | python | def handle_profile_save(self, sender, instance, **kwargs):
""" Custom handler for user profile save """
self.handle_save(instance.user.__class__, instance.user) | [
"def",
"handle_profile_save",
"(",
"self",
",",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"handle_save",
"(",
"instance",
".",
"user",
".",
"__class__",
",",
"instance",
".",
"user",
")"
] | Custom handler for user profile save | [
"Custom",
"handler",
"for",
"user",
"profile",
"save"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L87-L89 |
251,630 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.handle_profile_delete | def handle_profile_delete(self, sender, instance, **kwargs):
""" Custom handler for user profile delete """
try:
self.handle_save(instance.user.__class__, instance.user) # we call save just as well
except (get_profile_model().DoesNotExist):
pass | python | def handle_profile_delete(self, sender, instance, **kwargs):
""" Custom handler for user profile delete """
try:
self.handle_save(instance.user.__class__, instance.user) # we call save just as well
except (get_profile_model().DoesNotExist):
pass | [
"def",
"handle_profile_delete",
"(",
"self",
",",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"self",
".",
"handle_save",
"(",
"instance",
".",
"user",
".",
"__class__",
",",
"instance",
".",
"user",
")",
"# we call save just as well",
"except",
"(",
"get_profile_model",
"(",
")",
".",
"DoesNotExist",
")",
":",
"pass"
] | Custom handler for user profile delete | [
"Custom",
"handler",
"for",
"user",
"profile",
"delete"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L91-L96 |
251,631 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.handle_m2m | def handle_m2m(self, sender, instance, **kwargs):
""" Handle many to many relationships """
self.handle_save(instance.__class__, instance) | python | def handle_m2m(self, sender, instance, **kwargs):
""" Handle many to many relationships """
self.handle_save(instance.__class__, instance) | [
"def",
"handle_m2m",
"(",
"self",
",",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"handle_save",
"(",
"instance",
".",
"__class__",
",",
"instance",
")"
] | Handle many to many relationships | [
"Handle",
"many",
"to",
"many",
"relationships"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L98-L100 |
251,632 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.handle_m2m_user | def handle_m2m_user(self, sender, instance, **kwargs):
""" Handle many to many relationships for user field """
self.handle_save(instance.user.__class__, instance.user) | python | def handle_m2m_user(self, sender, instance, **kwargs):
""" Handle many to many relationships for user field """
self.handle_save(instance.user.__class__, instance.user) | [
"def",
"handle_m2m_user",
"(",
"self",
",",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"handle_save",
"(",
"instance",
".",
"user",
".",
"__class__",
",",
"instance",
".",
"user",
")"
] | Handle many to many relationships for user field | [
"Handle",
"many",
"to",
"many",
"relationships",
"for",
"user",
"field"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L102-L104 |
251,633 | OpenVolunteeringPlatform/django-ovp-search | ovp_search/signals.py | TiedModelRealtimeSignalProcessor.find_associated_with_address | def find_associated_with_address(self, instance):
""" Returns list with projects and organizations associated with given address """
objects = []
objects += list(Project.objects.filter(address=instance))
objects += list(Organization.objects.filter(address=instance))
return objects | python | def find_associated_with_address(self, instance):
""" Returns list with projects and organizations associated with given address """
objects = []
objects += list(Project.objects.filter(address=instance))
objects += list(Organization.objects.filter(address=instance))
return objects | [
"def",
"find_associated_with_address",
"(",
"self",
",",
"instance",
")",
":",
"objects",
"=",
"[",
"]",
"objects",
"+=",
"list",
"(",
"Project",
".",
"objects",
".",
"filter",
"(",
"address",
"=",
"instance",
")",
")",
"objects",
"+=",
"list",
"(",
"Organization",
".",
"objects",
".",
"filter",
"(",
"address",
"=",
"instance",
")",
")",
"return",
"objects"
] | Returns list with projects and organizations associated with given address | [
"Returns",
"list",
"with",
"projects",
"and",
"organizations",
"associated",
"with",
"given",
"address"
] | 003ceecc0a87be31fe8195f65367c52631f72b57 | https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/signals.py#L106-L112 |
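A short sketch tying the handlers above together: saving an address fans out to every Project and Organization that references it. The processor construction below follows Haystack's signal-processor convention, which is an assumption here, as is the `address` instance:

from haystack import connections, connection_router  # assumed Haystack wiring

processor = TiedModelRealtimeSignalProcessor(connections, connection_router)

# `address` is an already-saved Address model instance (hypothetical)
for obj in processor.find_associated_with_address(address):
    processor.handle_save(obj.__class__, obj)  # reindex each related object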
251,634 | cogniteev/docido-python-sdk | docido_sdk/toolbox/google_ext.py | refresh_token | def refresh_token(token, session=None):
"""Refresh Google OAuth token.
:param OAuthToken token:
the token to refresh
:param requests.Session session:
Optional `requests` session to use.
"""
session = session or HTTP_SESSION
refresh_data = dict(
refresh_token=token.refresh_token,
client_id=token.consumer_key,
client_secret=token.consumer_secret,
grant_type='refresh_token'
)
resp = session.post(REFRESH_TOKEN_URL, data=refresh_data)
resp_json = resp.json()
if 'error' in resp_json:
message = resp_json['error']
description = resp_json.get('error_description', '')
if any(description):
message = u'{}: {}'.format(message, description)
raise OAuthTokenExpiredError(message)
return OAuthToken(
access_token=resp_json['access_token'],
refresh_token=token.refresh_token,
consumer_key=token.consumer_key,
consumer_secret=token.consumer_secret
) | python | def refresh_token(token, session=None):
"""Refresh Google OAuth token.
:param OAuthToken token:
the token to refresh
:param requests.Session session:
Optional `requests` session to use.
"""
session = session or HTTP_SESSION
refresh_data = dict(
refresh_token=token.refresh_token,
client_id=token.consumer_key,
client_secret=token.consumer_secret,
grant_type='refresh_token'
)
resp = session.post(REFRESH_TOKEN_URL, data=refresh_data)
resp_json = resp.json()
if 'error' in resp_json:
message = resp_json['error']
description = resp_json.get('error_description', '')
if any(description):
message = u'{}: {}'.format(message, description)
raise OAuthTokenExpiredError(message)
return OAuthToken(
access_token=resp_json['access_token'],
refresh_token=token.refresh_token,
consumer_key=token.consumer_key,
consumer_secret=token.consumer_secret
) | [
"def",
"refresh_token",
"(",
"token",
",",
"session",
"=",
"None",
")",
":",
"session",
"=",
"session",
"or",
"HTTP_SESSION",
"refresh_data",
"=",
"dict",
"(",
"refresh_token",
"=",
"token",
".",
"refresh_token",
",",
"client_id",
"=",
"token",
".",
"consumer_key",
",",
"client_secret",
"=",
"token",
".",
"consumer_secret",
",",
"grant_type",
"=",
"'refresh_token'",
")",
"resp",
"=",
"session",
".",
"post",
"(",
"REFRESH_TOKEN_URL",
",",
"data",
"=",
"refresh_data",
")",
"resp_json",
"=",
"resp",
".",
"json",
"(",
")",
"if",
"'error'",
"in",
"resp_json",
":",
"message",
"=",
"resp_json",
"[",
"'error'",
"]",
"description",
"=",
"resp_json",
".",
"get",
"(",
"'error_description'",
",",
"''",
")",
"if",
"any",
"(",
"description",
")",
":",
"message",
"=",
"u'{}: {}'",
".",
"format",
"(",
"message",
",",
"description",
")",
"raise",
"OAuthTokenExpiredError",
"(",
"message",
")",
"return",
"OAuthToken",
"(",
"access_token",
"=",
"resp_json",
"[",
"'access_token'",
"]",
",",
"refresh_token",
"=",
"token",
".",
"refresh_token",
",",
"consumer_key",
"=",
"token",
".",
"consumer_key",
",",
"consumer_secret",
"=",
"token",
".",
"consumer_secret",
")"
] | Refresh Google OAuth token.
:param OAuthToken token:
the token to refresh
:param requests.Session session:
Optional `requests` session to use. | [
"Refresh",
"Google",
"OAuth",
"token",
"."
] | 58ecb6c6f5757fd40c0601657ab18368da7ddf33 | https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/google_ext.py#L22-L51 |
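An offline sketch of `refresh_token` using a stub session, so no request reaches Google; the `OAuthToken` import path and its keyword arguments are assumptions read off the function body:

from docido_sdk.toolbox.google_ext import refresh_token, OAuthToken

class StubResponse(object):
    def json(self):
        return {'access_token': 'fresh-access-token'}

class StubSession(object):
    def post(self, url, data=None):
        return StubResponse()   # pretend Google accepted the refresh

old = OAuthToken(access_token='stale', refresh_token='r1',
                 consumer_key='ck', consumer_secret='cs')
new = refresh_token(old, session=StubSession())
assert new.access_token == 'fresh-access-token'
assert new.refresh_token == 'r1'   # the refresh token is carried over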
251,635 | calvinku96/labreporthelper | labreporthelper/table.py | make_tex_table | def make_tex_table(inputlist, outputfile, close=False, fmt=None,
**kwargs):
"""
Parse table from inputlist
Args:
inputlist: list
List to parse
outputfile: file
.tex file to write
fmt: dictionary
key: integer
column index starting with 0
values: string
format string. eg "{:g}"
**kwargs:
nonestring: string
string when objecttype is None
Returns:
None
"""
output_str = ""
if fmt is None:
fmt = {}
for row in inputlist:
for key, val in enumerate(row):
if val is None:
output_str += r'\text{{{}}}'.format(
str(kwargs.get("nonestring", "None"))
                ) + "&"
else:
# get default
if np.isscalar(val):
temp_str_fmt = "$\\num{{" + fmt.get(
key, "{:g}") + "}}$"
else:
temp_str_fmt = fmt.get(key, "{}")
temp_str = temp_str_fmt.format(val).replace("+", "")
output_str += temp_str + "&"
output_str = output_str[:-1]
output_str += "\\\\\n"
outputfile.write(output_str)
if close:
outputfile.close() | python | def make_tex_table(inputlist, outputfile, close=False, fmt=None,
**kwargs):
"""
Parse table from inputlist
Args:
inputlist: list
List to parse
outputfile: file
.tex file to write
fmt: dictionary
key: integer
column index starting with 0
values: string
format string. eg "{:g}"
**kwargs:
nonestring: string
string when objecttype is None
Returns:
None
"""
output_str = ""
if fmt is None:
fmt = {}
for row in inputlist:
for key, val in enumerate(row):
if val is None:
output_str += r'\text{{{}}}'.format(
str(kwargs.get("nonestring", "None"))
                ) + "&"
else:
# get default
if np.isscalar(val):
temp_str_fmt = "$\\num{{" + fmt.get(
key, "{:g}") + "}}$"
else:
temp_str_fmt = fmt.get(key, "{}")
temp_str = temp_str_fmt.format(val).replace("+", "")
output_str += temp_str + "&"
output_str = output_str[:-1]
output_str += "\\\\\n"
outputfile.write(output_str)
if close:
outputfile.close() | [
"def",
"make_tex_table",
"(",
"inputlist",
",",
"outputfile",
",",
"close",
"=",
"False",
",",
"fmt",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"output_str",
"=",
"\"\"",
"if",
"fmt",
"is",
"None",
":",
"fmt",
"=",
"{",
"}",
"for",
"row",
"in",
"inputlist",
":",
"for",
"key",
",",
"val",
"in",
"enumerate",
"(",
"row",
")",
":",
"if",
"val",
"is",
"None",
":",
"output_str",
"+=",
"r'\\text{{{}}}'",
".",
"format",
"(",
"str",
"(",
"kwargs",
".",
"get",
"(",
"\"nonestring\"",
",",
"\"None\"",
")",
")",
")",
"else",
":",
"# get default",
"if",
"np",
".",
"isscalar",
"(",
"val",
")",
":",
"temp_str_fmt",
"=",
"\"$\\\\num{{\"",
"+",
"fmt",
".",
"get",
"(",
"key",
",",
"\"{:g}\"",
")",
"+",
"\"}}$\"",
"else",
":",
"temp_str_fmt",
"=",
"fmt",
".",
"get",
"(",
"key",
",",
"\"{}\"",
")",
"temp_str",
"=",
"temp_str_fmt",
".",
"format",
"(",
"val",
")",
".",
"replace",
"(",
"\"+\"",
",",
"\"\"",
")",
"output_str",
"+=",
"temp_str",
"+",
"\"&\"",
"output_str",
"=",
"output_str",
"[",
":",
"-",
"1",
"]",
"output_str",
"+=",
"\"\\\\\\\\\\n\"",
"outputfile",
".",
"write",
"(",
"output_str",
")",
"if",
"close",
":",
"outputfile",
".",
"close",
"(",
")"
] | Parse table from inputlist
Args:
inputlist: list
List to parse
outputfile: file
.tex file to write
fmt: dictionary
key: integer
column index starting with 0
values: string
format string. eg "{:g}"
**kwargs:
nonestring: string
string when objecttype is None
Returns:
None | [
"Parse",
"table",
"from",
"inputlist"
] | 4d436241f389c02eb188c313190df62ab28c3763 | https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/table.py#L7-L50 |
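A small sketch of `make_tex_table` with an in-memory buffer; the expected lines are traced by hand from the source above (default cell format `{:g}`, with `fmt` overriding column 0):

import io
from labreporthelper.table import make_tex_table

buf = io.StringIO()
make_tex_table([[1.5, 10], [2.0, 20]], buf, fmt={0: '{:.2f}'})
print(buf.getvalue())
# $\num{1.50}$&$\num{10}$\\
# $\num{2.00}$&$\num{20}$\\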
251,636 | cogniteev/docido-python-sdk | docido_sdk/toolbox/logger_ext.py | set_root_logger_from_verbosity | def set_root_logger_from_verbosity(verbosity=0):
"""Configure root logger according to both application settings
and verbosity level.
"""
kwargs = {}
if verbosity == 1:
kwargs.update(level=logging.INFO)
elif verbosity > 1:
kwargs.update(level=logging.DEBUG)
set_root_logger(**kwargs) | python | def set_root_logger_from_verbosity(verbosity=0):
"""Configure root logger according to both application settings
and verbosity level.
"""
kwargs = {}
if verbosity == 1:
kwargs.update(level=logging.INFO)
elif verbosity > 1:
kwargs.update(level=logging.DEBUG)
set_root_logger(**kwargs) | [
"def",
"set_root_logger_from_verbosity",
"(",
"verbosity",
"=",
"0",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"verbosity",
"==",
"1",
":",
"kwargs",
".",
"update",
"(",
"level",
"=",
"logging",
".",
"INFO",
")",
"elif",
"verbosity",
">",
"1",
":",
"kwargs",
".",
"update",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"set_root_logger",
"(",
"*",
"*",
"kwargs",
")"
] | Configure root logger according to both application settings
and verbosity level. | [
"Configure",
"root",
"logger",
"according",
"to",
"both",
"application",
"settings",
"and",
"verbosity",
"level",
"."
] | 58ecb6c6f5757fd40c0601657ab18368da7ddf33 | https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/logger_ext.py#L6-L16 |
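The mapping is simply 0 → keep the configured default, 1 → INFO, anything higher → DEBUG:

from docido_sdk.toolbox.logger_ext import set_root_logger_from_verbosity

set_root_logger_from_verbosity()             # keep the configured level
set_root_logger_from_verbosity(verbosity=1)  # root logger at INFO
set_root_logger_from_verbosity(verbosity=2)  # root logger at DEBUG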
251,637 | cogniteev/docido-python-sdk | docido_sdk/toolbox/logger_ext.py | set_loggers_from_config | def set_loggers_from_config(config=None):
"""Set loggers configuration according to the `logging` section
of Docido configuration file.
:param nameddict config:
overrides Docido configuration
"""
config = config or app_config.logging
for lname, lconfig in config.get('loggers', {}).iteritems():
if 'level' in lconfig:
level = getattr(logging, lconfig.level)
assert isinstance(level, int)
logger = logging.getLogger(lname)
logger.setLevel(level) | python | def set_loggers_from_config(config=None):
"""Set loggers configuration according to the `logging` section
of Docido configuration file.
:param nameddict config:
overrides Docido configuration
"""
config = config or app_config.logging
for lname, lconfig in config.get('loggers', {}).iteritems():
if 'level' in lconfig:
level = getattr(logging, lconfig.level)
assert isinstance(level, int)
logger = logging.getLogger(lname)
logger.setLevel(level) | [
"def",
"set_loggers_from_config",
"(",
"config",
"=",
"None",
")",
":",
"config",
"=",
"config",
"or",
"app_config",
".",
"logging",
"for",
"lname",
",",
"lconfig",
"in",
"config",
".",
"get",
"(",
"'loggers'",
",",
"{",
"}",
")",
".",
"iteritems",
"(",
")",
":",
"if",
"'level'",
"in",
"lconfig",
":",
"level",
"=",
"getattr",
"(",
"logging",
",",
"lconfig",
".",
"level",
")",
"assert",
"isinstance",
"(",
"level",
",",
"int",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"lname",
")",
"logger",
".",
"setLevel",
"(",
"level",
")"
] | Set loggers configuration according to the `logging` section
of Docido configuration file.
:param nameddict config:
overrides Docido configuration | [
"Set",
"loggers",
"configuration",
"according",
"to",
"the",
"logging",
"section",
"of",
"Docido",
"configuration",
"file",
"."
] | 58ecb6c6f5757fd40c0601657ab18368da7ddf33 | https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/logger_ext.py#L34-L47 |
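A sketch of the configuration shape this expects — a `loggers` mapping whose values expose a `level` attribute. The `nameddict` import location (the same SDK module that provides `flatten_dict` above) and its attribute-style access are assumptions, and the function is Python 2 only as written (`iteritems`):

from docido_sdk.toolbox.collections_ext import nameddict  # assumed location
from docido_sdk.toolbox.logger_ext import set_loggers_from_config

config = nameddict({
    'loggers': {
        'requests': {'level': 'WARNING'},
        'docido_sdk': {'level': 'DEBUG'},
    },
})
set_loggers_from_config(config)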
251,638 | Sean1708/HipPy | hippy/parser.py | Parser.data | def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data | python | def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data | [
"def",
"data",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data",
"is",
"None",
":",
"# reset after possible parsing failure",
"self",
".",
"__init__",
"(",
"self",
".",
"tokens",
")",
"return",
"self",
".",
"_parse",
"(",
")",
"else",
":",
"return",
"self",
".",
"_data"
] | Return parsed data structure. | [
"Return",
"parsed",
"data",
"structure",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L47-L54 |
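An end-to-end sketch for the HipPy parser records that follow. Only `Parser(tokens).data` comes from this record; the `Lexer` import path, its constructor, and its `.tokens` attribute are assumptions about the companion lexer module:

from hippy.lexer import Lexer    # assumed companion module
from hippy.parser import Parser

tokens = Lexer('answer: 42').tokens   # assumed Lexer API
print(Parser(tokens).data)
# expected (hand-traced, untested): {'answer': 42}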
251,639 | Sean1708/HipPy | hippy/parser.py | Parser._increment | def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
        self._cur_position = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n | python | def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
        self._cur_position = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n | [
"def",
"_increment",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"if",
"self",
".",
"_cur_position",
">=",
"self",
".",
"num_tokens",
"-",
"1",
":",
"self",
".",
"_cur_positon",
"=",
"self",
".",
"num_tokens",
"-",
"1",
"self",
".",
"_finished",
"=",
"True",
"else",
":",
"self",
".",
"_cur_position",
"+=",
"n"
] | Move forward n tokens in the stream. | [
"Move",
"forward",
"n",
"tokens",
"in",
"the",
"stream",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L71-L77 |
251,640 | Sean1708/HipPy | hippy/parser.py | Parser._skip_whitespace | def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i | python | def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i | [
"def",
"_skip_whitespace",
"(",
"self",
")",
":",
"i",
"=",
"0",
"while",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"ws",
"and",
"not",
"self",
".",
"_finished",
":",
"self",
".",
"_increment",
"(",
")",
"i",
"+=",
"1",
"return",
"i"
] | Increment over whitespace, counting characters. | [
"Increment",
"over",
"whitespace",
"counting",
"characters",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L79-L86 |
251,641 | Sean1708/HipPy | hippy/parser.py | Parser._skip_newlines | def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment() | python | def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment() | [
"def",
"_skip_newlines",
"(",
"self",
")",
":",
"while",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"lbreak",
"and",
"not",
"self",
".",
"_finished",
":",
"self",
".",
"_increment",
"(",
")"
] | Increment over newlines. | [
"Increment",
"over",
"newlines",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L88-L91 |
251,642 | Sean1708/HipPy | hippy/parser.py | Parser._parse | def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data | python | def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data | [
"def",
"_parse",
"(",
"self",
")",
":",
"while",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"in",
"(",
"TT",
".",
"ws",
",",
"TT",
".",
"lbreak",
")",
":",
"self",
".",
"_skip_whitespace",
"(",
")",
"self",
".",
"_skip_newlines",
"(",
")",
"self",
".",
"_data",
"=",
"self",
".",
"_parse_value",
"(",
")",
"return",
"self",
".",
"_data"
] | Parse the token stream into a nice dictionary data structure. | [
"Parse",
"the",
"token",
"stream",
"into",
"a",
"nice",
"dictionary",
"data",
"structure",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L93-L101 |
251,643 | Sean1708/HipPy | hippy/parser.py | Parser._parse_value | def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent) | python | def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent) | [
"def",
"_parse_value",
"(",
"self",
")",
":",
"indent",
"=",
"0",
"while",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"ws",
":",
"indent",
"=",
"self",
".",
"_skip_whitespace",
"(",
")",
"self",
".",
"_skip_newlines",
"(",
")",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"id",
":",
"return",
"self",
".",
"_parse_key",
"(",
"indent",
")",
"elif",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"hyphen",
":",
"self",
".",
"_increment",
"(",
")",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"hyphen",
":",
"self",
".",
"_increment",
"(",
")",
"return",
"[",
"]",
"else",
":",
"return",
"self",
".",
"_parse_object_list",
"(",
")",
"else",
":",
"# TODO: single comma gives empty list",
"return",
"self",
".",
"_parse_literal_list",
"(",
"indent",
")"
] | Parse the value of a key-value pair. | [
"Parse",
"the",
"value",
"of",
"a",
"key",
"-",
"value",
"pair",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L103-L121 |
251,644 | Sean1708/HipPy | hippy/parser.py | Parser._parse_key | def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
) | python | def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
) | [
"def",
"_parse_key",
"(",
"self",
",",
"indent",
")",
":",
"data",
"=",
"{",
"}",
"new_indent",
"=",
"indent",
"while",
"not",
"self",
".",
"_finished",
"and",
"new_indent",
"==",
"indent",
":",
"self",
".",
"_skip_whitespace",
"(",
")",
"cur_token",
"=",
"self",
".",
"_cur_token",
"if",
"cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"id",
":",
"key",
"=",
"cur_token",
"[",
"'value'",
"]",
"next_token",
"=",
"self",
".",
"_nth_token",
"(",
")",
"if",
"next_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"colon",
":",
"self",
".",
"_increment",
"(",
"2",
")",
"# move past the ':'",
"# whitespace before a newline is not important",
"# whitespace after a newline is important",
"self",
".",
"_skip_whitespace",
"(",
")",
"self",
".",
"_skip_newlines",
"(",
")",
"data",
"[",
"key",
"]",
"=",
"self",
".",
"_parse_value",
"(",
")",
"else",
":",
"raise",
"ParseError",
"(",
"\"':'\"",
",",
"next_token",
")",
"else",
":",
"if",
"cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"hyphen",
":",
"return",
"data",
"else",
":",
"raise",
"ParseError",
"(",
"\"identifier or '-'\"",
",",
"cur_token",
")",
"if",
"self",
".",
"tokens",
"[",
"self",
".",
"_cur_position",
"-",
"1",
"]",
"[",
"'type'",
"]",
"is",
"not",
"TT",
".",
"lbreak",
":",
"# skip whitespace at the end of the line",
"self",
".",
"_skip_whitespace",
"(",
")",
"self",
".",
"_skip_newlines",
"(",
")",
"# find next indentation level without incrementing",
"new_indent",
"=",
"0",
"temp_position",
"=",
"self",
".",
"_cur_position",
"while",
"(",
"temp_position",
"<",
"self",
".",
"num_tokens",
"-",
"1",
"and",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"is",
"TT",
".",
"ws",
")",
":",
"temp_position",
"+=",
"1",
"new_indent",
"+=",
"1",
"if",
"indent",
"==",
"0",
"or",
"new_indent",
"<",
"indent",
":",
"return",
"data",
"else",
":",
"raise",
"Exception",
"(",
"\"Parser screwed up, increase of indent on line {} should \"",
"\"have been caught by _parse_value().\"",
".",
"format",
"(",
"cur_token",
"[",
"'line'",
"]",
")",
")"
] | Parse a series of key-value pairs. | [
"Parse",
"a",
"series",
"of",
"key",
"-",
"value",
"pairs",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L123-L172 |
251,645 | Sean1708/HipPy | hippy/parser.py | Parser._parse_object_list | def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token) | python | def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token) | [
"def",
"_parse_object_list",
"(",
"self",
")",
":",
"array",
"=",
"[",
"]",
"indent",
"=",
"0",
"while",
"not",
"self",
".",
"_finished",
":",
"self",
".",
"_skip_newlines",
"(",
")",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"ws",
":",
"while",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"ws",
":",
"indent",
"=",
"self",
".",
"_skip_whitespace",
"(",
")",
"self",
".",
"_skip_newlines",
"(",
")",
"elif",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"id",
":",
"array",
".",
"append",
"(",
"self",
".",
"_parse_key",
"(",
"indent",
")",
")",
"elif",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"hyphen",
":",
"self",
".",
"_increment",
"(",
")",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"not",
"TT",
".",
"hyphen",
"or",
"self",
".",
"_finished",
":",
"return",
"array",
"else",
":",
"self",
".",
"_increment",
"(",
")",
"else",
":",
"raise",
"ParseError",
"(",
"'something different'",
",",
"self",
".",
"_cur_token",
")"
] | Parse a list of data structures. | [
"Parse",
"a",
"list",
"of",
"data",
"structures",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L174-L194 |
251,646 | Sean1708/HipPy | hippy/parser.py | Parser._parse_literal_list | def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval | python | def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval | [
"def",
"_parse_literal_list",
"(",
"self",
",",
"indent",
")",
":",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"not",
"in",
"self",
".",
"_literals",
":",
"raise",
"Exception",
"(",
"\"Parser failed, _parse_literal_list was called on non-literal\"",
"\" {} on line {}.\"",
".",
"format",
"(",
"repr",
"(",
"self",
".",
"_cur_token",
"[",
"'value'",
"]",
")",
",",
"self",
".",
"_cur_token",
"[",
"'line'",
"]",
")",
")",
"# find next token after whitespace without incrementing",
"temp_position",
"=",
"self",
".",
"_cur_position",
"while",
"(",
"temp_position",
"<",
"self",
".",
"num_tokens",
"-",
"1",
"and",
"(",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"is",
"TT",
".",
"ws",
"or",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"in",
"self",
".",
"_literals",
")",
")",
":",
"temp_position",
"+=",
"1",
"next_token",
"=",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"# end of stream",
"if",
"next_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"ws",
":",
"return",
"self",
".",
"_cur_token",
"[",
"'value'",
"]",
"elif",
"next_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"comma",
":",
"return",
"self",
".",
"_parse_comma_list",
"(",
")",
"elif",
"next_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"lbreak",
":",
"while",
"(",
"temp_position",
"<",
"self",
".",
"num_tokens",
"-",
"1",
"and",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"in",
"(",
"TT",
".",
"lbreak",
",",
"TT",
".",
"ws",
")",
")",
":",
"temp_position",
"+=",
"1",
"if",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"in",
"self",
".",
"_literals",
":",
"return",
"self",
".",
"_parse_newline_list",
"(",
"indent",
")",
"else",
":",
"rval",
"=",
"self",
".",
"_cur_token",
"[",
"'value'",
"]",
"self",
".",
"_increment",
"(",
")",
"return",
"rval",
"else",
":",
"rval",
"=",
"self",
".",
"_cur_token",
"[",
"'value'",
"]",
"self",
".",
"_increment",
"(",
")",
"return",
"rval"
] | Parse a list of literals. | [
"Parse",
"a",
"list",
"of",
"literals",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L196-L237 |
251,647 | Sean1708/HipPy | hippy/parser.py | Parser._parse_comma_list | def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array | python | def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array | [
"def",
"_parse_comma_list",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"not",
"in",
"self",
".",
"_literals",
":",
"raise",
"Exception",
"(",
"\"Parser failed, _parse_comma_list was called on non-literal\"",
"\" {} on line {}.\"",
".",
"format",
"(",
"repr",
"(",
"self",
".",
"_cur_token",
"[",
"'value'",
"]",
")",
",",
"self",
".",
"_cur_token",
"[",
"'line'",
"]",
")",
")",
"array",
"=",
"[",
"]",
"while",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"in",
"self",
".",
"_literals",
"and",
"not",
"self",
".",
"_finished",
":",
"array",
".",
"append",
"(",
"self",
".",
"_cur_token",
"[",
"'value'",
"]",
")",
"self",
".",
"_increment",
"(",
")",
"self",
".",
"_skip_whitespace",
"(",
")",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"comma",
":",
"self",
".",
"_increment",
"(",
")",
"self",
".",
"_skip_whitespace",
"(",
")",
"elif",
"(",
"not",
"self",
".",
"_finished",
"and",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"not",
"in",
"(",
"TT",
".",
"ws",
",",
"TT",
".",
"lbreak",
")",
")",
":",
"raise",
"ParseError",
"(",
"'comma or newline'",
",",
"self",
".",
"_cur_token",
")",
"return",
"array"
    ] | Parse a comma separated list. | [
"Parse",
"a",
"comma",
"seperated",
"list",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L239-L263 |
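Continuing the parser sketch above (same assumed `Lexer`/`Parser` setup as after the `Parser.data` record): `_parse_comma_list` handles literals separated by commas on a single line, so a Hip source like `primes: 2, 3, 5, 7` should come back as a list value:

tokens = Lexer('primes: 2, 3, 5, 7').tokens   # same assumed Lexer API
print(Parser(tokens).data)
# expected (hand-traced, untested): {'primes': [2, 3, 5, 7]}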
251,648 | Sean1708/HipPy | hippy/parser.py | Parser._parse_newline_list | def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
            # look ahead to see if it's a comma separated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array | python | def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
            # look ahead to see if it's a comma separated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array | [
"def",
"_parse_newline_list",
"(",
"self",
",",
"indent",
")",
":",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"not",
"in",
"self",
".",
"_literals",
":",
"raise",
"Exception",
"(",
"\"Parser failed, _parse_newline_list was called on non-literal\"",
"\" {} on line {}.\"",
".",
"format",
"(",
"repr",
"(",
"self",
".",
"_cur_token",
"[",
"'value'",
"]",
")",
",",
"self",
".",
"_cur_token",
"[",
"'line'",
"]",
")",
")",
"array",
"=",
"[",
"]",
"new_indent",
"=",
"indent",
"while",
"not",
"self",
".",
"_finished",
":",
"if",
"new_indent",
"<",
"indent",
":",
"break",
"elif",
"new_indent",
"==",
"indent",
":",
"while",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"lbreak",
":",
"self",
".",
"_skip_newlines",
"(",
")",
"self",
".",
"_skip_whitespace",
"(",
")",
"# look ahead to see if it's a comma seperated list",
"temp_position",
"=",
"self",
".",
"_cur_position",
"while",
"(",
"temp_position",
"<",
"self",
".",
"num_tokens",
"-",
"1",
"and",
"(",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"is",
"TT",
".",
"ws",
"or",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"in",
"self",
".",
"_literals",
")",
")",
":",
"temp_position",
"+=",
"1",
"if",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"is",
"TT",
".",
"comma",
":",
"array",
".",
"append",
"(",
"self",
".",
"_parse_comma_list",
"(",
")",
")",
"else",
":",
"if",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"not",
"TT",
".",
"hyphen",
":",
"array",
".",
"append",
"(",
"self",
".",
"_cur_token",
"[",
"'value'",
"]",
")",
"elif",
"self",
".",
"_nth_token",
"(",
")",
"[",
"'type'",
"]",
"is",
"TT",
".",
"hyphen",
":",
"# two consecutive '-'s",
"array",
".",
"append",
"(",
"[",
"]",
")",
"self",
".",
"_increment",
"(",
")",
"self",
".",
"_increment",
"(",
")",
"else",
":",
"# new_indent > indent",
"while",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"is",
"TT",
".",
"lbreak",
":",
"self",
".",
"_skip_newlines",
"(",
")",
"self",
".",
"_skip_whitespace",
"(",
")",
"array",
".",
"append",
"(",
"self",
".",
"_parse_newline_list",
"(",
"new_indent",
")",
")",
"self",
".",
"_skip_whitespace",
"(",
")",
"if",
"(",
"not",
"self",
".",
"_finished",
"and",
"self",
".",
"_cur_token",
"[",
"'type'",
"]",
"not",
"in",
"(",
"TT",
".",
"lbreak",
",",
"TT",
".",
"hyphen",
")",
")",
":",
"raise",
"ParseError",
"(",
"'newline'",
",",
"self",
".",
"_cur_token",
")",
"temp_position",
"=",
"self",
".",
"_cur_position",
"new_indent",
"=",
"0",
"while",
"(",
"temp_position",
"<",
"self",
".",
"num_tokens",
"-",
"1",
"and",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"in",
"(",
"TT",
".",
"lbreak",
",",
"TT",
".",
"ws",
")",
")",
":",
"if",
"self",
".",
"tokens",
"[",
"temp_position",
"]",
"[",
"'type'",
"]",
"is",
"TT",
".",
"lbreak",
":",
"new_indent",
"=",
"0",
"else",
":",
"new_indent",
"+=",
"1",
"temp_position",
"+=",
"1",
"return",
"array"
    ] | Parse a newline separated list. | [
"Parse",
"a",
"newline",
"seperated",
"list",
"."
] | d0ea8fb1e417f1fedaa8e215e3d420b90c4de691 | https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L265-L330 |
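And the newline-separated form handled by `_parse_newline_list`: one literal per line at a common indent under the key. Again, the lexer side is assumed:

source = 'values:\n    1\n    2\n    3'
print(Parser(Lexer(source).tokens).data)
# expected (hand-traced, untested): {'values': [1, 2, 3]}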
251,649 | xtrementl/focus | focus/environment/cli.py | CLI._handle_help | def _handle_help(self, env, args):
""" Handles showing help information for arguments provided.
`env`
Runtime ``Environment`` instance.
`args`
List of argument strings passed.
Returns ``False`` if nothing handled.
* Raises ``HelpBanner`` exception if valid subcommand provided.
"""
if args:
# command help (focus help [command])
# get command plugin registered for command
active = env.task.active
plugin_obj = registration.get_command_hook(args[0], active)
if plugin_obj:
parser = self._get_plugin_parser(plugin_obj)
raise HelpBanner(parser.format_help(), code=0)
return False | python | def _handle_help(self, env, args):
""" Handles showing help information for arguments provided.
`env`
Runtime ``Environment`` instance.
`args`
List of argument strings passed.
Returns ``False`` if nothing handled.
* Raises ``HelpBanner`` exception if valid subcommand provided.
"""
if args:
# command help (focus help [command])
# get command plugin registered for command
active = env.task.active
plugin_obj = registration.get_command_hook(args[0], active)
if plugin_obj:
parser = self._get_plugin_parser(plugin_obj)
raise HelpBanner(parser.format_help(), code=0)
return False | [
"def",
"_handle_help",
"(",
"self",
",",
"env",
",",
"args",
")",
":",
"if",
"args",
":",
"# command help (focus help [command])",
"# get command plugin registered for command",
"active",
"=",
"env",
".",
"task",
".",
"active",
"plugin_obj",
"=",
"registration",
".",
"get_command_hook",
"(",
"args",
"[",
"0",
"]",
",",
"active",
")",
"if",
"plugin_obj",
":",
"parser",
"=",
"self",
".",
"_get_plugin_parser",
"(",
"plugin_obj",
")",
"raise",
"HelpBanner",
"(",
"parser",
".",
"format_help",
"(",
")",
",",
"code",
"=",
"0",
")",
"return",
"False"
] | Handles showing help information for arguments provided.
`env`
Runtime ``Environment`` instance.
`args`
List of argument strings passed.
Returns ``False`` if nothing handled.
* Raises ``HelpBanner`` exception if valid subcommand provided. | [
"Handles",
"showing",
"help",
"information",
"for",
"arguments",
"provided",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/cli.py#L59-L82 |
251,650 | xtrementl/focus | focus/environment/cli.py | CLI._handle_command | def _handle_command(self, command, env, args):
""" Handles calling appropriate command plugin based on the arguments
provided.
`command`
Command string.
`env`
Runtime ``Environment`` instance.
`args`
List of argument strings passed.
Returns ``False`` if nothing handled.
* Raises ``HelpBanner`` exception if mismatched command arguments.
"""
# get command plugin registered for command
# note, we're guaranteed to have a command string by this point
plugin_obj = registration.get_command_hook(command, env.task.active)
# check if plugin is task-specific or has option hooks implying
# task-specific behavior
if plugin_obj and not env.task.active:
if plugin_obj.task_only or plugin_obj.options:
plugin_obj = None
if plugin_obj:
# plugin needs root, setup root access via sudo
if plugin_obj.needs_root:
registration.setup_sudo_access(plugin_obj)
# parse arguments
parser = self._get_plugin_parser(plugin_obj)
parsed_args = parser.parse_args(args)
# run plugin
plugin_obj.execute(env, parsed_args)
return True
return False | python | def _handle_command(self, command, env, args):
""" Handles calling appropriate command plugin based on the arguments
provided.
`command`
Command string.
`env`
Runtime ``Environment`` instance.
`args`
List of argument strings passed.
Returns ``False`` if nothing handled.
* Raises ``HelpBanner`` exception if mismatched command arguments.
"""
# get command plugin registered for command
# note, we're guaranteed to have a command string by this point
plugin_obj = registration.get_command_hook(command, env.task.active)
# check if plugin is task-specific or has option hooks implying
# task-specific behavior
if plugin_obj and not env.task.active:
if plugin_obj.task_only or plugin_obj.options:
plugin_obj = None
if plugin_obj:
# plugin needs root, setup root access via sudo
if plugin_obj.needs_root:
registration.setup_sudo_access(plugin_obj)
# parse arguments
parser = self._get_plugin_parser(plugin_obj)
parsed_args = parser.parse_args(args)
# run plugin
plugin_obj.execute(env, parsed_args)
return True
return False | [
"def",
"_handle_command",
"(",
"self",
",",
"command",
",",
"env",
",",
"args",
")",
":",
"# get command plugin registered for command",
"# note, we're guaranteed to have a command string by this point",
"plugin_obj",
"=",
"registration",
".",
"get_command_hook",
"(",
"command",
",",
"env",
".",
"task",
".",
"active",
")",
"# check if plugin is task-specific or has option hooks implying",
"# task-specific behavior",
"if",
"plugin_obj",
"and",
"not",
"env",
".",
"task",
".",
"active",
":",
"if",
"plugin_obj",
".",
"task_only",
"or",
"plugin_obj",
".",
"options",
":",
"plugin_obj",
"=",
"None",
"if",
"plugin_obj",
":",
"# plugin needs root, setup root access via sudo",
"if",
"plugin_obj",
".",
"needs_root",
":",
"registration",
".",
"setup_sudo_access",
"(",
"plugin_obj",
")",
"# parse arguments",
"parser",
"=",
"self",
".",
"_get_plugin_parser",
"(",
"plugin_obj",
")",
"parsed_args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"# run plugin",
"plugin_obj",
".",
"execute",
"(",
"env",
",",
"parsed_args",
")",
"return",
"True",
"return",
"False"
] | Handles calling appropriate command plugin based on the arguments
provided.
`command`
Command string.
`env`
Runtime ``Environment`` instance.
`args`
List of argument strings passed.
Returns ``False`` if nothing handled.
* Raises ``HelpBanner`` exception if mismatched command arguments. | [
"Handles",
"calling",
"appropriate",
"command",
"plugin",
"based",
"on",
"the",
"arguments",
"provided",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/cli.py#L84-L122 |
251,651 | xtrementl/focus | focus/environment/cli.py | CLI._get_parser | def _get_parser(self, env):
""" Creates base argument parser.
`env`
Runtime ``Environment`` instance.
* Raises ``HelpBanner`` exception when certain conditions apply.
Returns ``FocusArgumentParser`` object.
"""
version_str = 'focus version ' + __version__
usage_str = 'focus [-h] [-v] [--no-color] <command> [<args>]'
# setup parser
parser = FocusArgParser(description=("Command-line productivity tool "
"for improved task workflows."),
epilog=("See 'focus help <command>' for more "
"information on a specific command."),
usage=usage_str)
parser.add_argument('-v', '--version', action='version',
version=version_str)
parser.add_argument('--no-color', action='store_true',
help='disables colors')
# fetch command plugins
commands = []
active = env.task.active
command_hooks = registration.get_registered(command_hooks=True,
task_active=active)
# extract command name and docstrings as help text
for plugin in command_hooks:
help_text = (plugin.__doc__ or '').strip().rstrip('.').lower()
commands.append((plugin.command, help_text))
commands.sort(key=lambda x: x[0]) # command ordered
# install subparsers
subparsers = parser.add_subparsers(title='available commands')
# install 'help' subparser
help_parser = subparsers.add_parser('help', add_help=False)
help_parser.set_defaults(func=self._handle_help)
# install 'version' subparser
version_parser = subparsers.add_parser('version', add_help=False)
def _print_version(env, args):
env.io.write(version_str)
return True
version_parser.set_defaults(func=_print_version)
# install command subparsers based on registered command plugins.
# this allows for focus commands (e.g. focus on [...])
for command, help_ in commands:
cmd_parser = subparsers.add_parser(command, help=help_,
add_help=False)
# use wrapper to bind command value and passthru to _handle_command
# when executed later
def _run(command):
def _wrapper(env, args):
return self._handle_command(command, env, args)
return _wrapper
cmd_parser.set_defaults(func=_run(command))
return parser | python | def _get_parser(self, env):
""" Creates base argument parser.
`env`
Runtime ``Environment`` instance.
* Raises ``HelpBanner`` exception when certain conditions apply.
Returns ``FocusArgumentParser`` object.
"""
version_str = 'focus version ' + __version__
usage_str = 'focus [-h] [-v] [--no-color] <command> [<args>]'
# setup parser
parser = FocusArgParser(description=("Command-line productivity tool "
"for improved task workflows."),
epilog=("See 'focus help <command>' for more "
"information on a specific command."),
usage=usage_str)
parser.add_argument('-v', '--version', action='version',
version=version_str)
parser.add_argument('--no-color', action='store_true',
help='disables colors')
# fetch command plugins
commands = []
active = env.task.active
command_hooks = registration.get_registered(command_hooks=True,
task_active=active)
# extract command name and docstrings as help text
for plugin in command_hooks:
help_text = (plugin.__doc__ or '').strip().rstrip('.').lower()
commands.append((plugin.command, help_text))
commands.sort(key=lambda x: x[0]) # command ordered
# install subparsers
subparsers = parser.add_subparsers(title='available commands')
# install 'help' subparser
help_parser = subparsers.add_parser('help', add_help=False)
help_parser.set_defaults(func=self._handle_help)
# install 'version' subparser
version_parser = subparsers.add_parser('version', add_help=False)
def _print_version(env, args):
env.io.write(version_str)
return True
version_parser.set_defaults(func=_print_version)
# install command subparsers based on registered command plugins.
# this allows for focus commands (e.g. focus on [...])
for command, help_ in commands:
cmd_parser = subparsers.add_parser(command, help=help_,
add_help=False)
# use wrapper to bind command value and passthru to _handle_command
# when executed later
def _run(command):
def _wrapper(env, args):
return self._handle_command(command, env, args)
return _wrapper
cmd_parser.set_defaults(func=_run(command))
return parser | [
"def",
"_get_parser",
"(",
"self",
",",
"env",
")",
":",
"version_str",
"=",
"'focus version '",
"+",
"__version__",
"usage_str",
"=",
"'focus [-h] [-v] [--no-color] <command> [<args>]'",
"# setup parser",
"parser",
"=",
"FocusArgParser",
"(",
"description",
"=",
"(",
"\"Command-line productivity tool \"",
"\"for improved task workflows.\"",
")",
",",
"epilog",
"=",
"(",
"\"See 'focus help <command>' for more \"",
"\"information on a specific command.\"",
")",
",",
"usage",
"=",
"usage_str",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"version_str",
")",
"parser",
".",
"add_argument",
"(",
"'--no-color'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'disables colors'",
")",
"# fetch command plugins",
"commands",
"=",
"[",
"]",
"active",
"=",
"env",
".",
"task",
".",
"active",
"command_hooks",
"=",
"registration",
".",
"get_registered",
"(",
"command_hooks",
"=",
"True",
",",
"task_active",
"=",
"active",
")",
"# extract command name and docstrings as help text",
"for",
"plugin",
"in",
"command_hooks",
":",
"help_text",
"=",
"(",
"plugin",
".",
"__doc__",
"or",
"''",
")",
".",
"strip",
"(",
")",
".",
"rstrip",
"(",
"'.'",
")",
".",
"lower",
"(",
")",
"commands",
".",
"append",
"(",
"(",
"plugin",
".",
"command",
",",
"help_text",
")",
")",
"commands",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"# command ordered",
"# install subparsers",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"'available commands'",
")",
"# install 'help' subparser",
"help_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'help'",
",",
"add_help",
"=",
"False",
")",
"help_parser",
".",
"set_defaults",
"(",
"func",
"=",
"self",
".",
"_handle_help",
")",
"# install 'version' subparser",
"version_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'version'",
",",
"add_help",
"=",
"False",
")",
"def",
"_print_version",
"(",
"env",
",",
"args",
")",
":",
"env",
".",
"io",
".",
"write",
"(",
"version_str",
")",
"return",
"True",
"version_parser",
".",
"set_defaults",
"(",
"func",
"=",
"_print_version",
")",
"# install command subparsers based on registered command plugins.",
"# this allows for focus commands (e.g. focus on [...])",
"for",
"command",
",",
"help_",
"in",
"commands",
":",
"cmd_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"command",
",",
"help",
"=",
"help_",
",",
"add_help",
"=",
"False",
")",
"# use wrapper to bind command value and passthru to _handle_command",
"# when executed later",
"def",
"_run",
"(",
"command",
")",
":",
"def",
"_wrapper",
"(",
"env",
",",
"args",
")",
":",
"return",
"self",
".",
"_handle_command",
"(",
"command",
",",
"env",
",",
"args",
")",
"return",
"_wrapper",
"cmd_parser",
".",
"set_defaults",
"(",
"func",
"=",
"_run",
"(",
"command",
")",
")",
"return",
"parser"
] | Creates base argument parser.
`env`
Runtime ``Environment`` instance.
* Raises ``HelpBanner`` exception when certain conditions apply.
Returns ``FocusArgumentParser`` object. | [
"Creates",
"base",
"argument",
"parser",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/cli.py#L124-L192 |
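The _run(command) wrapper in _get_parser exists because set_defaults(func=...) is only invoked later: binding the loop variable through a closure freezes each command name at registration time. A standalone sketch of that argparse pattern with placeholder command names (the real names come from focus's registered plugins, which are not part of this record):

import argparse

parser = argparse.ArgumentParser(prog="focus")
subparsers = parser.add_subparsers(title="available commands")

def _run(command):
    def _wrapper(args):
        print("dispatching to", command)
    return _wrapper

for command in ("on", "end"):
    cmd_parser = subparsers.add_parser(command, add_help=False)
    # The closure captures this iteration's command name.
    cmd_parser.set_defaults(func=_run(command))

args = parser.parse_args(["on"])
args.func(args)  # prints: dispatching to on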
251,652 | xtrementl/focus | focus/environment/cli.py | CLI._get_plugin_parser | def _get_plugin_parser(self, plugin_obj):
""" Creates a plugin argument parser.
`plugin_obj`
``Plugin`` object.
Returns ``FocusArgParser`` object.
"""
prog_name = 'focus ' + plugin_obj.command
desc = (plugin_obj.__doc__ or '').strip()
parser = FocusArgParser(prog=prog_name, description=desc)
plugin_obj.setup_parser(parser)
return parser | python | def _get_plugin_parser(self, plugin_obj):
""" Creates a plugin argument parser.
`plugin_obj`
``Plugin`` object.
Returns ``FocusArgParser`` object.
"""
prog_name = 'focus ' + plugin_obj.command
desc = (plugin_obj.__doc__ or '').strip()
parser = FocusArgParser(prog=prog_name, description=desc)
plugin_obj.setup_parser(parser)
return parser | [
"def",
"_get_plugin_parser",
"(",
"self",
",",
"plugin_obj",
")",
":",
"prog_name",
"=",
"'focus '",
"+",
"plugin_obj",
".",
"command",
"desc",
"=",
"(",
"plugin_obj",
".",
"__doc__",
"or",
"''",
")",
".",
"strip",
"(",
")",
"parser",
"=",
"FocusArgParser",
"(",
"prog",
"=",
"prog_name",
",",
"description",
"=",
"desc",
")",
"plugin_obj",
".",
"setup_parser",
"(",
"parser",
")",
"return",
"parser"
] | Creates a plugin argument parser.
`plugin_obj`
``Plugin`` object.
Returns ``FocusArgParser`` object. | [
"Creates",
"a",
"plugin",
"argument",
"parser",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/cli.py#L194-L209 |
251,653 | xtrementl/focus | focus/environment/cli.py | CLI.execute | def execute(self, env):
""" Executes basic flags and command plugins.
`env`
Runtime ``Environment`` instance.
* Raises ``FocusError`` exception when certain conditions apply.
"""
# parse args
parser = self._get_parser(env)
parsed_args, cmd_args = parser.parse_known_args(env.args)
# disable colors
if parsed_args.no_color:
env.io.set_colored(False)
# run command handler passing any remaining args
if not parsed_args.func(env, cmd_args):
raise HelpBanner(parser.format_help()) | python | def execute(self, env):
""" Executes basic flags and command plugins.
`env`
Runtime ``Environment`` instance.
* Raises ``FocusError`` exception when certain conditions apply.
"""
# parse args
parser = self._get_parser(env)
parsed_args, cmd_args = parser.parse_known_args(env.args)
# disable colors
if parsed_args.no_color:
env.io.set_colored(False)
# run command handler passing any remaining args
if not parsed_args.func(env, cmd_args):
raise HelpBanner(parser.format_help()) | [
"def",
"execute",
"(",
"self",
",",
"env",
")",
":",
"# parse args",
"parser",
"=",
"self",
".",
"_get_parser",
"(",
"env",
")",
"parsed_args",
",",
"cmd_args",
"=",
"parser",
".",
"parse_known_args",
"(",
"env",
".",
"args",
")",
"# disable colors",
"if",
"parsed_args",
".",
"no_color",
":",
"env",
".",
"io",
".",
"set_colored",
"(",
"False",
")",
"# run command handler passing any remaining args",
"if",
"not",
"parsed_args",
".",
"func",
"(",
"env",
",",
"cmd_args",
")",
":",
"raise",
"HelpBanner",
"(",
"parser",
".",
"format_help",
"(",
")",
")"
] | Executes basic flags and command plugins.
`env`
Runtime ``Environment`` instance.
* Raises ``FocusError`` exception when certain conditions apply. | [
"Executes",
"basic",
"flags",
"and",
"command",
"plugins",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/cli.py#L211-L230 |
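execute() leans on parse_known_args so that global flags are consumed here while everything else passes through to the chosen command's own parser. A reduced sketch of that two-stage split (the flag and argument values are illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--no-color", action="store_true")
parsed, remaining = parser.parse_known_args(["--no-color", "on", "mytask"])
print(parsed.no_color)  # True
print(remaining)        # ['on', 'mytask']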
251,654 | mbarakaja/braulio | braulio/files.py | _split_chglog | def _split_chglog(path, title):
"""Split a RST file text in two parts. The title argument determine the
split point. The given title goes in the bottom part. If the title is not
found everything goes in the top part.
Return a tuple with the top and bottom parts.
"""
with path.open() as f:
doc = f.readlines()
has_title = False
for idx, curr_line in enumerate(doc):
if title in curr_line:
prev_line = doc[idx - 1] if idx - 1 < len(doc) else "\n"
next_line = doc[idx + 1] if idx + 1 < len(doc) else None
if is_title(prev_line, curr_line, next_line):
idx = idx if prev_line == "\n" else idx - 1
has_title = True
break
if has_title:
top, bottom = doc[:idx], doc[idx:]
else:
top, bottom = doc, []
return "".join(top), "".join(bottom) | python | def _split_chglog(path, title):
"""Split a RST file text in two parts. The title argument determine the
split point. The given title goes in the bottom part. If the title is not
found everything goes in the top part.
Return a tuple with the top and bottom parts.
"""
with path.open() as f:
doc = f.readlines()
has_title = False
for idx, curr_line in enumerate(doc):
if title in curr_line:
prev_line = doc[idx - 1] if idx - 1 < len(doc) else "\n"
next_line = doc[idx + 1] if idx + 1 < len(doc) else None
if is_title(prev_line, curr_line, next_line):
idx = idx if prev_line == "\n" else idx - 1
has_title = True
break
if has_title:
top, bottom = doc[:idx], doc[idx:]
else:
top, bottom = doc, []
return "".join(top), "".join(bottom) | [
"def",
"_split_chglog",
"(",
"path",
",",
"title",
")",
":",
"with",
"path",
".",
"open",
"(",
")",
"as",
"f",
":",
"doc",
"=",
"f",
".",
"readlines",
"(",
")",
"has_title",
"=",
"False",
"for",
"idx",
",",
"curr_line",
"in",
"enumerate",
"(",
"doc",
")",
":",
"if",
"title",
"in",
"curr_line",
":",
"prev_line",
"=",
"doc",
"[",
"idx",
"-",
"1",
"]",
"if",
"idx",
"-",
"1",
"<",
"len",
"(",
"doc",
")",
"else",
"\"\\n\"",
"next_line",
"=",
"doc",
"[",
"idx",
"+",
"1",
"]",
"if",
"idx",
"+",
"1",
"<",
"len",
"(",
"doc",
")",
"else",
"None",
"if",
"is_title",
"(",
"prev_line",
",",
"curr_line",
",",
"next_line",
")",
":",
"idx",
"=",
"idx",
"if",
"prev_line",
"==",
"\"\\n\"",
"else",
"idx",
"-",
"1",
"has_title",
"=",
"True",
"break",
"if",
"has_title",
":",
"top",
",",
"bottom",
"=",
"doc",
"[",
":",
"idx",
"]",
",",
"doc",
"[",
"idx",
":",
"]",
"else",
":",
"top",
",",
"bottom",
"=",
"doc",
",",
"[",
"]",
"return",
"\"\"",
".",
"join",
"(",
"top",
")",
",",
"\"\"",
".",
"join",
"(",
"bottom",
")"
] | Split a RST file text in two parts. The title argument determines the
split point. The given title goes in the bottom part. If the title is not
found everything goes in the top part.
Return a tuple with the top and bottom parts. | [
"Split",
"a",
"RST",
"file",
"text",
"in",
"two",
"parts",
".",
"The",
"title",
"argument",
"determine",
"the",
"split",
"point",
".",
"The",
"given",
"title",
"goes",
"in",
"the",
"bottom",
"part",
".",
"If",
"the",
"title",
"is",
"not",
"found",
"everything",
"goes",
"in",
"the",
"top",
"part",
"."
] | 70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b | https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/files.py#L151-L180 |
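A self-contained sketch of the split performed by _split_chglog, with is_title replaced by a simplified stand-in that only accepts hyphen-underlined RST headings (braulio's real helper is not shown in this record, and the real function additionally moves the preceding blank line into the bottom part). The located title and everything under it become the bottom half:

def is_title(prev_line, curr_line, next_line):
    # Simplified stand-in: a title is preceded by a blank line and
    # underlined by a row of hyphens.
    return (prev_line.strip() == "" and next_line is not None
            and set(next_line.strip()) == {"-"})

doc = ["History\n", "=======\n", "\n", "0.2.0\n", "-----\n", "* stuff\n"]
title = "0.2.0"
idx = next(i for i, line in enumerate(doc)
           if title in line
           and is_title(doc[i - 1], line, doc[i + 1] if i + 1 < len(doc) else None))
top, bottom = "".join(doc[:idx]), "".join(doc[idx:])
print(bottom.splitlines()[0])  # 0.2.0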
251,655 | smartmob-project/procfile | procfile/__init__.py | loads | def loads(content):
"""Load a Procfile from a string."""
lines = _group_lines(line for line in content.split('\n'))
lines = [
(i, _parse_procfile_line(line))
for i, line in lines if line.strip()
]
errors = []
# Reject files with duplicate process types (no sane default).
duplicates = _find_duplicates(((i, line[0]) for i, line in lines))
for i, process_type, j in duplicates:
errors.append(''.join([
'Line %d: duplicate process type "%s": ',
'already appears on line %d.',
]) % (i + 1, process_type, j + 1)
)
# Reject commands with duplicate variables (no sane default).
for i, line in lines:
process_type, env = line[0], line[2]
duplicates = _find_duplicates(((0, var[0]) for var in env))
for _, variable, _ in duplicates:
errors.append(''.join([
'Line %d: duplicate variable "%s" ',
'for process type "%s".',
]) % (i + 1, variable, process_type)
)
# Done!
if errors:
raise ValueError(errors)
return {k: {'cmd': cmd, 'env': dict(env)} for _, (k, cmd, env) in lines} | python | def loads(content):
"""Load a Procfile from a string."""
lines = _group_lines(line for line in content.split('\n'))
lines = [
(i, _parse_procfile_line(line))
for i, line in lines if line.strip()
]
errors = []
# Reject files with duplicate process types (no sane default).
duplicates = _find_duplicates(((i, line[0]) for i, line in lines))
for i, process_type, j in duplicates:
errors.append(''.join([
'Line %d: duplicate process type "%s": ',
'already appears on line %d.',
]) % (i + 1, process_type, j + 1)
)
# Reject commands with duplicate variables (no sane default).
for i, line in lines:
process_type, env = line[0], line[2]
duplicates = _find_duplicates(((0, var[0]) for var in env))
for _, variable, _ in duplicates:
errors.append(''.join([
'Line %d: duplicate variable "%s" ',
'for process type "%s".',
]) % (i + 1, variable, process_type)
)
# Done!
if errors:
raise ValueError(errors)
return {k: {'cmd': cmd, 'env': dict(env)} for _, (k, cmd, env) in lines} | [
"def",
"loads",
"(",
"content",
")",
":",
"lines",
"=",
"_group_lines",
"(",
"line",
"for",
"line",
"in",
"content",
".",
"split",
"(",
"'\\n'",
")",
")",
"lines",
"=",
"[",
"(",
"i",
",",
"_parse_procfile_line",
"(",
"line",
")",
")",
"for",
"i",
",",
"line",
"in",
"lines",
"if",
"line",
".",
"strip",
"(",
")",
"]",
"errors",
"=",
"[",
"]",
"# Reject files with duplicate process types (no sane default).",
"duplicates",
"=",
"_find_duplicates",
"(",
"(",
"(",
"i",
",",
"line",
"[",
"0",
"]",
")",
"for",
"i",
",",
"line",
"in",
"lines",
")",
")",
"for",
"i",
",",
"process_type",
",",
"j",
"in",
"duplicates",
":",
"errors",
".",
"append",
"(",
"''",
".",
"join",
"(",
"[",
"'Line %d: duplicate process type \"%s\": '",
",",
"'already appears on line %d.'",
",",
"]",
")",
"%",
"(",
"i",
"+",
"1",
",",
"process_type",
",",
"j",
"+",
"1",
")",
")",
"# Reject commands with duplicate variables (no sane default).",
"for",
"i",
",",
"line",
"in",
"lines",
":",
"process_type",
",",
"env",
"=",
"line",
"[",
"0",
"]",
",",
"line",
"[",
"2",
"]",
"duplicates",
"=",
"_find_duplicates",
"(",
"(",
"(",
"0",
",",
"var",
"[",
"0",
"]",
")",
"for",
"var",
"in",
"env",
")",
")",
"for",
"_",
",",
"variable",
",",
"_",
"in",
"duplicates",
":",
"errors",
".",
"append",
"(",
"''",
".",
"join",
"(",
"[",
"'Line %d: duplicate variable \"%s\" '",
",",
"'for process type \"%s\".'",
",",
"]",
")",
"%",
"(",
"i",
"+",
"1",
",",
"variable",
",",
"process_type",
")",
")",
"# Done!",
"if",
"errors",
":",
"raise",
"ValueError",
"(",
"errors",
")",
"return",
"{",
"k",
":",
"{",
"'cmd'",
":",
"cmd",
",",
"'env'",
":",
"dict",
"(",
"env",
")",
"}",
"for",
"_",
",",
"(",
"k",
",",
"cmd",
",",
"env",
")",
"in",
"lines",
"}"
] | Load a Procfile from a string. | [
"Load",
"a",
"Procfile",
"from",
"a",
"string",
"."
] | 338756d5b645f17aa2366c34afa3b7a58d880796 | https://github.com/smartmob-project/procfile/blob/338756d5b645f17aa2366c34afa3b7a58d880796/procfile/__init__.py#L70-L99 |
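A possible call to the loads() defined above. Only the outer shape of the result is taken from its final return statement ({process_type: {'cmd': ..., 'env': ...}}); the exact form of 'cmd' depends on _parse_procfile_line, which this record does not show:

content = "web: gunicorn app:main\nworker: celery -A app worker\n"
procs = loads(content)
for name, spec in sorted(procs.items()):
    print(name, spec["cmd"], spec["env"])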
251,656 | OpenVolunteeringPlatform/django-ovp-core | ovp_core/helpers/__init__.py | is_email_enabled | def is_email_enabled(email):
""" Emails are activated by default. Returns false
if an email has been disabled in settings.py
"""
s = get_settings(string="OVP_EMAILS")
email_settings = s.get(email, {})
enabled = True
if email_settings.get("disabled", False):
enabled = False
return enabled | python | def is_email_enabled(email):
""" Emails are activated by default. Returns false
if an email has been disabled in settings.py
"""
s = get_settings(string="OVP_EMAILS")
email_settings = s.get(email, {})
enabled = True
if email_settings.get("disabled", False):
enabled = False
return enabled | [
"def",
"is_email_enabled",
"(",
"email",
")",
":",
"s",
"=",
"get_settings",
"(",
"string",
"=",
"\"OVP_EMAILS\"",
")",
"email_settings",
"=",
"s",
".",
"get",
"(",
"email",
",",
"{",
"}",
")",
"enabled",
"=",
"True",
"if",
"email_settings",
".",
"get",
"(",
"\"disabled\"",
",",
"False",
")",
":",
"enabled",
"=",
"False",
"return",
"enabled"
] | Emails are activated by default. Returns False
if an email has been disabled in settings.py | [
"Emails",
"are",
"activated",
"by",
"default",
".",
"Returns",
"false",
"if",
"an",
"email",
"has",
"been",
"disabled",
"in",
"settings",
".",
"py"
] | c81b868a0a4b317f7b1ec0718cabc34f7794dd20 | https://github.com/OpenVolunteeringPlatform/django-ovp-core/blob/c81b868a0a4b317f7b1ec0718cabc34f7794dd20/ovp_core/helpers/__init__.py#L21-L32 |
251,657 | OpenVolunteeringPlatform/django-ovp-core | ovp_core/helpers/__init__.py | get_email_subject | def get_email_subject(email, default):
""" Allows for email subject overriding from settings.py """
s = get_settings(string="OVP_EMAILS")
email_settings = s.get(email, {})
title = email_settings.get("subject", default)
return _(title) | python | def get_email_subject(email, default):
""" Allows for email subject overriding from settings.py """
s = get_settings(string="OVP_EMAILS")
email_settings = s.get(email, {})
title = email_settings.get("subject", default)
return _(title) | [
"def",
"get_email_subject",
"(",
"email",
",",
"default",
")",
":",
"s",
"=",
"get_settings",
"(",
"string",
"=",
"\"OVP_EMAILS\"",
")",
"email_settings",
"=",
"s",
".",
"get",
"(",
"email",
",",
"{",
"}",
")",
"title",
"=",
"email_settings",
".",
"get",
"(",
"\"subject\"",
",",
"default",
")",
"return",
"_",
"(",
"title",
")"
] | Allows for email subject overriding from settings.py | [
"Allows",
"for",
"email",
"subject",
"overriding",
"from",
"settings",
".",
"py"
] | c81b868a0a4b317f7b1ec0718cabc34f7794dd20 | https://github.com/OpenVolunteeringPlatform/django-ovp-core/blob/c81b868a0a4b317f7b1ec0718cabc34f7794dd20/ovp_core/helpers/__init__.py#L35-L42 |
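Both helpers above share one lookup idiom: read a per-email dict out of the OVP_EMAILS settings mapping with chained .get() fallbacks, so a missing key silently means "use the default". A plain-dict sketch of that idiom with no Django settings involved (the example mapping is invented):

OVP_EMAILS = {"welcome": {"disabled": True, "subject": "Hi there"}}

def email_enabled(name, settings=OVP_EMAILS):
    # Missing email name or missing "disabled" flag both mean enabled.
    return not settings.get(name, {}).get("disabled", False)

print(email_enabled("welcome"))  # False
print(email_enabled("goodbye"))  # True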
251,658 | AguaClara/aide_document-DEPRECATED | aide_document/convert.py | md_to_pdf | def md_to_pdf(input_name, output_name):
"""
Converts an input MarkDown file to a PDF of the given output name.
Parameters
==========
input_name : String
Relative file location of the input file to where this function is being called.
output_name : String
Relative file location of the output file to where this function is being called. Note that .pdf can be omitted.
Examples
========
Suppose we have a directory as follows:
data/
doc.md
To convert the document:
>>> from aide_document import convert
>>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf')
.pdf can also be omitted from the second argument.
"""
if output_name[-4:] == '.pdf':
os.system("pandoc " + input_name + " -o " + output_name)
else:
os.system("pandoc " + input_name + " -o " + output_name + ".pdf" ) | python | def md_to_pdf(input_name, output_name):
"""
Converts an input MarkDown file to a PDF of the given output name.
Parameters
==========
input_name : String
Relative file location of the input file to where this function is being called.
output_name : String
Relative file location of the output file to where this function is being called. Note that .pdf can be omitted.
Examples
========
Suppose we have a directory as follows:
data/
doc.md
To convert the document:
>>> from aide_document import convert
>>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf')
.pdf can also be omitted from the second argument.
"""
if output_name[-4:] == '.pdf':
os.system("pandoc " + input_name + " -o " + output_name)
else:
os.system("pandoc " + input_name + " -o " + output_name + ".pdf" ) | [
"def",
"md_to_pdf",
"(",
"input_name",
",",
"output_name",
")",
":",
"if",
"output_name",
"[",
"-",
"4",
":",
"]",
"==",
"'.pdf'",
":",
"os",
".",
"system",
"(",
"\"pandoc \"",
"+",
"input_name",
"+",
"\" -o \"",
"+",
"output_name",
")",
"else",
":",
"os",
".",
"system",
"(",
"\"pandoc \"",
"+",
"input_name",
"+",
"\" -o \"",
"+",
"output_name",
"+",
"\".pdf\"",
")"
] | Converts an input MarkDown file to a PDF of the given output name.
Parameters
==========
input_name : String
Relative file location of the input file to where this function is being called.
output_name : String
Relative file location of the output file to where this function is being called. Note that .pdf can be omitted.
Examples
========
Suppose we have a directory as follows:
data/
doc.md
To convert the document:
>>> from aide_document import convert
>>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf')
.pdf can also be omitted from the second argument. | [
"Converts",
"an",
"input",
"MarkDown",
"file",
"to",
"a",
"PDF",
"of",
"the",
"given",
"output",
"name",
"."
] | 3f3b5c9f321264e0e4d8ed68dfbc080762579815 | https://github.com/AguaClara/aide_document-DEPRECATED/blob/3f3b5c9f321264e0e4d8ed68dfbc080762579815/aide_document/convert.py#L3-L31 |
251,659 | AguaClara/aide_document-DEPRECATED | aide_document/convert.py | docx_to_md | def docx_to_md(input_name, output_name):
"""
Converts an input docx file to MarkDown file of the given output name.
Parameters
==========
input_name : String
Relative file location of the input file to where this function is being called.
output_name : String
Relative file location of the output file to where this function is being called. Note that .md can be omitted.
Examples
========
Suppose we have a directory as follows:
data/
doc.docx
To convert the document:
>>> from aide_document import convert
>>> convert.docx_to_md('data/doc.docx', 'data/doc.md')
.md can also be omitted from the second argument.
"""
if output_name[-3:] == '.md':
os.system("pandoc " + input_name + " -o " + output_name)
else:
os.system("pandoc " + input_name + " -o " + output_name + ".docx" ) | python | def docx_to_md(input_name, output_name):
"""
Converts an input docx file to MarkDown file of the given output name.
Parameters
==========
input_name : String
Relative file location of the input file to where this function is being called.
output_name : String
Relative file location of the output file to where this function is being called. Note that .md can be omitted.
Examples
========
Suppose we have a directory as follows:
data/
doc.docx
To convert the document:
>>> from aide_document import convert
>>> convert.docx_to_md('data/doc.docx', 'data/doc.md')
.md can also be omitted from the second argument.
"""
if output_name[-3:] == '.md':
os.system("pandoc " + input_name + " -o " + output_name)
else:
os.system("pandoc " + input_name + " -o " + output_name + ".docx" ) | [
"def",
"docx_to_md",
"(",
"input_name",
",",
"output_name",
")",
":",
"if",
"output_name",
"[",
"-",
"5",
":",
"]",
"==",
"'.docx'",
":",
"os",
".",
"system",
"(",
"\"pandoc \"",
"+",
"input_name",
"+",
"\" -o \"",
"+",
"output_name",
")",
"else",
":",
"os",
".",
"system",
"(",
"\"pandoc \"",
"+",
"input_name",
"+",
"\" -o \"",
"+",
"output_name",
"+",
"\".docx\"",
")"
] | Converts an input docx file to MarkDown file of the given output name.
Parameters
==========
input_name : String
Relative file location of the input file to where this function is being called.
output_name : String
Relative file location of the output file to where this function is being called. Note that .md can be omitted.
Examples
========
Suppose we have a directory as follows:
data/
doc.docx
To convert the document:
>>> from aide_document import convert
>>> convert.docx_to_md('data/doc.docx', 'data/doc.md')
.md can also be omitted from the second argument. | [
"Converts",
"an",
"input",
"docx",
"file",
"to",
"MarkDown",
"file",
"of",
"the",
"given",
"output",
"name",
"."
] | 3f3b5c9f321264e0e4d8ed68dfbc080762579815 | https://github.com/AguaClara/aide_document-DEPRECATED/blob/3f3b5c9f321264e0e4d8ed68dfbc080762579815/aide_document/convert.py#L33-L61 |
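Both converters above build a shell command by string concatenation, which misquotes paths containing spaces. A sketch of the same pandoc invocation through subprocess with an argument list, assuming only that a pandoc binary is on PATH:

import subprocess

def convert_with_pandoc(input_name, output_name):
    # Passing a list avoids shell quoting issues entirely; check=True
    # raises if pandoc exits non-zero instead of failing silently.
    subprocess.run(["pandoc", input_name, "-o", output_name], check=True)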
251,660 | MrKriss/vigilance | vigilance/vigilance.py | report_failures | def report_failures(error=False, display=True, clear=True):
""" Print details of logged failures in expect function
If no failures are detected, None is returned by the function.
Parameters
----------
error:bool
If True, will raise an exception of type 'FailedValidationError' instead of printing to console
display: bool
If True, will print the failure report to console as well as returning it as a string. If
error = True, this does nothing.
clear: bool
If True, all logged failures will be cleared after being reported.
Returns
-------
string
The formatted failure report string.
list of dict
The failed expectations. Each dictionary contains the keys:
idx - the number of the failed expectation in the list starting at one,
expression - Code that is evaluated
file - the file name where the validation function was defined,
funcname - the name of the validation function,
line - the line of the validation function that the expression was on
msg - the error message associated with the expression, if there was one.
"""
global _failed_expectations
output = []
# Copy as failures are returned
all_failed_expectations = _failed_expectations[:]
if all_failed_expectations:
output.append('\nFailed Expectations: %s\n\n' % len(all_failed_expectations))
for i, failure in enumerate(all_failed_expectations, start=1):
report_line = '{idx}: File {file}, line {line}, in {funcname}()\n "{expression}" is not True\n'
if failure['msg']:
report_line += ' -- {msg}\n'
report_line += '\n'
failure['idx'] = i
output.append(report_line.format(**failure))
if clear:
_failed_expectations = []
else:
output.append("All expectations met.")
if error:
raise FailedValidationError("\n" + ''.join(output))
elif display:
print(''.join(output))
if all_failed_expectations:
return (''.join(output), all_failed_expectations)
else:
return None | python | def report_failures(error=False, display=True, clear=True):
""" Print details of logged failures in expect function
If no failures are detected, None is returned by the function.
Parameters
----------
error:bool
If True, will raise an exception of type 'FailedValidationError' instead of printing to console
display: bool
If True, will print the failure report to console as well as returning it as a string. If
error = True, this does nothing.
clear: bool
If True, all logged failures will be cleared after being reported.
Returns
-------
string
The formatted failure report string.
list of dict
The failed expectations. Each dictionary contains the keys:
idx - the number of the failed expectation in the list starting at one,
expression - Code that is evaluated
file - the file name where the validation function was defined,
funcname - the name of the validation function,
line - the line of the validation function that the expression was on
msg - the error message associated with the expression, if there was one.
"""
global _failed_expectations
output = []
# Copy as failures are returned
all_failed_expectations = _failed_expectations[:]
if all_failed_expectations:
output.append('\nFailed Expectations: %s\n\n' % len(all_failed_expectations))
for i, failure in enumerate(all_failed_expectations, start=1):
report_line = '{idx}: File {file}, line {line}, in {funcname}()\n "{expression}" is not True\n'
if failure['msg']:
report_line += ' -- {msg}\n'
report_line += '\n'
failure['idx'] = i
output.append(report_line.format(**failure))
if clear:
_failed_expectations = []
else:
output.append("All expectations met.")
if error:
raise FailedValidationError("\n" + ''.join(output))
elif display:
print(''.join(output))
if all_failed_expectations:
return (''.join(output), all_failed_expectations)
else:
return None | [
"def",
"report_failures",
"(",
"error",
"=",
"False",
",",
"display",
"=",
"True",
",",
"clear",
"=",
"True",
")",
":",
"global",
"_failed_expectations",
"output",
"=",
"[",
"]",
"# Copy as failures are returned ",
"all_failed_expectations",
"=",
"_failed_expectations",
"[",
":",
"]",
"if",
"all_failed_expectations",
":",
"output",
".",
"append",
"(",
"'\\nFailed Expectations: %s\\n\\n'",
"%",
"len",
"(",
"all_failed_expectations",
")",
")",
"for",
"i",
",",
"failure",
"in",
"enumerate",
"(",
"all_failed_expectations",
",",
"start",
"=",
"1",
")",
":",
"report_line",
"=",
"'{idx}: File {file}, line {line}, in {funcname}()\\n \"{expression}\" is not True\\n'",
"if",
"failure",
"[",
"'msg'",
"]",
":",
"report_line",
"+=",
"' -- {msg}\\n'",
"report_line",
"+=",
"'\\n'",
"failure",
"[",
"'idx'",
"]",
"=",
"i",
"output",
".",
"append",
"(",
"report_line",
".",
"format",
"(",
"*",
"*",
"failure",
")",
")",
"if",
"clear",
":",
"_failed_expectations",
"=",
"[",
"]",
"else",
":",
"output",
".",
"append",
"(",
"\"All expectations met.\"",
")",
"if",
"error",
":",
"raise",
"FailedValidationError",
"(",
"\"\\n\"",
"+",
"''",
".",
"join",
"(",
"output",
")",
")",
"elif",
"display",
":",
"print",
"(",
"''",
".",
"join",
"(",
"output",
")",
")",
"if",
"all_failed_expectations",
":",
"return",
"(",
"''",
".",
"join",
"(",
"output",
")",
",",
"all_failed_expectations",
")",
"else",
":",
"return",
"None"
] | Print details of logged failures in expect function
If no failures are detected, None is returned by the function.
Parameters
----------
error:bool
If True, will raise an exception of type 'FailedValidationError' instead of printing to console
display: bool
If True, will print the failure report to console as well as returning it as a string. If
error = True, this does nothing.
clear: bool
If True, all logged failures will be cleared after being reported.
Returns
-------
string
The formatted failure report string.
list of dict
The failed expectations. Each dictionary contains the keys:
idx - the number of the failed expectation in the list starting at one,
expression - Code that is evaluated
file - the file name where the validation function was defined,
funcname - the name of the validation function,
line - the line of the validation function that the expression was on
msg - the error message associated with the expression, if there was one. | [
"Print",
"details",
"of",
"logged",
"failures",
"in",
"expect",
"function"
] | 2946b09f524c042c12d796f111f287866e7a3c67 | https://github.com/MrKriss/vigilance/blob/2946b09f524c042c12d796f111f287866e7a3c67/vigilance/vigilance.py#L90-L155 |
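The reporting loop inside report_failures is easy to exercise in isolation; this sketch feeds it a hand-built failure record using the same dict keys the docstring lists (the file name, line number, and expression are invented):

failures = [{"file": "checks.py", "line": 12, "funcname": "check_df",
             "msg": "negative age", "expression": "df.age.min() >= 0"}]
for i, failure in enumerate(failures, start=1):
    line = '{idx}: File {file}, line {line}, in {funcname}()\n "{expression}" is not True\n'
    if failure["msg"]:
        line += " -- {msg}\n"
    failure["idx"] = i
    print(line.format(**failure))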
251,661 | MrKriss/vigilance | vigilance/vigilance.py | _log_failure | def _log_failure(arg_num, msg=None):
""" Retrace stack and log the failed expresion information """
# stack() returns a list of frame records
# 0 is the _log_failure() function
# 1 is the expect() function
# 2 is the function that called expect(), that's what we want
#
# a frame record is a tuple like this:
# (frame, filename, line, funcname, contextlist, index)
# we're only interested in the first 4.
frame, filename, file_lineno, funcname = inspect.stack()[2][:4]
# Note that a frame object should be deleted once used to be safe and stop possible
# memory leak from circular referencing
try:
frame_source_lines, frame_start_lineno = (inspect.getsourcelines(frame))
finally:
del frame
filename = os.path.basename(filename)
# Build abstract syntax tree from source of frame
source_ast = ast.parse(''.join(frame_source_lines))
# Locate the executed expect function
func_body = source_ast.body[0].body
map_lineno_to_node = {}
for idx, node in enumerate(func_body):
map_lineno_to_node[node.lineno] = node
last_lineno = file_lineno - frame_start_lineno + 1
element_idx = [x for x in map_lineno_to_node.keys() if x <= last_lineno]
element_idx = max(element_idx)
expect_function_ast = map_lineno_to_node[element_idx]
# Return the source code of the numbered argument
arg = expect_function_ast.value.args[arg_num]
line = arg.lineno
if isinstance(arg, (ast.Tuple, ast.List)):
expr = astor.to_source(arg.elts[0])
else:
expr = astor.to_source(arg)
filename = os.path.basename(filename)
failure_info = {'file': filename, 'line': line, 'funcname': funcname, 'msg': msg, 'expression': expr}
_failed_expectations.append(failure_info) | python | def _log_failure(arg_num, msg=None):
""" Retrace stack and log the failed expresion information """
# stack() returns a list of frame records
# 0 is the _log_failure() function
# 1 is the expect() function
# 2 is the function that called expect(), that's what we want
#
# a frame record is a tuple like this:
# (frame, filename, line, funcname, contextlist, index)
# we're only interested in the first 4.
frame, filename, file_lineno, funcname = inspect.stack()[2][:4]
# Note that a frame object should be deleted once used to be safe and stop possible
# memory leak from circular referencing
try:
frame_source_lines, frame_start_lineno = (inspect.getsourcelines(frame))
finally:
del frame
filename = os.path.basename(filename)
# Build abstract syntax tree from source of frame
source_ast = ast.parse(''.join(frame_source_lines))
# Locate the executed expect function
func_body = source_ast.body[0].body
map_lineno_to_node = {}
for idx, node in enumerate(func_body):
map_lineno_to_node[node.lineno] = node
last_lineno = file_lineno - frame_start_lineno + 1
element_idx = [x for x in map_lineno_to_node.keys() if x <= last_lineno]
element_idx = max(element_idx)
expect_function_ast = map_lineno_to_node[element_idx]
# Return the source code of the numbered argument
arg = expect_function_ast.value.args[arg_num]
line = arg.lineno
if isinstance(arg, (ast.Tuple, ast.List)):
expr = astor.to_source(arg.elts[0])
else:
expr = astor.to_source(arg)
filename = os.path.basename(filename)
failure_info = {'file': filename, 'line': line, 'funcname': funcname, 'msg': msg, 'expression': expr}
_failed_expectations.append(failure_info) | [
"def",
"_log_failure",
"(",
"arg_num",
",",
"msg",
"=",
"None",
")",
":",
"# stack() returns a list of frame records",
"# 0 is the _log_failure() function",
"# 1 is the expect() function ",
"# 2 is the function that called expect(), that's what we want",
"#",
"# a frame record is a tuple like this:",
"# (frame, filename, line, funcname, contextlist, index)",
"# we're only interested in the first 4. ",
"frame",
",",
"filename",
",",
"file_lineno",
",",
"funcname",
"=",
"inspect",
".",
"stack",
"(",
")",
"[",
"2",
"]",
"[",
":",
"4",
"]",
"# Note that a frame object should be deleted once used to be safe and stop possible ",
"# memory leak from circular referencing ",
"try",
":",
"frame_source_lines",
",",
"frame_start_lineno",
"=",
"(",
"inspect",
".",
"getsourcelines",
"(",
"frame",
")",
")",
"finally",
":",
"del",
"frame",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"# Build abstract syntax tree from source of frame",
"source_ast",
"=",
"ast",
".",
"parse",
"(",
"''",
".",
"join",
"(",
"frame_source_lines",
")",
")",
"# Locate the executed expect function ",
"func_body",
"=",
"source_ast",
".",
"body",
"[",
"0",
"]",
".",
"body",
"map_lineno_to_node",
"=",
"{",
"}",
"for",
"idx",
",",
"node",
"in",
"enumerate",
"(",
"func_body",
")",
":",
"map_lineno_to_node",
"[",
"node",
".",
"lineno",
"]",
"=",
"node",
"last_lineno",
"=",
"file_lineno",
"-",
"frame_start_lineno",
"+",
"1",
"element_idx",
"=",
"[",
"x",
"for",
"x",
"in",
"map_lineno_to_node",
".",
"keys",
"(",
")",
"if",
"x",
"<=",
"last_lineno",
"]",
"element_idx",
"=",
"max",
"(",
"element_idx",
")",
"expect_function_ast",
"=",
"map_lineno_to_node",
"[",
"element_idx",
"]",
"# Return the source code of the numbered argument",
"arg",
"=",
"expect_function_ast",
".",
"value",
".",
"args",
"[",
"arg_num",
"]",
"line",
"=",
"arg",
".",
"lineno",
"if",
"isinstance",
"(",
"arg",
",",
"(",
"ast",
".",
"Tuple",
",",
"ast",
".",
"List",
")",
")",
":",
"expr",
"=",
"astor",
".",
"to_source",
"(",
"arg",
".",
"elts",
"[",
"0",
"]",
")",
"else",
":",
"expr",
"=",
"astor",
".",
"to_source",
"(",
"arg",
")",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"failure_info",
"=",
"{",
"'file'",
":",
"filename",
",",
"'line'",
":",
"line",
",",
"'funcname'",
":",
"funcname",
",",
"'msg'",
":",
"msg",
",",
"'expression'",
":",
"expr",
"}",
"_failed_expectations",
".",
"append",
"(",
"failure_info",
")"
] | Retrace stack and log the failed expression information | [
"Retrace",
"stack",
"and",
"log",
"the",
"failed",
"expresion",
"information"
] | 2946b09f524c042c12d796f111f287866e7a3c67 | https://github.com/MrKriss/vigilance/blob/2946b09f524c042c12d796f111f287866e7a3c67/vigilance/vigilance.py#L158-L208 |
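A much-reduced sketch of the introspection used by _log_failure: take one frame record off the stack, unpack its first four fields, and delete the frame reference afterwards to avoid the reference cycle the comments above warn about (the printed location depends on where the script runs):

import inspect
import os

def where_called():
    # stack()[0] is this function; [1] is whoever called it.
    frame, filename, lineno, funcname = inspect.stack()[1][:4]
    try:
        return "%s:%d in %s()" % (os.path.basename(filename), lineno, funcname)
    finally:
        del frame  # break the circular reference, as the code above does

def caller():
    return where_called()

print(caller())  # e.g. example.py:12 in caller()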
251,662 | testing-cabal/systemfixtures | systemfixtures/filesystem.py | FakeFilesystem.add | def add(self, path):
"""Add a path to the overlay filesytem.
Any filesystem operation involving the this path or any sub-paths
of it will be transparently redirected to temporary root dir.
@path: An absolute path string.
"""
if not path.startswith(os.sep):
raise ValueError("Non-absolute path '{}'".format(path))
path = path.rstrip(os.sep)
while True:
self._paths[path] = None
path, _ = os.path.split(path)
if path == os.sep:
break | python | def add(self, path):
"""Add a path to the overlay filesytem.
Any filesystem operation involving the this path or any sub-paths
of it will be transparently redirected to temporary root dir.
@path: An absolute path string.
"""
if not path.startswith(os.sep):
raise ValueError("Non-absolute path '{}'".format(path))
path = path.rstrip(os.sep)
while True:
self._paths[path] = None
path, _ = os.path.split(path)
if path == os.sep:
break | [
"def",
"add",
"(",
"self",
",",
"path",
")",
":",
"if",
"not",
"path",
".",
"startswith",
"(",
"os",
".",
"sep",
")",
":",
"raise",
"ValueError",
"(",
"\"Non-absolute path '{}'\"",
".",
"format",
"(",
"path",
")",
")",
"path",
"=",
"path",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
"while",
"True",
":",
"self",
".",
"_paths",
"[",
"path",
"]",
"=",
"None",
"path",
",",
"_",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"if",
"path",
"==",
"os",
".",
"sep",
":",
"break"
] | Add a path to the overlay filesystem.
Any filesystem operation involving this path or any sub-paths
of it will be transparently redirected to the temporary root dir.
@path: An absolute path string. | [
"Add",
"a",
"path",
"to",
"the",
"overlay",
"filesytem",
"."
] | adf1b822bf83dc2a2f6bf7b85b5d8055e5e6ccd4 | https://github.com/testing-cabal/systemfixtures/blob/adf1b822bf83dc2a2f6bf7b85b5d8055e5e6ccd4/systemfixtures/filesystem.py#L65-L80 |
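The while loop in add() records the path and every ancestor up to (but not including) the root, so later lookups can match on any prefix. A standalone version of that walk, assuming POSIX-style paths:

import os

def ancestors(path):
    path = path.rstrip(os.sep)
    seen = []
    while True:
        seen.append(path)
        path, _ = os.path.split(path)  # climb one directory level
        if path == os.sep:
            break
    return seen

print(ancestors("/etc/ssh/sshd_config"))
# ['/etc/ssh/sshd_config', '/etc/ssh', '/etc']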
251,663 | testing-cabal/systemfixtures | systemfixtures/filesystem.py | FakeFilesystem._fchown | def _fchown(self, real, fileno, uid, gid):
"""Run fake fchown code if fileno points to a sub-path of our tree.
The ownership set with this fake fchown can be inspected by looking
at the self.uid/self.gid dictionaries.
"""
path = self._fake_path(self._path_from_fd(fileno))
self._chown_common(path, uid, gid) | python | def _fchown(self, real, fileno, uid, gid):
"""Run fake fchown code if fileno points to a sub-path of our tree.
The ownership set with this fake fchown can be inspected by looking
at the self.uid/self.gid dictionaries.
"""
path = self._fake_path(self._path_from_fd(fileno))
self._chown_common(path, uid, gid) | [
"def",
"_fchown",
"(",
"self",
",",
"real",
",",
"fileno",
",",
"uid",
",",
"gid",
")",
":",
"path",
"=",
"self",
".",
"_fake_path",
"(",
"self",
".",
"_path_from_fd",
"(",
"fileno",
")",
")",
"self",
".",
"_chown_common",
"(",
"path",
",",
"uid",
",",
"gid",
")"
] | Run fake fchown code if fileno points to a sub-path of our tree.
The ownership set with this fake fchown can be inspected by looking
at the self.uid/self.gid dictionaries. | [
"Run",
"fake",
"fchown",
"code",
"if",
"fileno",
"points",
"to",
"a",
"sub",
"-",
"path",
"of",
"our",
"tree",
"."
] | adf1b822bf83dc2a2f6bf7b85b5d8055e5e6ccd4 | https://github.com/testing-cabal/systemfixtures/blob/adf1b822bf83dc2a2f6bf7b85b5d8055e5e6ccd4/systemfixtures/filesystem.py#L82-L89 |
251,664 | anovelmous-dev-squad/anovelmous-grammar | grammar/__init__.py | GrammarFilter.get_grammatically_correct_vocabulary_subset | def get_grammatically_correct_vocabulary_subset(self, text,
sent_filter='combined'):
"""
Returns a subset of a given vocabulary based on whether its
terms are "grammatically correct".
"""
tokens = word_tokenize(text)
sent_tokens = get_partial_sentence(tokens)
if not sent_tokens:
return self.vocabulary
if sent_filter == 'combined':
if len(sent_tokens) < 2:
return self.get_bigram_filtered_vocab(sent_tokens)
combined_filters = self.get_pos_filtered_vocab(sent_tokens) + \
self.get_trigram_filtered_vocab(sent_tokens) + \
self.get_bigram_filtered_vocab(sent_tokens)
return combined_filters
if sent_filter == 'pos' and len(sent_tokens) > 1:
return self.get_pos_filtered_vocab(sent_tokens)
elif sent_filter == 'bigram' or len(sent_tokens) < 2:
return self.get_bigram_filtered_vocab(sent_tokens)
elif sent_filter == 'trigram':
return self.get_trigram_filtered_vocab(sent_tokens) | python | def get_grammatically_correct_vocabulary_subset(self, text,
sent_filter='combined'):
"""
Returns a subset of a given vocabulary based on whether its
terms are "grammatically correct".
"""
tokens = word_tokenize(text)
sent_tokens = get_partial_sentence(tokens)
if not sent_tokens:
return self.vocabulary
if sent_filter == 'combined':
if len(sent_tokens) < 2:
return self.get_bigram_filtered_vocab(sent_tokens)
combined_filters = self.get_pos_filtered_vocab(sent_tokens) + \
self.get_trigram_filtered_vocab(sent_tokens) + \
self.get_bigram_filtered_vocab(sent_tokens)
return combined_filters
if sent_filter == 'pos' and len(sent_tokens) > 1:
return self.get_pos_filtered_vocab(sent_tokens)
elif sent_filter == 'bigram' or len(sent_tokens) < 2:
return self.get_bigram_filtered_vocab(sent_tokens)
elif sent_filter == 'trigram':
return self.get_trigram_filtered_vocab(sent_tokens) | [
"def",
"get_grammatically_correct_vocabulary_subset",
"(",
"self",
",",
"text",
",",
"sent_filter",
"=",
"'combined'",
")",
":",
"tokens",
"=",
"word_tokenize",
"(",
"text",
")",
"sent_tokens",
"=",
"get_partial_sentence",
"(",
"tokens",
")",
"if",
"not",
"sent_tokens",
":",
"return",
"self",
".",
"vocabulary",
"if",
"sent_filter",
"==",
"'combined'",
":",
"if",
"len",
"(",
"sent_tokens",
")",
"<",
"2",
":",
"return",
"self",
".",
"get_bigram_filtered_vocab",
"(",
"sent_tokens",
")",
"combined_filters",
"=",
"self",
".",
"get_pos_filtered_vocab",
"(",
"sent_tokens",
")",
"+",
"self",
".",
"get_trigram_filtered_vocab",
"(",
"sent_tokens",
")",
"+",
"self",
".",
"get_bigram_filtered_vocab",
"(",
"sent_tokens",
")",
"return",
"combined_filters",
"if",
"sent_filter",
"==",
"'pos'",
"and",
"len",
"(",
"sent_tokens",
")",
">",
"1",
":",
"return",
"self",
".",
"get_pos_filtered_vocab",
"(",
"sent_tokens",
")",
"elif",
"sent_filter",
"==",
"'bigram'",
"or",
"len",
"(",
"sent_tokens",
")",
"<",
"2",
":",
"return",
"self",
".",
"get_bigram_filtered_vocab",
"(",
"sent_tokens",
")",
"elif",
"sent_filter",
"==",
"'trigram'",
":",
"return",
"self",
".",
"get_trigram_filtered_vocab",
"(",
"sent_tokens",
")"
] | Returns a subset of a given vocabulary based on whether its
terms are "grammatically correct". | [
"Returns",
"a",
"subset",
"of",
"a",
"given",
"vocabulary",
"based",
"on",
"whether",
"its",
"terms",
"are",
"grammatically",
"correct",
"."
] | fbffbfa2c6546d8c74e1f582b941ba190c31c097 | https://github.com/anovelmous-dev-squad/anovelmous-grammar/blob/fbffbfa2c6546d8c74e1f582b941ba190c31c097/grammar/__init__.py#L226-L252 |
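The get_*_filtered_vocab helpers are not part of this record, so the following is only a guess at the bigram filter's core idea: keep the vocabulary words that have been observed to follow the sentence's last token in some reference corpus (the corpus here is a toy example):

from collections import defaultdict

corpus = "the cat sat on the mat".split()
bigrams = defaultdict(set)
for a, b in zip(corpus, corpus[1:]):
    bigrams[a].add(b)  # word -> set of observed followers

def bigram_filtered_vocab(sent_tokens, vocabulary):
    last = sent_tokens[-1]
    return [w for w in vocabulary if w in bigrams[last]]

print(bigram_filtered_vocab(["on", "the"], ["cat", "mat", "dog"]))  # ['cat', 'mat']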
251,665 | dlancer/django-pages-cms | pages/managers/pagemanager.py | PageManager.get_queryset | def get_queryset(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
"""
qs = super(TreeManager, self).get_queryset(*args, **kwargs)
# Restrict operations to pages on the current site if needed
if settings.PAGES_HIDE_SITES and settings.PAGES_USE_SITE_ID:
return qs.order_by(self.tree_id_attr, self.left_attr).filter(sites=settings.SITE_ID)
else:
return qs.order_by(self.tree_id_attr, self.left_attr) | python | def get_queryset(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
"""
qs = super(TreeManager, self).get_queryset(*args, **kwargs)
# Restrict operations to pages on the current site if needed
if settings.PAGES_HIDE_SITES and settings.PAGES_USE_SITE_ID:
return qs.order_by(self.tree_id_attr, self.left_attr).filter(sites=settings.SITE_ID)
else:
return qs.order_by(self.tree_id_attr, self.left_attr) | [
"def",
"get_queryset",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"qs",
"=",
"super",
"(",
"TreeManager",
",",
"self",
")",
".",
"get_queryset",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Restrict operations to pages on the current site if needed",
"if",
"settings",
".",
"PAGES_HIDE_SITES",
"and",
"settings",
".",
"PAGES_USE_SITE_ID",
":",
"return",
"qs",
".",
"order_by",
"(",
"self",
".",
"tree_id_attr",
",",
"self",
".",
"left_attr",
")",
".",
"filter",
"(",
"sites",
"=",
"settings",
".",
"SITE_ID",
")",
"else",
":",
"return",
"qs",
".",
"order_by",
"(",
"self",
".",
"tree_id_attr",
",",
"self",
".",
"left_attr",
")"
] | Ensures that this manager always returns nodes in tree order. | [
"Ensures",
"that",
"this",
"manager",
"always",
"returns",
"nodes",
"in",
"tree",
"order",
"."
] | 441fad674d5ad4f6e05c953508950525dc0fa789 | https://github.com/dlancer/django-pages-cms/blob/441fad674d5ad4f6e05c953508950525dc0fa789/pages/managers/pagemanager.py#L10-L20 |
251,666 | pjuren/pyokit | src/pyokit/scripts/conservationProfile.py | center_start | def center_start(r, window_size):
"""
Center a region on its start and expand it to window_size bases.
:return: the new region.
"""
res = copy.copy(r)
res.end = res.start + window_size / 2
res.start = res.end - window_size
return res | python | def center_start(r, window_size):
"""
Center a region on its start and expand it to window_size bases.
:return: the new region.
"""
res = copy.copy(r)
res.end = res.start + window_size / 2
res.start = res.end - window_size
return res | [
"def",
"center_start",
"(",
"r",
",",
"window_size",
")",
":",
"res",
"=",
"copy",
".",
"copy",
"(",
"r",
")",
"res",
".",
"end",
"=",
"res",
".",
"start",
"+",
"window_size",
"/",
"2",
"res",
".",
"start",
"=",
"res",
".",
"end",
"-",
"window_size",
"return",
"res"
] | Center a region on its start and expand it to window_size bases.
:return: the new region. | [
"Center",
"a",
"region",
"on",
"its",
"start",
"and",
"expand",
"it",
"to",
"window_size",
"bases",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L84-L93 |
251,667 | pjuren/pyokit | src/pyokit/scripts/conservationProfile.py | center_end | def center_end(r, window_size):
"""
Center a region on its end and expand it to window_size bases.
:return: the new region.
"""
res = copy.copy(r)
res.start = res.end - window_size / 2
res.end = res.start + window_size
return res | python | def center_end(r, window_size):
"""
Center a region on its end and expand it to window_size bases.
:return: the new region.
"""
res = copy.copy(r)
res.start = res.end - window_size / 2
res.end = res.start + window_size
return res | [
"def",
"center_end",
"(",
"r",
",",
"window_size",
")",
":",
"res",
"=",
"copy",
".",
"copy",
"(",
"r",
")",
"res",
".",
"start",
"=",
"res",
".",
"end",
"-",
"window_size",
"/",
"2",
"res",
".",
"end",
"=",
"res",
".",
"start",
"+",
"window_size",
"return",
"res"
] | Center a region on its end and expand it to window_size bases.
:return: the new region. | [
"Center",
"a",
"region",
"on",
"its",
"end",
"and",
"expand",
"it",
"to",
"window_size",
"bases",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L96-L105 |
251,668 | pjuren/pyokit | src/pyokit/scripts/conservationProfile.py | center_middle | def center_middle(r, window_size):
"""
Center a region on its middle and expand it to window_size bases.
:return: the new region.
"""
res = copy.copy(r)
mid = res.start + (len(res) / 2)
res.start = mid - (window_size / 2)
res.end = res.start + window_size
return res | python | def center_middle(r, window_size):
"""
Center a region on its middle and expand it to window_size bases.
:return: the new region.
"""
res = copy.copy(r)
mid = res.start + (len(res) / 2)
res.start = mid - (window_size / 2)
res.end = res.start + window_size
return res | [
"def",
"center_middle",
"(",
"r",
",",
"window_size",
")",
":",
"res",
"=",
"copy",
".",
"copy",
"(",
"r",
")",
"mid",
"=",
"res",
".",
"start",
"+",
"(",
"len",
"(",
"res",
")",
"/",
"2",
")",
"res",
".",
"start",
"=",
"mid",
"-",
"(",
"window_size",
"/",
"2",
")",
"res",
".",
"end",
"=",
"res",
".",
"start",
"+",
"window_size",
"return",
"res"
] | Center a region on its middle and expand it to window_size bases.
:return: the new region. | [
"Center",
"a",
"region",
"on",
"its",
"middle",
"and",
"expand",
"it",
"to",
"window_size",
"bases",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L108-L118 |
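Worked arithmetic for all three centering helpers on one example, assuming len(region) == region.end - region.start (integer coordinates throughout):

# region [100, 250), window_size 40:
#   center_start  -> [ 80, 120)  centered on the start, 100
#   center_end    -> [230, 270)  centered on the end, 250
#   center_middle -> [155, 195)  centered on the midpoint, 175
mid = 100 + (250 - 100) // 2
assert (mid - 40 // 2, mid - 40 // 2 + 40) == (155, 195)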
251,669 | pjuren/pyokit | src/pyokit/scripts/conservationProfile.py | transform_locus | def transform_locus(region, window_center, window_size):
"""
transform an input genomic region into one suitable for the profile.
:param region: input region to transform.
:param window_center: which part of the input region to center on.
:param window_size: how large the resultant region should be.
:return: None; the input region is transformed in place, centered on its
<window_center> (e.g. 3' end) and resized to be window_size long; only
CENTRE is currently supported, other values raise ValueError.
"""
if window_center == CENTRE:
region.transform_center(window_size)
else:
raise ValueError("Don't know how to do this transformation: " +
window_center) | python | def transform_locus(region, window_center, window_size):
"""
transform an input genomic region into one suitable for the profile.
:param region: input region to transform.
:param window_center: which part of the input region to center on.
:param window_size: how large the resultant region should be.
:return: None; the input region is transformed in place, centered on its
<window_center> (e.g. 3' end) and resized to be window_size long; only
CENTRE is currently supported, other values raise ValueError.
"""
if window_center == CENTRE:
region.transform_center(window_size)
else:
raise ValueError("Don't know how to do this transformation: " +
window_center) | [
"def",
"transform_locus",
"(",
"region",
",",
"window_center",
",",
"window_size",
")",
":",
"if",
"window_center",
"==",
"CENTRE",
":",
"region",
".",
"transform_center",
"(",
"window_size",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Don't know how to do this transformation: \"",
"+",
"window_center",
")"
] | transform an input genomic region into one suitable for the profile.
:param region: input region to transform.
:param window_center: which part of the input region to center on.
:param window_size: how large the resultant region should be.
:return: None; the input region is transformed in place, centered on its
<window_center> (e.g. 3' end) and resized to be window_size long; only
CENTRE is currently supported, other values raise ValueError. | [
"transform",
"an",
"input",
"genomic",
"region",
"into",
"one",
"suitable",
"for",
"the",
"profile",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L121-L136 |
251,670 | pjuren/pyokit | src/pyokit/scripts/conservationProfile.py | pid | def pid(col, ignore_gaps=False):
"""
Compute the percent identity of an alignment column.
Define PID as the frequency of the most frequent nucleotide in the column.
:param col: an alignment column; a dictionary where keys are seq.
names and values are the nucleotide in the column for
that sequence.
:param ignore_gaps: if True, do not count gaps towards the total number of
sequences in the column (i.e. the denominator of the
fraction).
:raise ValueError: if the column contains only gaps.
"""
hist = {}
total = 0
found_non_gap = False
for v in col.values():
if v == sequence.GAP_CHAR:
if ignore_gaps:
continue
else:
total += 1
else:
found_non_gap = True
if v not in hist:
hist[v] = 0
hist[v] += 1
total += 1
if not found_non_gap:
raise ValueError("Cannot determine PID of column with only gaps")
return max(hist.values()) / float(total) | python | def pid(col, ignore_gaps=False):
"""
Compute the percent identity of an alignment column.
Define PID as the frequency of the most frequent nucleotide in the column.
:param col: an alignment column; a dictionary where keys are seq.
names and values are the nucleotide in the column for
that sequence.
:param ignore_gaps: if True, do not count gaps towards the total number of
sequences in the column (i.e. the denominator of the
fraction).
:raise ValueError: if the column contains only gaps.
"""
hist = {}
total = 0
found_non_gap = False
for v in col.values():
if v == sequence.GAP_CHAR:
if ignore_gaps:
continue
else:
total += 1
else:
found_non_gap = True
if v not in hist:
hist[v] = 0
hist[v] += 1
total += 1
if not found_non_gap:
raise ValueError("Cannot determine PID of column with only gaps")
return max(hist.values()) / float(total) | [
"def",
"pid",
"(",
"col",
",",
"ignore_gaps",
"=",
"False",
")",
":",
"hist",
"=",
"{",
"}",
"total",
"=",
"0",
"found_non_gap",
"=",
"False",
"for",
"v",
"in",
"col",
".",
"values",
"(",
")",
":",
"if",
"v",
"==",
"sequence",
".",
"GAP_CHAR",
":",
"if",
"ignore_gaps",
":",
"continue",
"else",
":",
"total",
"+=",
"1",
"else",
":",
"found_non_gap",
"=",
"True",
"if",
"v",
"not",
"in",
"hist",
":",
"hist",
"[",
"v",
"]",
"=",
"0",
"hist",
"[",
"v",
"]",
"+=",
"1",
"total",
"+=",
"1",
"if",
"not",
"found_non_gap",
":",
"raise",
"ValueError",
"(",
"\"Cannot determine PID of column with only gaps\"",
")",
"return",
"max",
"(",
"hist",
".",
"values",
"(",
")",
")",
"/",
"float",
"(",
"total",
")"
] | Compute the percent identity of an alignment column.
Define PID as the frequency of the most frequent nucleotide in the column.
:param col: an alignment column; a dictionary where keys are seq.
names and values are the nucleotide in the column for
that sequence.
:param ignore_gaps: if True, do not count gaps towards the total number of
sequences in the column (i.e. the denominator of the
fraction).
:raise ValueError: if the column contains only gaps. | [
"Compute",
"the",
"percent",
"identity",
"of",
"a",
"an",
"alignment",
"column",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L143-L174 |
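A condensed Python 3 restatement of the same PID rule (not pyokit's code) to make the gap handling concrete; GAP_CHAR = '-' is an assumption about sequence.GAP_CHAR:

from collections import Counter

GAP_CHAR = '-'  # assumed value of sequence.GAP_CHAR

def pid(col, ignore_gaps=False):
    bases = [v for v in col.values() if v != GAP_CHAR]
    if not bases:
        raise ValueError("Cannot determine PID of column with only gaps")
    total = len(bases) if ignore_gaps else len(col)
    return Counter(bases).most_common(1)[0][1] / float(total)

col = {'hg19': 'A', 'mm10': 'A', 'canFam3': 'G', 'rheMac3': '-'}
print(pid(col))                    # 0.5      -- the gap stays in the denominator
print(pid(col, ignore_gaps=True))  # 0.666... -- the gap is excluded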
251,671 | pjuren/pyokit | src/pyokit/scripts/conservationProfile.py | conservtion_profile_pid | def conservtion_profile_pid(region, genome_alignment,
mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS,
species=None):
"""
build a conservation profile for the given region using the genome alignment.
The scores in the profile will be the percent of bases identical to the
reference sequence.
:param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:return: a list of the same length as the region where each entry is the
PID at the corresponding locus.
"""
res = []
s = region.start if region.isPositiveStrand() else region.end - 1
e = region.end if region.isPositiveStrand() else region.start - 1
step = 1 if region.isPositiveStrand() else -1
for i in range(s, e, step):
try:
col = genome_alignment.get_column(region.chrom, i, mi_seqs, species)
res.append(pid(col))
except NoSuchAlignmentColumnError:
res.append(None)
except NoUniqueColumnError:
res.append(None)
return res | python | def conservtion_profile_pid(region, genome_alignment,
mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS,
species=None):
"""
build a conservation profile for the given region using the genome alignment.
The scores in the profile will be the percent of bases identical to the
reference sequence.
:param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:return: a list of the same length as the region where each entry is the
PID at the corresponding locus.
"""
res = []
s = region.start if region.isPositiveStrand() else region.end - 1
e = region.end if region.isPositiveStrand() else region.start - 1
step = 1 if region.isPositiveStrand() else -1
for i in range(s, e, step):
try:
col = genome_alignment.get_column(region.chrom, i, mi_seqs, species)
res.append(pid(col))
except NoSuchAlignmentColumnError:
res.append(None)
except NoUniqueColumnError:
res.append(None)
return res | [
"def",
"conservtion_profile_pid",
"(",
"region",
",",
"genome_alignment",
",",
"mi_seqs",
"=",
"MissingSequenceHandler",
".",
"TREAT_AS_ALL_GAPS",
",",
"species",
"=",
"None",
")",
":",
"res",
"=",
"[",
"]",
"s",
"=",
"region",
".",
"start",
"if",
"region",
".",
"isPositiveStrand",
"(",
")",
"else",
"region",
".",
"end",
"-",
"1",
"e",
"=",
"region",
".",
"end",
"if",
"region",
".",
"isPositiveStrand",
"(",
")",
"else",
"region",
".",
"start",
"-",
"1",
"step",
"=",
"1",
"if",
"region",
".",
"isPositiveStrand",
"(",
")",
"else",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"s",
",",
"e",
",",
"step",
")",
":",
"try",
":",
"col",
"=",
"genome_alignment",
".",
"get_column",
"(",
"region",
".",
"chrom",
",",
"i",
",",
"mi_seqs",
",",
"species",
")",
"res",
".",
"append",
"(",
"pid",
"(",
"col",
")",
")",
"except",
"NoSuchAlignmentColumnError",
":",
"res",
".",
"append",
"(",
"None",
")",
"except",
"NoUniqueColumnError",
":",
"res",
".",
"append",
"(",
"None",
")",
"return",
"res"
] | build a conservation profile for the given region using the genome alignment.
The scores in the profile will be the percent of bases identical to the
reference sequence.
:param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:return: a list of the same length as the region where each entry is the
PID at the corresponding locus. | [
"build",
"a",
"conservation",
"profile",
"for",
"the",
"given",
"region",
"using",
"the",
"genome",
"alignment",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L177-L204 |
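The strand logic above is worth spelling out; None entries mark columns the alignment could not answer for:

# for a region spanning [s, e):
#   + strand: range(s, e, 1)          -- columns walked left to right
#   - strand: range(e - 1, s - 1, -1) -- columns walked right to left
# so the profile always reads 5' -> 3' relative to the feature, and columns
# raising NoSuchAlignmentColumnError / NoUniqueColumnError come back as None:
profile = [0.92, None, 0.85]
usable = [p for p in profile if p is not None]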
251,672 | pjuren/pyokit | src/pyokit/scripts/conservationProfile.py | merge_profile | def merge_profile(mean_profile, new_profile):
"""Add a new list of values to a list of rolling means."""
for i in range(0, len(mean_profile)):
if new_profile[i] is None:
continue
mean_profile[i].add(new_profile[i]) | python | def merge_profile(mean_profile, new_profile):
"""Add a new list of values to a list of rolling means."""
for i in range(0, len(mean_profile)):
if new_profile[i] is None:
continue
mean_profile[i].add(new_profile[i]) | [
"def",
"merge_profile",
"(",
"mean_profile",
",",
"new_profile",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"mean_profile",
")",
")",
":",
"if",
"new_profile",
"[",
"i",
"]",
"is",
"None",
":",
"continue",
"mean_profile",
"[",
"i",
"]",
".",
"add",
"(",
"new_profile",
"[",
"i",
"]",
")"
] | Add a new list of values to a list of rolling means. | [
"Add",
"a",
"new",
"list",
"of",
"values",
"to",
"a",
"list",
"of",
"rolling",
"means",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L207-L212 |
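merge_profile only assumes objects exposing add(); a minimal RollingMean sketch (the real pyokit class may differ) shows the skip-None behaviour end to end:

class RollingMean(object):
    def __init__(self):
        self._n, self._sum = 0, 0.0
    def add(self, v):
        self._n += 1
        self._sum += v
    @property
    def mean(self):
        return self._sum / self._n if self._n else None

mean_profile = [RollingMean() for _ in range(3)]
for new_profile in ([0.9, None, 0.4], [0.7, 0.8, 0.6]):
    for i in range(len(mean_profile)):   # same loop as merge_profile above
        if new_profile[i] is not None:
            mean_profile[i].add(new_profile[i])
print([m.mean for m in mean_profile])    # ~[0.8, 0.8, 0.5]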
251,673 | pjuren/pyokit | src/pyokit/scripts/conservationProfile.py | processBED | def processBED(fh, genome_alig, window_size, window_centre,
mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None,
verbose=False):
"""
Process BED file, produce profile of conservation using whole genome alignment.
:param fh:
:param genome_alig: the whole-genome alignment to use to compute
conservation scores
:param window_size: length of the profile.
:param window_centre: which part of each interval to place at the center
of the profile. Acceptable values are in the module
constant WINDOW_CENTRE_OPTIONS.
:param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:param verbose: if True, output progress messages to stderr.
:return:
"""
mean_profile = []
while len(mean_profile) < window_size:
mean_profile.append(RollingMean())
for e in BEDIterator(fh, verbose=verbose, scoreType=float,
sortedby=ITERATOR_SORTED_START):
# figure out which interval to look at...
transform_locus(e, window_centre, window_size)
new_profile = conservtion_profile_pid(e, genome_alig, mi_seqs, species)
merge_profile(mean_profile, new_profile)
return [m.mean for m in mean_profile] | python | def processBED(fh, genome_alig, window_size, window_centre,
mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None,
verbose=False):
"""
Process BED file, produce profile of conservation using whole genome alignment.
:param fh:
:param genome_alig: the whole-genome alignment to use to compute
conservation scores
:param window_size: length of the profile.
:param window_centre: which part of each interval to place at the center
of the profile. Acceptable values are in the module
constant WINDOW_CENTRE_OPTIONS.
:param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:param verbose: if True, output progress messages to stderr.
:return:
"""
mean_profile = []
while len(mean_profile) < window_size:
mean_profile.append(RollingMean())
for e in BEDIterator(fh, verbose=verbose, scoreType=float,
sortedby=ITERATOR_SORTED_START):
# figure out which interval to look at...
transform_locus(e, window_centre, window_size)
new_profile = conservtion_profile_pid(e, genome_alig, mi_seqs, species)
merge_profile(mean_profile, new_profile)
return [m.mean for m in mean_profile] | [
"def",
"processBED",
"(",
"fh",
",",
"genome_alig",
",",
"window_size",
",",
"window_centre",
",",
"mi_seqs",
"=",
"MissingSequenceHandler",
".",
"TREAT_AS_ALL_GAPS",
",",
"species",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"mean_profile",
"=",
"[",
"]",
"while",
"len",
"(",
"mean_profile",
")",
"<",
"window_size",
":",
"mean_profile",
".",
"append",
"(",
"RollingMean",
"(",
")",
")",
"for",
"e",
"in",
"BEDIterator",
"(",
"fh",
",",
"verbose",
"=",
"verbose",
",",
"scoreType",
"=",
"float",
",",
"sortedby",
"=",
"ITERATOR_SORTED_START",
")",
":",
"# figure out which interval to look at...",
"transform_locus",
"(",
"e",
",",
"window_centre",
",",
"window_size",
")",
"new_profile",
"=",
"conservtion_profile_pid",
"(",
"e",
",",
"genome_alig",
",",
"mi_seqs",
",",
"species",
")",
"merge_profile",
"(",
"mean_profile",
",",
"new_profile",
")",
"return",
"[",
"m",
".",
"mean",
"for",
"m",
"in",
"mean_profile",
"]"
] | Process BED file, produce profile of conservation using whole genome alignment.
:param fh:
:param genome_alig: the whole-genome alignment to use to compute
conservation scores
:param window_size: length of the profile.
:param window_centre: which part of each interval to place at the center
of the profile. Acceptable values are in the module
constant WINDOW_CENTRE_OPTIONS.
:param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:param verbose: if True, output progress messages to stderr.
:return: | [
"Process",
"BED",
"file",
"produce",
"profile",
"of",
"conservation",
"using",
"whole",
"genome",
"alig",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L219-L248 |
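Hypothetical end-to-end wiring; the file name and the genome_alig object are placeholders, not part of the source:

# with open('peaks.bed') as fh:  # BED file sorted by start coordinate
#     profile = processBED(fh, genome_alig,
#                          window_size=500, window_centre=CENTRE)
# profile is then a list of 500 per-position mean PIDs, one RollingMean each.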
251,674 | etcher-be/elib_run | elib_run/_run/_run.py | check_error | def check_error(context: RunContext) -> int:
"""
Runs after a sub-process exits
Checks the return code; if it is different than 0, then a few things happen:
- if the process was muted ("mute" is True), the process output is printed anyway
- if "failure_ok" is True (default), then a SystemExist exception is raised
:param context: run context
:type context: _RunContext
:return: process return code
:rtype: int
"""
if context.return_code != 0:
if context.mute:
context.result_buffer += f': command failed: {context.return_code}'
else:
context.result_buffer += f'{context.cmd_as_string}: command failed: {context.return_code}'
_LOGGER_PROCESS.error(context.result_buffer)
_LOGGER_PROCESS.error(repr(context))
if not context.failure_ok:
_exit(context)
else:
if context.mute:
context.result_buffer += f': success: {context.return_code}'
else:
context.result_buffer += f'{context.cmd_as_string}: success: {context.return_code}'
_LOGGER_PROCESS.info(context.result_buffer)
return context.return_code | python | def check_error(context: RunContext) -> int:
"""
Runs after a sub-process exits
Checks the return code; if it is different than 0, then a few things happen:
- if the process was muted ("mute" is True), the process output is printed anyway
- if "failure_ok" is True (default), then a SystemExist exception is raised
:param context: run context
:type context: _RunContext
:return: process return code
:rtype: int
"""
if context.return_code != 0:
if context.mute:
context.result_buffer += f': command failed: {context.return_code}'
else:
context.result_buffer += f'{context.cmd_as_string}: command failed: {context.return_code}'
_LOGGER_PROCESS.error(context.result_buffer)
_LOGGER_PROCESS.error(repr(context))
if not context.failure_ok:
_exit(context)
else:
if context.mute:
context.result_buffer += f': success: {context.return_code}'
else:
context.result_buffer += f'{context.cmd_as_string}: success: {context.return_code}'
_LOGGER_PROCESS.info(context.result_buffer)
return context.return_code | [
"def",
"check_error",
"(",
"context",
":",
"RunContext",
")",
"->",
"int",
":",
"if",
"context",
".",
"return_code",
"!=",
"0",
":",
"if",
"context",
".",
"mute",
":",
"context",
".",
"result_buffer",
"+=",
"f': command failed: {context.return_code}'",
"else",
":",
"context",
".",
"result_buffer",
"+=",
"f'{context.cmd_as_string}: command failed: {context.return_code}'",
"_LOGGER_PROCESS",
".",
"error",
"(",
"context",
".",
"result_buffer",
")",
"_LOGGER_PROCESS",
".",
"error",
"(",
"repr",
"(",
"context",
")",
")",
"if",
"not",
"context",
".",
"failure_ok",
":",
"_exit",
"(",
"context",
")",
"else",
":",
"if",
"context",
".",
"mute",
":",
"context",
".",
"result_buffer",
"+=",
"f': success: {context.return_code}'",
"else",
":",
"context",
".",
"result_buffer",
"+=",
"f'{context.cmd_as_string}: success: {context.return_code}'",
"_LOGGER_PROCESS",
".",
"info",
"(",
"context",
".",
"result_buffer",
")",
"return",
"context",
".",
"return_code"
] | Runs after a sub-process exits
Checks the return code; if it is different than 0, then a few things happen:
- if the process was muted ("mute" is True), the process output is printed anyway
- if "failure_ok" is True (default), then a SystemExist exception is raised
:param context: run context
:type context: _RunContext
:return: process return code
:rtype: int | [
"Runs",
"after",
"a",
"sub",
"-",
"process",
"exits"
] | c9d8ba9f067ab90c5baa27375a92b23f1b97cdde | https://github.com/etcher-be/elib_run/blob/c9d8ba9f067ab90c5baa27375a92b23f1b97cdde/elib_run/_run/_run.py#L28-L61 |
251,675 | etcher-be/elib_run | elib_run/_run/_run.py | run | def run(cmd: str,
*paths: str,
cwd: str = '.',
mute: bool = False,
filters: typing.Optional[typing.Union[typing.Iterable[str], str]] = None,
failure_ok: bool = False,
timeout: float = _DEFAULT_PROCESS_TIMEOUT,
) -> typing.Tuple[str, int]:
"""
Executes a command and returns the result
Args:
cmd: command to execute
paths: paths to search executable in
cwd: working directory (defaults to ".")
mute: if true, output will not be printed
filters: gives a list of partial strings to filter out from the output (stdout or stderr)
failure_ok: if False (default), a return code different than 0 will exit the application
timeout: sub-process timeout
Returns: command output
"""
filters = _sanitize_filters(filters)
exe_path, args_list = _parse_cmd(cmd, *paths)
context = RunContext( # type: ignore
exe_path=exe_path,
capture=sarge.Capture(),
failure_ok=failure_ok,
mute=mute,
args_list=args_list,
paths=paths,
cwd=cwd,
timeout=timeout,
filters=filters,
)
if mute:
context.result_buffer += f'{context.cmd_as_string}'
else:
_LOGGER_PROCESS.info('%s: running', context.cmd_as_string)
context.start_process()
monitor_running_process(context)
check_error(context)
return context.process_output_as_str, context.return_code | python | def run(cmd: str,
*paths: str,
cwd: str = '.',
mute: bool = False,
filters: typing.Optional[typing.Union[typing.Iterable[str], str]] = None,
failure_ok: bool = False,
timeout: float = _DEFAULT_PROCESS_TIMEOUT,
) -> typing.Tuple[str, int]:
"""
Executes a command and returns the result
Args:
cmd: command to execute
paths: paths to search executable in
cwd: working directory (defaults to ".")
mute: if true, output will not be printed
filters: gives a list of partial strings to filter out from the output (stdout or stderr)
failure_ok: if False (default), a return code different than 0 will exit the application
timeout: sub-process timeout
Returns: command output
"""
filters = _sanitize_filters(filters)
exe_path, args_list = _parse_cmd(cmd, *paths)
context = RunContext( # type: ignore
exe_path=exe_path,
capture=sarge.Capture(),
failure_ok=failure_ok,
mute=mute,
args_list=args_list,
paths=paths,
cwd=cwd,
timeout=timeout,
filters=filters,
)
if mute:
context.result_buffer += f'{context.cmd_as_string}'
else:
_LOGGER_PROCESS.info('%s: running', context.cmd_as_string)
context.start_process()
monitor_running_process(context)
check_error(context)
return context.process_output_as_str, context.return_code | [
"def",
"run",
"(",
"cmd",
":",
"str",
",",
"*",
"paths",
":",
"str",
",",
"cwd",
":",
"str",
"=",
"'.'",
",",
"mute",
":",
"bool",
"=",
"False",
",",
"filters",
":",
"typing",
".",
"Optional",
"[",
"typing",
".",
"Union",
"[",
"typing",
".",
"Iterable",
"[",
"str",
"]",
",",
"str",
"]",
"]",
"=",
"None",
",",
"failure_ok",
":",
"bool",
"=",
"False",
",",
"timeout",
":",
"float",
"=",
"_DEFAULT_PROCESS_TIMEOUT",
",",
")",
"->",
"typing",
".",
"Tuple",
"[",
"str",
",",
"int",
"]",
":",
"filters",
"=",
"_sanitize_filters",
"(",
"filters",
")",
"exe_path",
",",
"args_list",
"=",
"_parse_cmd",
"(",
"cmd",
",",
"*",
"paths",
")",
"context",
"=",
"RunContext",
"(",
"# type: ignore",
"exe_path",
"=",
"exe_path",
",",
"capture",
"=",
"sarge",
".",
"Capture",
"(",
")",
",",
"failure_ok",
"=",
"failure_ok",
",",
"mute",
"=",
"mute",
",",
"args_list",
"=",
"args_list",
",",
"paths",
"=",
"paths",
",",
"cwd",
"=",
"cwd",
",",
"timeout",
"=",
"timeout",
",",
"filters",
"=",
"filters",
",",
")",
"if",
"mute",
":",
"context",
".",
"result_buffer",
"+=",
"f'{context.cmd_as_string}'",
"else",
":",
"_LOGGER_PROCESS",
".",
"info",
"(",
"'%s: running'",
",",
"context",
".",
"cmd_as_string",
")",
"context",
".",
"start_process",
"(",
")",
"monitor_running_process",
"(",
"context",
")",
"check_error",
"(",
"context",
")",
"return",
"context",
".",
"process_output_as_str",
",",
"context",
".",
"return_code"
] | Executes a command and returns the result
Args:
cmd: command to execute
paths: paths to search executable in
cwd: working directory (defaults to ".")
mute: if true, output will not be printed
filters: gives a list of partial strings to filter out from the output (stdout or stderr)
failure_ok: if False (default), a return code different than 0 will exit the application
timeout: sub-process timeout
Returns: command output | [
"Executes",
"a",
"command",
"and",
"returns",
"the",
"result"
] | c9d8ba9f067ab90c5baa27375a92b23f1b97cdde | https://github.com/etcher-be/elib_run/blob/c9d8ba9f067ab90c5baa27375a92b23f1b97cdde/elib_run/_run/_run.py#L93-L141 |
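A usage sketch, assuming the package re-exports run at the top level:

from elib_run import run  # assumed import path

out, code = run('git --version', mute=True, failure_ok=True, timeout=30)
if code == 0:
    print(out.strip())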
251,676 | sys-git/certifiable | certifiable/cli_impl/complex/certify_dict.py | cli_certify_complex_dict | def cli_certify_complex_dict(
config, schema, key_certifier, value_certifier, allow_extra,
include_collections, value,
):
"""Console script for certify_dict."""
schema = load_json_pickle(schema, config)
key_certifier = create_certifier(load_json_pickle(key_certifier, config))
value_certifier = create_certifier(load_json_pickle(value_certifier, config))
execute_cli_command(
'dict',
config,
lambda x: load_json_pickle(x, config),
certify_dict,
value,
allow_extra=allow_extra,
include_collections=include_collections,
key_certifier=key_certifier,
required=config['required'],
schema=schema,
value_certifier=value_certifier,
) | python | def cli_certify_complex_dict(
config, schema, key_certifier, value_certifier, allow_extra,
include_collections, value,
):
"""Console script for certify_dict."""
schema = load_json_pickle(schema, config)
key_certifier = create_certifier(load_json_pickle(key_certifier, config))
value_certifier = create_certifier(load_json_pickle(value_certifier, config))
execute_cli_command(
'dict',
config,
lambda x: load_json_pickle(x, config),
certify_dict,
value,
allow_extra=allow_extra,
include_collections=include_collections,
key_certifier=key_certifier,
required=config['required'],
schema=schema,
value_certifier=value_certifier,
) | [
"def",
"cli_certify_complex_dict",
"(",
"config",
",",
"schema",
",",
"key_certifier",
",",
"value_certifier",
",",
"allow_extra",
",",
"include_collections",
",",
"value",
",",
")",
":",
"schema",
"=",
"load_json_pickle",
"(",
"schema",
",",
"config",
")",
"key_certifier",
"=",
"create_certifier",
"(",
"load_json_pickle",
"(",
"key_certifier",
",",
"config",
")",
")",
"value_certifier",
"=",
"create_certifier",
"(",
"load_json_pickle",
"(",
"value_certifier",
",",
"config",
")",
")",
"execute_cli_command",
"(",
"'dict'",
",",
"config",
",",
"lambda",
"x",
":",
"load_json_pickle",
"(",
"x",
",",
"config",
")",
",",
"certify_dict",
",",
"value",
",",
"allow_extra",
"=",
"allow_extra",
",",
"include_collections",
"=",
"include_collections",
",",
"key_certifier",
"=",
"key_certifier",
",",
"required",
"=",
"config",
"[",
"'required'",
"]",
",",
"schema",
"=",
"schema",
",",
"value_certifier",
"=",
"value_certifier",
",",
")"
] | Console script for certify_dict. | [
"Console",
"script",
"for",
"certify_dict",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/cli_impl/complex/certify_dict.py#L29-L50 |
251,677 | usc-isi-i2/dig-sparkutil | digSparkUtil/fileUtil.py | FileUtil._load_text_csv_file | def _load_text_csv_file(self, filename, separator=',', **kwargs):
"""Return a pair RDD where key is taken from first column, remaining columns are named after their column id as string"""
rdd_input = self.sc.textFile(filename)
def load_csv_record(line):
input_stream = StringIO.StringIO(line)
reader = csv.reader(input_stream, delimiter=',')
# key in first column, remaining columns 1..n become dict key values
payload = reader.next()
key = payload[0]
rest = payload[1:]
# generate dict of "1": first value, "2": second value, ...
d = {}
for (cell,i) in izip(rest, range(1,1+len(rest))):
d[str(i)] = cell
# just in case, add "0": key
d["0"] = key
return (key, d)
rdd_parsed = rdd_input.map(load_csv_record)
return rdd_parsed | python | def _load_text_csv_file(self, filename, separator=',', **kwargs):
"""Return a pair RDD where key is taken from first column, remaining columns are named after their column id as string"""
rdd_input = self.sc.textFile(filename)
def load_csv_record(line):
input_stream = StringIO.StringIO(line)
reader = csv.reader(input_stream, delimiter=',')
# key in first column, remaining columns 1..n become dict key values
payload = reader.next()
key = payload[0]
rest = payload[1:]
# generate dict of "1": first value, "2": second value, ...
d = {}
for (cell,i) in izip(rest, range(1,1+len(rest))):
d[str(i)] = cell
# just in case, add "0": key
d["0"] = key
return (key, d)
rdd_parsed = rdd_input.map(load_csv_record)
return rdd_parsed | [
"def",
"_load_text_csv_file",
"(",
"self",
",",
"filename",
",",
"separator",
"=",
"','",
",",
"*",
"*",
"kwargs",
")",
":",
"rdd_input",
"=",
"self",
".",
"sc",
".",
"textFile",
"(",
"filename",
")",
"def",
"load_csv_record",
"(",
"line",
")",
":",
"input_stream",
"=",
"StringIO",
".",
"StringIO",
"(",
"line",
")",
"reader",
"=",
"csv",
".",
"reader",
"(",
"input_stream",
",",
"delimiter",
"=",
"','",
")",
"# key in first column, remaining columns 1..n become dict key values",
"payload",
"=",
"reader",
".",
"next",
"(",
")",
"key",
"=",
"payload",
"[",
"0",
"]",
"rest",
"=",
"payload",
"[",
"1",
":",
"]",
"# generate dict of \"1\": first value, \"2\": second value, ...",
"d",
"=",
"{",
"}",
"for",
"(",
"cell",
",",
"i",
")",
"in",
"izip",
"(",
"rest",
",",
"range",
"(",
"1",
",",
"1",
"+",
"len",
"(",
"rest",
")",
")",
")",
":",
"d",
"[",
"str",
"(",
"i",
")",
"]",
"=",
"cell",
"# just in case, add \"0\": key",
"d",
"[",
"\"0\"",
"]",
"=",
"key",
"return",
"(",
"key",
",",
"d",
")",
"rdd_parsed",
"=",
"rdd_input",
".",
"map",
"(",
"load_csv_record",
")",
"return",
"rdd_parsed"
] | Return a pair RDD where key is taken from first column, remaining columns are named after their column id as string | [
"Return",
"a",
"pair",
"RDD",
"where",
"key",
"is",
"taken",
"from",
"first",
"column",
"remaining",
"columns",
"are",
"named",
"after",
"their",
"column",
"id",
"as",
"string"
] | d39c6cf957025c170753b0e02e477fea20ee3f2a | https://github.com/usc-isi-i2/dig-sparkutil/blob/d39c6cf957025c170753b0e02e477fea20ee3f2a/digSparkUtil/fileUtil.py#L131-L151 |
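The per-line mapping, minus Spark, in Python 3 form (StringIO/izip/reader.next() above are Python 2 idioms). Note the separator parameter is accepted but the source hardcodes delimiter=',':

import csv
import io

def load_csv_record(line):
    row = next(csv.reader(io.StringIO(line), delimiter=','))
    d = {str(i): cell for i, cell in enumerate(row)}  # "0" maps to the key itself
    return row[0], d

print(load_csv_record('id42,alice,ny'))
# ('id42', {'0': 'id42', '1': 'alice', '2': 'ny'})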
251,678 | usc-isi-i2/dig-sparkutil | digSparkUtil/fileUtil.py | FileUtil.get_config | def get_config(config_spec):
"""Like get_json_config but does not parse result as JSON"""
config_file = None
if config_spec.startswith("http"):
# URL: fetch it
config_file = urllib.urlopen(config_spec)
else:
# string: open file with that name
config_file = open(config_spec)
config = json.load(config_file)
# Close any open files
try:
config_file.close()
except:
pass
return config | python | def get_config(config_spec):
"""Like get_json_config but does not parse result as JSON"""
config_file = None
if config_spec.startswith("http"):
# URL: fetch it
config_file = urllib.urlopen(config_spec)
else:
# string: open file with that name
config_file = open(config_spec)
config = json.load(config_file)
# Close any open files
try:
config_file.close()
except:
pass
return config | [
"def",
"get_config",
"(",
"config_spec",
")",
":",
"config_file",
"=",
"None",
"if",
"config_spec",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"# URL: fetch it",
"config_file",
"=",
"urllib",
".",
"urlopen",
"(",
"config_spec",
")",
"else",
":",
"# string: open file with that name",
"config_file",
"=",
"open",
"(",
"config_spec",
")",
"config",
"=",
"json",
".",
"load",
"(",
"config_file",
")",
"# Close any open files",
"try",
":",
"config_file",
".",
"close",
"(",
")",
"except",
":",
"pass",
"return",
"config"
] | Load a config from a URL or a local file path and parse it as JSON | [
"Like",
"get_json_config",
"but",
"does",
"not",
"parse",
"result",
"as",
"JSON"
] | d39c6cf957025c170753b0e02e477fea20ee3f2a | https://github.com/usc-isi-i2/dig-sparkutil/blob/d39c6cf957025c170753b0e02e477fea20ee3f2a/digSparkUtil/fileUtil.py#L239-L254 |
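Usage sketch; both config locations below are hypothetical:

# cfg = get_config('conf/pipeline.json')                # local file
# cfg = get_config('http://example.com/pipeline.json')  # fetched via urllib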
251,679 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/runner/lookup_plugins/sequence.py | LookupModule.reset | def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d" | python | def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d" | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"start",
"=",
"1",
"self",
".",
"count",
"=",
"None",
"self",
".",
"end",
"=",
"None",
"self",
".",
"stride",
"=",
"1",
"self",
".",
"format",
"=",
"\"%d\""
] | set sensible defaults | [
"set",
"sensible",
"defaults"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/lookup_plugins/sequence.py#L80-L86 |
251,680 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/runner/lookup_plugins/sequence.py | LookupModule.parse_kv_args | def parse_kv_args(self, args):
"""parse key-value style arguments"""
for arg in ["start", "end", "count", "stride"]:
try:
arg_raw = args.pop(arg, None)
if arg_raw is None:
continue
arg_cooked = int(arg_raw, 0)
setattr(self, arg, arg_cooked)
except ValueError:
raise AnsibleError(
"can't parse arg %s=%r as integer"
% (arg, arg_raw)
)
if 'format' in args:
self.format = args.pop("format")
if args:
raise AnsibleError(
"unrecognized arguments to with_sequence: %r"
% args.keys()
) | python | def parse_kv_args(self, args):
"""parse key-value style arguments"""
for arg in ["start", "end", "count", "stride"]:
try:
arg_raw = args.pop(arg, None)
if arg_raw is None:
continue
arg_cooked = int(arg_raw, 0)
setattr(self, arg, arg_cooked)
except ValueError:
raise AnsibleError(
"can't parse arg %s=%r as integer"
% (arg, arg_raw)
)
if 'format' in args:
self.format = args.pop("format")
if args:
raise AnsibleError(
"unrecognized arguments to with_sequence: %r"
% args.keys()
) | [
"def",
"parse_kv_args",
"(",
"self",
",",
"args",
")",
":",
"for",
"arg",
"in",
"[",
"\"start\"",
",",
"\"end\"",
",",
"\"count\"",
",",
"\"stride\"",
"]",
":",
"try",
":",
"arg_raw",
"=",
"args",
".",
"pop",
"(",
"arg",
",",
"None",
")",
"if",
"arg_raw",
"is",
"None",
":",
"continue",
"arg_cooked",
"=",
"int",
"(",
"arg_raw",
",",
"0",
")",
"setattr",
"(",
"self",
",",
"arg",
",",
"arg_cooked",
")",
"except",
"ValueError",
":",
"raise",
"AnsibleError",
"(",
"\"can't parse arg %s=%r as integer\"",
"%",
"(",
"arg",
",",
"arg_raw",
")",
")",
"if",
"'format'",
"in",
"args",
":",
"self",
".",
"format",
"=",
"args",
".",
"pop",
"(",
"\"format\"",
")",
"if",
"args",
":",
"raise",
"AnsibleError",
"(",
"\"unrecognized arguments to with_sequence: %r\"",
"%",
"args",
".",
"keys",
"(",
")",
")"
] | parse key-value style arguments | [
"parse",
"key",
"-",
"value",
"style",
"arguments"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/lookup_plugins/sequence.py#L88-L108 |
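Because parse_kv_args uses int(arg_raw, 0), with_sequence bounds may be written in any Python literal base:

print(int('10', 0), int('0x10', 0), int('0o10', 0))  # 10 16 8 (Python 3 octal form)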
251,681 | coghost/izen | izen/crawler.py | ParseHeaderFromFile.parse_headers | def parse_headers(self, use_cookies, raw):
"""
analyze headers from file or raw messages
:return: (url, dat)
:rtype:
"""
if not raw:
packet = helper.to_str(helper.read_file(self.fpth))
else:
packet = raw
dat = {}
pks = [x for x in packet.split('\n') if x.replace(' ', '')]
url = pks[0].split(' ')[1]
for i, cnt in enumerate(pks[1:]):
arr = cnt.split(':')
if len(arr) < 2:
continue
arr = [x.replace(' ', '') for x in arr]
_k, v = arr[0], ':'.join(arr[1:])
dat[_k] = v
if use_cookies:
try:
self.fmt_cookies(dat.pop('Cookie'))
except:
pass
self.headers = dat
self.url = 'https://{}{}'.format(self.headers.get('Host'), url)
return url, dat | python | def parse_headers(self, use_cookies, raw):
"""
analyze headers from file or raw messages
:return: (url, dat)
:rtype:
"""
if not raw:
packet = helper.to_str(helper.read_file(self.fpth))
else:
packet = raw
dat = {}
pks = [x for x in packet.split('\n') if x.replace(' ', '')]
url = pks[0].split(' ')[1]
for i, cnt in enumerate(pks[1:]):
arr = cnt.split(':')
if len(arr) < 2:
continue
arr = [x.replace(' ', '') for x in arr]
_k, v = arr[0], ':'.join(arr[1:])
dat[_k] = v
if use_cookies:
try:
self.fmt_cookies(dat.pop('Cookie'))
except:
pass
self.headers = dat
self.url = 'https://{}{}'.format(self.headers.get('Host'), url)
return url, dat | [
"def",
"parse_headers",
"(",
"self",
",",
"use_cookies",
",",
"raw",
")",
":",
"if",
"not",
"raw",
":",
"packet",
"=",
"helper",
".",
"to_str",
"(",
"helper",
".",
"read_file",
"(",
"self",
".",
"fpth",
")",
")",
"else",
":",
"packet",
"=",
"raw",
"dat",
"=",
"{",
"}",
"pks",
"=",
"[",
"x",
"for",
"x",
"in",
"packet",
".",
"split",
"(",
"'\\n'",
")",
"if",
"x",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"]",
"url",
"=",
"pks",
"[",
"0",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
"for",
"i",
",",
"cnt",
"in",
"enumerate",
"(",
"pks",
"[",
"1",
":",
"]",
")",
":",
"arr",
"=",
"cnt",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"arr",
")",
"<",
"2",
":",
"continue",
"arr",
"=",
"[",
"x",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"for",
"x",
"in",
"arr",
"]",
"_k",
",",
"v",
"=",
"arr",
"[",
"0",
"]",
",",
"':'",
".",
"join",
"(",
"arr",
"[",
"1",
":",
"]",
")",
"dat",
"[",
"_k",
"]",
"=",
"v",
"if",
"use_cookies",
":",
"try",
":",
"self",
".",
"fmt_cookies",
"(",
"dat",
".",
"pop",
"(",
"'Cookie'",
")",
")",
"except",
":",
"pass",
"self",
".",
"headers",
"=",
"dat",
"self",
".",
"url",
"=",
"'https://{}{}'",
".",
"format",
"(",
"self",
".",
"headers",
".",
"get",
"(",
"'Host'",
")",
",",
"url",
")",
"return",
"url",
",",
"dat"
] | analyze headers from file or raw messages
:return: (url, dat)
:rtype: | [
"analyze",
"headers",
"from",
"file",
"or",
"raw",
"messages"
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L82-L113 |
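A sketch of the request text this parser expects; the values are illustrative. Note every header value has its spaces stripped, so multi-word values get squashed:

raw = ('GET /foo/bar?x=1 HTTP/1.1\n'
       'Host: example.com\n'
       'Cookie: a=1; b=2\n')
# parse_headers(use_cookies=True, raw=raw) pops the Cookie line into
# fmt_cookies(), leaves headers == {'Host': 'example.com'}, and rebuilds
# self.url as https://example.com/foo/bar?x=1 (https is assumed).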
251,682 | coghost/izen | izen/crawler.py | Crawler.spawn | def spawn(self, url, force_spawn=False):
"""use the url for creation of domain and fetch cookies
- init cache dir by the url domain as ``<base>/domain``
- save the cookies to file ``<base>/domain/cookie.txt``
- init ``headers.get/post/json`` with response info
- init ``site_dir/site_raw/site_media``
:param url:
:type url:
:param force_spawn:
:type force_spawn:
:return:
:rtype:
"""
_url, domain = self.get_domain_home_from_url(url)
if not _url:
return False
self.cache['site_dir'] = os.path.join(self.cache['base'], self.domain)
for k in ['raw', 'media']:
self.cache['site_' + k] = os.path.join(self.cache['site_dir'], k)
helper.mkdir_p(self.cache['site_' + k], True)
ck_pth = os.path.join(self.cache['site_dir'], 'cookie.txt')
helper.mkdir_p(ck_pth)
name = os.path.join(self.cache['site_raw'], 'homepage')
# not force spawn and file ok
if not force_spawn and helper.is_file_ok(name):
# zlog.debug('{} exist!'.format(name))
self.sess.cookies = self.load_cookies(ck_pth)
return True
else:
zlog.debug('{} not exist!'.format(name))
res = self.sess.get(url, headers=self.__header__)
if res.status_code != 200:
return False
if res:
helper.write_file(res.content, name)
# self.load(url)
for k, v in self.headers.items():
self.headers[k] = res.request.headers
self.dump_cookies(cookies=self.sess.cookies, save_to=ck_pth)
return True | python | def spawn(self, url, force_spawn=False):
"""use the url for creation of domain and fetch cookies
- init cache dir by the url domain as ``<base>/domain``
- save the cookies to file ``<base>/domain/cookie.txt``
- init ``headers.get/post/json`` with response info
- init ``site_dir/site_raw/site_media``
:param url:
:type url:
:param force_spawn:
:type force_spawn:
:return:
:rtype:
"""
_url, domain = self.get_domain_home_from_url(url)
if not _url:
return False
self.cache['site_dir'] = os.path.join(self.cache['base'], self.domain)
for k in ['raw', 'media']:
self.cache['site_' + k] = os.path.join(self.cache['site_dir'], k)
helper.mkdir_p(self.cache['site_' + k], True)
ck_pth = os.path.join(self.cache['site_dir'], 'cookie.txt')
helper.mkdir_p(ck_pth)
name = os.path.join(self.cache['site_raw'], 'homepage')
# not force spawn and file ok
if not force_spawn and helper.is_file_ok(name):
# zlog.debug('{} exist!'.format(name))
self.sess.cookies = self.load_cookies(ck_pth)
return True
else:
zlog.debug('{} not exist!'.format(name))
res = self.sess.get(url, headers=self.__header__)
if res.status_code != 200:
return False
if res:
helper.write_file(res.content, name)
# self.load(url)
for k, v in self.headers.items():
self.headers[k] = res.request.headers
self.dump_cookies(cookies=self.sess.cookies, save_to=ck_pth)
return True | [
"def",
"spawn",
"(",
"self",
",",
"url",
",",
"force_spawn",
"=",
"False",
")",
":",
"_url",
",",
"domain",
"=",
"self",
".",
"get_domain_home_from_url",
"(",
"url",
")",
"if",
"not",
"_url",
":",
"return",
"False",
"self",
".",
"cache",
"[",
"'site_dir'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache",
"[",
"'base'",
"]",
",",
"self",
".",
"domain",
")",
"for",
"k",
"in",
"[",
"'raw'",
",",
"'media'",
"]",
":",
"self",
".",
"cache",
"[",
"'site_'",
"+",
"k",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache",
"[",
"'site_dir'",
"]",
",",
"k",
")",
"helper",
".",
"mkdir_p",
"(",
"self",
".",
"cache",
"[",
"'site_'",
"+",
"k",
"]",
",",
"True",
")",
"ck_pth",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache",
"[",
"'site_dir'",
"]",
",",
"'cookie.txt'",
")",
"helper",
".",
"mkdir_p",
"(",
"ck_pth",
")",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache",
"[",
"'site_raw'",
"]",
",",
"'homepage'",
")",
"# not force spawn and file ok",
"if",
"not",
"force_spawn",
"and",
"helper",
".",
"is_file_ok",
"(",
"name",
")",
":",
"# zlog.debug('{} exist!'.format(name))",
"self",
".",
"sess",
".",
"cookies",
"=",
"self",
".",
"load_cookies",
"(",
"ck_pth",
")",
"return",
"True",
"else",
":",
"zlog",
".",
"debug",
"(",
"'{} not exist!'",
".",
"format",
"(",
"name",
")",
")",
"res",
"=",
"self",
".",
"sess",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"__header__",
")",
"if",
"res",
".",
"status_code",
"!=",
"200",
":",
"return",
"False",
"if",
"res",
":",
"helper",
".",
"write_file",
"(",
"res",
".",
"content",
",",
"name",
")",
"# self.load(url)",
"for",
"k",
",",
"v",
"in",
"self",
".",
"headers",
".",
"items",
"(",
")",
":",
"self",
".",
"headers",
"[",
"k",
"]",
"=",
"res",
".",
"request",
".",
"headers",
"self",
".",
"dump_cookies",
"(",
"cookies",
"=",
"self",
".",
"sess",
".",
"cookies",
",",
"save_to",
"=",
"ck_pth",
")",
"return",
"True"
] | use the url for creation of domain and fetch cookies
- init cache dir by the url domain as ``<base>/domain``
- save the cookies to file ``<base>/domain/cookie.txt``
- init ``headers.get/post/json`` with response info
- init ``site_dir/site_raw/site_media``
:param url:
:type url:
:param force_spawn:
:type force_spawn:
:return:
:rtype: | [
"use",
"the",
"url",
"for",
"creation",
"of",
"domain",
"and",
"fetch",
"cookies"
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L179-L227 |
251,683 | coghost/izen | izen/crawler.py | Crawler.map_url_to_cache_id | def map_url_to_cache_id(self, url):
"""use of the url resource location as cached id
e.g.: ``<domain>/foo/bar/a.html => <base>/domain/foo/bar/a.html``
- map the url to local file
:param url:
:type url:
:return:
:rtype:
"""
base, _ = self.get_domain_home_from_url(url)
if base == '':
# invalid url
_sub_page = ''
elif base == url or base + '/' == url:
# homepage
_sub_page = 'homepage'
else:
# sub page
_sub_page = url.replace(base, '').split('/')
_sub_page = '/'.join([x for x in _sub_page if x])
if _sub_page:
full_name = os.path.join(self.cache['site_raw'], _sub_page)
return full_name
else:
return _sub_page | python | def map_url_to_cache_id(self, url):
"""use of the url resource location as cached id
e.g.: ``<domain>/foo/bar/a.html => <base>/domain/foo/bar/a.html``
- map the url to local file
:param url:
:type url:
:return:
:rtype:
"""
base, _ = self.get_domain_home_from_url(url)
if base == '':
# invalid url
_sub_page = ''
elif base == url or base + '/' == url:
# homepage
_sub_page = 'homepage'
else:
# sub page
_sub_page = url.replace(base, '').split('/')
_sub_page = '/'.join([x for x in _sub_page if x])
if _sub_page:
full_name = os.path.join(self.cache['site_raw'], _sub_page)
return full_name
else:
return _sub_page | [
"def",
"map_url_to_cache_id",
"(",
"self",
",",
"url",
")",
":",
"base",
",",
"_",
"=",
"self",
".",
"get_domain_home_from_url",
"(",
"url",
")",
"if",
"base",
"==",
"''",
":",
"# invalid url",
"_sub_page",
"=",
"''",
"elif",
"base",
"==",
"url",
"or",
"base",
"+",
"'/'",
"==",
"url",
":",
"# homepage",
"_sub_page",
"=",
"'homepage'",
"else",
":",
"# sub page",
"_sub_page",
"=",
"url",
".",
"replace",
"(",
"base",
",",
"''",
")",
".",
"split",
"(",
"'/'",
")",
"_sub_page",
"=",
"'/'",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"_sub_page",
"if",
"x",
"]",
")",
"if",
"_sub_page",
":",
"full_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache",
"[",
"'site_raw'",
"]",
",",
"_sub_page",
")",
"return",
"full_name",
"else",
":",
"return",
"_sub_page"
] | use of the url resource location as cached id
e.g.: ``<domain>/foo/bar/a.html => <base>/domain/foo/bar/a.html``
- map the url to local file
:param url:
:type url:
:return:
:rtype: | [
"use",
"of",
"the",
"url",
"resource",
"location",
"as",
"cached",
"id"
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L264-L293 |
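Expected mapping, assuming cache['site_raw'] is /tmp/cache/example.com/raw (an illustrative path):

# https://example.com/            -> /tmp/cache/example.com/raw/homepage
# https://example.com/foo/a.html  -> /tmp/cache/example.com/raw/foo/a.html
# not-a-url                       -> '' (empty string flags an invalid url)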
251,684 | coghost/izen | izen/crawler.py | CommonCrawler.do_sess_get | def do_sess_get(self, url):
"""get url by requests synchronized
:param url:
:type url:
:return:
:rtype:
"""
try:
res = self.sess.get(url, headers=self.headers['get'], timeout=self.timeout)
if res.status_code == 200:
return res.content
except (requests.ReadTimeout, requests.ConnectTimeout, requests.ConnectionError) as _:
zlog.error('failed of: {} with error: {}'.format(url, _)) | python | def do_sess_get(self, url):
"""get url by requests synchronized
:param url:
:type url:
:return:
:rtype:
"""
try:
res = self.sess.get(url, headers=self.headers['get'], timeout=self.timeout)
if res.status_code == 200:
return res.content
except (requests.ReadTimeout, requests.ConnectTimeout, requests.ConnectionError) as _:
zlog.error('failed of: {} with error: {}'.format(url, _)) | [
"def",
"do_sess_get",
"(",
"self",
",",
"url",
")",
":",
"try",
":",
"res",
"=",
"self",
".",
"sess",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"headers",
"[",
"'get'",
"]",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"if",
"res",
".",
"status_code",
"==",
"200",
":",
"return",
"res",
".",
"content",
"except",
"(",
"requests",
".",
"ReadTimeout",
",",
"requests",
".",
"ConnectTimeout",
",",
"requests",
".",
"ConnectionError",
")",
"as",
"_",
":",
"zlog",
".",
"error",
"(",
"'failed of: {} with error: {}'",
".",
"format",
"(",
"url",
",",
"_",
")",
")"
] | get url by requests synchronously
:param url:
:type url:
:return:
:rtype: | [
"get",
"url",
"by",
"requests",
"synchronized"
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L347-L360 |
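The same guarded-GET pattern in standalone form (plain requests, no session or custom headers):

import requests

def fetch(url, timeout=10):
    try:
        res = requests.get(url, timeout=timeout)
        if res.status_code == 200:
            return res.content
    except (requests.ReadTimeout, requests.ConnectTimeout,
            requests.ConnectionError) as exc:
        print('failed of: {} with error: {}'.format(url, exc))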
251,685 | coghost/izen | izen/crawler.py | CommonCrawler.load | def load(self, url, use_cache=True, show_log=False):
"""fetch the url ``raw info``, use cache first, if no cache hit, try get from Internet
:param url:
:type url:
:param use_cache:
:type use_cache:
:param show_log:
:type show_log:
:return: the ``raw info`` of the url
:rtype: ``str``
"""
_name = self.map_url_to_cache_id(url)
raw = ''
hit = False
if use_cache:
hit = True
raw = self.load_from_cache(_name)
if not raw:
if show_log:
zlog.debug('from cache got nothing {}'.format(_name))
raw = self.do_sess_get(url)
if raw:
helper.write_file(raw, _name)
# if not raw:
# hit = True
# raw = self.load_from_cache(_name)
if show_log:
zlog.debug('[{}:{:>8}] get {}'.format('Cache' if hit else 'Net', len(raw), url))
return raw | python | def load(self, url, use_cache=True, show_log=False):
"""fetch the url ``raw info``, use cache first, if no cache hit, try get from Internet
:param url:
:type url:
:param use_cache:
:type use_cache:
:param show_log:
:type show_log:
:return: the ``raw info`` of the url
:rtype: ``str``
"""
_name = self.map_url_to_cache_id(url)
raw = ''
hit = False
if use_cache:
hit = True
raw = self.load_from_cache(_name)
if not raw:
if show_log:
zlog.debug('from cache got nothing {}'.format(_name))
raw = self.do_sess_get(url)
if raw:
helper.write_file(raw, _name)
# if not raw:
# hit = True
# raw = self.load_from_cache(_name)
if show_log:
zlog.debug('[{}:{:>8}] get {}'.format('Cache' if hit else 'Net', len(raw), url))
return raw | [
"def",
"load",
"(",
"self",
",",
"url",
",",
"use_cache",
"=",
"True",
",",
"show_log",
"=",
"False",
")",
":",
"_name",
"=",
"self",
".",
"map_url_to_cache_id",
"(",
"url",
")",
"raw",
"=",
"''",
"hit",
"=",
"False",
"if",
"use_cache",
":",
"hit",
"=",
"True",
"raw",
"=",
"self",
".",
"load_from_cache",
"(",
"_name",
")",
"if",
"not",
"raw",
":",
"if",
"show_log",
":",
"zlog",
".",
"debug",
"(",
"'from cache got nothing {}'",
".",
"format",
"(",
"_name",
")",
")",
"raw",
"=",
"self",
".",
"do_sess_get",
"(",
"url",
")",
"if",
"raw",
":",
"helper",
".",
"write_file",
"(",
"raw",
",",
"_name",
")",
"# if not raw:",
"# hit = True",
"# raw = self.load_from_cache(_name)",
"if",
"show_log",
":",
"zlog",
".",
"debug",
"(",
"'[{}:{:>8}] get {}'",
".",
"format",
"(",
"'Cache'",
"if",
"hit",
"else",
"'Net'",
",",
"len",
"(",
"raw",
")",
",",
"url",
")",
")",
"return",
"raw"
] | fetch the url ``raw info``, use cache first, if no cache hit, try to get from Internet
:param url:
:type url:
:param use_cache:
:type use_cache:
:param show_log:
:type show_log:
:return: the ``raw info`` of the url
:rtype: ``str`` | [
"fetch",
"the",
"url",
"raw",
"info",
"use",
"cache",
"first",
"if",
"no",
"cache",
"hit",
"try",
"get",
"from",
"Internet"
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L362-L394 |
251,686 | coghost/izen | izen/crawler.py | CommonCrawler.sync_save | def sync_save(self, res, overwrite=False):
""" save ``res`` to local synchronized
:param res: {'url': '', 'name': ''}
:type res: dict
:param overwrite:
:type overwrite:
:return:
:rtype: bool
"""
if not isinstance(res, dict):
raise CrawlerParamsError('res must be dict')
url_, file_name = res.get('url', ''), res.get('name', '')
if not url_ or not file_name:
raise CrawlerParamsError('url&name is needed!')
# log.debug('Sync {}'.format(res.get('name')))
# not overwrite and file exists
if not overwrite and helper.is_file_ok(file_name):
return True
cnt = self.do_sess_get(url_)
# get res failed
if not cnt:
return False
with open(file_name, 'wb') as f:
f.write(cnt)
zlog.debug('Sync Done {}'.format(res.get('name')))
return True | python | def sync_save(self, res, overwrite=False):
""" save ``res`` to local synchronized
:param res: {'url': '', 'name': ''}
:type res: dict
:param overwrite:
:type overwrite:
:return:
:rtype: bool
"""
if not isinstance(res, dict):
raise CrawlerParamsError('res must be dict')
url_, file_name = res.get('url', ''), res.get('name', '')
if not url_ or not file_name:
raise CrawlerParamsError('url&name is needed!')
# log.debug('Sync {}'.format(res.get('name')))
# not overwrite and file exists
if not overwrite and helper.is_file_ok(file_name):
return True
cnt = self.do_sess_get(url_)
# get res failed
if not cnt:
return False
with open(file_name, 'wb') as f:
f.write(cnt)
zlog.debug('Sync Done {}'.format(res.get('name')))
return True | [
"def",
"sync_save",
"(",
"self",
",",
"res",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"res",
",",
"dict",
")",
":",
"raise",
"CrawlerParamsError",
"(",
"'res must be dict'",
")",
"url_",
",",
"file_name",
"=",
"res",
".",
"get",
"(",
"'url'",
",",
"''",
")",
",",
"res",
".",
"get",
"(",
"'name'",
",",
"''",
")",
"if",
"not",
"url_",
"or",
"not",
"file_name",
":",
"raise",
"CrawlerParamsError",
"(",
"'url&name is needed!'",
")",
"# log.debug('Sync {}'.format(res.get('name')))",
"# not overwrite and file exists",
"if",
"not",
"overwrite",
"and",
"helper",
".",
"is_file_ok",
"(",
"file_name",
")",
":",
"return",
"True",
"cnt",
"=",
"self",
".",
"do_sess_get",
"(",
"url_",
")",
"# get res failed",
"if",
"not",
"cnt",
":",
"return",
"False",
"with",
"open",
"(",
"file_name",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"cnt",
")",
"zlog",
".",
"debug",
"(",
"'Sync Done {}'",
".",
"format",
"(",
"res",
".",
"get",
"(",
"'name'",
")",
")",
")",
"return",
"True"
] | save ``res`` to local disk synchronously
:param res: {'url': '', 'name': ''}
:type res: dict
:param overwrite:
:type overwrite:
:return:
:rtype: bool | [
"save",
"res",
"to",
"local",
"synchronized"
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L448-L478 |
251,687 | coghost/izen | izen/crawler.py | AsyncCrawler.crawl_raw | async def crawl_raw(self, res):
""" crawl the raw doc, and save it asynchronous.
:param res: {'url','', 'name': ''}
:type res: ``dict``
:return:
:rtype:
"""
cnt = await self.async_get(res)
if cnt:
loop_ = asyncio.get_event_loop()
await loop_.run_in_executor(None, self.write_hd, res.get('name'), cnt)
return True
else:
return False | python | async def crawl_raw(self, res):
""" crawl the raw doc, and save it asynchronous.
:param res: {'url','', 'name': ''}
:type res: ``dict``
:return:
:rtype:
"""
cnt = await self.async_get(res)
if cnt:
loop_ = asyncio.get_event_loop()
await loop_.run_in_executor(None, self.write_hd, res.get('name'), cnt)
return True
else:
return False | [
"async",
"def",
"crawl_raw",
"(",
"self",
",",
"res",
")",
":",
"cnt",
"=",
"await",
"self",
".",
"async_get",
"(",
"res",
")",
"if",
"cnt",
":",
"loop_",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"await",
"loop_",
".",
"run_in_executor",
"(",
"None",
",",
"self",
".",
"write_hd",
",",
"res",
".",
"get",
"(",
"'name'",
")",
",",
"cnt",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | crawl the raw doc, and save it asynchronously.
:param res: {'url': '', 'name': ''}
:type res: ``dict``
:return:
:rtype: | [
"crawl",
"the",
"raw",
"doc",
"and",
"save",
"it",
"asynchronous",
"."
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L494-L508 |
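The offload idiom above in isolation: blocking file I/O runs on the default executor so the event loop stays responsive; write_hd is a guess at the helper's shape:

import asyncio

def write_hd(name, data):  # assumed helper: plain blocking write
    with open(name, 'wb') as f:
        f.write(data)

async def save(name, data):
    loop = asyncio.get_event_loop()
    await loop.run_in_executor(None, write_hd, name, data)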
251,688 | coghost/izen | izen/crawler.py | AsyncCrawler._sem_crawl | async def _sem_crawl(self, sem, res):
""" use semaphore ``encapsulate`` the crawl_media \n
with async crawl, should avoid crawl too fast to become DDos attack to the crawled server
should set the ``semaphore size``, and take ``a little gap`` between each crawl behavior.
:param sem: the size of semaphore
:type sem:
:param res:
:type res: dict
:return:
:rtype:
"""
async with sem:
st_ = await self.crawl_raw(res)
if st_:
self.result['ok'] += 1
else:
self.result['fail'] += 1
# take a little gap
await asyncio.sleep(random.randint(0, 1)) | python | async def _sem_crawl(self, sem, res):
""" use semaphore ``encapsulate`` the crawl_media \n
with async crawl, should avoid crawl too fast to become DDos attack to the crawled server
should set the ``semaphore size``, and take ``a little gap`` between each crawl behavior.
:param sem: the size of semaphore
:type sem:
:param res:
:type res: dict
:return:
:rtype:
"""
async with sem:
st_ = await self.crawl_raw(res)
if st_:
self.result['ok'] += 1
else:
self.result['fail'] += 1
# take a little gap
await asyncio.sleep(random.randint(0, 1)) | [
"async",
"def",
"_sem_crawl",
"(",
"self",
",",
"sem",
",",
"res",
")",
":",
"async",
"with",
"sem",
":",
"st_",
"=",
"await",
"self",
".",
"crawl_raw",
"(",
"res",
")",
"if",
"st_",
":",
"self",
".",
"result",
"[",
"'ok'",
"]",
"+=",
"1",
"else",
":",
"self",
".",
"result",
"[",
"'fail'",
"]",
"+=",
"1",
"# take a little gap",
"await",
"asyncio",
".",
"sleep",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"1",
")",
")"
] | use a semaphore to ``encapsulate`` crawl_raw \n
with async crawl, we should avoid crawling too fast and becoming a DDoS attack on the crawled server;
we should set the ``semaphore size``, and take ``a little gap`` between each crawl behavior.
:param sem: the size of semaphore
:type sem:
:param res:
:type res: dict
:return:
:rtype: | [
"use",
"semaphore",
"encapsulate",
"the",
"crawl_media",
"\\",
"n",
"with",
"async",
"crawl",
"should",
"avoid",
"crawl",
"too",
"fast",
"to",
"become",
"DDos",
"attack",
"to",
"the",
"crawled",
"server",
"should",
"set",
"the",
"semaphore",
"size",
"and",
"take",
"a",
"little",
"gap",
"between",
"each",
"crawl",
"behavior",
"."
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L510-L530 |
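A self-contained version of the throttle pattern: a semaphore caps concurrency and a short random sleep spaces out the requests:

import asyncio
import random

async def bounded(sem, coro):
    async with sem:
        result = await coro
        await asyncio.sleep(random.randint(0, 1))  # small polite gap
        return result

async def main():
    sem = asyncio.Semaphore(5)  # at most 5 crawls in flight
    jobs = [bounded(sem, asyncio.sleep(0, result=i)) for i in range(20)]
    return await asyncio.gather(*jobs)

print(asyncio.run(main()))  # [0, 1, ..., 19]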
251,689 | alfred82santa/aio-service-client | service_client/json.py | json_decoder | def json_decoder(content, *args, **kwargs):
"""
Json decoder parser to be used by service_client
"""
if not content:
return None
json_value = content.decode()
return json.loads(json_value) | python | def json_decoder(content, *args, **kwargs):
"""
Json decoder parser to be used by service_client
"""
if not content:
return None
json_value = content.decode()
return json.loads(json_value) | [
"def",
"json_decoder",
"(",
"content",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"content",
":",
"return",
"None",
"json_value",
"=",
"content",
".",
"decode",
"(",
")",
"return",
"json",
".",
"loads",
"(",
"json_value",
")"
] | Json decoder parser to be used by service_client | [
"Json",
"decoder",
"parser",
"to",
"be",
"used",
"by",
"service_client"
] | dd9ad49e23067b22178534915aa23ba24f6ff39b | https://github.com/alfred82santa/aio-service-client/blob/dd9ad49e23067b22178534915aa23ba24f6ff39b/service_client/json.py#L11-L18 |
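Usage of the decoder above:

print(json_decoder(b'{"ok": true}'))  # {'ok': True}
print(json_decoder(b''))              # None -- empty bodies short-circuit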
251,690 | mbarakaja/braulio | braulio/cli.py | init | def init(changelog_name):
"""Setup your project."""
changelog_path = find_chglog_file()
create_changelog_flag = True
mark = style("?", fg="blue", bold=True)
if not changelog_name:
if changelog_path:
filename = style(changelog_path.name, fg="blue", bold=True)
message = f" {mark} {filename} was found." " Is this the changelog file?"
if click.confirm(message):
changelog_name = changelog_path.name
create_changelog_flag = False
if create_changelog_flag:
message = f" {mark} Enter a name for the changelog:"
changelog_name = click.prompt(message, default=DEFAULT_CHANGELOG)
if create_changelog_flag:
create_chglog_file(changelog_name)
if changelog_name and create_changelog_flag:
update_config_file("changelog_file", changelog_name) | python | def init(changelog_name):
"""Setup your project."""
changelog_path = find_chglog_file()
create_changelog_flag = True
mark = style("?", fg="blue", bold=True)
if not changelog_name:
if changelog_path:
filename = style(changelog_path.name, fg="blue", bold=True)
message = f" {mark} {filename} was found." " Is this the changelog file?"
if click.confirm(message):
changelog_name = changelog_path.name
create_changelog_flag = False
if create_changelog_flag:
message = f" {mark} Enter a name for the changelog:"
changelog_name = click.prompt(message, default=DEFAULT_CHANGELOG)
if create_changelog_flag:
create_chglog_file(changelog_name)
if changelog_name and create_changelog_flag:
update_config_file("changelog_file", changelog_name) | [
"def",
"init",
"(",
"changelog_name",
")",
":",
"changelog_path",
"=",
"find_chglog_file",
"(",
")",
"create_changelog_flag",
"=",
"True",
"mark",
"=",
"style",
"(",
"\"?\"",
",",
"fg",
"=",
"\"blue\"",
",",
"bold",
"=",
"True",
")",
"if",
"not",
"changelog_name",
":",
"if",
"changelog_path",
":",
"filename",
"=",
"style",
"(",
"changelog_path",
".",
"name",
",",
"fg",
"=",
"\"blue\"",
",",
"bold",
"=",
"True",
")",
"message",
"=",
"f\" {mark} {filename} was found.\"",
"\" Is this the changelog file?\"",
"if",
"click",
".",
"confirm",
"(",
"message",
")",
":",
"changelog_name",
"=",
"changelog_path",
".",
"name",
"create_changelog_flag",
"=",
"False",
"if",
"create_changelog_flag",
":",
"message",
"=",
"f\" {mark} Enter a name for the changelog:\"",
"changelog_name",
"=",
"click",
".",
"prompt",
"(",
"message",
",",
"default",
"=",
"DEFAULT_CHANGELOG",
")",
"if",
"create_changelog_flag",
":",
"create_chglog_file",
"(",
"changelog_name",
")",
"if",
"changelog_name",
"and",
"create_changelog_flag",
":",
"update_config_file",
"(",
"\"changelog_file\"",
",",
"changelog_name",
")"
] | Set up your project. | [
"Setup",
"your",
"project",
"."
] | 70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b | https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/cli.py#L67-L91 |
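The interactive flow, confirm a detected file and otherwise prompt for a name, is plain `click`. A minimal reproduction (the file names and default are placeholders; braulio's real `DEFAULT_CHANGELOG` value is defined elsewhere in the package):

```python
import click

@click.command()
@click.option("--changelog-name", default=None)
def init(changelog_name):
    if not changelog_name:
        if click.confirm("CHANGELOG.rst was found. Is this the changelog file?"):
            changelog_name = "CHANGELOG.rst"
        else:
            changelog_name = click.prompt("Enter a name for the changelog",
                                          default="HISTORY.rst")
    click.echo(f"Using changelog: {changelog_name}")

if __name__ == "__main__":
    init()
```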
251,691 | mbarakaja/braulio | braulio/cli.py | changelog_file_option_validator | def changelog_file_option_validator(ctx, param, value):
"""Checks that the given file path exists in the current working directory.
Returns a :class:`~pathlib.Path` object. If the file does not exist raises
a :class:`~click.UsageError` exception.
"""
path = Path(value)
if not path.exists():
filename = click.style(path.name, fg="blue", bold=True)
ctx.fail(
"\n"
f" {x_mark} Unable to find {filename}\n"
' Run "$ brau init" to create one'
)
return path | python | def changelog_file_option_validator(ctx, param, value):
"""Checks that the given file path exists in the current working directory.
Returns a :class:`~pathlib.Path` object. If the file does not exist raises
a :class:`~click.UsageError` exception.
"""
path = Path(value)
if not path.exists():
filename = click.style(path.name, fg="blue", bold=True)
ctx.fail(
"\n"
f" {x_mark} Unable to find {filename}\n"
' Run "$ brau init" to create one'
)
return path | [
"def",
"changelog_file_option_validator",
"(",
"ctx",
",",
"param",
",",
"value",
")",
":",
"path",
"=",
"Path",
"(",
"value",
")",
"if",
"not",
"path",
".",
"exists",
"(",
")",
":",
"filename",
"=",
"click",
".",
"style",
"(",
"path",
".",
"name",
",",
"fg",
"=",
"\"blue\"",
",",
"bold",
"=",
"True",
")",
"ctx",
".",
"fail",
"(",
"\"\\n\"",
"f\" {x_mark} Unable to find {filename}\\n\"",
"' Run \"$ brau init\" to create one'",
")",
"return",
"path"
] | Checks that the given file path exists in the current working directory.
Returns a :class:`~pathlib.Path` object. If the file does not exist raises
a :class:`~click.UsageError` exception. | [
"Checks",
"that",
"the",
"given",
"file",
"path",
"exists",
"in",
"the",
"current",
"working",
"directory",
"."
] | 70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b | https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/cli.py#L118-L135 |
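`click` passes option callbacks `(ctx, param, value)` and uses whatever they return as the option's final value, which is what makes this validator work. A stripped-down version of the same idea (the command and option names here are invented):

```python
from pathlib import Path
import click

def require_existing_file(ctx, param, value):
    path = Path(value)
    if not path.exists():
        ctx.fail(f"Unable to find {path.name}")  # raises a UsageError
    return path  # becomes the value the command receives

@click.command()
@click.option("--changelog-file", default="CHANGELOG.rst",
              callback=require_existing_file)
def release(changelog_file):
    click.echo(f"Releasing with {changelog_file}")

if __name__ == "__main__":
    release()
```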
251,692 | mbarakaja/braulio | braulio/cli.py | current_version_option_validator | def current_version_option_validator(ctx, param, value):
"""If a version string is provided, validates it. Otherwise it tries
to determine the current version from the last Git tag that matches
``tag_pattern`` option.
Return a :class:`~braulio.version.Version` object or **None**.
"""
current_version = None
if value:
try:
current_version = Version(value)
except ValueError:
ctx.fail(f"{value} is not a valid version string")
# Look for the last git tag for the current version
git = Git()
tag_pattern = ctx.params["tag_pattern"]
versions = tag_analyzer(git.tags, tag_pattern, Version)
# User provided current version. Try to find a tag that match it.
if current_version:
for version in versions:
if version == current_version:
current_version = version
break
elif versions:
current_version = versions[0]
ctx.params["current_tag"] = current_version.tag if current_version else None
ctx.params["versions"] = versions
return current_version | python | def current_version_option_validator(ctx, param, value):
"""If a version string is provided, validates it. Otherwise it tries
to determine the current version from the last Git tag that matches
``tag_pattern`` option.
Return a :class:`~braulio.version.Version` object or **None**.
"""
current_version = None
if value:
try:
current_version = Version(value)
except ValueError:
ctx.fail(f"{value} is not a valid version string")
# Look for the last git tag for the current version
git = Git()
tag_pattern = ctx.params["tag_pattern"]
versions = tag_analyzer(git.tags, tag_pattern, Version)
# User provided current version. Try to find a tag that match it.
if current_version:
for version in versions:
if version == current_version:
current_version = version
break
elif versions:
current_version = versions[0]
ctx.params["current_tag"] = current_version.tag if current_version else None
ctx.params["versions"] = versions
return current_version | [
"def",
"current_version_option_validator",
"(",
"ctx",
",",
"param",
",",
"value",
")",
":",
"current_version",
"=",
"None",
"if",
"value",
":",
"try",
":",
"current_version",
"=",
"Version",
"(",
"value",
")",
"except",
"ValueError",
":",
"ctx",
".",
"fail",
"(",
"f\"{value} is not a valid version string\"",
")",
"# Look for the last git tag for the curren version",
"git",
"=",
"Git",
"(",
")",
"tag_pattern",
"=",
"ctx",
".",
"params",
"[",
"\"tag_pattern\"",
"]",
"versions",
"=",
"tag_analyzer",
"(",
"git",
".",
"tags",
",",
"tag_pattern",
",",
"Version",
")",
"# User provided current version. Try to find a tag that match it.",
"if",
"current_version",
":",
"for",
"version",
"in",
"versions",
":",
"if",
"version",
"==",
"current_version",
":",
"current_version",
"=",
"version",
"break",
"elif",
"versions",
":",
"current_version",
"=",
"versions",
"[",
"0",
"]",
"ctx",
".",
"params",
"[",
"\"current_tag\"",
"]",
"=",
"current_version",
".",
"tag",
"if",
"current_version",
"else",
"None",
"ctx",
".",
"params",
"[",
"\"versions\"",
"]",
"=",
"versions",
"return",
"current_version"
] | If a version string is provided, validates it. Otherwise it tries
to determine the current version from the last Git tag that matches
``tag_pattern`` option.
Return a :class:`~braulio.version.Version` object or **None**. | [
"If",
"a",
"version",
"string",
"is",
"provided",
"validates",
"it",
".",
"Otherwise",
"it",
"tries",
"to",
"determine",
"the",
"current",
"version",
"from",
"the",
"last",
"Git",
"tag",
"that",
"matches",
"tag_pattern",
"option",
"."
] | 70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b | https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/cli.py#L158-L191 |
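Stripped of braulio's `Git`, `Version`, and `tag_analyzer` machinery, the core move is: filter the repository's tags through a pattern and take the highest match as the current version. A rough stand-alone equivalent (the pattern and tags are examples only, not braulio's actual defaults):

```python
import re

def latest_version(tags, tag_pattern=r"v(\d+)\.(\d+)\.(\d+)"):
    versions = []
    for tag in tags:
        match = re.fullmatch(tag_pattern, tag)
        if match:
            versions.append(tuple(int(part) for part in match.groups()))
    return max(versions) if versions else None

print(latest_version(["v0.1.0", "v0.2.0", "not-a-release", "v0.10.1"]))  # (0, 10, 1)
```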
251,693 | lanhel/ftpysetup | ftpysetup/website/config/__init__.py | HTMLTranslator.layout | def layout(self, indent=' '):
"""This will indent each new tag in the body by given number of spaces."""
self.__indent(self.head, indent)
self.__indent(self.meta, indent)
self.__indent(self.stylesheet, indent)
self.__indent(self.header, indent)
self.__indent(self.body, indent, initial=3)
self.__indent(self.footer, indent)
self.__indent(self.body_pre_docinfo, indent, initial=3)
self.__indent(self.docinfo, indent) | python | def layout(self, indent=' '):
"""This will indent each new tag in the body by given number of spaces."""
self.__indent(self.head, indent)
self.__indent(self.meta, indent)
self.__indent(self.stylesheet, indent)
self.__indent(self.header, indent)
self.__indent(self.body, indent, initial=3)
self.__indent(self.footer, indent)
self.__indent(self.body_pre_docinfo, indent, initial=3)
self.__indent(self.docinfo, indent) | [
"def",
"layout",
"(",
"self",
",",
"indent",
"=",
"' '",
")",
":",
"self",
".",
"__indent",
"(",
"self",
".",
"head",
",",
"indent",
")",
"self",
".",
"__indent",
"(",
"self",
".",
"meta",
",",
"indent",
")",
"self",
".",
"__indent",
"(",
"self",
".",
"stylesheet",
",",
"indent",
")",
"self",
".",
"__indent",
"(",
"self",
".",
"header",
",",
"indent",
")",
"self",
".",
"__indent",
"(",
"self",
".",
"body",
",",
"indent",
",",
"initial",
"=",
"3",
")",
"self",
".",
"__indent",
"(",
"self",
".",
"footer",
",",
"indent",
")",
"self",
".",
"__indent",
"(",
"self",
".",
"body_pre_docinfo",
",",
"indent",
",",
"initial",
"=",
"3",
")",
"self",
".",
"__indent",
"(",
"self",
".",
"docinfo",
",",
"indent",
")"
] | This will indent each new tag in the body by the given number of spaces. | [
"This",
"will",
"indent",
"each",
"new",
"tag",
"in",
"the",
"body",
"by",
"given",
"number",
"of",
"spaces",
"."
] | 9cdea6b82658fb4394b582d1fe5b05eaf5746fde | https://github.com/lanhel/ftpysetup/blob/9cdea6b82658fb4394b582d1fe5b05eaf5746fde/ftpysetup/website/config/__init__.py#L117-L128 |
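The private `__indent` helper is not included in this row, so its exact behavior is unknown; the sketch below reimplements the described idea, indenting each body fragment according to tag nesting, purely as an illustration rather than the ftpysetup code:

```python
def indent_fragments(fragments, indent='  ', initial=0):
    out, level = [], 0
    for i, fragment in enumerate(fragments):
        if fragment.startswith('</'):
            level = max(level - 1, 0)  # dedent before a closing tag
        out.append((indent * level if i >= initial else '') + fragment)
        if fragment.startswith('<') and not fragment.startswith('</'):
            level += 1  # indent everything inside an opening tag
    return out

print('\n'.join(indent_fragments(['<div>', '<p>', 'text', '</p>', '</div>'])))
```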
251,694 | beaugunderson/intervention | intervention/launch.py | main | def main():
"""
Show the intervention screen.
"""
application = Application(sys.argv, ignore_close=not SKIP_FILTER)
platform.hide_cursor()
with open(resource_filename(__name__, 'intervention.css')) as css:
application.setStyleSheet(css.read())
# exec() is required for objc so we must use spawn
# multiprocessing.set_start_method('spawn')
# target = do_nothing
# if sys.platform == 'darwin' and not SKIP_FILTER:
# from filters import filter_input
# target = filter_input
# pool = multiprocessing.Pool(1) # pylint: disable=not-callable
# def filter_input_done_cb(ignored):
# application.closeAllWindows()
# result = pool.apply_async(target, callback=filter_input_done_cb)
# pylint: disable=unused-variable
@atexit.register
def exit_handler():
"""
Clean up.
"""
logging.info('atexit triggered')
platform.show_cursor()
# # terminate the pool so we don't sit forever waiting on our get()
# logging.info('Terminating pool...')
# pool.terminate()
# logging.info('Joining pool...')
# pool.join()
# logging.info('Retrieving result...')
# try:
# # raise any exceptions raised by the input filtering code
# result.get(0)
# except multiprocessing.TimeoutError:
# logging.info('Timed out waiting for result.')
# def duration_reached():
# logging.info('Duration reached, exiting...')
# sys.exit(0)
# Run for DURATION and then exit
# QtCore.QTimer.singleShot(DURATION, duration_reached)
application.run() | python | def main():
"""
Show the intervention screen.
"""
application = Application(sys.argv, ignore_close=not SKIP_FILTER)
platform.hide_cursor()
with open(resource_filename(__name__, 'intervention.css')) as css:
application.setStyleSheet(css.read())
# exec() is required for objc so we must use spawn
# multiprocessing.set_start_method('spawn')
# target = do_nothing
# if sys.platform == 'darwin' and not SKIP_FILTER:
# from filters import filter_input
# target = filter_input
# pool = multiprocessing.Pool(1) # pylint: disable=not-callable
# def filter_input_done_cb(ignored):
# application.closeAllWindows()
# result = pool.apply_async(target, callback=filter_input_done_cb)
# pylint: disable=unused-variable
@atexit.register
def exit_handler():
"""
Clean up.
"""
logging.info('atexit triggered')
platform.show_cursor()
# # terminate the pool so we don't sit forever waiting on our get()
# logging.info('Terminating pool...')
# pool.terminate()
# logging.info('Joining pool...')
# pool.join()
# logging.info('Retrieving result...')
# try:
# # raise any exceptions raised by the input filtering code
# result.get(0)
# except multiprocessing.TimeoutError:
# logging.info('Timed out waiting for result.')
# def duration_reached():
# logging.info('Duration reached, exiting...')
# sys.exit(0)
# Run for DURATION and then exit
# QtCore.QTimer.singleShot(DURATION, duration_reached)
application.run() | [
"def",
"main",
"(",
")",
":",
"application",
"=",
"Application",
"(",
"sys",
".",
"argv",
",",
"ignore_close",
"=",
"not",
"SKIP_FILTER",
")",
"platform",
".",
"hide_cursor",
"(",
")",
"with",
"open",
"(",
"resource_filename",
"(",
"__name__",
",",
"'intervention.css'",
")",
")",
"as",
"css",
":",
"application",
".",
"setStyleSheet",
"(",
"css",
".",
"read",
"(",
")",
")",
"# exec() is required for objc so we must use spawn",
"# multiprocessing.set_start_method('spawn')",
"# target = do_nothing",
"# if sys.platform == 'darwin' and not SKIP_FILTER:",
"# from filters import filter_input",
"# target = filter_input",
"# pool = multiprocessing.Pool(1) # pylint: disable=not-callable",
"# def filter_input_done_cb(ignored):",
"# application.closeAllWindows()",
"# result = pool.apply_async(target, callback=filter_input_done_cb)",
"# pylint: disable=unused-variable",
"@",
"atexit",
".",
"register",
"def",
"exit_handler",
"(",
")",
":",
"\"\"\"\n Clean up.\n \"\"\"",
"logging",
".",
"info",
"(",
"'atexit triggered'",
")",
"platform",
".",
"show_cursor",
"(",
")",
"# # terminate the pool so we don't sit forever waiting on our get()",
"# logging.info('Terminating pool...')",
"# pool.terminate()",
"# logging.info('Joining pool...')",
"# pool.join()",
"# logging.info('Retrieving result...')",
"# try:",
"# # raise any exceptions raised by the input filtering code",
"# result.get(0)",
"# except multiprocessing.TimeoutError:",
"# logging.info('Timed out waiting for result.')",
"# def duration_reached():",
"# logging.info('Duration reached, exiting...')",
"# sys.exit(0)",
"# Run for DURATION and then exit",
"# QtCore.QTimer.singleShot(DURATION, duration_reached)",
"application",
".",
"run",
"(",
")"
] | Show the intervention screen. | [
"Show",
"the",
"intervention",
"screen",
"."
] | 72ee436c38962006b30747e16ac0d20d298ec9d5 | https://github.com/beaugunderson/intervention/blob/72ee436c38962006b30747e16ac0d20d298ec9d5/intervention/launch.py#L39-L98 |
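Most of this function is Qt- and macOS-specific (note the large commented-out input-filtering block), but the `atexit` cleanup hook is a portable pattern worth isolating. A minimal sketch:

```python
import atexit
import logging

logging.basicConfig(level=logging.INFO)

@atexit.register
def exit_handler():
    # Runs once at interpreter shutdown -- including sys.exit() --
    # so state restoration cannot be skipped by an early return.
    logging.info('atexit triggered, cleaning up')

logging.info('application running')
```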
251,695 | ramrod-project/database-brain | schema/brain/binary/filesystem.py | BrainStore.write | def write(self, path, data, offset, fh): # pragma: no cover
"""
This is a readonly filesystem right now
"""
# print("write {}".format(path))
with self.attr_lock:
base = self.attr[path][BASE_KEY]
staged = self.attr[path][STAGED_KEY]
if not staged.closed:
base.st_size += len(data)
staged.write(data)
return len(data) | python | def write(self, path, data, offset, fh): # pragma: no cover
"""
This is a readonly filesystem right now
"""
# print("write {}".format(path))
with self.attr_lock:
base = self.attr[path][BASE_KEY]
staged = self.attr[path][STAGED_KEY]
if not staged.closed:
base.st_size += len(data)
staged.write(data)
return len(data) | [
"def",
"write",
"(",
"self",
",",
"path",
",",
"data",
",",
"offset",
",",
"fh",
")",
":",
"# pragma: no cover",
"# print(\"write {}\".format(path))",
"with",
"self",
".",
"attr_lock",
":",
"base",
"=",
"self",
".",
"attr",
"[",
"path",
"]",
"[",
"BASE_KEY",
"]",
"staged",
"=",
"self",
".",
"attr",
"[",
"path",
"]",
"[",
"STAGED_KEY",
"]",
"if",
"not",
"staged",
".",
"closed",
":",
"base",
".",
"st_size",
"+=",
"len",
"(",
"data",
")",
"staged",
".",
"write",
"(",
"data",
")",
"return",
"len",
"(",
"data",
")"
] | This is a readonly filesystem right now | [
"This",
"is",
"a",
"readonly",
"filesystem",
"right",
"now"
] | b024cb44f34cabb9d80af38271ddb65c25767083 | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/binary/filesystem.py#L169-L180 |
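The handler appends to a per-path staged buffer under a lock, grows `st_size`, and returns the byte count FUSE expects; it ignores `offset`, so writes are assumed to arrive sequentially. The same bookkeeping reduced to plain Python (a toy stand-in, not the brain-database code):

```python
from io import BytesIO
from threading import Lock

class BufferedStore:
    def __init__(self):
        self.lock = Lock()
        self.staged = {}  # path -> BytesIO buffer of pending writes
        self.sizes = {}   # path -> running st_size

    def write(self, path, data):
        with self.lock:
            self.staged.setdefault(path, BytesIO()).write(data)
            self.sizes[path] = self.sizes.get(path, 0) + len(data)
            return len(data)  # FUSE treats the return as bytes written

store = BufferedStore()
store.write('/demo.bin', b'hello ')
store.write('/demo.bin', b'world')
print(store.sizes['/demo.bin'])  # 11
```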
251,696 | ramrod-project/database-brain | schema/brain/binary/filesystem.py | BrainStore._cleanup | def _cleanup(self): # pragma: no cover
"""
cleans up data that's been in the cache for a while
should be called from an async OS callback (such as ``release``) so it does not impact the user
:return:
"""
need_to_delete = [] # can't delete from a dict while iterating
with self.attr_lock:
now_time = time()
for path in self.cache:
if now_time - self.attr[path][TIMESTAMP_KEY] >= MAX_CACHE_TIME:
need_to_delete.append(path)
for path in need_to_delete:
del self.attr[path]
del self.cache[path] | python | def _cleanup(self): # pragma: no cover
"""
cleans up data that's been in the cache for a while
should be called from an async OS callback (such as ``release``) so it does not impact the user
:return:
"""
need_to_delete = [] # can't delete from a dict while iterating
with self.attr_lock:
now_time = time()
for path in self.cache:
if now_time - self.attr[path][TIMESTAMP_KEY] >= MAX_CACHE_TIME:
need_to_delete.append(path)
for path in need_to_delete:
del self.attr[path]
del self.cache[path] | [
"def",
"_cleanup",
"(",
"self",
")",
":",
"# pragma: no cover",
"need_to_delete",
"=",
"[",
"]",
"# can't delete from a dict while iterating",
"with",
"self",
".",
"attr_lock",
":",
"now_time",
"=",
"time",
"(",
")",
"for",
"path",
"in",
"self",
".",
"cache",
":",
"if",
"now_time",
"-",
"self",
".",
"attr",
"[",
"path",
"]",
"[",
"TIMESTAMP_KEY",
"]",
">=",
"MAX_CACHE_TIME",
":",
"need_to_delete",
".",
"append",
"(",
"path",
")",
"for",
"path",
"in",
"need_to_delete",
":",
"del",
"self",
".",
"attr",
"[",
"path",
"]",
"del",
"self",
".",
"cache",
"[",
"path",
"]"
] | cleans up data that's been in the cache for a while
should be called from an async OS callback (such as ``release``) so it does not impact the user
:return: | [
"cleans",
"up",
"data",
"that",
"s",
"been",
"in",
"the",
"cache",
"for",
"a",
"while"
] | b024cb44f34cabb9d80af38271ddb65c25767083 | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/binary/filesystem.py#L214-L229 |
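The two-pass shape, collect expired keys first and delete afterwards, matters because deleting from a dict while iterating over it raises a `RuntimeError`. A self-contained TTL-eviction sketch of the same logic (the one-second TTL is illustrative):

```python
from time import sleep, time

MAX_CACHE_TIME = 1  # seconds

cache, timestamps = {}, {}

def cleanup():
    expired = [path for path in cache
               if time() - timestamps[path] >= MAX_CACHE_TIME]
    for path in expired:  # safe: iteration over `cache` already finished
        del cache[path]
        del timestamps[path]

cache['/a'], timestamps['/a'] = b'payload', time()
sleep(1.1)
cleanup()
print(cache)  # {}
```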
251,697 | sci-bots/mpm | mpm/bin/__init__.py | validate_args | def validate_args(args):
'''
Apply custom validation and actions based on parsed arguments.
Parameters
----------
args : argparse.Namespace
Result from ``parse_args`` method of ``argparse.ArgumentParser``
instance.
Returns
-------
argparse.Namespace
Reference to input ``args``, which have been validated/updated.
'''
logging.basicConfig(level=getattr(logging, args.log_level.upper()))
if getattr(args, 'command', None) == 'install':
if args.requirements_file and not args.requirements_file.isfile():
print >> sys.stderr, ('Requirements file not available: {}'
.format(args.requirements_file))
raise SystemExit(-1)
elif not args.plugin and not args.requirements_file:
print >> sys.stderr, ('Requirements file or at least one plugin '
'must be specified.')
raise SystemExit(-2)
if hasattr(args, 'server_url'):
logger.debug('Using MicroDrop index server: "%s"', args.server_url)
args.server_url = SERVER_URL_TEMPLATE % args.server_url
if all([args.plugins_directory is None,
args.config_file is None]):
args.plugins_directory = get_plugins_directory()
elif args.plugins_directory is None:
args.config_file = args.config_file.realpath()
args.plugins_directory = get_plugins_directory(config_path=
args.config_file)
else:
args.plugins_directory = args.plugins_directory.realpath()
return args | python | def validate_args(args):
'''
Apply custom validation and actions based on parsed arguments.
Parameters
----------
args : argparse.Namespace
Result from ``parse_args`` method of ``argparse.ArgumentParser``
instance.
Returns
-------
argparse.Namespace
Reference to input ``args``, which have been validated/updated.
'''
logging.basicConfig(level=getattr(logging, args.log_level.upper()))
if getattr(args, 'command', None) == 'install':
if args.requirements_file and not args.requirements_file.isfile():
print >> sys.stderr, ('Requirements file not available: {}'
.format(args.requirements_file))
raise SystemExit(-1)
elif not args.plugin and not args.requirements_file:
print >> sys.stderr, ('Requirements file or at least one plugin '
'must be specified.')
raise SystemExit(-2)
if hasattr(args, 'server_url'):
logger.debug('Using MicroDrop index server: "%s"', args.server_url)
args.server_url = SERVER_URL_TEMPLATE % args.server_url
if all([args.plugins_directory is None,
args.config_file is None]):
args.plugins_directory = get_plugins_directory()
elif args.plugins_directory is None:
args.config_file = args.config_file.realpath()
args.plugins_directory = get_plugins_directory(config_path=
args.config_file)
else:
args.plugins_directory = args.plugins_directory.realpath()
return args | [
"def",
"validate_args",
"(",
"args",
")",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"getattr",
"(",
"logging",
",",
"args",
".",
"log_level",
".",
"upper",
"(",
")",
")",
")",
"if",
"getattr",
"(",
"args",
",",
"'command'",
",",
"None",
")",
"==",
"'install'",
":",
"if",
"args",
".",
"requirements_file",
"and",
"not",
"args",
".",
"requirements_file",
".",
"isfile",
"(",
")",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"'Requirements file not available: {}'",
".",
"format",
"(",
"args",
".",
"requirements_file",
")",
")",
"raise",
"SystemExit",
"(",
"-",
"1",
")",
"elif",
"not",
"args",
".",
"plugin",
"and",
"not",
"args",
".",
"requirements_file",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"'Requirements file or at least one plugin '",
"'must be specified.'",
")",
"raise",
"SystemExit",
"(",
"-",
"2",
")",
"if",
"hasattr",
"(",
"args",
",",
"'server_url'",
")",
":",
"logger",
".",
"debug",
"(",
"'Using MicroDrop index server: \"%s\"'",
",",
"args",
".",
"server_url",
")",
"args",
".",
"server_url",
"=",
"SERVER_URL_TEMPLATE",
"%",
"args",
".",
"server_url",
"if",
"all",
"(",
"[",
"args",
".",
"plugins_directory",
"is",
"None",
",",
"args",
".",
"config_file",
"is",
"None",
"]",
")",
":",
"args",
".",
"plugins_directory",
"=",
"get_plugins_directory",
"(",
")",
"elif",
"args",
".",
"plugins_directory",
"is",
"None",
":",
"args",
".",
"config_file",
"=",
"args",
".",
"config_file",
".",
"realpath",
"(",
")",
"args",
".",
"plugins_directory",
"=",
"get_plugins_directory",
"(",
"config_path",
"=",
"args",
".",
"config_file",
")",
"else",
":",
"args",
".",
"plugins_directory",
"=",
"args",
".",
"plugins_directory",
".",
"realpath",
"(",
")",
"return",
"args"
] | Apply custom validation and actions based on parsed arguments.
Parameters
----------
args : argparse.Namespace
Result from ``parse_args`` method of ``argparse.ArgumentParser``
instance.
Returns
-------
argparse.Namespace
Reference to input ``args``, which have been validated/updated. | [
"Apply",
"custom",
"validation",
"and",
"actions",
"based",
"on",
"parsed",
"arguments",
"."
] | a69651cda4b37ee6b17df4fe0809249e7f4dc536 | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/bin/__init__.py#L88-L126 |
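The row above is Python 2 (`print >> sys.stderr`), but the validate-after-parse pattern carries over directly. A Python 3 sketch with invented arguments (`example-plugin` is a placeholder, not a real MicroDrop plugin):

```python
import argparse
import logging
import sys

def validate(args):
    logging.basicConfig(level=getattr(logging, args.log_level.upper()))
    if args.command == 'install' and not args.plugin:
        print('At least one plugin must be specified.', file=sys.stderr)
        raise SystemExit(-2)
    return args

parser = argparse.ArgumentParser()
parser.add_argument('command', choices=['install', 'uninstall'])
parser.add_argument('plugin', nargs='*')
parser.add_argument('--log-level', default='info')

args = validate(parser.parse_args(['install', 'example-plugin']))
print(args.command, args.plugin)  # install ['example-plugin']
```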
251,698 | msuozzo/Aduro | aduro/manager.py | KindleProgressMgr.detect_events | def detect_events(self, max_attempts=3):
"""Returns a list of `Event`s detected from differences in state
between the current snapshot and the Kindle Library.
`books` and `progress` attributes will be set with the latest API
results upon successful completion of the function.
Returns:
If failed to retrieve progress, None
Else, the list of `Event`s
"""
# Attempt to retrieve current state from KindleAPI
for _ in xrange(max_attempts):
try:
with KindleCloudReaderAPI\
.get_instance(self.uname, self.pword) as kcr:
self.books = kcr.get_library_metadata()
self.progress = kcr.get_library_progress()
except KindleAPIError:
continue
else:
break
else:
return None
# Calculate diffs from new progress
progress_map = {book.asin: self.progress[book.asin].locs[1]
for book in self.books}
new_events = self._snapshot.calc_update_events(progress_map)
update_event = UpdateEvent(datetime.now().replace(microsecond=0))
new_events.append(update_event)
self._event_buf.extend(new_events)
return new_events | python | def detect_events(self, max_attempts=3):
"""Returns a list of `Event`s detected from differences in state
between the current snapshot and the Kindle Library.
`books` and `progress` attributes will be set with the latest API
results upon successful completion of the function.
Returns:
If failed to retrieve progress, None
Else, the list of `Event`s
"""
# Attempt to retrieve current state from KindleAPI
for _ in xrange(max_attempts):
try:
with KindleCloudReaderAPI\
.get_instance(self.uname, self.pword) as kcr:
self.books = kcr.get_library_metadata()
self.progress = kcr.get_library_progress()
except KindleAPIError:
continue
else:
break
else:
return None
# Calculate diffs from new progress
progress_map = {book.asin: self.progress[book.asin].locs[1]
for book in self.books}
new_events = self._snapshot.calc_update_events(progress_map)
update_event = UpdateEvent(datetime.now().replace(microsecond=0))
new_events.append(update_event)
self._event_buf.extend(new_events)
return new_events | [
"def",
"detect_events",
"(",
"self",
",",
"max_attempts",
"=",
"3",
")",
":",
"# Attempt to retrieve current state from KindleAPI",
"for",
"_",
"in",
"xrange",
"(",
"max_attempts",
")",
":",
"try",
":",
"with",
"KindleCloudReaderAPI",
".",
"get_instance",
"(",
"self",
".",
"uname",
",",
"self",
".",
"pword",
")",
"as",
"kcr",
":",
"self",
".",
"books",
"=",
"kcr",
".",
"get_library_metadata",
"(",
")",
"self",
".",
"progress",
"=",
"kcr",
".",
"get_library_progress",
"(",
")",
"except",
"KindleAPIError",
":",
"continue",
"else",
":",
"break",
"else",
":",
"return",
"None",
"# Calculate diffs from new progress",
"progress_map",
"=",
"{",
"book",
".",
"asin",
":",
"self",
".",
"progress",
"[",
"book",
".",
"asin",
"]",
".",
"locs",
"[",
"1",
"]",
"for",
"book",
"in",
"self",
".",
"books",
"}",
"new_events",
"=",
"self",
".",
"_snapshot",
".",
"calc_update_events",
"(",
"progress_map",
")",
"update_event",
"=",
"UpdateEvent",
"(",
"datetime",
".",
"now",
"(",
")",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
")",
"new_events",
".",
"append",
"(",
"update_event",
")",
"self",
".",
"_event_buf",
".",
"extend",
"(",
"new_events",
")",
"return",
"new_events"
] | Returns a list of `Event`s detected from differences in state
between the current snapshot and the Kindle Library.
`books` and `progress` attributes will be set with the latest API
results upon successful completion of the function.
Returns:
If failed to retrieve progress, None
Else, the list of `Event`s | [
"Returns",
"a",
"list",
"of",
"Event",
"s",
"detected",
"from",
"differences",
"in",
"state",
"between",
"the",
"current",
"snapshot",
"and",
"the",
"Kindle",
"Library",
"."
] | 338eeb1deeff30c198e721b660ae4daca3660911 | https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/manager.py#L36-L70 |
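The retry loop is the classic `for`/`else` idiom: `break` on success, and the `else` branch fires only if every attempt failed. Isolated below, with a hypothetical flaky call standing in for the Kindle API:

```python
class APIError(Exception):
    pass

calls = {'n': 0}

def fetch():
    calls['n'] += 1
    if calls['n'] < 3:  # fail twice, then succeed
        raise APIError('transient failure')
    return {'progress': 42}

for _ in range(3):  # the original uses Python 2's xrange
    try:
        state = fetch()
    except APIError:
        continue
    else:
        break
else:
    state = None  # every attempt raised

print(state)  # {'progress': 42}
```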
251,699 | msuozzo/Aduro | aduro/manager.py | KindleProgressMgr.commit_events | def commit_events(self):
"""Applies all outstanding `Event`s to the internal state
"""
# Events are sorted such that, when applied in order, each event
# represents a logical change in state. That is, an event never requires
# future events' data in order to be parsed.
# e.g. All ADDs must go before START READINGs
# All START READINGs before all READs
for event in sorted(self._event_buf):
self.store.record_event(event)
self._snapshot.process_event(event)
self._event_buf = [] | python | def commit_events(self):
"""Applies all outstanding `Event`s to the internal state
"""
# Events are sorted such that, when applied in order, each event
# represents a logical change in state. That is, an event never requires
# future events' data in order to be parsed.
# e.g. All ADDs must go before START READINGs
# All START READINGs before all READs
for event in sorted(self._event_buf):
self.store.record_event(event)
self._snapshot.process_event(event)
self._event_buf = [] | [
"def",
"commit_events",
"(",
"self",
")",
":",
"# Events are sorted such that, when applied in order, each event",
"# represents a logical change in state. That is, an event never requires",
"# future events' data in order to be parsed.",
"# e.g. All ADDs must go before START READINGs",
"# All START READINGs before all READs",
"for",
"event",
"in",
"sorted",
"(",
"self",
".",
"_event_buf",
")",
":",
"self",
".",
"store",
".",
"record_event",
"(",
"event",
")",
"self",
".",
"_snapshot",
".",
"process_event",
"(",
"event",
")",
"self",
".",
"_event_buf",
"=",
"[",
"]"
] | Applies all outstanding `Event`s to the internal state | [
"Applies",
"all",
"outstanding",
"Event",
"s",
"to",
"the",
"internal",
"state"
] | 338eeb1deeff30c198e721b660ae4daca3660911 | https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/manager.py#L80-L91 |
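The comment block is the interesting part: events must sort into causal order (ADD before START READING before READ) so that replaying them never needs data from a later event. A tiny sketch of that sort-then-apply idea (the priorities and names are invented to mirror the comment, not Aduro's real event classes):

```python
from dataclasses import dataclass, field

@dataclass(order=True)
class Event:
    priority: int                     # ADD=0 < START_READING=1 < READ=2
    name: str = field(compare=False)  # comparison uses priority only

buffer = [Event(2, 'READ'), Event(0, 'ADD'), Event(1, 'START_READING')]

applied = [event.name for event in sorted(buffer)]
buffer.clear()  # mirror the `self._event_buf = []` reset

print(applied)  # ['ADD', 'START_READING', 'READ']
```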