id (int32: 0-252k) | repo (stringlengths: 7-55) | path (stringlengths: 4-127) | func_name (stringlengths: 1-88) | original_string (stringlengths: 75-19.8k) | language (stringclasses: 1 value) | code (stringlengths: 75-19.8k) | code_tokens (sequence) | docstring (stringlengths: 3-17.3k) | docstring_tokens (sequence) | sha (stringlengths: 40) | url (stringlengths: 87-242) |
---|---|---|---|---|---|---|---|---|---|---|---|
251,700 | florianpaquet/mease | mease/registry.py | Mease._get_registry_names | def _get_registry_names(self, registry):
"""
Returns function names for a registry
"""
return ', '.join(
f.__name__ if not isinstance(f, tuple) else f[0].__name__
for f in getattr(self, registry, [])) | python | def _get_registry_names(self, registry):
"""
Returns function names for a registry
"""
return ', '.join(
f.__name__ if not isinstance(f, tuple) else f[0].__name__
for f in getattr(self, registry, [])) | [
"def",
"_get_registry_names",
"(",
"self",
",",
"registry",
")",
":",
"return",
"', '",
".",
"join",
"(",
"f",
".",
"__name__",
"if",
"not",
"isinstance",
"(",
"f",
",",
"tuple",
")",
"else",
"f",
"[",
"0",
"]",
".",
"__name__",
"for",
"f",
"in",
"getattr",
"(",
"self",
",",
"registry",
",",
"[",
"]",
")",
")"
] | Returns function names for a registry | [
"Returns",
"functions",
"names",
"for",
"a",
"registry"
] | b9fbd08bbe162c8890c2a2124674371170c319ef | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L33-L39 |
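A minimal standalone sketch of the name-joining expression above. The registry contents here are hypothetical; the tuple form mirrors how `receiver` (row 251,701 below) stores `(func, json)` pairs:

```python
def on_open(client, clients):
    pass

def on_message(client, clients, message):
    pass

# Registries hold either bare callables or (func, flag) tuples.
registry = [on_open, (on_message, True)]

names = ', '.join(
    f.__name__ if not isinstance(f, tuple) else f[0].__name__
    for f in registry)
print(names)  # on_open, on_message
```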
251,701 | florianpaquet/mease | mease/registry.py | Mease.receiver | def receiver(self, func=None, json=False):
"""
Registers a receiver function
"""
self.receivers.append((func, json)) | python | def receiver(self, func=None, json=False):
"""
Registers a receiver function
"""
self.receivers.append((func, json)) | [
"def",
"receiver",
"(",
"self",
",",
"func",
"=",
"None",
",",
"json",
"=",
"False",
")",
":",
"self",
".",
"receivers",
".",
"append",
"(",
"(",
"func",
",",
"json",
")",
")"
] | Registers a receiver function | [
"Registers",
"a",
"receiver",
"function"
] | b9fbd08bbe162c8890c2a2124674371170c319ef | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L58-L62 |
251,702 | florianpaquet/mease | mease/registry.py | Mease.sender | def sender(self, func, routing=None, routing_re=None):
"""
Registers a sender function
"""
if routing and not isinstance(routing, list):
routing = [routing]
if routing_re:
if not isinstance(routing_re, list):
routing_re = [routing_re]
routing_re[:] = [re.compile(r) for r in routing_re]
self.senders.append((func, routing, routing_re)) | python | def sender(self, func, routing=None, routing_re=None):
"""
Registers a sender function
"""
if routing and not isinstance(routing, list):
routing = [routing]
if routing_re:
if not isinstance(routing_re, list):
routing_re = [routing_re]
routing_re[:] = [re.compile(r) for r in routing_re]
self.senders.append((func, routing, routing_re)) | [
"def",
"sender",
"(",
"self",
",",
"func",
",",
"routing",
"=",
"None",
",",
"routing_re",
"=",
"None",
")",
":",
"if",
"routing",
"and",
"not",
"isinstance",
"(",
"routing",
",",
"list",
")",
":",
"routing",
"=",
"[",
"routing",
"]",
"if",
"routing_re",
":",
"if",
"not",
"isinstance",
"(",
"routing_re",
",",
"list",
")",
":",
"routing_re",
"=",
"[",
"routing_re",
"]",
"routing_re",
"[",
":",
"]",
"=",
"[",
"re",
".",
"compile",
"(",
"r",
")",
"for",
"r",
"in",
"routing_re",
"]",
"self",
".",
"senders",
".",
"append",
"(",
"(",
"func",
",",
"routing",
",",
"routing_re",
")",
")"
] | Registers a sender function | [
"Registers",
"a",
"sender",
"function"
] | b9fbd08bbe162c8890c2a2124674371170c319ef | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L65-L77 |
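A quick sketch of the routing normalization performed above, with hypothetical routing keys: a scalar becomes a one-element list, and every regex pattern is compiled in place:

```python
import re

routing = "chat"                 # hypothetical string routing key
routing_re = r"^user\.\d+$"      # hypothetical regex routing key

if routing and not isinstance(routing, list):
    routing = [routing]
if routing_re:
    if not isinstance(routing_re, list):
        routing_re = [routing_re]
    routing_re[:] = [re.compile(r) for r in routing_re]

print(routing)                               # ['chat']
print(bool(routing_re[0].match("user.42")))  # True
```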
251,703 | florianpaquet/mease | mease/registry.py | Mease.call_openers | def call_openers(self, client, clients_list):
"""
Calls openers callbacks
"""
for func in self.openers:
func(client, clients_list) | python | def call_openers(self, client, clients_list):
"""
Calls openers callbacks
"""
for func in self.openers:
func(client, clients_list) | [
"def",
"call_openers",
"(",
"self",
",",
"client",
",",
"clients_list",
")",
":",
"for",
"func",
"in",
"self",
".",
"openers",
":",
"func",
"(",
"client",
",",
"clients_list",
")"
] | Calls openers callbacks | [
"Calls",
"openers",
"callbacks"
] | b9fbd08bbe162c8890c2a2124674371170c319ef | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L81-L86 |
251,704 | florianpaquet/mease | mease/registry.py | Mease.call_closers | def call_closers(self, client, clients_list):
"""
Calls closers callbacks
"""
for func in self.closers:
func(client, clients_list) | python | def call_closers(self, client, clients_list):
"""
Calls closers callbacks
"""
for func in self.closers:
func(client, clients_list) | [
"def",
"call_closers",
"(",
"self",
",",
"client",
",",
"clients_list",
")",
":",
"for",
"func",
"in",
"self",
".",
"closers",
":",
"func",
"(",
"client",
",",
"clients_list",
")"
] | Calls closers callbacks | [
"Calls",
"closers",
"callbacks"
] | b9fbd08bbe162c8890c2a2124674371170c319ef | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L88-L93 |
251,705 | florianpaquet/mease | mease/registry.py | Mease.call_receivers | def call_receivers(self, client, clients_list, message):
"""
Calls receivers callbacks
"""
# Try to parse JSON
try:
json_message = json.loads(message)
except ValueError:
json_message = None
for func, to_json in self.receivers:
# Check if json version is available
if to_json:
if json_message is None:
continue
msg = json_message
else:
msg = message
# Call callback
func(client, clients_list, msg) | python | def call_receivers(self, client, clients_list, message):
"""
Calls receivers callbacks
"""
# Try to parse JSON
try:
json_message = json.loads(message)
except ValueError:
json_message = None
for func, to_json in self.receivers:
# Check if json version is available
if to_json:
if json_message is None:
continue
msg = json_message
else:
msg = message
# Call callback
func(client, clients_list, msg) | [
"def",
"call_receivers",
"(",
"self",
",",
"client",
",",
"clients_list",
",",
"message",
")",
":",
"# Try to parse JSON",
"try",
":",
"json_message",
"=",
"json",
".",
"loads",
"(",
"message",
")",
"except",
"ValueError",
":",
"json_message",
"=",
"None",
"for",
"func",
",",
"to_json",
"in",
"self",
".",
"receivers",
":",
"# Check if json version is available",
"if",
"to_json",
":",
"if",
"json_message",
"is",
"None",
":",
"continue",
"msg",
"=",
"json_message",
"else",
":",
"msg",
"=",
"message",
"# Call callback",
"func",
"(",
"client",
",",
"clients_list",
",",
"msg",
")"
] | Calls receivers callbacks | [
"Calls",
"receivers",
"callbacks"
] | b9fbd08bbe162c8890c2a2124674371170c319ef | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L95-L116 |
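The JSON handling above can be exercised on its own; this sketch (with hypothetical receiver callbacks) shows that JSON-only receivers are skipped when a message fails to parse:

```python
import json

def log_raw(client, clients, message):
    print("raw:", message)

def log_json(client, clients, message):
    print("json kind:", message["kind"])

receivers = [(log_raw, False), (log_json, True)]  # (func, json) pairs

for message in ['{"kind": "ping"}', 'not json']:
    try:
        json_message = json.loads(message)
    except ValueError:
        json_message = None
    for func, to_json in receivers:
        if to_json and json_message is None:
            continue  # skip JSON-only receivers for unparseable input
        func(None, [], json_message if to_json else message)
```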
251,706 | florianpaquet/mease | mease/registry.py | Mease.call_senders | def call_senders(self, routing, clients_list, *args, **kwargs):
"""
Calls senders callbacks
"""
for func, routings, routings_re in self.senders:
call_callback = False
# Message is published globally
if routing is None or (routings is None and routings_re is None):
call_callback = True
# Message is not published globally
else:
# Message is caught by a string routing key
if routings and routing in routings:
call_callback = True
# Message is caught by a regex routing key
if routings_re and any(r.match(routing) for r in routings_re):
call_callback = True
if call_callback:
func(routing, clients_list, *args, **kwargs) | python | def call_senders(self, routing, clients_list, *args, **kwargs):
"""
Calls senders callbacks
"""
for func, routings, routings_re in self.senders:
call_callback = False
# Message is published globally
if routing is None or (routings is None and routings_re is None):
call_callback = True
# Message is not published globally
else:
# Message is caught by a string routing key
if routings and routing in routings:
call_callback = True
# Message is caught by a regex routing key
if routings_re and any(r.match(routing) for r in routings_re):
call_callback = True
if call_callback:
func(routing, clients_list, *args, **kwargs) | [
"def",
"call_senders",
"(",
"self",
",",
"routing",
",",
"clients_list",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"func",
",",
"routings",
",",
"routings_re",
"in",
"self",
".",
"senders",
":",
"call_callback",
"=",
"False",
"# Message is published globally",
"if",
"routing",
"is",
"None",
"or",
"(",
"routings",
"is",
"None",
"and",
"routings_re",
"is",
"None",
")",
":",
"call_callback",
"=",
"True",
"# Message is not published globally",
"else",
":",
"# Message is catched by a string routing key",
"if",
"routings",
"and",
"routing",
"in",
"routings",
":",
"call_callback",
"=",
"True",
"# Message is catched by a regex routing key",
"if",
"routings_re",
"and",
"any",
"(",
"r",
".",
"match",
"(",
"routing",
")",
"for",
"r",
"in",
"routings_re",
")",
":",
"call_callback",
"=",
"True",
"if",
"call_callback",
":",
"func",
"(",
"routing",
",",
"clients_list",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Calls senders callbacks | [
"Calls",
"senders",
"callbacks"
] | b9fbd08bbe162c8890c2a2124674371170c319ef | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L118-L141 |
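The dispatch test above reduces to three cases; a hedged standalone sketch:

```python
import re

def matches(routing, routings, routings_re):
    # Sketch of the call_callback logic in call_senders above.
    if routing is None or (routings is None and routings_re is None):
        return True   # published globally, or sender has no routing filter
    if routings and routing in routings:
        return True   # exact string routing key
    if routings_re and any(r.match(routing) for r in routings_re):
        return True   # regex routing key
    return False

print(matches("chat.room1", None, [re.compile(r"^chat\.")]))  # True
print(matches("admin", ["chat"], None))                       # False
print(matches(None, ["chat"], None))                          # True (global)
```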
251,707 | florianpaquet/mease | mease/registry.py | Mease.run_websocket_server | def run_websocket_server(self, host='localhost', port=9090, debug=False):
"""
Runs websocket server
"""
from .server import MeaseWebSocketServerFactory
websocket_factory = MeaseWebSocketServerFactory(
mease=self, host=host, port=port, debug=debug)
websocket_factory.run_server() | python | def run_websocket_server(self, host='localhost', port=9090, debug=False):
"""
Runs websocket server
"""
from .server import MeaseWebSocketServerFactory
websocket_factory = MeaseWebSocketServerFactory(
mease=self, host=host, port=port, debug=debug)
websocket_factory.run_server() | [
"def",
"run_websocket_server",
"(",
"self",
",",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"9090",
",",
"debug",
"=",
"False",
")",
":",
"from",
".",
"server",
"import",
"MeaseWebSocketServerFactory",
"websocket_factory",
"=",
"MeaseWebSocketServerFactory",
"(",
"mease",
"=",
"self",
",",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"debug",
"=",
"debug",
")",
"websocket_factory",
".",
"run_server",
"(",
")"
] | Runs websocket server | [
"Runs",
"websocket",
"server"
] | b9fbd08bbe162c8890c2a2124674371170c319ef | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L155-L163 |
251,708 | drcloud/stackclimber | stackclimber.py | stackclimber | def stackclimber(height=0): # http://stackoverflow.com/a/900404/48251
"""
Obtain the name of the caller's module. Uses the inspect module to find
the caller's position in the module hierarchy. With the optional height
argument, finds the caller's caller, and so forth.
"""
caller = inspect.stack()[height+1]
scope = caller[0].f_globals
path = scope['__name__'].split('__main__')[0].strip('.')
if path == '':
if scope['__package__']:
path = scope['__package__']
else:
path = os.path.basename(sys.argv[0]).split('.')[0]
return path | python | def stackclimber(height=0): # http://stackoverflow.com/a/900404/48251
"""
Obtain the name of the caller's module. Uses the inspect module to find
the caller's position in the module hierarchy. With the optional height
argument, finds the caller's caller, and so forth.
"""
caller = inspect.stack()[height+1]
scope = caller[0].f_globals
path = scope['__name__'].split('__main__')[0].strip('.')
if path == '':
if scope['__package__']:
path = scope['__package__']
else:
path = os.path.basename(sys.argv[0]).split('.')[0]
return path | [
"def",
"stackclimber",
"(",
"height",
"=",
"0",
")",
":",
"# http://stackoverflow.com/a/900404/48251",
"caller",
"=",
"inspect",
".",
"stack",
"(",
")",
"[",
"height",
"+",
"1",
"]",
"scope",
"=",
"caller",
"[",
"0",
"]",
".",
"f_globals",
"path",
"=",
"scope",
"[",
"'__name__'",
"]",
".",
"split",
"(",
"'__main__'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'.'",
")",
"if",
"path",
"==",
"''",
":",
"if",
"scope",
"[",
"'__package__'",
"]",
":",
"path",
"=",
"scope",
"[",
"'__package__'",
"]",
"else",
":",
"path",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"return",
"path"
] | Obtain the name of the caller's module. Uses the inspect module to find
the caller's position in the module hierarchy. With the optional height
argument, finds the caller's caller, and so forth. | [
"Obtain",
"the",
"name",
"of",
"the",
"caller",
"s",
"module",
".",
"Uses",
"the",
"inspect",
"module",
"to",
"find",
"the",
"caller",
"s",
"position",
"in",
"the",
"module",
"hierarchy",
".",
"With",
"the",
"optional",
"height",
"argument",
"finds",
"the",
"caller",
"s",
"caller",
"and",
"so",
"forth",
"."
] | 595b66474171bc5fe08e7361622953c309d00ae5 | https://github.com/drcloud/stackclimber/blob/595b66474171bc5fe08e7361622953c309d00ae5/stackclimber.py#L6-L20 |
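The core lookup is `inspect.stack()[height + 1][0].f_globals['__name__']`; a minimal sketch of that step, without the `__main__` stripping and package fallbacks of the full function:

```python
import inspect

def calling_module_name(height=0):
    # Walk up the stack and read __name__ from the caller's globals.
    caller = inspect.stack()[height + 1]
    return caller[0].f_globals['__name__']

print(calling_module_name())  # '__main__' when run as a script
```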
251,709 | EventTeam/beliefs | src/beliefs/belief_utils.py | flatten_list | def flatten_list(l):
""" Nested lists to single-level list, does not split strings"""
return list(chain.from_iterable(repeat(x,1) if isinstance(x,str) else x for x in l)) | python | def flatten_list(l):
""" Nested lists to single-level list, does not split strings"""
return list(chain.from_iterable(repeat(x,1) if isinstance(x,str) else x for x in l)) | [
"def",
"flatten_list",
"(",
"l",
")",
":",
"return",
"list",
"(",
"chain",
".",
"from_iterable",
"(",
"repeat",
"(",
"x",
",",
"1",
")",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
"else",
"x",
"for",
"x",
"in",
"l",
")",
")"
] | Nested lists to single-level list, does not split strings | [
"Nested",
"lists",
"to",
"single",
"-",
"level",
"list",
"does",
"not",
"split",
"strings"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/belief_utils.py#L55-L57 |
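A quick behavioural check (assuming the helper is importable from `beliefs.belief_utils`; the module must import `chain` and `repeat` from `itertools`):

```python
print(flatten_list(["ab", ["c", "d"], "ef"]))  # ['ab', 'c', 'd', 'ef']
```

Note that non-string scalars such as ints would raise a `TypeError`, since only strings are exempted from iteration.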
251,710 | EventTeam/beliefs | src/beliefs/belief_utils.py | list_diff | def list_diff(list1, list2):
""" Ssymetric list difference """
diff_list = []
for item in list1:
if not item in list2:
diff_list.append(item)
for item in list2:
if not item in list1:
diff_list.append(item)
return diff_list | python | def list_diff(list1, list2):
""" Ssymetric list difference """
diff_list = []
for item in list1:
if not item in list2:
diff_list.append(item)
for item in list2:
if not item in list1:
diff_list.append(item)
return diff_list | [
"def",
"list_diff",
"(",
"list1",
",",
"list2",
")",
":",
"diff_list",
"=",
"[",
"]",
"for",
"item",
"in",
"list1",
":",
"if",
"not",
"item",
"in",
"list2",
":",
"diff_list",
".",
"append",
"(",
"item",
")",
"for",
"item",
"in",
"list2",
":",
"if",
"not",
"item",
"in",
"list1",
":",
"diff_list",
".",
"append",
"(",
"item",
")",
"return",
"diff_list"
] | Symmetric list difference | [
"Ssymetric",
"list",
"difference"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/belief_utils.py#L59-L68 |
251,711 | EventTeam/beliefs | src/beliefs/belief_utils.py | asym_list_diff | def asym_list_diff(list1, list2):
""" Asymmetric list difference """
diff_list = []
for item in list1:
if not item in list2:
diff_list.append(item)
return diff_list | python | def asym_list_diff(list1, list2):
""" Asymmetric list difference """
diff_list = []
for item in list1:
if not item in list2:
diff_list.append(item)
return diff_list | [
"def",
"asym_list_diff",
"(",
"list1",
",",
"list2",
")",
":",
"diff_list",
"=",
"[",
"]",
"for",
"item",
"in",
"list1",
":",
"if",
"not",
"item",
"in",
"list2",
":",
"diff_list",
".",
"append",
"(",
"item",
")",
"return",
"diff_list"
] | Asymmetric list difference | [
"Asymmetric",
"list",
"difference"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/belief_utils.py#L70-L76 |
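A usage sketch covering this row and the previous one (assuming both helpers are importable from `beliefs.belief_utils`):

```python
a, b = [1, 2, 3], [2, 3, 4]

print(list_diff(a, b))       # [1, 4]  items unique to either list
print(asym_list_diff(a, b))  # [1]     items in a missing from b
print(asym_list_diff(b, a))  # [4]
```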
251,712 | EventTeam/beliefs | src/beliefs/belief_utils.py | next_tokens_in_sequence | def next_tokens_in_sequence(observed, current):
""" Given the observed list of tokens, and the current list,
finds out what should be next next emitted word
"""
idx = 0
for word in current:
if observed[idx:].count(word) != 0:
found_pos = observed.index(word, idx)
idx = max(idx + 1, found_pos)
# otherwise, don't increment idx
if idx < len(observed):
return observed[idx:]
else:
return [] | python | def next_tokens_in_sequence(observed, current):
""" Given the observed list of tokens, and the current list,
finds out what should be the next emitted word
"""
idx = 0
for word in current:
if observed[idx:].count(word) != 0:
found_pos = observed.index(word, idx)
idx = max(idx + 1, found_pos)
# otherwise, don't increment idx
if idx < len(observed):
return observed[idx:]
else:
return [] | [
"def",
"next_tokens_in_sequence",
"(",
"observed",
",",
"current",
")",
":",
"idx",
"=",
"0",
"for",
"word",
"in",
"current",
":",
"if",
"observed",
"[",
"idx",
":",
"]",
".",
"count",
"(",
"word",
")",
"!=",
"0",
":",
"found_pos",
"=",
"observed",
".",
"index",
"(",
"word",
",",
"idx",
")",
"idx",
"=",
"max",
"(",
"idx",
"+",
"1",
",",
"found_pos",
")",
"# otherwise, don't increment idx",
"if",
"idx",
"<",
"len",
"(",
"observed",
")",
":",
"return",
"observed",
"[",
"idx",
":",
"]",
"else",
":",
"return",
"[",
"]"
] | Given the observed list of tokens, and the current list,
finds out what should be the next emitted word | [
"Given",
"the",
"observed",
"list",
"of",
"tokens",
"and",
"the",
"current",
"list",
"finds",
"out",
"what",
"should",
"be",
"next",
"next",
"emitted",
"word"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/belief_utils.py#L148-L161 |
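A usage sketch under the same import assumption:

```python
observed = ["the", "quick", "brown", "fox"]

print(next_tokens_in_sequence(observed, ["the", "quick"]))  # ['brown', 'fox']
print(next_tokens_in_sequence(observed, observed))          # []
```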
251,713 | EventTeam/beliefs | src/beliefs/cells/numeric.py | IntervalCell.to_latex | def to_latex(self):
""" Returns an interval representation """
if self.low == self.high:
if self.low * 10 % 10 == 0:
return "{0:d}".format(int(self.low))
else:
return "{0:0.2f}".format(self.low)
else:
t = ""
if self.low == -np.inf:
t += r"(-\infty, "
elif self.low * 10 % 10 == 0:
t += r"[{0:d}, ".format(int(self.low))
else:
t += r"[{0:0.2f}, ".format(self.low)
if self.high == np.inf:
t += r"\infty)"
elif self.high * 10 % 10 == 0:
t += r"{0:d}]".format(int(self.high))
else:
t += r"{0:0.2f}]".format(self.high)
return t | python | def to_latex(self):
""" Returns an interval representation """
if self.low == self.high:
if self.low * 10 % 10 == 0:
return "{0:d}".format(int(self.low))
else:
return "{0:0.2f}".format(self.low)
else:
t = ""
if self.low == -np.inf:
t += r"(-\infty, "
elif self.low * 10 % 10 == 0:
t += r"[{0:d}, ".format(int(self.low))
else:
t += r"[{0:0.2f}, ".format(self.low)
if self.high == np.inf:
t += r"\infty)"
elif self.high * 10 % 10 == 0:
t += r"{0:d}]".format(int(self.high))
else:
t += r"{0:0.2f}]".format(self.high)
return t | [
"def",
"to_latex",
"(",
"self",
")",
":",
"if",
"self",
".",
"low",
"==",
"self",
".",
"high",
":",
"if",
"self",
".",
"low",
"*",
"10",
"%",
"10",
"==",
"0",
":",
"return",
"\"{0:d}\"",
".",
"format",
"(",
"int",
"(",
"self",
".",
"low",
")",
")",
"else",
":",
"return",
"\"{0:0.2f}\"",
".",
"format",
"(",
"self",
".",
"low",
")",
"else",
":",
"t",
"=",
"\"\"",
"if",
"self",
".",
"low",
"==",
"-",
"np",
".",
"inf",
":",
"t",
"+=",
"r\"(-\\infty, \"",
"elif",
"self",
".",
"low",
"*",
"10",
"%",
"10",
"==",
"0",
":",
"t",
"+=",
"r\"[{0:d}, \"",
".",
"format",
"(",
"int",
"(",
"self",
".",
"low",
")",
")",
"else",
":",
"t",
"+=",
"r\"[{0:0.2f}, \"",
".",
"format",
"(",
"self",
".",
"low",
")",
"if",
"self",
".",
"high",
"==",
"np",
".",
"inf",
":",
"t",
"+=",
"r\"\\infty)\"",
"elif",
"self",
".",
"high",
"*",
"10",
"%",
"10",
"==",
"0",
":",
"t",
"+=",
"r\"{0:d}]\"",
".",
"format",
"(",
"int",
"(",
"self",
".",
"high",
")",
")",
"else",
":",
"t",
"+=",
"r\"{0:0.2f}]\"",
".",
"format",
"(",
"self",
".",
"high",
")",
"return",
"t"
] | Returns an interval representation | [
"Returns",
"an",
"interval",
"representation"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/numeric.py#L244-L265 |
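A hedged usage sketch: `to_latex` reads only the `low`/`high` attributes, so a minimal stand-in object is enough to exercise the formatting branches (assuming `IntervalCell` is importable from `beliefs.cells.numeric`, per the row's path):

```python
import numpy as np
from beliefs.cells.numeric import IntervalCell

class _FakeCell(object):
    # Minimal stand-in: to_latex reads only self.low and self.high.
    def __init__(self, low, high):
        self.low, self.high = low, high

print(IntervalCell.to_latex(_FakeCell(5, 5)))           # 5
print(IntervalCell.to_latex(_FakeCell(0.5, 2)))         # [0.50, 2]
print(IntervalCell.to_latex(_FakeCell(-np.inf, 3.25)))  # (-\infty, 3.25]
```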
251,714 | oubiga/respect | respect/main.py | main | def main():
"""
Main entry point for the `respect` command.
"""
args = parse_respect_args(sys.argv[1:])
if validate_username(args['<username>']):
print("processing...")
else:
print("@"+args['<username>'], "is not a valid username.")
print("Username may only contain alphanumeric ASCII characters or "
"dashes and cannot begin with a dash.")
return
try:
r = requests.get(urljoin(GITHUB_USERS, args['<username>']))
except ConnectionErrorException as e:
print('Connection Error from requests. Request again, please.')
print(e)
return
if r.status_code == 404 or r.status_code == 403:
session = login(401, args=args)
return dispatch(args, r, session)
elif r.status_code == 200:
return dispatch(args, response=r)
else:
raise UnknownStausCodeException | python | def main():
"""
Main entry point for the `respect` command.
"""
args = parse_respect_args(sys.argv[1:])
if validate_username(args['<username>']):
print("processing...")
else:
print("@"+args['<username>'], "is not a valid username.")
print("Username may only contain alphanumeric ASCII characters or "
"dashes and cannot begin with a dash.")
return
try:
r = requests.get(urljoin(GITHUB_USERS, args['<username>']))
except ConnectionErrorException as e:
print('Connection Error from requests. Request again, please.')
print(e)
return
if r.status_code == 404 or r.status_code == 403:
session = login(401, args=args)
return dispatch(args, r, session)
elif r.status_code == 200:
return dispatch(args, response=r)
else:
raise UnknownStausCodeException | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"parse_respect_args",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"if",
"validate_username",
"(",
"args",
"[",
"'<username>'",
"]",
")",
":",
"print",
"(",
"\"processing...\"",
")",
"else",
":",
"print",
"(",
"\"@\"",
"+",
"args",
"[",
"'<username>'",
"]",
",",
"\"is not a valid username.\"",
")",
"print",
"(",
"\"Username may only contain alphanumeric ASCII characters or \"",
"\"dashes and cannot begin with a dash.\"",
")",
"return",
"try",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"urljoin",
"(",
"GITHUB_USERS",
",",
"args",
"[",
"'<username>'",
"]",
")",
")",
"except",
"ConnectionErrorException",
"as",
"e",
":",
"print",
"(",
"'Connection Error from requests. Request again, please.'",
")",
"print",
"(",
"e",
")",
"if",
"r",
".",
"status_code",
"==",
"404",
"or",
"r",
".",
"status_code",
"==",
"403",
":",
"session",
"=",
"login",
"(",
"401",
",",
"args",
"=",
"args",
")",
"return",
"dispatch",
"(",
"args",
",",
"r",
",",
"session",
")",
"elif",
"r",
".",
"status_code",
"==",
"200",
":",
"return",
"dispatch",
"(",
"args",
",",
"response",
"=",
"r",
")",
"else",
":",
"raise",
"UnknownStausCodeException"
] | Main entry point for the `respect` command. | [
"Main",
"entry",
"point",
"for",
"the",
"respect",
"command",
"."
] | 550554ec4d3139379d03cb8f82a8cd2d80c3ad62 | https://github.com/oubiga/respect/blob/550554ec4d3139379d03cb8f82a8cd2d80c3ad62/respect/main.py#L56-L82 |
251,715 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | clear_caches | def clear_caches(): # suppress(unused-function)
"""Clear all caches."""
for _, reader in _spellchecker_cache.values():
reader.close()
_spellchecker_cache.clear()
_valid_words_cache.clear()
_user_dictionary_cache.clear() | python | def clear_caches(): # suppress(unused-function)
"""Clear all caches."""
for _, reader in _spellchecker_cache.values():
reader.close()
_spellchecker_cache.clear()
_valid_words_cache.clear()
_user_dictionary_cache.clear() | [
"def",
"clear_caches",
"(",
")",
":",
"# suppress(unused-function)",
"for",
"_",
",",
"reader",
"in",
"_spellchecker_cache",
".",
"values",
"(",
")",
":",
"reader",
".",
"close",
"(",
")",
"_spellchecker_cache",
".",
"clear",
"(",
")",
"_valid_words_cache",
".",
"clear",
"(",
")",
"_user_dictionary_cache",
".",
"clear",
"(",
")"
] | Clear all caches. | [
"Clear",
"all",
"caches",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L61-L68 |
251,716 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _comment_system_for_file | def _comment_system_for_file(contents):
"""For file contents, return the comment system."""
if contents[0] == "#":
return FileCommentSystem(begin="#", middle="", end="", single="#")
elif contents[:2] == "/*":
return FileCommentSystem(begin="/*", middle="*", end="*/", single="//")
elif contents[:2] == "//":
return FileCommentSystem(begin="//", middle="//", end="", single="//")
elif contents[:3] == "rem":
return FileCommentSystem(begin="rem",
middle="rem",
end="",
single="rem")
else:
raise RuntimeError("Couldn't detect comment "
"system from {0}".format(contents[:3])) | python | def _comment_system_for_file(contents):
"""For file contents, return the comment system."""
if contents[0] == "#":
return FileCommentSystem(begin="#", middle="", end="", single="#")
elif contents[:2] == "/*":
return FileCommentSystem(begin="/*", middle="*", end="*/", single="//")
elif contents[:2] == "//":
return FileCommentSystem(begin="//", middle="//", end="", single="//")
elif contents[:3] == "rem":
return FileCommentSystem(begin="rem",
middle="rem",
end="",
single="rem")
else:
raise RuntimeError("Couldn't detect comment "
"system from {0}".format(contents[:3])) | [
"def",
"_comment_system_for_file",
"(",
"contents",
")",
":",
"if",
"contents",
"[",
"0",
"]",
"==",
"\"#\"",
":",
"return",
"FileCommentSystem",
"(",
"begin",
"=",
"\"#\"",
",",
"middle",
"=",
"\"\"",
",",
"end",
"=",
"\"\"",
",",
"single",
"=",
"\"#\"",
")",
"elif",
"contents",
"[",
":",
"2",
"]",
"==",
"\"/*\"",
":",
"return",
"FileCommentSystem",
"(",
"begin",
"=",
"\"/*\"",
",",
"middle",
"=",
"\"*\"",
",",
"end",
"=",
"\"*/\"",
",",
"single",
"=",
"\"//\"",
")",
"elif",
"contents",
"[",
":",
"2",
"]",
"==",
"\"//\"",
":",
"return",
"FileCommentSystem",
"(",
"begin",
"=",
"\"//\"",
",",
"middle",
"=",
"\"//\"",
",",
"end",
"=",
"\"\"",
",",
"single",
"=",
"\"//\"",
")",
"elif",
"contents",
"[",
":",
"3",
"]",
"==",
"\"rem\"",
":",
"return",
"FileCommentSystem",
"(",
"begin",
"=",
"\"rem\"",
",",
"middle",
"=",
"\"rem\"",
",",
"end",
"=",
"\"\"",
",",
"single",
"=",
"\"rem\"",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Couldn't detect comment \"",
"\"system from {0}\"",
".",
"format",
"(",
"contents",
"[",
":",
"3",
"]",
")",
")"
] | For file contents, return the comment system. | [
"For",
"file",
"contents",
"return",
"the",
"comment",
"system",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L74-L89 |
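A usage sketch, assuming the function is importable; `FileCommentSystem` is evidently constructed with `begin`/`middle`/`end`/`single` fields (likely a namedtuple):

```python
print(_comment_system_for_file("# a shell script").single)  # '#'
print(_comment_system_for_file("/* a C header */").begin)   # '/*'
print(_comment_system_for_file("// a C++ file").middle)     # '//'
# Anything else raises RuntimeError("Couldn't detect comment system ...")
```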
251,717 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _split_line_with_offsets | def _split_line_with_offsets(line):
"""Split a line by delimiter, but yield tuples of word and offset.
This function works by dropping all the English-like punctuation from
a line (so parentheses preceded or succeeded by spaces, periods, etc.)
and then splitting on spaces.
"""
for delimiter in re.finditer(r"[\.,:\;](?![^\s])", line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
for delimiter in re.finditer(r"[\"'\)\]\}>](?![^\.,\;:\"'\)\]\}>\s])",
line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
for delimiter in re.finditer(r"(?<![^\.,\;:\"'\(\[\{<\s])[\"'\(\[\{<]",
line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
# Treat hyphen separated words as separate words
line = line.replace("-", " ")
# Remove backticks
line = line.replace("`", " ")
for match in re.finditer(r"[^\s]+", line):
content = match.group(0)
if content.strip() != "":
yield (match.span()[0], content) | python | def _split_line_with_offsets(line):
"""Split a line by delimiter, but yield tuples of word and offset.
This function works by dropping all the English-like punctuation from
a line (so parentheses preceded or succeeded by spaces, periods, etc.)
and then splitting on spaces.
"""
for delimiter in re.finditer(r"[\.,:\;](?![^\s])", line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
for delimiter in re.finditer(r"[\"'\)\]\}>](?![^\.,\;:\"'\)\]\}>\s])",
line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
for delimiter in re.finditer(r"(?<![^\.,\;:\"'\(\[\{<\s])[\"'\(\[\{<]",
line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
# Treat hyphen separated words as separate words
line = line.replace("-", " ")
# Remove backticks
line = line.replace("`", " ")
for match in re.finditer(r"[^\s]+", line):
content = match.group(0)
if content.strip() != "":
yield (match.span()[0], content) | [
"def",
"_split_line_with_offsets",
"(",
"line",
")",
":",
"for",
"delimiter",
"in",
"re",
".",
"finditer",
"(",
"r\"[\\.,:\\;](?![^\\s])\"",
",",
"line",
")",
":",
"span",
"=",
"delimiter",
".",
"span",
"(",
")",
"line",
"=",
"line",
"[",
":",
"span",
"[",
"0",
"]",
"]",
"+",
"\" \"",
"+",
"line",
"[",
"span",
"[",
"1",
"]",
":",
"]",
"for",
"delimiter",
"in",
"re",
".",
"finditer",
"(",
"r\"[\\\"'\\)\\]\\}>](?![^\\.,\\;:\\\"'\\)\\]\\}>\\s])\"",
",",
"line",
")",
":",
"span",
"=",
"delimiter",
".",
"span",
"(",
")",
"line",
"=",
"line",
"[",
":",
"span",
"[",
"0",
"]",
"]",
"+",
"\" \"",
"+",
"line",
"[",
"span",
"[",
"1",
"]",
":",
"]",
"for",
"delimiter",
"in",
"re",
".",
"finditer",
"(",
"r\"(?<![^\\.,\\;:\\\"'\\(\\[\\{<\\s])[\\\"'\\(\\[\\{<]\"",
",",
"line",
")",
":",
"span",
"=",
"delimiter",
".",
"span",
"(",
")",
"line",
"=",
"line",
"[",
":",
"span",
"[",
"0",
"]",
"]",
"+",
"\" \"",
"+",
"line",
"[",
"span",
"[",
"1",
"]",
":",
"]",
"# Treat hyphen separated words as separate words",
"line",
"=",
"line",
".",
"replace",
"(",
"\"-\"",
",",
"\" \"",
")",
"# Remove backticks",
"line",
"=",
"line",
".",
"replace",
"(",
"\"`\"",
",",
"\" \"",
")",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"r\"[^\\s]+\"",
",",
"line",
")",
":",
"content",
"=",
"match",
".",
"group",
"(",
"0",
")",
"if",
"content",
".",
"strip",
"(",
")",
"!=",
"\"\"",
":",
"yield",
"(",
"match",
".",
"span",
"(",
")",
"[",
"0",
"]",
",",
"content",
")"
] | Split a line by delimiter, but yield tuples of word and offset.
This function works by dropping all the English-like punctuation from
a line (so parentheses preceded or succeeded by spaces, periods, etc.)
and then splitting on spaces. | [
"Split",
"a",
"line",
"by",
"delimiter",
"but",
"yield",
"tuples",
"of",
"word",
"and",
"offset",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L129-L159 |
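A usage sketch: trailing punctuation is blanked in place and replacements preserve the line length, so the reported offsets refer to positions in the original string:

```python
print(list(_split_line_with_offsets("Hello, world.")))
# [(0, 'Hello'), (7, 'world')]
```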
251,718 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | read_dictionary_file | def read_dictionary_file(dictionary_path):
"""Return all words in dictionary file as set."""
try:
return _user_dictionary_cache[dictionary_path]
except KeyError:
if dictionary_path and os.path.exists(dictionary_path):
with open(dictionary_path, "rt") as dict_f:
words = set(re.findall(r"(\w[\w']*\w|\w)",
" ".join(dict_f.read().splitlines())))
return words
return set() | python | def read_dictionary_file(dictionary_path):
"""Return all words in dictionary file as set."""
try:
return _user_dictionary_cache[dictionary_path]
except KeyError:
if dictionary_path and os.path.exists(dictionary_path):
with open(dictionary_path, "rt") as dict_f:
words = set(re.findall(r"(\w[\w']*\w|\w)",
" ".join(dict_f.read().splitlines())))
return words
return set() | [
"def",
"read_dictionary_file",
"(",
"dictionary_path",
")",
":",
"try",
":",
"return",
"_user_dictionary_cache",
"[",
"dictionary_path",
"]",
"except",
"KeyError",
":",
"if",
"dictionary_path",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"dictionary_path",
")",
":",
"with",
"open",
"(",
"dictionary_path",
",",
"\"rt\"",
")",
"as",
"dict_f",
":",
"words",
"=",
"set",
"(",
"re",
".",
"findall",
"(",
"r\"(\\w[\\w']*\\w|\\w)\"",
",",
"\" \"",
".",
"join",
"(",
"dict_f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
")",
")",
")",
"return",
"words",
"return",
"set",
"(",
")"
] | Return all words in dictionary file as set. | [
"Return",
"all",
"words",
"in",
"dictionary",
"file",
"as",
"set",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L162-L173 |
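The word-extraction regex can be checked on its own; it keeps internal apostrophes and the function joins the file's lines with spaces first:

```python
import re

text = "can't   hello\nworld"
print(re.findall(r"(\w[\w']*\w|\w)", " ".join(text.splitlines())))
# ["can't", 'hello', 'world']
```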
251,719 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | valid_words_set | def valid_words_set(path_to_user_dictionary=None,
user_dictionary_words=None):
"""Get a set of valid words.
If :path_to_user_dictionary: is specified, then the newline-separated
words in that file will be added to the word set.
"""
def read_file(binary_file):
"""Read a binary file for its text lines."""
return binary_file.read().decode("ascii").splitlines()
try:
valid = _valid_words_cache[path_to_user_dictionary]
return valid
except KeyError:
words = set()
with resource_stream("polysquarelinter", "en_US.txt") as words_file:
words |= set(["".join(l).lower() for l in read_file(words_file)])
if path_to_user_dictionary:
# Add both case-sensitive and case-insensitive variants
# of words in user dictionary as they may be checked as
# though they are a regular word and a technical word.
words |= set([w.lower() for w in user_dictionary_words])
words |= user_dictionary_words
_valid_words_cache[path_to_user_dictionary] = words
return words | python | def valid_words_set(path_to_user_dictionary=None,
user_dictionary_words=None):
"""Get a set of valid words.
If :path_to_user_dictionary: is specified, then the newline-separated
words in that file will be added to the word set.
"""
def read_file(binary_file):
"""Read a binary file for its text lines."""
return binary_file.read().decode("ascii").splitlines()
try:
valid = _valid_words_cache[path_to_user_dictionary]
return valid
except KeyError:
words = set()
with resource_stream("polysquarelinter", "en_US.txt") as words_file:
words |= set(["".join(l).lower() for l in read_file(words_file)])
if path_to_user_dictionary:
# Add both case-sensitive and case-insensitive variants
# of words in user dictionary as they may be checked as
# though they are a regular word and a technical word.
words |= set([w.lower() for w in user_dictionary_words])
words |= user_dictionary_words
_valid_words_cache[path_to_user_dictionary] = words
return words | [
"def",
"valid_words_set",
"(",
"path_to_user_dictionary",
"=",
"None",
",",
"user_dictionary_words",
"=",
"None",
")",
":",
"def",
"read_file",
"(",
"binary_file",
")",
":",
"\"\"\"Read a binary file for its text lines.\"\"\"",
"return",
"binary_file",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"ascii\"",
")",
".",
"splitlines",
"(",
")",
"try",
":",
"valid",
"=",
"_valid_words_cache",
"[",
"path_to_user_dictionary",
"]",
"return",
"valid",
"except",
"KeyError",
":",
"words",
"=",
"set",
"(",
")",
"with",
"resource_stream",
"(",
"\"polysquarelinter\"",
",",
"\"en_US.txt\"",
")",
"as",
"words_file",
":",
"words",
"|=",
"set",
"(",
"[",
"\"\"",
".",
"join",
"(",
"l",
")",
".",
"lower",
"(",
")",
"for",
"l",
"in",
"read_file",
"(",
"words_file",
")",
"]",
")",
"if",
"path_to_user_dictionary",
":",
"# Add both case-sensitive and case-insensitive variants",
"# of words in user dictionary as they may be checked as",
"# though they are a regular word and a technical word.",
"words",
"|=",
"set",
"(",
"[",
"w",
".",
"lower",
"(",
")",
"for",
"w",
"in",
"user_dictionary_words",
"]",
")",
"words",
"|=",
"user_dictionary_words",
"_valid_words_cache",
"[",
"path_to_user_dictionary",
"]",
"=",
"words",
"return",
"words"
] | Get a set of valid words.
If :path_to_user_dictionary: is specified, then the newline-separated
words in that file will be added to the word set. | [
"Get",
"a",
"set",
"of",
"valid",
"words",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L176-L203 |
251,720 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _create_word_graph_file | def _create_word_graph_file(name, file_storage, word_set):
"""Create a word graph file and open it in memory."""
word_graph_file = file_storage.create_file(name)
spelling.wordlist_to_graph_file(sorted(list(word_set)),
word_graph_file)
return copy_to_ram(file_storage).open_file(name) | python | def _create_word_graph_file(name, file_storage, word_set):
"""Create a word graph file and open it in memory."""
word_graph_file = file_storage.create_file(name)
spelling.wordlist_to_graph_file(sorted(list(word_set)),
word_graph_file)
return copy_to_ram(file_storage).open_file(name) | [
"def",
"_create_word_graph_file",
"(",
"name",
",",
"file_storage",
",",
"word_set",
")",
":",
"word_graph_file",
"=",
"file_storage",
".",
"create_file",
"(",
"name",
")",
"spelling",
".",
"wordlist_to_graph_file",
"(",
"sorted",
"(",
"list",
"(",
"word_set",
")",
")",
",",
"word_graph_file",
")",
"return",
"copy_to_ram",
"(",
"file_storage",
")",
".",
"open_file",
"(",
"name",
")"
] | Create a word graph file and open it in memory. | [
"Create",
"a",
"word",
"graph",
"file",
"and",
"open",
"it",
"in",
"memory",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L211-L216 |
251,721 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | filter_nonspellcheckable_tokens | def filter_nonspellcheckable_tokens(line, block_out_regexes=None):
"""Return line with paths, urls and emails filtered out.
Block out other strings of text matching :block_out_regexes: if passed in.
"""
all_block_out_regexes = [
r"[^\s]*:[^\s]*[/\\][^\s]*",
r"[^\s]*[/\\][^\s]*",
r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]+\b"
] + (block_out_regexes or list())
for block_regex in all_block_out_regexes:
for marker in re.finditer(block_regex, line):
spaces = " " * (marker.end() - marker.start())
line = line[:marker.start()] + spaces + line[marker.end():]
return line | python | def filter_nonspellcheckable_tokens(line, block_out_regexes=None):
"""Return line with paths, urls and emails filtered out.
Block out other strings of text matching :block_out_regexes: if passed in.
"""
all_block_out_regexes = [
r"[^\s]*:[^\s]*[/\\][^\s]*",
r"[^\s]*[/\\][^\s]*",
r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]+\b"
] + (block_out_regexes or list())
for block_regex in all_block_out_regexes:
for marker in re.finditer(block_regex, line):
spaces = " " * (marker.end() - marker.start())
line = line[:marker.start()] + spaces + line[marker.end():]
return line | [
"def",
"filter_nonspellcheckable_tokens",
"(",
"line",
",",
"block_out_regexes",
"=",
"None",
")",
":",
"all_block_out_regexes",
"=",
"[",
"r\"[^\\s]*:[^\\s]*[/\\\\][^\\s]*\"",
",",
"r\"[^\\s]*[/\\\\][^\\s]*\"",
",",
"r\"\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]+\\b\"",
"]",
"+",
"(",
"block_out_regexes",
"or",
"list",
"(",
")",
")",
"for",
"block_regex",
"in",
"all_block_out_regexes",
":",
"for",
"marker",
"in",
"re",
".",
"finditer",
"(",
"block_regex",
",",
"line",
")",
":",
"spaces",
"=",
"\" \"",
"*",
"(",
"marker",
".",
"end",
"(",
")",
"-",
"marker",
".",
"start",
"(",
")",
")",
"line",
"=",
"line",
"[",
":",
"marker",
".",
"start",
"(",
")",
"]",
"+",
"spaces",
"+",
"line",
"[",
"marker",
".",
"end",
"(",
")",
":",
"]",
"return",
"line"
] | Return line with paths, urls and emails filtered out.
Block out other strings of text matching :block_out_regexes: if passed in. | [
"Return",
"line",
"with",
"paths",
"urls",
"and",
"emails",
"filtered",
"out",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L285-L301 |
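A usage sketch: URLs and emails are replaced by equal-length runs of spaces, so later offset arithmetic still lines up:

```python
line = "see https://example.com/docs or mail test@example.org now"
cleaned = filter_nonspellcheckable_tokens(line)

print(cleaned.split())            # ['see', 'or', 'mail', 'now']
print(len(cleaned) == len(line))  # True: offsets are preserved
```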
251,722 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _chunk_from_ranges | def _chunk_from_ranges(contents_lines,
start_line_index,
start_column_index,
end_line_index,
end_column_index):
"""Create a _ChunkInfo from a range of lines and columns.
:contents_lines: is the raw lines of a file.
"""
# If the start and end line are the same we have to compensate for
# that by subtracting start_column_index from end_column_index
if start_line_index == end_line_index:
end_column_index -= start_column_index
lines = contents_lines[start_line_index:end_line_index + 1]
lines[0] = lines[0][start_column_index:]
lines[-1] = lines[-1][:end_column_index]
return _ChunkInfo(start_line_index,
start_column_index,
lines) | python | def _chunk_from_ranges(contents_lines,
start_line_index,
start_column_index,
end_line_index,
end_column_index):
"""Create a _ChunkInfo from a range of lines and columns.
:contents_lines: is the raw lines of a file.
"""
# If the start and end line are the same we have to compensate for
# that by subtracting start_column_index from end_column_index
if start_line_index == end_line_index:
end_column_index -= start_column_index
lines = contents_lines[start_line_index:end_line_index + 1]
lines[0] = lines[0][start_column_index:]
lines[-1] = lines[-1][:end_column_index]
return _ChunkInfo(start_line_index,
start_column_index,
lines) | [
"def",
"_chunk_from_ranges",
"(",
"contents_lines",
",",
"start_line_index",
",",
"start_column_index",
",",
"end_line_index",
",",
"end_column_index",
")",
":",
"# If the start and end line are the same we have to compensate for",
"# that by subtracting start_column_index from end_column_index",
"if",
"start_line_index",
"==",
"end_line_index",
":",
"end_column_index",
"-=",
"start_column_index",
"lines",
"=",
"contents_lines",
"[",
"start_line_index",
":",
"end_line_index",
"+",
"1",
"]",
"lines",
"[",
"0",
"]",
"=",
"lines",
"[",
"0",
"]",
"[",
"start_column_index",
":",
"]",
"lines",
"[",
"-",
"1",
"]",
"=",
"lines",
"[",
"-",
"1",
"]",
"[",
":",
"end_column_index",
"]",
"return",
"_ChunkInfo",
"(",
"start_line_index",
",",
"start_column_index",
",",
"lines",
")"
] | Create a _ChunkInfo from a range of lines and columns.
:contents_lines: is the raw lines of a file. | [
"Create",
"a",
"_ChunkInfo",
"from",
"a",
"range",
"of",
"lines",
"and",
"columns",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L334-L354 |
251,723 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _token_at_col_in_line | def _token_at_col_in_line(line, column, token, token_len=None):
"""True if token is at column."""
if not token_len:
token_len = len(token)
remaining_len = len(line) - column
return (remaining_len >= token_len and
line[column:column + token_len] == token) | python | def _token_at_col_in_line(line, column, token, token_len=None):
"""True if token is at column."""
if not token_len:
token_len = len(token)
remaining_len = len(line) - column
return (remaining_len >= token_len and
line[column:column + token_len] == token) | [
"def",
"_token_at_col_in_line",
"(",
"line",
",",
"column",
",",
"token",
",",
"token_len",
"=",
"None",
")",
":",
"if",
"not",
"token_len",
":",
"token_len",
"=",
"len",
"(",
"token",
")",
"remaining_len",
"=",
"len",
"(",
"line",
")",
"-",
"column",
"return",
"(",
"remaining_len",
">=",
"token_len",
"and",
"line",
"[",
"column",
":",
"column",
"+",
"token_len",
"]",
"==",
"token",
")"
] | True if token is at column. | [
"True",
"if",
"token",
"is",
"at",
"column",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L357-L365 |
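A usage sketch:

```python
line = "int x; /* a comment */"
print(_token_at_col_in_line(line, 7, "/*"))  # True
print(_token_at_col_in_line(line, 8, "/*"))  # False
```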
251,724 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _maybe_append_chunk | def _maybe_append_chunk(chunk_info, line_index, column, contents, chunks):
"""Append chunk_info to chunks if it is set."""
if chunk_info:
chunks.append(_chunk_from_ranges(contents,
chunk_info[0],
chunk_info[1],
line_index,
column)) | python | def _maybe_append_chunk(chunk_info, line_index, column, contents, chunks):
"""Append chunk_info to chunks if it is set."""
if chunk_info:
chunks.append(_chunk_from_ranges(contents,
chunk_info[0],
chunk_info[1],
line_index,
column)) | [
"def",
"_maybe_append_chunk",
"(",
"chunk_info",
",",
"line_index",
",",
"column",
",",
"contents",
",",
"chunks",
")",
":",
"if",
"chunk_info",
":",
"chunks",
".",
"append",
"(",
"_chunk_from_ranges",
"(",
"contents",
",",
"chunk_info",
"[",
"0",
"]",
",",
"chunk_info",
"[",
"1",
"]",
",",
"line_index",
",",
"column",
")",
")"
] | Append chunk_info to chunks if it is set. | [
"Append",
"chunk_info",
"to",
"chunks",
"if",
"it",
"is",
"set",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L671-L678 |
251,725 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _find_spellcheckable_chunks | def _find_spellcheckable_chunks(contents,
comment_system):
"""Given some contents for a file, find chunks that can be spellchecked.
This applies the following rules:
1. If the comment system comments individual lines, that whole line
can be spellchecked from the point of the comment
2. If a comment-start marker or triple quote is found, keep going
until a comment end marker or matching triple quote is found.
3. In both cases, ignore anything in triple backticks.
"""
state = InTextParser()
comment_system_transitions = CommentSystemTransitions(comment_system)
chunks = []
for line_index, line in enumerate(contents):
column = 0
line_len = len(line)
escape_next = False
# We hit a new line. If we were waiting until the end of the line
# then add a new chunk in here
(state,
column_delta,
chunk_info) = state.get_transition(line,
line_index,
0,
False,
comment_system_transitions)
_maybe_append_chunk(chunk_info,
line_index - 1,
len(contents[line_index - 1]),
contents,
chunks)
column += column_delta
while column < line_len:
# Check if the next character should be considered as escaped. That
# only happens if we are not escaped and the current character is
# a backslash.
is_escaped = escape_next
escape_next = not is_escaped and line[column] == "\\"
(state,
column_delta,
chunk_info) = state.get_transition(line,
line_index,
column,
is_escaped,
comment_system_transitions)
_maybe_append_chunk(chunk_info,
line_index,
column,
contents,
chunks)
column += column_delta
last_line_index = len(contents) - 1
(state,
column_delta,
chunk_info) = state.get_transition(contents[-1],
last_line_index,
len(contents[-1]),
False,
comment_system_transitions,
eof=True)
_maybe_append_chunk(chunk_info,
last_line_index,
len(contents[last_line_index]),
contents,
chunks)
return chunks | python | def _find_spellcheckable_chunks(contents,
comment_system):
"""Given some contents for a file, find chunks that can be spellchecked.
This applies the following rules:
1. If the comment system comments individual lines, that whole line
can be spellchecked from the point of the comment
2. If a comment-start marker or triple quote is found, keep going
until a comment end marker or matching triple quote is found.
3. In both cases, ignore anything in triple backticks.
"""
state = InTextParser()
comment_system_transitions = CommentSystemTransitions(comment_system)
chunks = []
for line_index, line in enumerate(contents):
column = 0
line_len = len(line)
escape_next = False
# We hit a new line. If we were waiting until the end of the line
# then add a new chunk in here
(state,
column_delta,
chunk_info) = state.get_transition(line,
line_index,
0,
False,
comment_system_transitions)
_maybe_append_chunk(chunk_info,
line_index - 1,
len(contents[line_index - 1]),
contents,
chunks)
column += column_delta
while column < line_len:
# Check if the next character should be considered as escaped. That
# only happens if we are not escaped and the current character is
# a backslash.
is_escaped = escape_next
escape_next = not is_escaped and line[column] == "\\"
(state,
column_delta,
chunk_info) = state.get_transition(line,
line_index,
column,
is_escaped,
comment_system_transitions)
_maybe_append_chunk(chunk_info,
line_index,
column,
contents,
chunks)
column += column_delta
last_line_index = len(contents) - 1
(state,
column_delta,
chunk_info) = state.get_transition(contents[-1],
last_line_index,
len(contents[-1]),
False,
comment_system_transitions,
eof=True)
_maybe_append_chunk(chunk_info,
last_line_index,
len(contents[last_line_index]),
contents,
chunks)
return chunks | [
"def",
"_find_spellcheckable_chunks",
"(",
"contents",
",",
"comment_system",
")",
":",
"state",
"=",
"InTextParser",
"(",
")",
"comment_system_transitions",
"=",
"CommentSystemTransitions",
"(",
"comment_system",
")",
"chunks",
"=",
"[",
"]",
"for",
"line_index",
",",
"line",
"in",
"enumerate",
"(",
"contents",
")",
":",
"column",
"=",
"0",
"line_len",
"=",
"len",
"(",
"line",
")",
"escape_next",
"=",
"False",
"# We hit a new line. If we were waiting until the end of the line",
"# then add a new chunk in here",
"(",
"state",
",",
"column_delta",
",",
"chunk_info",
")",
"=",
"state",
".",
"get_transition",
"(",
"line",
",",
"line_index",
",",
"0",
",",
"False",
",",
"comment_system_transitions",
")",
"_maybe_append_chunk",
"(",
"chunk_info",
",",
"line_index",
"-",
"1",
",",
"len",
"(",
"contents",
"[",
"line_index",
"-",
"1",
"]",
")",
",",
"contents",
",",
"chunks",
")",
"column",
"+=",
"column_delta",
"while",
"column",
"<",
"line_len",
":",
"# Check if the next character should be considered as escaped. That",
"# only happens if we are not escaped and the current character is",
"# a backslash.",
"is_escaped",
"=",
"escape_next",
"escape_next",
"=",
"not",
"is_escaped",
"and",
"line",
"[",
"column",
"]",
"==",
"\"\\\\\"",
"(",
"state",
",",
"column_delta",
",",
"chunk_info",
")",
"=",
"state",
".",
"get_transition",
"(",
"line",
",",
"line_index",
",",
"column",
",",
"is_escaped",
",",
"comment_system_transitions",
")",
"_maybe_append_chunk",
"(",
"chunk_info",
",",
"line_index",
",",
"column",
",",
"contents",
",",
"chunks",
")",
"column",
"+=",
"column_delta",
"last_line_index",
"=",
"len",
"(",
"contents",
")",
"-",
"1",
"(",
"state",
",",
"column_delta",
",",
"chunk_info",
")",
"=",
"state",
".",
"get_transition",
"(",
"contents",
"[",
"-",
"1",
"]",
",",
"last_line_index",
",",
"len",
"(",
"contents",
"[",
"-",
"1",
"]",
")",
",",
"False",
",",
"comment_system_transitions",
",",
"eof",
"=",
"True",
")",
"_maybe_append_chunk",
"(",
"chunk_info",
",",
"last_line_index",
",",
"len",
"(",
"contents",
"[",
"last_line_index",
"]",
")",
",",
"contents",
",",
"chunks",
")",
"return",
"chunks"
] | Given some contents for a file, find chunks that can be spellchecked.
This applies the following rules:
1. If the comment system comments individual lines, that whole line
can be spellchecked from the point of the comment
2. If a comment-start marker or triple quote is found, keep going
until a comment end marker or matching triple quote is found.
3. In both cases, ignore anything in triple backticks. | [
"Given",
"some",
"contents",
"for",
"a",
"file",
"find",
"chunks",
"that",
"can",
"be",
"spellchecked",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L681-L754 |
251,726 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | spellcheckable_and_shadow_contents | def spellcheckable_and_shadow_contents(contents, block_out_regexes=None):
"""For contents, split into spellcheckable and shadow parts.
:contents: is a list of lines in a file.
The return value is a tuple of (chunks, shadow_contents).
chunks is a list of _ChunkInfo, each of which contain
a region of text to be spell-checked. shadow_contents is an array of
characters and integers. The characters represent nonspellcheckable
regions and any region which will be subject to spellcheck is denoted
by a zero in place of that character.
"""
if not len(contents):
return ([], [])
comment_system = _comment_system_for_file(contents[0])
# Shadow contents excludes anything in quotes
chunks = _find_spellcheckable_chunks(contents, comment_system)
shadow_contents = _shadow_contents_from_chunks(contents,
chunks,
block_out_regexes)
return (chunks, shadow_contents) | python | def spellcheckable_and_shadow_contents(contents, block_out_regexes=None):
"""For contents, split into spellcheckable and shadow parts.
:contents: is a list of lines in a file.
The return value is a tuple of (chunks, shadow_contents).
chunks is a list of _ChunkInfo, each of which contain
a region of text to be spell-checked. shadow_contents is an array of
characters and integers. The characters represent nonspellcheckable
regions and any region which will be subject to spellcheck is denoted
by a zero in place of that character.
"""
if not len(contents):
return ([], [])
comment_system = _comment_system_for_file(contents[0])
# Shadow contents excludes anything in quotes
chunks = _find_spellcheckable_chunks(contents, comment_system)
shadow_contents = _shadow_contents_from_chunks(contents,
chunks,
block_out_regexes)
return (chunks, shadow_contents) | [
"def",
"spellcheckable_and_shadow_contents",
"(",
"contents",
",",
"block_out_regexes",
"=",
"None",
")",
":",
"if",
"not",
"len",
"(",
"contents",
")",
":",
"return",
"(",
"[",
"]",
",",
"[",
"]",
")",
"comment_system",
"=",
"_comment_system_for_file",
"(",
"contents",
"[",
"0",
"]",
")",
"# Shadow contents excludes anything in quotes",
"chunks",
"=",
"_find_spellcheckable_chunks",
"(",
"contents",
",",
"comment_system",
")",
"shadow_contents",
"=",
"_shadow_contents_from_chunks",
"(",
"contents",
",",
"chunks",
",",
"block_out_regexes",
")",
"return",
"(",
"chunks",
",",
"shadow_contents",
")"
] | For contents, split into spellcheckable and shadow parts.
:contents: is a list of lines in a file.
The return value is a tuple of (chunks, shadow_contents).
chunks is a list of _ChunkInfo, each of which contain
a region of text to be spell-checked. shadow_contents is an array of
characters and integers. The characters represent nonspellcheckable
regions and any region which will be subject to spellcheck is denoted
by a zero in place of that character. | [
"For",
"contents",
"split",
"into",
"spellcheckable",
"and",
"shadow",
"parts",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L763-L786 |
251,727 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _split_into_symbol_words | def _split_into_symbol_words(sym):
"""Split a technical looking word into a set of symbols.
This handles cases where technical words are separated by dots or
arrows, as is the convention in many programming languages.
"""
punc = r"[\s\-\*/\+\.,:\;=\)\(\[\]\{\}<>\|\?&\^\$@]"
words = [w.strip() for w in re.split(punc, sym)]
return words | python | def _split_into_symbol_words(sym):
"""Split a technical looking word into a set of symbols.
This handles cases where technical words are separated by dots or
arrows, as is the convention in many programming languages.
"""
punc = r"[\s\-\*/\+\.,:\;=\)\(\[\]\{\}<>\|\?&\^\$@]"
words = [w.strip() for w in re.split(punc, sym)]
return words | [
"def",
"_split_into_symbol_words",
"(",
"sym",
")",
":",
"punc",
"=",
"r\"[\\s\\-\\*/\\+\\.,:\\;=\\)\\(\\[\\]\\{\\}<>\\|\\?&\\^\\$@]\"",
"words",
"=",
"[",
"w",
".",
"strip",
"(",
")",
"for",
"w",
"in",
"re",
".",
"split",
"(",
"punc",
",",
"sym",
")",
"]",
"return",
"words"
] | Split a technical looking word into a set of symbols.
This handles cases where technical words are separated by dots or
arrows, as is the convention in many programming languages. | [
"Split",
"a",
"technical",
"looking",
"word",
"into",
"a",
"set",
"of",
"symbols",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L789-L797 |
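A usage sketch; note that consecutive delimiters (as in `->`) leave empty strings in the result:

```python
print(_split_into_symbol_words("registry.senders"))  # ['registry', 'senders']
print(_split_into_symbol_words("a->b"))              # ['a', '', 'b']
```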
251,728 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _error_if_word_invalid | def _error_if_word_invalid(word,
valid_words_dictionary,
technical_words_dictionary,
line_offset,
col_offset):
"""Return SpellcheckError if this non-technical word is invalid."""
word_lower = word.lower()
valid_words_result = valid_words_dictionary.corrections(word_lower)
if technical_words_dictionary:
technical_words_result = technical_words_dictionary.corrections(word)
else:
# No technical words dictionary is available, so build an invalid
# result value.
technical_words_result = Dictionary.Result(False, list())
if not valid_words_result.valid and not technical_words_result.valid:
return SpellcheckError(word,
line_offset,
col_offset,
valid_words_result.suggestions,
SpellcheckError.InvalidWord) | python | def _error_if_word_invalid(word,
valid_words_dictionary,
technical_words_dictionary,
line_offset,
col_offset):
"""Return SpellcheckError if this non-technical word is invalid."""
word_lower = word.lower()
valid_words_result = valid_words_dictionary.corrections(word_lower)
if technical_words_dictionary:
technical_words_result = technical_words_dictionary.corrections(word)
else:
# No technical words dictionary is available, so build an invalid
# result value.
technical_words_result = Dictionary.Result(False, list())
if not valid_words_result.valid and not technical_words_result.valid:
return SpellcheckError(word,
line_offset,
col_offset,
valid_words_result.suggestions,
SpellcheckError.InvalidWord) | [
"def",
"_error_if_word_invalid",
"(",
"word",
",",
"valid_words_dictionary",
",",
"technical_words_dictionary",
",",
"line_offset",
",",
"col_offset",
")",
":",
"word_lower",
"=",
"word",
".",
"lower",
"(",
")",
"valid_words_result",
"=",
"valid_words_dictionary",
".",
"corrections",
"(",
"word_lower",
")",
"if",
"technical_words_dictionary",
":",
"technical_words_result",
"=",
"technical_words_dictionary",
".",
"corrections",
"(",
"word",
")",
"else",
":",
"# No technical words available to make an otherwise invalid",
"# result value.",
"technical_words_result",
"=",
"Dictionary",
".",
"Result",
"(",
"False",
",",
"list",
"(",
")",
")",
"if",
"not",
"valid_words_result",
".",
"valid",
"and",
"not",
"technical_words_result",
".",
"valid",
":",
"return",
"SpellcheckError",
"(",
"word",
",",
"line_offset",
",",
"col_offset",
",",
"valid_words_result",
".",
"suggestions",
",",
"SpellcheckError",
".",
"InvalidWord",
")"
] | Return SpellcheckError if this non-technical word is invalid. | [
"Return",
"SpellcheckError",
"if",
"this",
"non",
"-",
"technical",
"word",
"is",
"invalid",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L841-L862 |
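A minimal sketch of the two-dictionary check above: natural-language words are matched case-insensitively (hence the word.lower() call), while technical words keep their exact casing. The Result namedtuple and the word sets are invented stand-ins for the real Dictionary objects:

from collections import namedtuple

Result = namedtuple("Result", ["valid", "suggestions"])
valid_words = {"the", "quick", "fox"}
technical_words = {"HTMLElement"}

def check(word):
    plain = Result(word.lower() in valid_words, [])
    tech = Result(word in technical_words, [])
    return plain.valid or tech.valid

print(check("Quick"))        # True  -- case-folded natural-language match
print(check("HTMLElement"))  # True  -- exact technical match
print(check("htmlelement"))  # False -- technical words stay case-sensitive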
251,729 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | _error_if_symbol_unused | def _error_if_symbol_unused(symbol_word,
technical_words_dictionary,
line_offset,
col_offset):
"""Return SpellcheckError if this symbol is not used in the code."""
result = technical_words_dictionary.corrections(symbol_word,
distance=5,
prefix=0)
if not result.valid:
return SpellcheckError(symbol_word,
line_offset,
col_offset,
result.suggestions,
SpellcheckError.TechnicalWord) | python | def _error_if_symbol_unused(symbol_word,
technical_words_dictionary,
line_offset,
col_offset):
"""Return SpellcheckError if this symbol is not used in the code."""
result = technical_words_dictionary.corrections(symbol_word,
distance=5,
prefix=0)
if not result.valid:
return SpellcheckError(symbol_word,
line_offset,
col_offset,
result.suggestions,
SpellcheckError.TechnicalWord) | [
"def",
"_error_if_symbol_unused",
"(",
"symbol_word",
",",
"technical_words_dictionary",
",",
"line_offset",
",",
"col_offset",
")",
":",
"result",
"=",
"technical_words_dictionary",
".",
"corrections",
"(",
"symbol_word",
",",
"distance",
"=",
"5",
",",
"prefix",
"=",
"0",
")",
"if",
"not",
"result",
".",
"valid",
":",
"return",
"SpellcheckError",
"(",
"symbol_word",
",",
"line_offset",
",",
"col_offset",
",",
"result",
".",
"suggestions",
",",
"SpellcheckError",
".",
"TechnicalWord",
")"
] | Return SpellcheckError if this symbol is not used in the code. | [
"Return",
"SpellcheckError",
"if",
"this",
"symbol",
"is",
"not",
"used",
"in",
"the",
"code",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L865-L878 |
251,730 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | CommentSystemTransitions.should_terminate_now | def should_terminate_now(self, line, waiting_for):
"""Whether parsing within a comment should terminate now.
This is used for comment systems where there is no comment-ending
character. We need it for parsing disabled regions where we don't
know where a comment block ends, but we know that a comment block
could end at a line ending. It returns true if, for a given line,
the line is not a comment.
"""
if waiting_for not in (ParserState.EOL, self._end):
return False
if self._continue_regex:
return (re.match(self._continue_regex, line) is None)
return False | python | def should_terminate_now(self, line, waiting_for):
"""Whether parsing within a comment should terminate now.
This is used for comment systems where there is no comment-ending
character. We need it for parsing disabled regions where we don't
know where a comment block ends, but we know that a comment block
could end at a line ending. It returns true if, for a given line,
the line is not a comment.
"""
if waiting_for not in (ParserState.EOL, self._end):
return False
if self._continue_regex:
return (re.match(self._continue_regex, line) is None)
return False | [
"def",
"should_terminate_now",
"(",
"self",
",",
"line",
",",
"waiting_for",
")",
":",
"if",
"waiting_for",
"not",
"in",
"(",
"ParserState",
".",
"EOL",
",",
"self",
".",
"_end",
")",
":",
"return",
"False",
"if",
"self",
".",
"_continue_regex",
":",
"return",
"(",
"re",
".",
"match",
"(",
"self",
".",
"_continue_regex",
",",
"line",
")",
"is",
"None",
")",
"return",
"False"
] | Whether parsing within a comment should terminate now.
This is used for comment systems where there is no comment-ending
character. We need it for parsing disabled regions where we don't
know where a comment block ends, but we know that a comment block
could end at a line ending. It returns true if, for a given line,
the line is not a comment. | [
"Whether",
"parsing",
"within",
"a",
"comment",
"should",
"terminate",
"now",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L431-L446 |
251,731 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | ParserState.get_transition | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Return a parser state, a move-ahead amount, and an append range.
If this parser state should terminate and return back to
the TEXT state, then return that state and also any corresponding
chunk that would have been yielded as a result.
"""
raise NotImplementedError("""Cannot instantiate base ParserState""") | python | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Return a parser state, a move-ahead amount, and an append range.
If this parser state should terminate and return back to
the TEXT state, then return that state and also any corresponding
chunk that would have been yielded as a result.
"""
raise NotImplementedError("""Cannot instantiate base ParserState""") | [
"def",
"get_transition",
"(",
"self",
",",
"# suppress(too-many-arguments)",
"line",
",",
"line_index",
",",
"column",
",",
"is_escaped",
",",
"comment_system_transitions",
",",
"eof",
"=",
"False",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"\"\"Cannot instantiate base ParserState\"\"\"",
")"
] | Return a parser state, a move-ahead amount, and an append range.
If this parser state should terminate and return back to
the TEXT state, then return that state and also any corresponding
chunk that would have been yielded as a result. | [
"Return",
"a",
"parser",
"state",
"a",
"move",
"-",
"ahead",
"amount",
"and",
"an",
"append",
"range",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L467-L480 |
251,732 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | InTextParser.get_transition | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Get transition from InTextParser."""
parser_transition = {
STATE_IN_COMMENT: InCommentParser,
STATE_IN_QUOTE: InQuoteParser
}
(state,
start_state_from,
waiting_until) = comment_system_transitions.from_text(line,
line_index,
column,
is_escaped)
# We need to move ahead by a certain number of characters
# if we hit a new state
if state != STATE_IN_TEXT:
return (parser_transition[state](start_state_from,
waiting_until),
start_state_from[1] - column,
None)
else:
return (self, 1, None) | python | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Get transition from InTextParser."""
parser_transition = {
STATE_IN_COMMENT: InCommentParser,
STATE_IN_QUOTE: InQuoteParser
}
(state,
start_state_from,
waiting_until) = comment_system_transitions.from_text(line,
line_index,
column,
is_escaped)
# We need to move ahead by a certain number of characters
# if we hit a new state
if state != STATE_IN_TEXT:
return (parser_transition[state](start_state_from,
waiting_until),
start_state_from[1] - column,
None)
else:
return (self, 1, None) | [
"def",
"get_transition",
"(",
"self",
",",
"# suppress(too-many-arguments)",
"line",
",",
"line_index",
",",
"column",
",",
"is_escaped",
",",
"comment_system_transitions",
",",
"eof",
"=",
"False",
")",
":",
"parser_transition",
"=",
"{",
"STATE_IN_COMMENT",
":",
"InCommentParser",
",",
"STATE_IN_QUOTE",
":",
"InQuoteParser",
"}",
"(",
"state",
",",
"start_state_from",
",",
"waiting_until",
")",
"=",
"comment_system_transitions",
".",
"from_text",
"(",
"line",
",",
"line_index",
",",
"column",
",",
"is_escaped",
")",
"# We need to move ahead by a certain number of characters",
"# if we hit a new state",
"if",
"state",
"!=",
"STATE_IN_TEXT",
":",
"return",
"(",
"parser_transition",
"[",
"state",
"]",
"(",
"start_state_from",
",",
"waiting_until",
")",
",",
"start_state_from",
"[",
"1",
"]",
"-",
"column",
",",
"None",
")",
"else",
":",
"return",
"(",
"self",
",",
"1",
",",
"None",
")"
] | Get transition from InTextParser. | [
"Get",
"transition",
"from",
"InTextParser",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L495-L523 |
251,733 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | DisabledParser.get_transition | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Get transition from DisabledParser."""
# If we are at the beginning of a line, check whether we should
# disable processing from this point onward and get out - this will
# happen if we reach the end of some comment block that doesn't have
# an explicit end marker. We can't detect line endings here because
# we want a disabled region to continue across multiple lines.
if (column == 0 and
comment_system_transitions.should_terminate_now(
line,
self._resume_waiting_for
)):
return (InTextParser(), 0, None)
# Need to be a bit careful here, since we need to check what the
# disabled parser was waiting for and disable on that, too.
if (_token_at_col_in_line(line,
column,
"```",
3) and
not _is_escaped(line, column, is_escaped)):
# Hit a disable token, so we resume the old parser
return (self._resume_parser((line_index, column + 3),
self._resume_waiting_for),
3,
None)
elif self._resume_waiting_for != ParserState.EOL:
wait_until_len = len(self._resume_waiting_for)
if (_token_at_col_in_line(line,
column,
self._resume_waiting_for,
wait_until_len) and
not _is_escaped(line, column, is_escaped)):
# Skip ahead to end of this token
return (InTextParser(),
len(self._resume_waiting_for),
None)
elif eof:
# We hit the end of the file and were still in a comment
# state. Grab everything up to here.
return (InTextParser(), 0, None)
# Move ahead by one character otherwise
return (self, 1, None) | python | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Get transition from DisabledParser."""
# If we are at the beginning of a line, check whether we should
# disable processing from this point onward and get out - this will
# happen if we reach the end of some comment block that doesn't have
# an explicit end marker. We can't detect line endings here because
# we want a disabled region to continue across multiple lines.
if (column == 0 and
comment_system_transitions.should_terminate_now(
line,
self._resume_waiting_for
)):
return (InTextParser(), 0, None)
# Need to be a bit careful here, since we need to check what the
# disabled parser was waiting for and disable on that, too.
if (_token_at_col_in_line(line,
column,
"```",
3) and
not _is_escaped(line, column, is_escaped)):
# Hit a disable token, so we resume the old parser
return (self._resume_parser((line_index, column + 3),
self._resume_waiting_for),
3,
None)
elif self._resume_waiting_for != ParserState.EOL:
wait_until_len = len(self._resume_waiting_for)
if (_token_at_col_in_line(line,
column,
self._resume_waiting_for,
wait_until_len) and
not _is_escaped(line, column, is_escaped)):
# Skip ahead to end of this token
return (InTextParser(),
len(self._resume_waiting_for),
None)
elif eof:
# We hit the end of the file and were still in a comment
# state. Grab everything up to here.
return (InTextParser(), 0, None)
# Move ahead by one character otherwise
return (self, 1, None) | [
"def",
"get_transition",
"(",
"self",
",",
"# suppress(too-many-arguments)",
"line",
",",
"line_index",
",",
"column",
",",
"is_escaped",
",",
"comment_system_transitions",
",",
"eof",
"=",
"False",
")",
":",
"# If we are at the beginning of a line, to see if we should",
"# disable processing from this point onward and get out - this will",
"# happen if we reach the end of some comment block that doesn't have",
"# an explicit end marker. We can't detect line endings here because",
"# we want a disabled region to continue across multiple lines.",
"if",
"(",
"column",
"==",
"0",
"and",
"comment_system_transitions",
".",
"should_terminate_now",
"(",
"line",
",",
"self",
".",
"_resume_waiting_for",
")",
")",
":",
"return",
"(",
"InTextParser",
"(",
")",
",",
"0",
",",
"None",
")",
"# Need to be a bit careful here, since we need to check what the",
"# disabled parser was waiting for and disable on that, too.",
"if",
"(",
"_token_at_col_in_line",
"(",
"line",
",",
"column",
",",
"\"```\"",
",",
"3",
")",
"and",
"not",
"_is_escaped",
"(",
"line",
",",
"column",
",",
"is_escaped",
")",
")",
":",
"# Hit a disable token, so we resume the old parser",
"return",
"(",
"self",
".",
"_resume_parser",
"(",
"(",
"line_index",
",",
"column",
"+",
"3",
")",
",",
"self",
".",
"_resume_waiting_for",
")",
",",
"3",
",",
"None",
")",
"elif",
"self",
".",
"_resume_waiting_for",
"!=",
"ParserState",
".",
"EOL",
":",
"wait_until_len",
"=",
"len",
"(",
"self",
".",
"_resume_waiting_for",
")",
"if",
"(",
"_token_at_col_in_line",
"(",
"line",
",",
"column",
",",
"self",
".",
"_resume_waiting_for",
",",
"wait_until_len",
")",
"and",
"not",
"_is_escaped",
"(",
"line",
",",
"column",
",",
"is_escaped",
")",
")",
":",
"# Skip ahead to end of this token",
"return",
"(",
"InTextParser",
"(",
")",
",",
"len",
"(",
"self",
".",
"_waiting_until",
")",
",",
"None",
")",
"elif",
"eof",
":",
"# We hit the end of the file and were still in a comment",
"# state. Grab everything up to here.",
"return",
"(",
"InTextParser",
"(",
")",
",",
"0",
",",
"None",
")",
"# Move ahead by one character otherwise",
"return",
"(",
"self",
",",
"1",
",",
"None",
")"
] | Get transition from DisabledParser. | [
"Get",
"transition",
"from",
"DisabledParser",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L543-L593 |
251,734 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | InCommentParser.get_transition | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Get transition from InCommentParser."""
del comment_system_transitions
if (_token_at_col_in_line(line,
column,
"```",
3) and
not _is_escaped(line, column, is_escaped)):
# Hit a disable token, so hand off to a DisabledParser that resumes us
return (DisabledParser((line_index, column + 3),
self.__class__,
self._waiting_until), 3, self._started_at)
elif self._waiting_until != ParserState.EOL:
wait_until_len = len(self._waiting_until)
if (_token_at_col_in_line(line,
column,
self._waiting_until,
wait_until_len) and
not _is_escaped(line, column, is_escaped)):
# Skip ahead to end of this token
return (InTextParser(),
len(self._waiting_until),
self._started_at)
elif self._waiting_until == ParserState.EOL and column == 0:
# We hit a new line and the state ends here. Return
# corresponding state
return (InTextParser(), 0, self._started_at)
elif eof:
# We hit the end of the file and were still in a comment
# state. Grab everything up to here.
return (InTextParser(), 0, self._started_at)
# Move ahead by one character otherwise
return (self, 1, None) | python | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Get transition from InCommentParser."""
del comment_system_transitions
if (_token_at_col_in_line(line,
column,
"```",
3) and
not _is_escaped(line, column, is_escaped)):
# Hit a disable token, so hand off to a DisabledParser that resumes us
return (DisabledParser((line_index, column + 3),
self.__class__,
self._waiting_until), 3, self._started_at)
elif self._waiting_until != ParserState.EOL:
wait_until_len = len(self._waiting_until)
if (_token_at_col_in_line(line,
column,
self._waiting_until,
wait_until_len) and
not _is_escaped(line, column, is_escaped)):
# Skip ahead to end of this token
return (InTextParser(),
len(self._waiting_until),
self._started_at)
elif self._waiting_until == ParserState.EOL and column == 0:
# We hit a new line and the state ends here. Return
# corresponding state
return (InTextParser(), 0, self._started_at)
elif eof:
# We hit the end of the file and were still in a comment
# state. Grab everything up to here.
return (InTextParser(), 0, self._started_at)
# Move ahead by one character otherwise
return (self, 1, None) | [
"def",
"get_transition",
"(",
"self",
",",
"# suppress(too-many-arguments)",
"line",
",",
"line_index",
",",
"column",
",",
"is_escaped",
",",
"comment_system_transitions",
",",
"eof",
"=",
"False",
")",
":",
"del",
"comment_system_transitions",
"if",
"(",
"_token_at_col_in_line",
"(",
"line",
",",
"column",
",",
"\"```\"",
",",
"3",
")",
"and",
"not",
"_is_escaped",
"(",
"line",
",",
"column",
",",
"is_escaped",
")",
")",
":",
"# Hit a disable token, so resume the last parser",
"return",
"(",
"DisabledParser",
"(",
"(",
"line_index",
",",
"column",
"+",
"3",
")",
",",
"self",
".",
"__class__",
",",
"self",
".",
"_waiting_until",
")",
",",
"3",
",",
"self",
".",
"_started_at",
")",
"elif",
"self",
".",
"_waiting_until",
"!=",
"ParserState",
".",
"EOL",
":",
"wait_until_len",
"=",
"len",
"(",
"self",
".",
"_waiting_until",
")",
"if",
"(",
"_token_at_col_in_line",
"(",
"line",
",",
"column",
",",
"self",
".",
"_waiting_until",
",",
"wait_until_len",
")",
"and",
"not",
"_is_escaped",
"(",
"line",
",",
"column",
",",
"is_escaped",
")",
")",
":",
"# Skip ahead to end of this token",
"return",
"(",
"InTextParser",
"(",
")",
",",
"len",
"(",
"self",
".",
"_waiting_until",
")",
",",
"self",
".",
"_started_at",
")",
"elif",
"self",
".",
"_waiting_until",
"==",
"ParserState",
".",
"EOL",
"and",
"column",
"==",
"0",
":",
"# We hit a new line and the state ends here. Return",
"# corresponding state",
"return",
"(",
"InTextParser",
"(",
")",
",",
"0",
",",
"self",
".",
"_started_at",
")",
"elif",
"eof",
":",
"# We hit the end of the file and were still in a comment",
"# state. Grab everything up to here.",
"return",
"(",
"InTextParser",
"(",
")",
",",
"0",
",",
"self",
".",
"_started_at",
")",
"# Move ahead by one character otherwise",
"return",
"(",
"self",
",",
"1",
",",
"None",
")"
] | Get transition from InCommentParser. | [
"Get",
"transition",
"from",
"InCommentParser",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L600-L641 |
251,735 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | InQuoteParser.get_transition | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
*args,
**kwargs):
"""Get transition from InQuoteParser."""
del line_index
del args
del kwargs
wait_until_len = len(self._waiting_until)
if (_token_at_col_in_line(line,
column,
self._waiting_until,
wait_until_len) and
not _is_escaped(line, column, is_escaped)):
return (InTextParser(), 1, None)
return (self, 1, None) | python | def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
*args,
**kwargs):
"""Get transition from InQuoteParser."""
del line_index
del args
del kwargs
wait_until_len = len(self._waiting_until)
if (_token_at_col_in_line(line,
column,
self._waiting_until,
wait_until_len) and
not _is_escaped(line, column, is_escaped)):
return (InTextParser(), 1, None)
return (self, 1, None) | [
"def",
"get_transition",
"(",
"self",
",",
"# suppress(too-many-arguments)",
"line",
",",
"line_index",
",",
"column",
",",
"is_escaped",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"line_index",
"del",
"args",
"del",
"kwargs",
"wait_until_len",
"=",
"len",
"(",
"self",
".",
"_waiting_until",
")",
"if",
"(",
"_token_at_col_in_line",
"(",
"line",
",",
"column",
",",
"self",
".",
"_waiting_until",
",",
"wait_until_len",
")",
"and",
"not",
"_is_escaped",
"(",
"line",
",",
"column",
",",
"is_escaped",
")",
")",
":",
"return",
"(",
"InTextParser",
"(",
")",
",",
"1",
",",
"None",
")",
"return",
"(",
"self",
",",
"1",
",",
"None",
")"
] | Get transition from InQuoteParser. | [
"Get",
"transition",
"from",
"InQuoteParser",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L648-L668 |
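All of these parser classes share one loop contract: each step inspects a single column and returns a tuple telling the caller which state comes next and how far to advance. A stripped-down, self-contained sketch of that loop with invented two-state logic:

def step(state, ch):
    """Return (next_state, advance) for a single character."""
    if state == "TEXT" and ch == '"':
        return ("QUOTE", 1)
    if state == "QUOTE" and ch == '"':
        return ("TEXT", 1)
    return (state, 1)

line = 'say "hi" now'
state, col = "TEXT", 0
while col < len(line):
    state, advance = step(state, line[col])
    col += advance
print(state)  # TEXT -- the quote was opened and then closed again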
251,736 | polysquare/polysquare-generic-file-linter | polysquarelinter/spelling.py | Dictionary.corrections | def corrections(self, word, prefix=1, distance=2):
"""Get corrections for word, if word is an invalid word.
:prefix: is the number of characters the prefix of the word must
have in common with the suggested corrections.
:distance: is the maximum character distance a correction may have from
the input word. This limits the number of available corrections
but decreases the correction search space.
The return value of this function is a Result tuple, with the
:valid: member indicating whether the input word is a valid one and
:suggestions: member containing a list of suggestions.
"""
if word not in self._words:
return Dictionary.Result(False,
self._corrector.suggest(word,
prefix=prefix,
maxdist=distance))
else:
return Dictionary.Result(True, list()) | python | def corrections(self, word, prefix=1, distance=2):
"""Get corrections for word, if word is an invalid word.
:prefix: is the number of characters the prefix of the word must
have in common with the suggested corrections.
:distance: is the maximum character distance a correction may have from
the input word. This limits the number of available corrections
but decreases the correction search space.
The return value of this function is a Result tuple, with the
:valid: member indicating whether the input word is a valid one and
:suggestions: member containing a list of suggestions.
"""
if word not in self._words:
return Dictionary.Result(False,
self._corrector.suggest(word,
prefix=prefix,
maxdist=distance))
else:
return Dictionary.Result(True, list()) | [
"def",
"corrections",
"(",
"self",
",",
"word",
",",
"prefix",
"=",
"1",
",",
"distance",
"=",
"2",
")",
":",
"if",
"word",
"not",
"in",
"self",
".",
"_words",
":",
"return",
"Dictionary",
".",
"Result",
"(",
"False",
",",
"self",
".",
"_corrector",
".",
"suggest",
"(",
"word",
",",
"prefix",
"=",
"prefix",
",",
"maxdist",
"=",
"distance",
")",
")",
"else",
":",
"return",
"Dictionary",
".",
"Result",
"(",
"True",
",",
"list",
"(",
")",
")"
] | Get corrections for word, if word is an invalid word.
:prefix: is the number of characters the prefix of the word must
have in common with the suggested corrections.
:distance: is the maximum character distance a correction may have from
the input word. This limits the number of available corrections
but decreases the correction search space.
The return value of this function is a Result tuple, with the
:valid: member indicating whether the input word is a valid one and
:suggestions: member containing a list of suggestions. | [
"Get",
"corrections",
"for",
"word",
"if",
"word",
"is",
"an",
"invalid",
"word",
"."
] | cfc88771acd3d5551c28fa5d917bb0aeb584c4cc | https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L910-L930 |
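The valid/suggestions contract above can be imitated with the standard library; difflib.get_close_matches is only a stand-in for the real corrector, so the prefix and distance knobs are not reproduced:

from collections import namedtuple
import difflib

Result = namedtuple("Result", ["valid", "suggestions"])
WORDS = {"spell", "check", "dictionary"}

def corrections(word):
    if word in WORDS:
        return Result(True, [])
    return Result(False, difflib.get_close_matches(word, WORDS, n=3))

print(corrections("check"))  # Result(valid=True, suggestions=[])
print(corrections("spel"))   # Result(valid=False, suggestions=['spell'])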
251,737 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/edeposit_autoparser.py | _create_dom | def _create_dom(data):
"""
Creates double-linked DOM from `data`.
Args:
data (str/HTMLElement): Either string or HTML element.
Returns:
obj: HTMLElement containing double-linked DOM.
"""
if not isinstance(data, dhtmlparser.HTMLElement):
data = dhtmlparser.parseString(
utils.handle_encodnig(data)
)
dhtmlparser.makeDoubleLinked(data)
return data | python | def _create_dom(data):
"""
Creates double-linked DOM from `data`.
Args:
data (str/HTMLElement): Either string or HTML element.
Returns:
obj: HTMLElement containing double-linked DOM.
"""
if not isinstance(data, dhtmlparser.HTMLElement):
data = dhtmlparser.parseString(
utils.handle_encodnig(data)
)
dhtmlparser.makeDoubleLinked(data)
return data | [
"def",
"_create_dom",
"(",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"dhtmlparser",
".",
"HTMLElement",
")",
":",
"data",
"=",
"dhtmlparser",
".",
"parseString",
"(",
"utils",
".",
"handle_encodnig",
"(",
"data",
")",
")",
"dhtmlparser",
".",
"makeDoubleLinked",
"(",
"data",
")",
"return",
"data"
] | Creates double-linked DOM from `data`.
Args:
data (str/HTMLElement): Either string or HTML element.
Returns:
obj: HTMLElement containing double-linked DOM. | [
"Creates",
"doublelinked",
"DOM",
"from",
"data",
"."
] | 38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/edeposit_autoparser.py#L22-L39 |
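A hypothetical round-trip through the two dhtmlparser calls used above, assuming the dhtmlparser package is installed and behaves as these records suggest; the markup and the .parent probe are illustrative only:

import dhtmlparser

dom = dhtmlparser.parseString("<div><p>hello</p></div>")
dhtmlparser.makeDoubleLinked(dom)  # adds the .parent links used elsewhere
p = dom.find("p")[0]
print(p.parent.getTagName())       # div -- upward navigation now works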
251,738 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/edeposit_autoparser.py | _locate_element | def _locate_element(dom, el_content, transformer=None):
"""
Find the element containing `el_content` in `dom`. Apply the `transformer`
function to the content of all elements in `dom` so they can be correctly
matched against `el_content`.
Args:
dom (obj): HTMLElement tree.
el_content (str): Content of the element to be picked from `dom`.
transformer (fn, default None): Transforming function.
Note:
The `transformer` parameter can be, for example, a simple lambda::
lambda x: x.strip()
Returns:
list: Matching HTMLElements.
"""
return dom.find(
None,
fn=utils.content_matchs(el_content, transformer)
) | python | def _locate_element(dom, el_content, transformer=None):
"""
Find the element containing `el_content` in `dom`. Apply the `transformer`
function to the content of all elements in `dom` so they can be correctly
matched against `el_content`.
Args:
dom (obj): HTMLElement tree.
el_content (str): Content of the element to be picked from `dom`.
transformer (fn, default None): Transforming function.
Note:
The `transformer` parameter can be, for example, a simple lambda::
lambda x: x.strip()
Returns:
list: Matching HTMLElements.
"""
return dom.find(
None,
fn=utils.content_matchs(el_content, transformer)
) | [
"def",
"_locate_element",
"(",
"dom",
",",
"el_content",
",",
"transformer",
"=",
"None",
")",
":",
"return",
"dom",
".",
"find",
"(",
"None",
",",
"fn",
"=",
"utils",
".",
"content_matchs",
"(",
"el_content",
",",
"transformer",
")",
")"
] | Find the element containing `el_content` in `dom`. Apply the `transformer`
function to the content of all elements in `dom` so they can be correctly
matched against `el_content`.
Args:
dom (obj): HTMLElement tree.
el_content (str): Content of the element to be picked from `dom`.
transformer (fn, default None): Transforming function.
Note:
The `transformer` parameter can be, for example, a simple lambda::
lambda x: x.strip()
Returns:
list: Matching HTMLElements. | [
"Find",
"element",
"containing",
"el_content",
"in",
"dom",
".",
"Use",
"transformer",
"function",
"to",
"content",
"of",
"all",
"elements",
"in",
"dom",
"in",
"order",
"to",
"correctly",
"transforming",
"them",
"to",
"match",
"them",
"with",
"el_content",
"."
] | 38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/edeposit_autoparser.py#L42-L64 |
251,739 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/edeposit_autoparser.py | _match_elements | def _match_elements(dom, matches):
"""
Find location of elements matching patterns specified in `matches`.
Args:
dom (obj): HTMLElement DOM tree.
matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``.
Returns:
dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}``
"""
out = {}
for key, content in matches.items():
pattern = content["data"].strip()
if "\n" in pattern:
pattern = pattern.split()
transformer = lambda x: x.strip().split()
else:
transformer = lambda x: x.strip()
matching_elements = _locate_element(
dom,
pattern,
transformer=transformer
)
not_found_msg = content.get("notfoundmsg", "").replace("$name", key)
if not not_found_msg.strip():
not_found_msg = "Can't locate variable '%s' with content '%s'!" % (
key,
pattern,
)
content["notfoundmsg"] = not_found_msg
# in case of multiple elements, find only elements with proper tagname
tagname = content.get("tagname", "").strip().lower()
if tagname:
matching_elements = filter(
lambda x: x.getTagName().strip().lower() == tagname,
matching_elements
)
if not matching_elements:
raise UserWarning(not_found_msg)
if len(matching_elements) > 1:
raise UserWarning(
"Ambigious content '%s'!" % content
+ "Content was found in multiple elements!"
)
out[key] = matching_elements[0]
return out | python | def _match_elements(dom, matches):
"""
Find location of elements matching patterns specified in `matches`.
Args:
dom (obj): HTMLElement DOM tree.
matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``.
Returns:
dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}``
"""
out = {}
for key, content in matches.items():
pattern = content["data"].strip()
if "\n" in pattern:
pattern = pattern.split()
transformer = lambda x: x.strip().split()
else:
transformer = lambda x: x.strip()
matching_elements = _locate_element(
dom,
pattern,
transformer=transformer
)
not_found_msg = content.get("notfoundmsg", "").replace("$name", key)
if not not_found_msg.strip():
not_found_msg = "Can't locate variable '%s' with content '%s'!" % (
key,
pattern,
)
content["notfoundmsg"] = not_found_msg
# in case of multiple elements, find only elements with proper tagname
tagname = content.get("tagname", "").strip().lower()
if tagname:
matching_elements = filter(
lambda x: x.getTagName().strip().lower() == tagname,
matching_elements
)
if not matching_elements:
raise UserWarning(not_found_msg)
if len(matching_elements) > 1:
raise UserWarning(
"Ambigious content '%s'!" % content
+ "Content was found in multiple elements!"
)
out[key] = matching_elements[0]
return out | [
"def",
"_match_elements",
"(",
"dom",
",",
"matches",
")",
":",
"out",
"=",
"{",
"}",
"for",
"key",
",",
"content",
"in",
"matches",
".",
"items",
"(",
")",
":",
"pattern",
"=",
"content",
"[",
"\"data\"",
"]",
".",
"strip",
"(",
")",
"if",
"\"\\n\"",
"in",
"pattern",
":",
"pattern",
"=",
"pattern",
".",
"split",
"(",
")",
"transformer",
"=",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"else",
":",
"transformer",
"=",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
")",
"matching_elements",
"=",
"_locate_element",
"(",
"dom",
",",
"pattern",
",",
"transformer",
"=",
"transformer",
")",
"not_found_msg",
"=",
"content",
".",
"get",
"(",
"\"notfoundmsg\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"$name\"",
",",
"key",
")",
"if",
"not",
"not_found_msg",
".",
"strip",
"(",
")",
":",
"not_found_msg",
"=",
"\"Can't locate variable '%s' with content '%s'!\"",
"%",
"(",
"key",
",",
"pattern",
",",
")",
"content",
"[",
"\"notfoundmsg\"",
"]",
"=",
"not_found_msg",
"# in case of multiple elements, find only elements with propert tagname",
"tagname",
"=",
"content",
".",
"get",
"(",
"\"tagname\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"tagname",
":",
"matching_elements",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"getTagName",
"(",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"tagname",
",",
"matching_elements",
")",
"if",
"not",
"matching_elements",
":",
"raise",
"UserWarning",
"(",
"not_found_msg",
")",
"if",
"len",
"(",
"matching_elements",
")",
">",
"1",
":",
"raise",
"UserWarning",
"(",
"\"Ambigious content '%s'!\"",
"%",
"content",
"+",
"\"Content was found in multiple elements!\"",
")",
"out",
"[",
"key",
"]",
"=",
"matching_elements",
"[",
"0",
"]",
"return",
"out"
] | Find location of elements matching patterns specified in `matches`.
Args:
dom (obj): HTMLElement DOM tree.
matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``.
Returns:
dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}`` | [
"Find",
"location",
"of",
"elements",
"matching",
"patterns",
"specified",
"in",
"matches",
"."
] | 38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/edeposit_autoparser.py#L67-L120 |
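One portability note on the filter(...) call above: under Python 3 it returns a lazy iterator, so the truthiness and len() checks in this function only behave as written under Python 2. A short demonstration of the pitfall and the list-comprehension fix:

empty = filter(lambda x: x > 10, [1, 2, 3])
print(bool(empty))            # True under Python 3 -- iterators are truthy
safe = [x for x in [1, 2, 3] if x > 10]
print(bool(safe), len(safe))  # False 0 -- identical on both versions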
251,740 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/edeposit_autoparser.py | _collect_paths | def _collect_paths(element):
"""
Collect all possible paths which lead to `element`.
The function returns the standard path from the root element to this one,
a reverse path which uses negative indexes, and some pattern matches, like
"this is the element which has a neighbour with id 7", and so on.
Args:
element (obj): HTMLElement instance.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
"""
output = []
# look for element by parameters - sometimes the ID is unique
path = vectors.el_to_path_vector(element)
root = path[0]
params = element.params if element.params else None
match = root.find(element.getTagName(), params)
if len(match) == 1:
output.append(
PathCall("find", 0, [element.getTagName(), params])
)
# look for element by neighbours
output.extend(path_patterns.neighbours_pattern(element))
# look for elements by patterns - an element whose parent has a given
# tagname, whose grandparent has a given tagname, and so on
output.extend(path_patterns.predecesors_pattern(element, root))
index_backtrack = []
last_index_backtrack = []
params_backtrack = []
last_params_backtrack = []
# look for element by paths from root to element
for el in reversed(path):
# skip root elements
if not el.parent:
continue
tag_name = el.getTagName()
match = el.parent.wfind(tag_name).childs
index = match.index(el)
index_backtrack.append(
PathCall("wfind", index, [tag_name])
)
last_index_backtrack.append(
PathCall("wfind", index - len(match), [tag_name])
)
# if element has some parameters, use them for lookup
if el.params:
match = el.parent.wfind(tag_name, el.params).childs
index = match.index(el)
params_backtrack.append(
PathCall("wfind", index, [tag_name, el.params])
)
last_params_backtrack.append(
PathCall("wfind", index - len(match), [tag_name, el.params])
)
else:
params_backtrack.append(
PathCall("wfind", index, [tag_name])
)
last_params_backtrack.append(
PathCall("wfind", index - len(match), [tag_name])
)
output.extend([
Chained(reversed(params_backtrack)),
Chained(reversed(last_params_backtrack)),
Chained(reversed(index_backtrack)),
Chained(reversed(last_index_backtrack)),
])
return output | python | def _collect_paths(element):
"""
Collect all possible paths which lead to `element`.
The function returns the standard path from the root element to this one,
a reverse path which uses negative indexes, and some pattern matches, like
"this is the element which has a neighbour with id 7", and so on.
Args:
element (obj): HTMLElement instance.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
"""
output = []
# look for element by parameters - sometimes the ID is unique
path = vectors.el_to_path_vector(element)
root = path[0]
params = element.params if element.params else None
match = root.find(element.getTagName(), params)
if len(match) == 1:
output.append(
PathCall("find", 0, [element.getTagName(), params])
)
# look for element by neighbours
output.extend(path_patterns.neighbours_pattern(element))
# look for elements by patterns - an element whose parent has a given
# tagname, whose grandparent has a given tagname, and so on
output.extend(path_patterns.predecesors_pattern(element, root))
index_backtrack = []
last_index_backtrack = []
params_backtrack = []
last_params_backtrack = []
# look for element by paths from root to element
for el in reversed(path):
# skip root elements
if not el.parent:
continue
tag_name = el.getTagName()
match = el.parent.wfind(tag_name).childs
index = match.index(el)
index_backtrack.append(
PathCall("wfind", index, [tag_name])
)
last_index_backtrack.append(
PathCall("wfind", index - len(match), [tag_name])
)
# if element has some parameters, use them for lookup
if el.params:
match = el.parent.wfind(tag_name, el.params).childs
index = match.index(el)
params_backtrack.append(
PathCall("wfind", index, [tag_name, el.params])
)
last_params_backtrack.append(
PathCall("wfind", index - len(match), [tag_name, el.params])
)
else:
params_backtrack.append(
PathCall("wfind", index, [tag_name])
)
last_params_backtrack.append(
PathCall("wfind", index - len(match), [tag_name])
)
output.extend([
Chained(reversed(params_backtrack)),
Chained(reversed(last_params_backtrack)),
Chained(reversed(index_backtrack)),
Chained(reversed(last_index_backtrack)),
])
return output | [
"def",
"_collect_paths",
"(",
"element",
")",
":",
"output",
"=",
"[",
"]",
"# look for element by parameters - sometimes the ID is unique",
"path",
"=",
"vectors",
".",
"el_to_path_vector",
"(",
"element",
")",
"root",
"=",
"path",
"[",
"0",
"]",
"params",
"=",
"element",
".",
"params",
"if",
"element",
".",
"params",
"else",
"None",
"match",
"=",
"root",
".",
"find",
"(",
"element",
".",
"getTagName",
"(",
")",
",",
"params",
")",
"if",
"len",
"(",
"match",
")",
"==",
"1",
":",
"output",
".",
"append",
"(",
"PathCall",
"(",
"\"find\"",
",",
"0",
",",
"[",
"element",
".",
"getTagName",
"(",
")",
",",
"params",
"]",
")",
")",
"# look for element by neighbours",
"output",
".",
"extend",
"(",
"path_patterns",
".",
"neighbours_pattern",
"(",
"element",
")",
")",
"# look for elements by patterns - element, which parent has tagname, and",
"# which parent has tagname ..",
"output",
".",
"extend",
"(",
"path_patterns",
".",
"predecesors_pattern",
"(",
"element",
",",
"root",
")",
")",
"index_backtrack",
"=",
"[",
"]",
"last_index_backtrack",
"=",
"[",
"]",
"params_backtrack",
"=",
"[",
"]",
"last_params_backtrack",
"=",
"[",
"]",
"# look for element by paths from root to element",
"for",
"el",
"in",
"reversed",
"(",
"path",
")",
":",
"# skip root elements",
"if",
"not",
"el",
".",
"parent",
":",
"continue",
"tag_name",
"=",
"el",
".",
"getTagName",
"(",
")",
"match",
"=",
"el",
".",
"parent",
".",
"wfind",
"(",
"tag_name",
")",
".",
"childs",
"index",
"=",
"match",
".",
"index",
"(",
"el",
")",
"index_backtrack",
".",
"append",
"(",
"PathCall",
"(",
"\"wfind\"",
",",
"index",
",",
"[",
"tag_name",
"]",
")",
")",
"last_index_backtrack",
".",
"append",
"(",
"PathCall",
"(",
"\"wfind\"",
",",
"index",
"-",
"len",
"(",
"match",
")",
",",
"[",
"tag_name",
"]",
")",
")",
"# if element has some parameters, use them for lookup",
"if",
"el",
".",
"params",
":",
"match",
"=",
"el",
".",
"parent",
".",
"wfind",
"(",
"tag_name",
",",
"el",
".",
"params",
")",
".",
"childs",
"index",
"=",
"match",
".",
"index",
"(",
"el",
")",
"params_backtrack",
".",
"append",
"(",
"PathCall",
"(",
"\"wfind\"",
",",
"index",
",",
"[",
"tag_name",
",",
"el",
".",
"params",
"]",
")",
")",
"last_params_backtrack",
".",
"append",
"(",
"PathCall",
"(",
"\"wfind\"",
",",
"index",
"-",
"len",
"(",
"match",
")",
",",
"[",
"tag_name",
",",
"el",
".",
"params",
"]",
")",
")",
"else",
":",
"params_backtrack",
".",
"append",
"(",
"PathCall",
"(",
"\"wfind\"",
",",
"index",
",",
"[",
"tag_name",
"]",
")",
")",
"last_params_backtrack",
".",
"append",
"(",
"PathCall",
"(",
"\"wfind\"",
",",
"index",
"-",
"len",
"(",
"match",
")",
",",
"[",
"tag_name",
"]",
")",
")",
"output",
".",
"extend",
"(",
"[",
"Chained",
"(",
"reversed",
"(",
"params_backtrack",
")",
")",
",",
"Chained",
"(",
"reversed",
"(",
"last_params_backtrack",
")",
")",
",",
"Chained",
"(",
"reversed",
"(",
"index_backtrack",
")",
")",
",",
"Chained",
"(",
"reversed",
"(",
"last_index_backtrack",
")",
")",
",",
"]",
")",
"return",
"output"
] | Collect all possible paths which lead to `element`.
The function returns the standard path from the root element to this one,
a reverse path which uses negative indexes, and some pattern matches, like
"this is the element which has a neighbour with id 7", and so on.
Args:
element (obj): HTMLElement instance.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects. | [
"Collect",
"all",
"possible",
"path",
"which",
"leads",
"to",
"element",
"."
] | 38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/edeposit_autoparser.py#L123-L205 |
251,741 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/edeposit_autoparser.py | _is_working_path | def _is_working_path(dom, path, element):
"""
Check whether the path is working or not.
Apply the proper search function interpreting `path` to `dom` and check
whether the returned object is `element`. If so, return ``True``,
otherwise ``False``.
Args:
dom (obj): HTMLElement DOM.
path (obj): :class:`.PathCall` instance containing information about
the path and which function it requires to obtain the element
the path is pointing to.
element (obj): HTMLElement instance used to decide whether `path`
points to correct `element` or not.
Returns:
bool: True if `path` correctly points to proper `element`.
"""
def i_or_none(el, i):
"""
Return ``el[i]`` if the list is not blank, or None otherwise.
Args:
el (list, tuple): Any indexable object.
i (int): Index.
Returns:
obj: Element at index `i` if `el` is not blank, or ``None``.
"""
if not el:
return None
return el[i]
# map decoders of all paths to one dictionary to make it easier to call them
path_functions = {
"find": lambda el, index, params:
i_or_none(el.find(*params), index),
"wfind": lambda el, index, params:
i_or_none(el.wfind(*params).childs, index),
"match": lambda el, index, params:
i_or_none(el.match(*params), index),
"left_neighbour_tag": lambda el, index, neigh_data:
i_or_none(
el.find(
neigh_data.tag_name,
neigh_data.params,
fn=utils.has_neigh(*neigh_data.fn_params, left=True)
),
index
),
"right_neighbour_tag": lambda el, index, neigh_data:
i_or_none(
el.find(
neigh_data.tag_name,
neigh_data.params,
fn=utils.has_neigh(*neigh_data.fn_params, left=False)
),
index
),
}
# call all decoders and see what you get from them
el = None
if isinstance(path, PathCall):
el = path_functions[path.call_type](dom, path.index, path.params)
elif isinstance(path, Chained):
for path in path.chain:
dom = path_functions[path.call_type](dom, path.index, path.params)
if not dom:
return False
el = dom
else:
raise UserWarning(
"Unknown type of path parameters! (%s)" % str(path)
)
if not el:
return False
# test whether returned item is the item we are looking for
return el.getContent().strip() == element.getContent().strip() | python | def _is_working_path(dom, path, element):
"""
Check whether the path is working or not.
Apply the proper search function interpreting `path` to `dom` and check
whether the returned object is `element`. If so, return ``True``,
otherwise ``False``.
Args:
dom (obj): HTMLElement DOM.
path (obj): :class:`.PathCall` instance containing information about
the path and which function it requires to obtain the element
the path is pointing to.
element (obj): HTMLElement instance used to decide whether `path`
points to correct `element` or not.
Returns:
bool: True if `path` correctly points to proper `element`.
"""
def i_or_none(el, i):
"""
Return ``el[i]`` if the list is not blank, or None otherwise.
Args:
el (list, tuple): Any indexable object.
i (int): Index.
Returns:
obj: Element at index `i` if `el` is not blank, or ``None``.
"""
if not el:
return None
return el[i]
# map decoders of all paths to one dictionary to make it easier to call them
path_functions = {
"find": lambda el, index, params:
i_or_none(el.find(*params), index),
"wfind": lambda el, index, params:
i_or_none(el.wfind(*params).childs, index),
"match": lambda el, index, params:
i_or_none(el.match(*params), index),
"left_neighbour_tag": lambda el, index, neigh_data:
i_or_none(
el.find(
neigh_data.tag_name,
neigh_data.params,
fn=utils.has_neigh(*neigh_data.fn_params, left=True)
),
index
),
"right_neighbour_tag": lambda el, index, neigh_data:
i_or_none(
el.find(
neigh_data.tag_name,
neigh_data.params,
fn=utils.has_neigh(*neigh_data.fn_params, left=False)
),
index
),
}
# call all decoders and see what you get from them
el = None
if isinstance(path, PathCall):
el = path_functions[path.call_type](dom, path.index, path.params)
elif isinstance(path, Chained):
for path in path.chain:
dom = path_functions[path.call_type](dom, path.index, path.params)
if not dom:
return False
el = dom
else:
raise UserWarning(
"Unknown type of path parameters! (%s)" % str(path)
)
if not el:
return False
# test whether returned item is the item we are looking for
return el.getContent().strip() == element.getContent().strip() | [
"def",
"_is_working_path",
"(",
"dom",
",",
"path",
",",
"element",
")",
":",
"def",
"i_or_none",
"(",
"el",
",",
"i",
")",
":",
"\"\"\"\n Return ``el[i]`` if the list is not blank, or None otherwise.\n\n Args:\n el (list, tuple): Any indexable object.\n i (int): Index.\n\n Returns:\n obj: Element at index `i` if `el` is not blank, or ``None``.\n \"\"\"",
"if",
"not",
"el",
":",
"return",
"None",
"return",
"el",
"[",
"i",
"]",
"# map decoders of all paths to one dictionary to make easier to call them",
"path_functions",
"=",
"{",
"\"find\"",
":",
"lambda",
"el",
",",
"index",
",",
"params",
":",
"i_or_none",
"(",
"el",
".",
"find",
"(",
"*",
"params",
")",
",",
"index",
")",
",",
"\"wfind\"",
":",
"lambda",
"el",
",",
"index",
",",
"params",
":",
"i_or_none",
"(",
"el",
".",
"wfind",
"(",
"*",
"params",
")",
".",
"childs",
",",
"index",
")",
",",
"\"match\"",
":",
"lambda",
"el",
",",
"index",
",",
"params",
":",
"i_or_none",
"(",
"el",
".",
"match",
"(",
"*",
"params",
")",
",",
"index",
")",
",",
"\"left_neighbour_tag\"",
":",
"lambda",
"el",
",",
"index",
",",
"neigh_data",
":",
"i_or_none",
"(",
"el",
".",
"find",
"(",
"neigh_data",
".",
"tag_name",
",",
"neigh_data",
".",
"params",
",",
"fn",
"=",
"utils",
".",
"has_neigh",
"(",
"*",
"neigh_data",
".",
"fn_params",
",",
"left",
"=",
"True",
")",
")",
",",
"index",
")",
",",
"\"right_neighbour_tag\"",
":",
"lambda",
"el",
",",
"index",
",",
"neigh_data",
":",
"i_or_none",
"(",
"el",
".",
"find",
"(",
"neigh_data",
".",
"tag_name",
",",
"neigh_data",
".",
"params",
",",
"fn",
"=",
"utils",
".",
"has_neigh",
"(",
"*",
"neigh_data",
".",
"fn_params",
",",
"left",
"=",
"False",
")",
")",
",",
"index",
")",
",",
"}",
"# call all decoders and see what you get from them",
"el",
"=",
"None",
"if",
"isinstance",
"(",
"path",
",",
"PathCall",
")",
":",
"el",
"=",
"path_functions",
"[",
"path",
".",
"call_type",
"]",
"(",
"dom",
",",
"path",
".",
"index",
",",
"path",
".",
"params",
")",
"elif",
"isinstance",
"(",
"path",
",",
"Chained",
")",
":",
"for",
"path",
"in",
"path",
".",
"chain",
":",
"dom",
"=",
"path_functions",
"[",
"path",
".",
"call_type",
"]",
"(",
"dom",
",",
"path",
".",
"index",
",",
"path",
".",
"params",
")",
"if",
"not",
"dom",
":",
"return",
"False",
"el",
"=",
"dom",
"else",
":",
"raise",
"UserWarning",
"(",
"\"Unknown type of path parameters! (%s)\"",
"%",
"str",
"(",
"path",
")",
")",
"if",
"not",
"el",
":",
"return",
"False",
"# test whether returned item is the item we are looking for",
"return",
"el",
".",
"getContent",
"(",
")",
".",
"strip",
"(",
")",
"==",
"element",
".",
"getContent",
"(",
")",
".",
"strip",
"(",
")"
] | Check whether the path is working or not.
Apply the proper search function interpreting `path` to `dom` and check
whether the returned object is `element`. If so, return ``True``,
otherwise ``False``.
Args:
dom (obj): HTMLElement DOM.
path (obj): :class:`.PathCall` instance containing information about
the path and which function it requires to obtain the element
the path is pointing to.
element (obj): HTMLElement instance used to decide whether `path`
points to correct `element` or not.
Returns:
bool: True if `path` correctly points to proper `element`. | [
"Check",
"whether",
"the",
"path",
"is",
"working",
"or",
"not",
"."
] | 38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/edeposit_autoparser.py#L208-L289 |
251,742 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/edeposit_autoparser.py | select_best_paths | def select_best_paths(examples):
"""
Process `examples`, select only paths that work for every example. Select
best paths with highest priority.
Args:
examples (dict): Output from :func:`.read_config`.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
"""
possible_paths = {} # {varname: [paths]}
# collect list of all possible paths to all existing variables
for example in examples:
dom = _create_dom(example["html"])
matching_elements = _match_elements(dom, example["vars"])
for key, match in matching_elements.items():
if key not in possible_paths: # TODO: merge paths together?
possible_paths[key] = _collect_paths(match)
# keep only paths that work in all examples where they are required
for example in examples:
dom = _create_dom(example["html"])
matching_elements = _match_elements(dom, example["vars"])
for key, paths in possible_paths.items():
if key not in matching_elements:
continue
possible_paths[key] = filter(
lambda path: _is_working_path(
dom,
path,
matching_elements[key]
),
paths
)
priorities = [
"find",
"left_neighbour_tag",
"right_neighbour_tag",
"wfind",
"match",
"Chained"
]
priorities = dict(map(lambda x: (x[1], x[0]), enumerate(priorities)))
# sort all paths by priority table
for key in possible_paths.keys():
possible_paths[key] = list(sorted(
possible_paths[key],
key=lambda x: priorities.get(x.call_type, 100)
))
return possible_paths | python | def select_best_paths(examples):
"""
Process `examples`, select only paths that work for every example. Select
best paths with highest priority.
Args:
examples (dict): Output from :func:`.read_config`.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
"""
possible_paths = {} # {varname: [paths]}
# collect list of all possible paths to all existing variables
for example in examples:
dom = _create_dom(example["html"])
matching_elements = _match_elements(dom, example["vars"])
for key, match in matching_elements.items():
if key not in possible_paths: # TODO: merge paths together?
possible_paths[key] = _collect_paths(match)
# keep only paths that work in all examples where they are required
for example in examples:
dom = _create_dom(example["html"])
matching_elements = _match_elements(dom, example["vars"])
for key, paths in possible_paths.items():
if key not in matching_elements:
continue
possible_paths[key] = filter(
lambda path: _is_working_path(
dom,
path,
matching_elements[key]
),
paths
)
priorities = [
"find",
"left_neighbour_tag",
"right_neighbour_tag",
"wfind",
"match",
"Chained"
]
priorities = dict(map(lambda x: (x[1], x[0]), enumerate(priorities)))
# sort all paths by priority table
for key in possible_paths.keys():
possible_paths[key] = list(sorted(
possible_paths[key],
key=lambda x: priorities.get(x.call_type, 100)
))
return possible_paths | [
"def",
"select_best_paths",
"(",
"examples",
")",
":",
"possible_paths",
"=",
"{",
"}",
"# {varname: [paths]}",
"# collect list of all possible paths to all existing variables",
"for",
"example",
"in",
"examples",
":",
"dom",
"=",
"_create_dom",
"(",
"example",
"[",
"\"html\"",
"]",
")",
"matching_elements",
"=",
"_match_elements",
"(",
"dom",
",",
"example",
"[",
"\"vars\"",
"]",
")",
"for",
"key",
",",
"match",
"in",
"matching_elements",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"possible_paths",
":",
"# TODO: merge paths together?",
"possible_paths",
"[",
"key",
"]",
"=",
"_collect_paths",
"(",
"match",
")",
"# leave only paths, that works in all examples where, are required",
"for",
"example",
"in",
"examples",
":",
"dom",
"=",
"_create_dom",
"(",
"example",
"[",
"\"html\"",
"]",
")",
"matching_elements",
"=",
"_match_elements",
"(",
"dom",
",",
"example",
"[",
"\"vars\"",
"]",
")",
"for",
"key",
",",
"paths",
"in",
"possible_paths",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"matching_elements",
":",
"continue",
"possible_paths",
"[",
"key",
"]",
"=",
"filter",
"(",
"lambda",
"path",
":",
"_is_working_path",
"(",
"dom",
",",
"path",
",",
"matching_elements",
"[",
"key",
"]",
")",
",",
"paths",
")",
"priorities",
"=",
"[",
"\"find\"",
",",
"\"left_neighbour_tag\"",
",",
"\"right_neighbour_tag\"",
",",
"\"wfind\"",
",",
"\"match\"",
",",
"\"Chained\"",
"]",
"priorities",
"=",
"dict",
"(",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
"[",
"1",
"]",
",",
"x",
"[",
"0",
"]",
")",
",",
"enumerate",
"(",
"priorities",
")",
")",
")",
"# sort all paths by priority table",
"for",
"key",
"in",
"possible_paths",
".",
"keys",
"(",
")",
":",
"possible_paths",
"[",
"key",
"]",
"=",
"list",
"(",
"sorted",
"(",
"possible_paths",
"[",
"key",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"priorities",
".",
"get",
"(",
"x",
".",
"call_type",
",",
"100",
")",
")",
")",
"return",
"possible_paths"
] | Process `examples`, select only paths that work for every example. Select
best paths with highest priority.
Args:
examples (dict): Output from :func:`.read_config`.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects. | [
"Process",
"examples",
"select",
"only",
"paths",
"that",
"works",
"for",
"every",
"example",
".",
"Select",
"best",
"paths",
"with",
"highest",
"priority",
"."
] | 38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/edeposit_autoparser.py#L292-L349 |
251,743 | edeposit/edeposit.amqp.storage | src/edeposit/amqp/storage/publication_storage.py | _assert_obj_type | def _assert_obj_type(pub, name="pub", obj_type=DBPublication):
"""
Make sure, that `pub` is instance of the `obj_type`.
Args:
pub (obj): Instance which will be checked.
name (str): Name of the instance. Used in exception. Default `pub`.
obj_type (class): Class of which the `pub` should be instance. Default
:class:`.DBPublication`.
Raises:
InvalidType: When the `pub` is not instance of `obj_type`.
"""
if not isinstance(pub, obj_type):
raise InvalidType(
"`%s` have to be instance of %s, not %s!" % (
name,
obj_type.__name__,
pub.__class__.__name__
)
) | python | def _assert_obj_type(pub, name="pub", obj_type=DBPublication):
"""
Make sure that `pub` is an instance of `obj_type`.
Args:
pub (obj): Instance which will be checked.
name (str): Name of the instance. Used in the exception. Default `pub`.
obj_type (class): Class of which `pub` should be an instance. Default
:class:`.DBPublication`.
Raises:
InvalidType: When `pub` is not an instance of `obj_type`.
"""
if not isinstance(pub, obj_type):
raise InvalidType(
"`%s` have to be instance of %s, not %s!" % (
name,
obj_type.__name__,
pub.__class__.__name__
)
) | [
"def",
"_assert_obj_type",
"(",
"pub",
",",
"name",
"=",
"\"pub\"",
",",
"obj_type",
"=",
"DBPublication",
")",
":",
"if",
"not",
"isinstance",
"(",
"pub",
",",
"obj_type",
")",
":",
"raise",
"InvalidType",
"(",
"\"`%s` have to be instance of %s, not %s!\"",
"%",
"(",
"name",
",",
"obj_type",
".",
"__name__",
",",
"pub",
".",
"__class__",
".",
"__name__",
")",
")"
] | Make sure that `pub` is an instance of `obj_type`.
Args:
pub (obj): Instance which will be checked.
name (str): Name of the instance. Used in the exception. Default `pub`.
obj_type (class): Class of which `pub` should be an instance. Default
:class:`.DBPublication`.
Raises:
InvalidType: When `pub` is not an instance of `obj_type`. | [
"Make",
"sure",
"that",
"pub",
"is",
"instance",
"of",
"the",
"obj_type",
"."
] | fb6bd326249847de04b17b64e856c878665cea92 | https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/publication_storage.py#L30-L50 |
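The isinstance-guard pattern above is reusable beyond this storage module; a hedged sketch with a local stand-in exception (the real `InvalidType` belongs to this package):

def assert_type(obj, name="obj", obj_type=dict):
    # mirror the guard: a readable error naming both expected and actual types
    if not isinstance(obj, obj_type):
        raise TypeError("`%s` has to be an instance of %s, not %s!"
                        % (name, obj_type.__name__, obj.__class__.__name__))

assert_type({})               # passes silently
assert_type([], name="pub")   # TypeError: `pub` has to be an instance of dict, not list!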
251,744 | edeposit/edeposit.amqp.storage | src/edeposit/amqp/storage/publication_storage.py | save_publication | def save_publication(pub):
"""
Save `pub` into database and into proper indexes.
Args:
pub (obj): Instance of the :class:`.DBPublication`.
Returns:
obj: :class:`.DBPublication` without data.
Raises:
InvalidType: When the `pub` is not instance of :class:`.DBPublication`.
UnindexablePublication: When there is no index (property) which can be
used to index `pub` in database.
"""
_assert_obj_type(pub)
_get_handler().store_object(pub)
return pub.to_comm(light_request=True) | python | def save_publication(pub):
"""
Save `pub` into database and into proper indexes.
Args:
pub (obj): Instance of the :class:`.DBPublication`.
Returns:
obj: :class:`.DBPublication` without data.
Raises:
InvalidType: When the `pub` is not instance of :class:`.DBPublication`.
UnindexablePublication: When there is no index (property) which can be
used to index `pub` in database.
"""
_assert_obj_type(pub)
_get_handler().store_object(pub)
return pub.to_comm(light_request=True) | [
"def",
"save_publication",
"(",
"pub",
")",
":",
"_assert_obj_type",
"(",
"pub",
")",
"_get_handler",
"(",
")",
".",
"store_object",
"(",
"pub",
")",
"return",
"pub",
".",
"to_comm",
"(",
"light_request",
"=",
"True",
")"
] | Save `pub` into database and into proper indexes.
Args:
pub (obj): Instance of the :class:`.DBPublication`.
Returns:
obj: :class:`.DBPublication` without data.
Raises:
InvalidType: When the `pub` is not instance of :class:`.DBPublication`.
UnindexablePublication: When there is no index (property) which can be
used to index `pub` in database. | [
"Save",
"pub",
"into",
"database",
"and",
"into",
"proper",
"indexes",
"."
] | fb6bd326249847de04b17b64e856c878665cea92 | https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/publication_storage.py#L53-L72 |
251,745 | eddiejessup/agaro | agaro/runner.py | Runner.clear_dir | def clear_dir(self):
"""Clear the output directory of all output files."""
for snapshot in output_utils.get_filenames(self.output_dir):
if snapshot.endswith('.pkl'):
os.remove(snapshot) | python | def clear_dir(self):
"""Clear the output directory of all output files."""
for snapshot in output_utils.get_filenames(self.output_dir):
if snapshot.endswith('.pkl'):
os.remove(snapshot) | [
"def",
"clear_dir",
"(",
"self",
")",
":",
"for",
"snapshot",
"in",
"output_utils",
".",
"get_filenames",
"(",
"self",
".",
"output_dir",
")",
":",
"if",
"snapshot",
".",
"endswith",
"(",
"'.pkl'",
")",
":",
"os",
".",
"remove",
"(",
"snapshot",
")"
] | Clear the output directory of all output files. | [
"Clear",
"the",
"output",
"directory",
"of",
"all",
"output",
"files",
"."
] | b2feb45d6129d749088c70b3e9290af7ca7c7d33 | https://github.com/eddiejessup/agaro/blob/b2feb45d6129d749088c70b3e9290af7ca7c7d33/agaro/runner.py#L99-L103 |
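The cleanup above can be reproduced with the standard library alone; a sketch assuming an `output_dir` path, with glob standing in for the package's `output_utils.get_filenames` helper:

import glob
import os

def clear_pickles(output_dir):
    # remove every pickle snapshot in the output directory
    for path in glob.glob(os.path.join(output_dir, '*.pkl')):
        os.remove(path)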
251,746 | eddiejessup/agaro | agaro/runner.py | Runner.is_snapshot_time | def is_snapshot_time(self, output_every=None, t_output_every=None):
"""Determine whether or not the model's iteration number is one
where the runner is expected to make an output snapshot.
"""
if t_output_every is not None:
output_every = int(round(t_output_every // self.model.dt))
return not self.model.i % output_every | python | def is_snapshot_time(self, output_every=None, t_output_every=None):
"""Determine whether or not the model's iteration number is one
where the runner is expected to make an output snapshot.
"""
if t_output_every is not None:
output_every = int(round(t_output_every // self.model.dt))
return not self.model.i % output_every | [
"def",
"is_snapshot_time",
"(",
"self",
",",
"output_every",
"=",
"None",
",",
"t_output_every",
"=",
"None",
")",
":",
"if",
"t_output_every",
"is",
"not",
"None",
":",
"output_every",
"=",
"int",
"(",
"round",
"(",
"t_output_every",
"//",
"self",
".",
"model",
".",
"dt",
")",
")",
"return",
"not",
"self",
".",
"model",
".",
"i",
"%",
"output_every"
] | Determine whether or not the model's iteration number is one
where the runner is expected to make an output snapshot. | [
"Determine",
"whether",
"or",
"not",
"the",
"model",
"s",
"iteration",
"number",
"is",
"one",
"where",
"the",
"runner",
"is",
"expected",
"to",
"make",
"an",
"output",
"snapshot",
"."
] | b2feb45d6129d749088c70b3e9290af7ca7c7d33 | https://github.com/eddiejessup/agaro/blob/b2feb45d6129d749088c70b3e9290af7ca7c7d33/agaro/runner.py#L105-L111 |
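The snapshot test is a plain modulo check once the time interval is converted to an iteration interval; a sketch with assumed values for the model's `dt`:

dt = 0.5                # model timestep (assumed)
t_output_every = 5.0    # model time between snapshots
output_every = int(round(t_output_every // dt))   # -> 10
snapshot_iters = [i for i in range(40) if not i % output_every]
# -> [0, 10, 20, 30]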
251,747 | eddiejessup/agaro | agaro/runner.py | Runner.iterate | def iterate(self,
n=None, n_upto=None, t=None, t_upto=None,
output_every=None, t_output_every=None):
"""Run the model for a number of iterations, expressed in a number
of options.
Only one iteration argument should be passed.
Only one output argument should be passed.
Parameters
----------
n: int
Run the model for `n` iterations from its current point.
n_upto: int
Run the model so that its iteration number is at
least `n_upto`.
t: float
Run the model for `t` time from its current point.
t_upto: float
Run the model so that its time is
at least `t_upto`.
output_every: int
How many iterations should elapse between making model snapshots.
t_output_every: float
How much time should elapse between making model snapshots.
"""
if t is not None:
t_upto = self.model.t + t
if t_upto is not None:
n_upto = int(round(t_upto // self.model.dt))
if n is not None:
n_upto = self.model.i + n
while self.model.i <= n_upto:
if self.is_snapshot_time(output_every, t_output_every):
self.make_snapshot()
self.model.iterate() | python | def iterate(self,
n=None, n_upto=None, t=None, t_upto=None,
output_every=None, t_output_every=None):
"""Run the model for a number of iterations, expressed in a number
of options.
Only one iteration argument should be passed.
Only one output argument should be passed.
Parameters
----------
n: int
Run the model for `n` iterations from its current point.
n_upto: int
Run the model so that its iteration number is at
least `n_upto`.
t: float
Run the model for `t` time from its current point.
t_upto: float
Run the model so that its time is
at least `t_upto`.
output_every: int
How many iterations should elapse between making model snapshots.
t_output_every: float
How much time should elapse between making model snapshots.
"""
if t is not None:
t_upto = self.model.t + t
if t_upto is not None:
n_upto = int(round(t_upto // self.model.dt))
if n is not None:
n_upto = self.model.i + n
while self.model.i <= n_upto:
if self.is_snapshot_time(output_every, t_output_every):
self.make_snapshot()
self.model.iterate() | [
"def",
"iterate",
"(",
"self",
",",
"n",
"=",
"None",
",",
"n_upto",
"=",
"None",
",",
"t",
"=",
"None",
",",
"t_upto",
"=",
"None",
",",
"output_every",
"=",
"None",
",",
"t_output_every",
"=",
"None",
")",
":",
"if",
"t",
"is",
"not",
"None",
":",
"t_upto",
"=",
"self",
".",
"model",
".",
"t",
"+",
"t",
"if",
"t_upto",
"is",
"not",
"None",
":",
"n_upto",
"=",
"int",
"(",
"round",
"(",
"t_upto",
"//",
"self",
".",
"model",
".",
"dt",
")",
")",
"if",
"n",
"is",
"not",
"None",
":",
"n_upto",
"=",
"self",
".",
"model",
".",
"i",
"+",
"n",
"while",
"self",
".",
"model",
".",
"i",
"<=",
"n_upto",
":",
"if",
"self",
".",
"is_snapshot_time",
"(",
"output_every",
",",
"t_output_every",
")",
":",
"self",
".",
"make_snapshot",
"(",
")",
"self",
".",
"model",
".",
"iterate",
"(",
")"
] | Run the model for a number of iterations, expressed in a number
of options.
Only one iteration argument should be passed.
Only one output argument should be passed.
Parameters
----------
n: int
Run the model for `n` iterations from its current point.
n_upto: int
Run the model so that its iteration number is at
least `n_upto`.
t: float
Run the model for `t` time from its current point.
t_upto: float
Run the model so that its time is
at least `t_upto`.
output_every: int
How many iterations should elapse between making model snapshots.
t_output_every: float
How much time should elapse between making model snapshots. | [
"Run",
"the",
"model",
"for",
"a",
"number",
"of",
"iterations",
"expressed",
"in",
"a",
"number",
"of",
"options",
".",
"Only",
"one",
"iteration",
"argument",
"should",
"be",
"passed",
".",
"Only",
"one",
"output",
"arguments",
"should",
"be",
"passed",
"."
] | b2feb45d6129d749088c70b3e9290af7ca7c7d33 | https://github.com/eddiejessup/agaro/blob/b2feb45d6129d749088c70b3e9290af7ca7c7d33/agaro/runner.py#L113-L149 |
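All of the stopping options above normalize to one target iteration number before the loop runs; a compact trace of that normalization with the model state faked as plain variables:

dt, i, t = 0.5, 4, 2.0          # assumed current model state
n = n_upto = t_upto = None
t_run = 3.0                      # e.g. iterate(t=3.0)
if t_run is not None:
    t_upto = t + t_run           # 5.0
if t_upto is not None:
    n_upto = int(round(t_upto // dt))   # 10
if n is not None:
    n_upto = i + n               # not taken here
# the run loop then advances while i <= n_upto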
251,748 | eddiejessup/agaro | agaro/runner.py | Runner.make_snapshot | def make_snapshot(self):
"""Output a snapshot of the current model state, as a pickle of the
`Model` object in a file inside the output directory, with a name
determined by its iteration number.
"""
filename = join(self.output_dir, '{:010d}.pkl'.format(self.model.i))
output_utils.model_to_file(self.model, filename) | python | def make_snapshot(self):
"""Output a snapshot of the current model state, as a pickle of the
`Model` object in a file inside the output directory, with a name
determined by its iteration number.
"""
filename = join(self.output_dir, '{:010d}.pkl'.format(self.model.i))
output_utils.model_to_file(self.model, filename) | [
"def",
"make_snapshot",
"(",
"self",
")",
":",
"filename",
"=",
"join",
"(",
"self",
".",
"output_dir",
",",
"'{:010d}.pkl'",
".",
"format",
"(",
"self",
".",
"model",
".",
"i",
")",
")",
"output_utils",
".",
"model_to_file",
"(",
"self",
".",
"model",
",",
"filename",
")"
] | Output a snapshot of the current model state, as a pickle of the
`Model` object in a file inside the output directory, with a name
determined by its iteration number. | [
"Output",
"a",
"snapshot",
"of",
"the",
"current",
"model",
"state",
"as",
"a",
"pickle",
"of",
"the",
"Model",
"object",
"in",
"a",
"file",
"inside",
"the",
"output",
"directory",
"with",
"a",
"name",
"determined",
"by",
"its",
"iteration",
"number",
"."
] | b2feb45d6129d749088c70b3e9290af7ca7c7d33 | https://github.com/eddiejessup/agaro/blob/b2feb45d6129d749088c70b3e9290af7ca7c7d33/agaro/runner.py#L151-L157 |
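Zero-padding the iteration number to a fixed width keeps snapshots sorted lexicographically as well as numerically; a sketch using plain pickle in place of the package's `model_to_file` helper:

import os
import pickle

def snapshot(model, output_dir):
    # 10 digits of zero padding: '0000000050.pkl' sorts after '0000000005.pkl'
    filename = os.path.join(output_dir, '{:010d}.pkl'.format(model.i))
    with open(filename, 'wb') as f:
        pickle.dump(model, f)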
251,749 | qzmfranklin/easyshell | easyshell/debugging_shell.py | DebuggingShell.parse_line | def parse_line(self, line):
"""Parser for the debugging shell.
Treat everything after the first token as one literal entity. Whitespace
characters between the first token and the next first non-whitespace
character are preserved.
For example, ' foo dicj didiw ' is parsed as
( 'foo', ' dicj didiw ' )
Returns:
A tuple (cmd, args), where the args is a list that consists of one
and only one string containing everything after the cmd as is.
"""
line = line.lstrip()
toks = shlex.split(line)
cmd = toks[0]
arg = line[len(cmd):]
return cmd, [ arg, ] | python | def parse_line(self, line):
"""Parser for the debugging shell.
Treat everything after the first token as one literal entity. Whitespace
characters between the first token and the next first non-whitespace
character are preserved.
For example, ' foo dicj didiw ' is parsed as
( 'foo', ' dicj didiw ' )
Returns:
A tuple (cmd, args), where the args is a list that consists of one
and only one string containing everything after the cmd as is.
"""
line = line.lstrip()
toks = shlex.split(line)
cmd = toks[0]
arg = line[len(cmd):]
return cmd, [ arg, ] | [
"def",
"parse_line",
"(",
"self",
",",
"line",
")",
":",
"line",
"=",
"line",
".",
"lstrip",
"(",
")",
"toks",
"=",
"shlex",
".",
"split",
"(",
"line",
")",
"cmd",
"=",
"toks",
"[",
"0",
"]",
"arg",
"=",
"line",
"[",
"len",
"(",
"cmd",
")",
":",
"]",
"return",
"cmd",
",",
"[",
"arg",
",",
"]"
] | Parser for the debugging shell.
Treat everything after the first token as one literal entity. Whitespace
characters between the first token and the next first non-whitespace
character are preserved.
For example, ' foo dicj didiw ' is parsed as
( 'foo', ' dicj didiw ' )
Returns:
A tuple (cmd, args), where the args is a list that consists of one
and only one string containing everything after the cmd as is. | [
"Parser",
"for",
"the",
"debugging",
"shell",
"."
] | 00c2e90e7767d32e7e127fc8c6875845aa308295 | https://github.com/qzmfranklin/easyshell/blob/00c2e90e7767d32e7e127fc8c6875845aa308295/easyshell/debugging_shell.py#L75-L93 |
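A doctest-style trace of the parse described above, showing that whitespace after the command survives intact:

>>> import shlex
>>> line = '  foo  dicj didiw '.lstrip()
>>> cmd = shlex.split(line)[0]
>>> (cmd, [line[len(cmd):]])
('foo', ['  dicj didiw '])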
251,750 | yv/pathconfig | py_src/pathconfig/factory.py | Factory.bind | def bind(self, **kwargs):
'''
creates a copy of the object without the
cached results and with the given keyword
arguments as properties.
'''
d = dict(self.__dict__)
for k in list(d.keys()):
if k[0] == '_':
del d[k]
elif k.startswith('obj_'):
d[k] = d[k].bind(**kwargs)
d.update(kwargs)
return self.__class__(**d) | python | def bind(self, **kwargs):
'''
creates a copy of the object without the
cached results and with the given keyword
arguments as properties.
'''
d = dict(self.__dict__)
for k in list(d.keys()):
if k[0] == '_':
del d[k]
elif k.startswith('obj_'):
d[k] = d[k].bind(**kwargs)
d.update(kwargs)
return self.__class__(**d) | [
"def",
"bind",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"d",
"=",
"dict",
"(",
"self",
".",
"__dict__",
")",
"for",
"k",
"in",
"d",
".",
"keys",
"(",
")",
":",
"if",
"k",
"[",
"0",
"]",
"==",
"'_'",
":",
"del",
"d",
"[",
"k",
"]",
"elif",
"k",
".",
"startswith",
"(",
"'obj_'",
")",
":",
"d",
"[",
"k",
"]",
"=",
"d",
"[",
"k",
"]",
".",
"bind",
"(",
"*",
"*",
"kwargs",
")",
"d",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"__class__",
"(",
"*",
"*",
"d",
")"
] | creates a copy of the object without the
cached results and with the given keyword
arguments as properties. | [
"creates",
"a",
"copy",
"of",
"the",
"object",
"without",
"the",
"cached",
"results",
"and",
"with",
"the",
"given",
"keyword",
"arguments",
"as",
"properties",
"."
] | ae13901773b8465061e2aa93b2a53fd436ab6c69 | https://github.com/yv/pathconfig/blob/ae13901773b8465061e2aa93b2a53fd436ab6c69/py_src/pathconfig/factory.py#L74-L87 |
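The copy-filter-rebuild pattern in `bind` generalizes; a sketch under the same assumption the class makes, namely that `__init__` accepts the instance's properties back as keyword arguments (the nested `obj_` rebinding is omitted here):

def rebind(obj, **kwargs):
    # copy the instance dict, drop cached results (underscore keys),
    # overlay the new properties, and build a fresh instance
    d = {k: v for k, v in obj.__dict__.items() if not k.startswith('_')}
    d.update(kwargs)
    return obj.__class__(**d)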
251,751 | yv/pathconfig | py_src/pathconfig/factory.py | Factory.get | def get(self, name, *subkey):
"""
retrieves a data item, or loads it if it
is not present.
"""
if not subkey:
return self.get_atomic(name)
else:
return self.get_subkey(name, tuple(subkey)) | python | def get(self, name, *subkey):
"""
retrieves a data item, or loads it if it
is not present.
"""
if not subkey:
return self.get_atomic(name)
else:
return self.get_subkey(name, tuple(subkey)) | [
"def",
"get",
"(",
"self",
",",
"name",
",",
"*",
"subkey",
")",
":",
"if",
"subkey",
"==",
"[",
"]",
":",
"return",
"self",
".",
"get_atomic",
"(",
"name",
")",
"else",
":",
"return",
"self",
".",
"get_subkey",
"(",
"name",
",",
"tuple",
"(",
"subkey",
")",
")"
] | retrieves a data item, or loads it if it
is not present. | [
"retrieves",
"a",
"data",
"item",
"or",
"loads",
"it",
"if",
"it",
"is",
"not",
"present",
"."
] | ae13901773b8465061e2aa93b2a53fd436ab6c69 | https://github.com/yv/pathconfig/blob/ae13901773b8465061e2aa93b2a53fd436ab6c69/py_src/pathconfig/factory.py#L89-L97 |
251,752 | yv/pathconfig | py_src/pathconfig/factory.py | Factory.val | def val(self, name):
"""
retrieves a value, substituting actual
values for ConfigValue templates.
"""
v = getattr(self, name)
if hasattr(v, 'retrieve_value'):
v = v.retrieve_value(self.__dict__)
return v | python | def val(self, name):
"""
retrieves a value, substituting actual
values for ConfigValue templates.
"""
v = getattr(self, name)
if hasattr(v, 'retrieve_value'):
v = v.retrieve_value(self.__dict__)
return v | [
"def",
"val",
"(",
"self",
",",
"name",
")",
":",
"v",
"=",
"getattr",
"(",
"self",
",",
"name",
")",
"if",
"hasattr",
"(",
"v",
",",
"'retrieve_value'",
")",
":",
"v",
"=",
"v",
".",
"retrieve_value",
"(",
"self",
".",
"__dict__",
")",
"return",
"v"
] | retrieves a value, substituting actual
values for ConfigValue templates. | [
"retrieves",
"a",
"value",
"substituting",
"actual",
"values",
"for",
"ConfigValue",
"templates",
"."
] | ae13901773b8465061e2aa93b2a53fd436ab6c69 | https://github.com/yv/pathconfig/blob/ae13901773b8465061e2aa93b2a53fd436ab6c69/py_src/pathconfig/factory.py#L99-L107 |
251,753 | lsst-sqre/zenodio | zenodio/harvest.py | harvest_collection | def harvest_collection(community_name):
"""Harvest a Zenodo community's record metadata.
Examples
--------
You can harvest record metadata for a Zenodo community via its identifier
name. For example, the identifier for LSST Data Management's Zenodo
collection is ``'lsst-dm'``:
>>> import zenodio.harvest import harvest_collection
>>> collection = harvest_collection('lsst-dm')
``collection`` is a :class:`~zenodio.harvest.Datacite3Collection` instance.
Use its :meth:`~zenodio.harvest.Datacite3Collection.records` method to
generate :class:`~zenodio.harvest.Datacite3Record` objects for individual
records in the Zenodo collection.
Parameters
----------
community_name : str
Zenodo community identifier.
Returns
-------
collection : :class:`zenodio.harvest.Datacite3Collection`
The :class:`~zenodio.harvest.Datacite3Collection` instance with record
metadata downloaded from Zenodo.
"""
url = zenodo_harvest_url(community_name)
r = requests.get(url)
r.raise_for_status()
xml_content = r.content
return Datacite3Collection.from_collection_xml(xml_content) | python | def harvest_collection(community_name):
"""Harvest a Zenodo community's record metadata.
Examples
--------
You can harvest record metadata for a Zenodo community via its identifier
name. For example, the identifier for LSST Data Management's Zenodo
collection is ``'lsst-dm'``:
>>> import zenodio.harvest import harvest_collection
>>> collection = harvest_collection('lsst-dm')
``collection`` is a :class:`~zenodio.harvest.Datacite3Collection` instance.
Use its :meth:`~zenodio.harvest.Datacite3Collection.records` method to
generate :class:`~zenodio.harvest.Datacite3Record` objects for individual
records in the Zenodo collection.
Parameters
----------
community_name : str
Zenodo community identifier.
Returns
-------
collection : :class:`zenodio.harvest.Datacite3Collection`
The :class:`~zenodio.harvest.Datacite3Collection` instance with record
metadata downloaded from Zenodo.
"""
url = zenodo_harvest_url(community_name)
r = requests.get(url)
r.raise_for_status()
xml_content = r.content
return Datacite3Collection.from_collection_xml(xml_content) | [
"def",
"harvest_collection",
"(",
"community_name",
")",
":",
"url",
"=",
"zenodo_harvest_url",
"(",
"community_name",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"r",
".",
"status_code",
"xml_content",
"=",
"r",
".",
"content",
"return",
"Datacite3Collection",
".",
"from_collection_xml",
"(",
"xml_content",
")"
] | Harvest a Zenodo community's record metadata.
Examples
--------
You can harvest record metadata for a Zenodo community via its identifier
name. For example, the identifier for LSST Data Management's Zenodo
collection is ``'lsst-dm'``:
>>> import zenodio.harvest import harvest_collection
>>> collection = harvest_collection('lsst-dm')
``collection`` is a :class:`~zenodio.harvest.Datacite3Collection` instance.
Use its :meth:`~zenodio.harvest.Datacite3Collection.records` method to
generate :class:`~zenodio.harvest.Datacite3Record` objects for individual
records in the Zenodo collection.
Parameters
----------
community_name : str
Zenodo community identifier.
Returns
-------
collection : :class:`zenodio.harvest.Datacite3Collection`
The :class:`~zenodio.harvest.Datacite3Collection` instance with record
metadata downloaded from Zenodo. | [
"Harvest",
"a",
"Zenodo",
"community",
"s",
"record",
"metadata",
"."
] | 24283e84bee5714450e4f206ec024c4d32f2e761 | https://github.com/lsst-sqre/zenodio/blob/24283e84bee5714450e4f206ec024c4d32f2e761/zenodio/harvest.py#L28-L61 |
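The fetch itself is a plain HTTP GET; the same flow as a standalone sketch with an explicit error check (`requests.get` and `raise_for_status` are standard requests API):

import requests

def fetch_oai_xml(url):
    r = requests.get(url)
    r.raise_for_status()   # surface HTTP errors instead of parsing an error page
    return r.content       # raw OAI-PMH XML bytes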
251,754 | lsst-sqre/zenodio | zenodio/harvest.py | zenodo_harvest_url | def zenodo_harvest_url(community_name, format='oai_datacite3'):
"""Build a URL for the Zenodo Community's metadata.
Parameters
----------
community_name : str
Zenodo community identifier.
format : str
OAI-PMH metadata specification name. See https://zenodo.org/dev.
Currently only ``oai_datacite3`` is supported.
Returns
-------
url : str
OAI-PMH metadata URL.
"""
template = 'http://zenodo.org/oai2d?verb=ListRecords&' \
'metadataPrefix={metadata_format}&set=user-{community}'
return template.format(metadata_format=format,
community=community_name) | python | def zenodo_harvest_url(community_name, format='oai_datacite3'):
"""Build a URL for the Zenodo Community's metadata.
Parameters
----------
community_name : str
Zenodo community identifier.
format : str
OAI-PMH metadata specification name. See https://zenodo.org/dev.
Currently only ``oai_datacite3`` is supported.
Returns
-------
url : str
OAI-PMH metadata URL.
"""
template = 'http://zenodo.org/oai2d?verb=ListRecords&' \
'metadataPrefix={metadata_format}&set=user-{community}'
return template.format(metadata_format=format,
community=community_name) | [
"def",
"zenodo_harvest_url",
"(",
"community_name",
",",
"format",
"=",
"'oai_datacite3'",
")",
":",
"template",
"=",
"'http://zenodo.org/oai2d?verb=ListRecords&'",
"'metadataPrefix={metadata_format}&set=user-{community}'",
"return",
"template",
".",
"format",
"(",
"metadata_format",
"=",
"format",
",",
"community",
"=",
"community_name",
")"
] | Build a URL for the Zenodo Community's metadata.
Parameters
----------
community_name : str
Zenodo community identifier.
format : str
OAI-PMH metadata specification name. See https://zenodo.org/dev.
Currently only ``oai_datacite3`` is supported.
Returns
-------
url : str
OAI-PMH metadata URL. | [
"Build",
"a",
"URL",
"for",
"the",
"Zenodo",
"Community",
"s",
"metadata",
"."
] | 24283e84bee5714450e4f206ec024c4d32f2e761 | https://github.com/lsst-sqre/zenodio/blob/24283e84bee5714450e4f206ec024c4d32f2e761/zenodio/harvest.py#L64-L83 |
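For reference, the template above expands like this for the community used elsewhere in these records:

>>> zenodo_harvest_url('lsst-dm')
'http://zenodo.org/oai2d?verb=ListRecords&metadataPrefix=oai_datacite3&set=user-lsst-dm'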
251,755 | lsst-sqre/zenodio | zenodio/harvest.py | _pluralize | def _pluralize(value, item_key):
""""Force the value of a datacite3 key to be a list.
>>> _pluralize(xml_input['authors'], 'author')
['Sick, Jonathan', 'Economou, Frossie']
Background
----------
When `xmltodict` processes metadata, it turns XML tags into new key-value
pairs whenever possible, even if the value should semantically be treated
as a `list`.
For example
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
</authors
Would be rendered by `xmltodict` as::
{'authors': {'author': 'Sick, Jonathan'}}
While
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
<author>Economou, Frossie</author>
</authors
is rendered by `xmltodict` as::
{'authors': [{'author': ['Sick, Jonathan', 'Economou, Frossie']}}
This function ensures that values are *always* lists so that they can be
treated uniformly.
Parameters
----------
value : obj
The value of a key from datacite metadata extracted by `xmltodict`.
For example, `xmldict['authors']`.
item_key : str
Name of the tag for each item; for example, with the `'authors'` key
the item key is `'author'`.
Returns
-------
item_values : list
List of values of all items.
"""
v = value[item_key]
if not isinstance(v, list):
# Force a singular value to be a list
return [v]
else:
return v | python | def _pluralize(value, item_key):
""""Force the value of a datacite3 key to be a list.
>>> _pluralize(xml_input['authors'], 'author')
['Sick, Jonathan', 'Economou, Frossie']
Background
----------
When `xmltodict` processes metadata, it turns XML tags into new key-value
pairs whenever possible, even if the value should semantically be treated
as a `list`.
For example
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
</authors
Would be rendered by `xmltodict` as::
{'authors': {'author': 'Sick, Jonathan'}}
While
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
<author>Economou, Frossie</author>
</authors
is rendered by `xmltodict` as::
{'authors': [{'author': ['Sick, Jonathan', 'Economou, Frossie']}}
This function ensures that values are *always* lists so that they can be
treated uniformly.
Parameters
----------
value : obj
The value of a key from datacite metadata extracted by `xmltodict`.
For example, `xmldict['authors']`.
item_key : str
Name of the tag for each item; for example, with the `'authors'` key
the item key is `'author'`.
Returns
-------
item_values : list
List of values of all items.
"""
v = value[item_key]
if not isinstance(v, list):
# Force a singular value to be a list
return [v]
else:
return v | [
"def",
"_pluralize",
"(",
"value",
",",
"item_key",
")",
":",
"v",
"=",
"value",
"[",
"item_key",
"]",
"if",
"not",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"# Force a singular value to be a list",
"return",
"[",
"v",
"]",
"else",
":",
"return",
"v"
] | Force the value of a datacite3 key to be a list.
>>> _pluralize(xml_input['authors'], 'author')
['Sick, Jonathan', 'Economou, Frossie']
Background
----------
When `xmltodict` processes metadata, it turns XML tags into new key-value
pairs whenever possible, even if the value should semantically be treated
as a `list`.
For example
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
</authors
Would be rendered by `xmltodict` as::
{'authors': {'author': 'Sick, Jonathan'}}
While
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
<author>Economou, Frossie</author>
</authors
is rendered by `xmltodict` as::
{'authors': [{'author': ['Sick, Jonathan', 'Economou, Frossie']}}
This function ensures that values are *always* lists so that they can be
treated uniformly.
Parameters
----------
value : obj
The value of a key from datacite metadata extracted by `xmltodict`.
For example, `xmldict['authors']`.
item_key : str
Name of the tag for each item; for example, with the `'authors'` key
the item key is `'author'`.
Returns
-------
item_values : list
List of values of all items. | [
"Force",
"the",
"value",
"of",
"a",
"datacite3",
"key",
"to",
"be",
"a",
"list",
"."
] | 24283e84bee5714450e4f206ec024c4d32f2e761 | https://github.com/lsst-sqre/zenodio/blob/24283e84bee5714450e4f206ec024c4d32f2e761/zenodio/harvest.py#L258-L317 |
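The singular-versus-list asymmetry is easy to reproduce directly; a sketch against `xmltodict.parse`, the library's standard entry point:

import xmltodict

one = xmltodict.parse('<authors><author>Sick, Jonathan</author></authors>')
two = xmltodict.parse('<authors>'
                      '<author>Sick, Jonathan</author>'
                      '<author>Economou, Frossie</author>'
                      '</authors>')
print(type(one['authors']['author']).__name__)   # str  - a single tag collapses
print(type(two['authors']['author']).__name__)   # list - repeated tags group up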
251,756 | lsst-sqre/zenodio | zenodio/harvest.py | Author.from_xmldict | def from_xmldict(cls, xml_dict):
"""Create an `Author` from a datacite3 metadata converted by
`xmltodict`.
Parameters
----------
xml_dict : :class:`collections.OrderedDict`
A `dict`-like object mapping XML content for a single record (i.e.,
the contents of the ``record`` tag in OAI-PMH XML). This dict is
typically generated from :mod:`xmltodict`.
"""
name = xml_dict['creatorName']
kwargs = {}
if 'affiliation' in xml_dict:
kwargs['affiliation'] = xml_dict['affiliation']
return cls(name, **kwargs) | python | def from_xmldict(cls, xml_dict):
"""Create an `Author` from a datacite3 metadata converted by
`xmltodict`.
Parameters
----------
xml_dict : :class:`collections.OrderedDict`
A `dict`-like object mapping XML content for a single record (i.e.,
the contents of the ``record`` tag in OAI-PMH XML). This dict is
typically generated from :mod:`xmltodict`.
"""
name = xml_dict['creatorName']
kwargs = {}
if 'affiliation' in xml_dict:
kwargs['affiliation'] = xml_dict['affiliation']
return cls(name, **kwargs) | [
"def",
"from_xmldict",
"(",
"cls",
",",
"xml_dict",
")",
":",
"name",
"=",
"xml_dict",
"[",
"'creatorName'",
"]",
"kwargs",
"=",
"{",
"}",
"if",
"'affiliation'",
"in",
"xml_dict",
":",
"kwargs",
"[",
"'affiliation'",
"]",
"=",
"xml_dict",
"[",
"'affiliation'",
"]",
"return",
"cls",
"(",
"name",
",",
"*",
"*",
"kwargs",
")"
] | Create an `Author` from datacite3 metadata converted by
`xmltodict`.
Parameters
----------
xml_dict : :class:`collections.OrderedDict`
A `dict`-like object mapping XML content for a single record (i.e.,
the contents of the ``record`` tag in OAI-PMH XML). This dict is
typically generated from :mod:`xmltodict`. | [
"Create",
"an",
"Author",
"from",
"a",
"datacite3",
"metadata",
"converted",
"by",
"xmltodict",
"."
] | 24283e84bee5714450e4f206ec024c4d32f2e761 | https://github.com/lsst-sqre/zenodio/blob/24283e84bee5714450e4f206ec024c4d32f2e761/zenodio/harvest.py#L228-L245 |
251,757 | minhhoit/yacms | yacms/core/managers.py | PublishedManager.published | def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from yacms.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED)) | python | def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from yacms.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED)) | [
"def",
"published",
"(",
"self",
",",
"for_user",
"=",
"None",
")",
":",
"from",
"yacms",
".",
"core",
".",
"models",
"import",
"CONTENT_STATUS_PUBLISHED",
"if",
"for_user",
"is",
"not",
"None",
"and",
"for_user",
".",
"is_staff",
":",
"return",
"self",
".",
"all",
"(",
")",
"return",
"self",
".",
"filter",
"(",
"Q",
"(",
"publish_date__lte",
"=",
"now",
"(",
")",
")",
"|",
"Q",
"(",
"publish_date__isnull",
"=",
"True",
")",
",",
"Q",
"(",
"expiry_date__gte",
"=",
"now",
"(",
")",
")",
"|",
"Q",
"(",
"expiry_date__isnull",
"=",
"True",
")",
",",
"Q",
"(",
"status",
"=",
"CONTENT_STATUS_PUBLISHED",
")",
")"
] | For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified. | [
"For",
"non",
"-",
"staff",
"users",
"return",
"items",
"with",
"a",
"published",
"status",
"and",
"whose",
"publish",
"and",
"expiry",
"dates",
"fall",
"before",
"and",
"after",
"the",
"current",
"date",
"when",
"specified",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L58-L70 |
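The filter above ANDs three conditions, two of which are OR pairs built from Q objects; the same shape on a placeholder model (the status value is an assumption — CONTENT_STATUS_PUBLISHED is an integer constant in this codebase):

from django.db.models import Q
from django.utils.timezone import now

# Article is a hypothetical model with publish_date/expiry_date/status fields
published = Article.objects.filter(
    Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
    Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
    status=2,  # assumed value of CONTENT_STATUS_PUBLISHED
)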
251,758 | minhhoit/yacms | yacms/core/managers.py | SearchableQuerySet.search | def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
"""
# ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
# ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
.replace('+"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct() | python | def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
"""
# ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
# ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
.replace('+"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct() | [
"def",
"search",
"(",
"self",
",",
"query",
",",
"search_fields",
"=",
"None",
")",
":",
"# ### DETERMINE FIELDS TO SEARCH ###",
"# Use search_fields arg if given, otherwise use search_fields",
"# initially configured by the manager class.",
"if",
"search_fields",
":",
"self",
".",
"_search_fields",
"=",
"search_fields_to_dict",
"(",
"search_fields",
")",
"if",
"not",
"self",
".",
"_search_fields",
":",
"return",
"self",
".",
"none",
"(",
")",
"# ### BUILD LIST OF TERMS TO SEARCH FOR ###",
"# Remove extra spaces, put modifiers inside quoted terms.",
"terms",
"=",
"\" \"",
".",
"join",
"(",
"query",
".",
"split",
"(",
")",
")",
".",
"replace",
"(",
"\"+ \"",
",",
"\"+\"",
")",
".",
"replace",
"(",
"'+\"'",
",",
"'\"+'",
")",
".",
"replace",
"(",
"\"- \"",
",",
"\"-\"",
")",
".",
"replace",
"(",
"'-\"'",
",",
"'\"-'",
")",
".",
"split",
"(",
"'\"'",
")",
"# Strip punctuation other than modifiers from terms and create",
"# terms list, first from quoted terms and then remaining words.",
"terms",
"=",
"[",
"(",
"\"\"",
"if",
"t",
"[",
"0",
":",
"1",
"]",
"not",
"in",
"\"+-\"",
"else",
"t",
"[",
"0",
":",
"1",
"]",
")",
"+",
"t",
".",
"strip",
"(",
"punctuation",
")",
"for",
"t",
"in",
"terms",
"[",
"1",
":",
":",
"2",
"]",
"+",
"\"\"",
".",
"join",
"(",
"terms",
"[",
":",
":",
"2",
"]",
")",
".",
"split",
"(",
")",
"]",
"# Remove stop words from terms that aren't quoted or use",
"# modifiers, since words with these are an explicit part of",
"# the search query. If doing so ends up with an empty term",
"# list, then keep the stop words.",
"terms_no_stopwords",
"=",
"[",
"t",
"for",
"t",
"in",
"terms",
"if",
"t",
".",
"lower",
"(",
")",
"not",
"in",
"settings",
".",
"STOP_WORDS",
"]",
"get_positive_terms",
"=",
"lambda",
"terms",
":",
"[",
"t",
".",
"lower",
"(",
")",
".",
"strip",
"(",
"punctuation",
")",
"for",
"t",
"in",
"terms",
"if",
"t",
"[",
"0",
":",
"1",
"]",
"!=",
"\"-\"",
"]",
"positive_terms",
"=",
"get_positive_terms",
"(",
"terms_no_stopwords",
")",
"if",
"positive_terms",
":",
"terms",
"=",
"terms_no_stopwords",
"else",
":",
"positive_terms",
"=",
"get_positive_terms",
"(",
"terms",
")",
"# Append positive terms (those without the negative modifier)",
"# to the internal list for sorting when results are iterated.",
"if",
"not",
"positive_terms",
":",
"return",
"self",
".",
"none",
"(",
")",
"else",
":",
"self",
".",
"_search_terms",
".",
"update",
"(",
"positive_terms",
")",
"# ### BUILD QUERYSET FILTER ###",
"# Create the queryset combining each set of terms.",
"excluded",
"=",
"[",
"reduce",
"(",
"iand",
",",
"[",
"~",
"Q",
"(",
"*",
"*",
"{",
"\"%s__icontains\"",
"%",
"f",
":",
"t",
"[",
"1",
":",
"]",
"}",
")",
"for",
"f",
"in",
"self",
".",
"_search_fields",
".",
"keys",
"(",
")",
"]",
")",
"for",
"t",
"in",
"terms",
"if",
"t",
"[",
"0",
":",
"1",
"]",
"==",
"\"-\"",
"]",
"required",
"=",
"[",
"reduce",
"(",
"ior",
",",
"[",
"Q",
"(",
"*",
"*",
"{",
"\"%s__icontains\"",
"%",
"f",
":",
"t",
"[",
"1",
":",
"]",
"}",
")",
"for",
"f",
"in",
"self",
".",
"_search_fields",
".",
"keys",
"(",
")",
"]",
")",
"for",
"t",
"in",
"terms",
"if",
"t",
"[",
"0",
":",
"1",
"]",
"==",
"\"+\"",
"]",
"optional",
"=",
"[",
"reduce",
"(",
"ior",
",",
"[",
"Q",
"(",
"*",
"*",
"{",
"\"%s__icontains\"",
"%",
"f",
":",
"t",
"}",
")",
"for",
"f",
"in",
"self",
".",
"_search_fields",
".",
"keys",
"(",
")",
"]",
")",
"for",
"t",
"in",
"terms",
"if",
"t",
"[",
"0",
":",
"1",
"]",
"not",
"in",
"\"+-\"",
"]",
"queryset",
"=",
"self",
"if",
"excluded",
":",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"reduce",
"(",
"iand",
",",
"excluded",
")",
")",
"if",
"required",
":",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"reduce",
"(",
"iand",
",",
"required",
")",
")",
"# Optional terms aren't relevant to the filter if there are",
"# terms that are explicitly required.",
"elif",
"optional",
":",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"reduce",
"(",
"ior",
",",
"optional",
")",
")",
"return",
"queryset",
".",
"distinct",
"(",
")"
] | Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude. | [
"Build",
"a",
"queryset",
"matching",
"words",
"in",
"the",
"given",
"search",
"query",
"treating",
"quoted",
"terms",
"as",
"exact",
"phrases",
"and",
"taking",
"into",
"account",
"+",
"and",
"-",
"symbols",
"as",
"modifiers",
"controlling",
"which",
"terms",
"to",
"require",
"and",
"exclude",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L105-L172 |
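The quoted-term and modifier preprocessing is the subtle part of the method above; a doctest-style trace of just that step on a sample query:

>>> from string import punctuation
>>> query = 'apple +"red fruit" -worm'
>>> terms = " ".join(query.split()).replace("+ ", "+").replace('+"', '"+')
>>> terms = terms.replace("- ", "-").replace('-"', '"-').split('"')
>>> terms
['apple ', '+red fruit', ' -worm']
>>> [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
...  for t in terms[1::2] + "".join(terms[::2]).split()]
['+red fruit', 'apple', '-worm']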
251,759 | minhhoit/yacms | yacms/core/managers.py | SearchableQuerySet._clone | def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs) | python | def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs) | [
"def",
"_clone",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"attr",
"in",
"(",
"\"_search_terms\"",
",",
"\"_search_fields\"",
",",
"\"_search_ordered\"",
")",
":",
"kwargs",
"[",
"attr",
"]",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"return",
"super",
"(",
"SearchableQuerySet",
",",
"self",
")",
".",
"_clone",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Ensure attributes are copied to subsequent queries. | [
"Ensure",
"attributes",
"are",
"copied",
"to",
"subsequent",
"queries",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L174-L180 |
251,760 | minhhoit/yacms | yacms/core/managers.py | SearchableQuerySet.order_by | def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names) | python | def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names) | [
"def",
"order_by",
"(",
"self",
",",
"*",
"field_names",
")",
":",
"if",
"not",
"self",
".",
"_search_ordered",
":",
"self",
".",
"_search_ordered",
"=",
"len",
"(",
"self",
".",
"_search_terms",
")",
">",
"0",
"return",
"super",
"(",
"SearchableQuerySet",
",",
"self",
")",
".",
"order_by",
"(",
"*",
"field_names",
")"
] | Mark the filter as being ordered if search has occurred. | [
"Mark",
"the",
"filter",
"as",
"being",
"ordered",
"if",
"search",
"has",
"occurred",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L182-L188 |
251,761 | minhhoit/yacms | yacms/core/managers.py | SearchableQuerySet.iterator | def iterator(self):
"""
If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
sorted by the number of occurrences of terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships.
"""
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
results[i].result_count = count
return iter(results)
return results | python | def iterator(self):
"""
If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
sorted by the number of occurrences of terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships.
"""
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
results[i].result_count = count
return iter(results)
return results | [
"def",
"iterator",
"(",
"self",
")",
":",
"results",
"=",
"super",
"(",
"SearchableQuerySet",
",",
"self",
")",
".",
"iterator",
"(",
")",
"if",
"self",
".",
"_search_terms",
"and",
"not",
"self",
".",
"_search_ordered",
":",
"results",
"=",
"list",
"(",
"results",
")",
"for",
"i",
",",
"result",
"in",
"enumerate",
"(",
"results",
")",
":",
"count",
"=",
"0",
"related_weights",
"=",
"[",
"]",
"for",
"(",
"field",
",",
"weight",
")",
"in",
"self",
".",
"_search_fields",
".",
"items",
"(",
")",
":",
"if",
"\"__\"",
"in",
"field",
":",
"related_weights",
".",
"append",
"(",
"weight",
")",
"for",
"term",
"in",
"self",
".",
"_search_terms",
":",
"field_value",
"=",
"getattr",
"(",
"result",
",",
"field",
",",
"None",
")",
"if",
"field_value",
":",
"count",
"+=",
"field_value",
".",
"lower",
"(",
")",
".",
"count",
"(",
"term",
")",
"*",
"weight",
"if",
"not",
"count",
"and",
"related_weights",
":",
"count",
"=",
"int",
"(",
"sum",
"(",
"related_weights",
")",
"/",
"len",
"(",
"related_weights",
")",
")",
"results",
"[",
"i",
"]",
".",
"result_count",
"=",
"count",
"return",
"iter",
"(",
"results",
")",
"return",
"results"
] | If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
sorted by the number of occurrences of terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships. | [
"If",
"search",
"has",
"occurred",
"and",
"no",
"ordering",
"has",
"occurred",
"decorate",
"each",
"result",
"with",
"the",
"number",
"of",
"search",
"terms",
"so",
"that",
"it",
"can",
"be",
"sorted",
"by",
"the",
"number",
"of",
"occurrence",
"of",
"terms",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L190-L221 |
251,762 | minhhoit/yacms | yacms/core/managers.py | SearchableManager.get_search_fields | def get_search_fields(self):
"""
Returns the search field names mapped to weights as a dict.
Used in ``get_queryset`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
``SearchableManager.__init__``, which then get stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
built up from the inheritance chain.
Finally, if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances.
"""
search_fields = self._search_fields.copy()
if not search_fields:
for cls in reversed(self.model.__mro__):
super_fields = getattr(cls, "search_fields", {})
search_fields.update(search_fields_to_dict(super_fields))
if not search_fields:
search_fields = []
for f in self.model._meta.fields:
if isinstance(f, (CharField, TextField)):
search_fields.append(f.name)
search_fields = search_fields_to_dict(search_fields)
return search_fields | python | def get_search_fields(self):
"""
Returns the search field names mapped to weights as a dict.
Used in ``get_queryset`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
``SearchableManager.__init__``, which then get stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
built up from the inheritance chain.
Finally, if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances.
"""
search_fields = self._search_fields.copy()
if not search_fields:
for cls in reversed(self.model.__mro__):
super_fields = getattr(cls, "search_fields", {})
search_fields.update(search_fields_to_dict(super_fields))
if not search_fields:
search_fields = []
for f in self.model._meta.fields:
if isinstance(f, (CharField, TextField)):
search_fields.append(f.name)
search_fields = search_fields_to_dict(search_fields)
return search_fields | [
"def",
"get_search_fields",
"(",
"self",
")",
":",
"search_fields",
"=",
"self",
".",
"_search_fields",
".",
"copy",
"(",
")",
"if",
"not",
"search_fields",
":",
"for",
"cls",
"in",
"reversed",
"(",
"self",
".",
"model",
".",
"__mro__",
")",
":",
"super_fields",
"=",
"getattr",
"(",
"cls",
",",
"\"search_fields\"",
",",
"{",
"}",
")",
"search_fields",
".",
"update",
"(",
"search_fields_to_dict",
"(",
"super_fields",
")",
")",
"if",
"not",
"search_fields",
":",
"search_fields",
"=",
"[",
"]",
"for",
"f",
"in",
"self",
".",
"model",
".",
"_meta",
".",
"fields",
":",
"if",
"isinstance",
"(",
"f",
",",
"(",
"CharField",
",",
"TextField",
")",
")",
":",
"search_fields",
".",
"append",
"(",
"f",
".",
"name",
")",
"search_fields",
"=",
"search_fields_to_dict",
"(",
"search_fields",
")",
"return",
"search_fields"
] | Returns the search field names mapped to weights as a dict.
Used in ``get_queryset`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
``SearchableManager.__init__``, which then get stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
built up from the inheritance chain.
Finally, if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances. | [
"Returns",
"the",
"search",
"field",
"names",
"mapped",
"to",
"weights",
"as",
"a",
"dict",
".",
"Used",
"in",
"get_queryset",
"below",
"to",
"tell",
"SearchableQuerySet",
"which",
"search",
"fields",
"to",
"use",
".",
"Also",
"used",
"by",
"DisplayableAdmin",
"to",
"populate",
"Django",
"admin",
"s",
"search_fields",
"attribute",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L236-L269 |
251,763 | minhhoit/yacms | yacms/core/managers.py | SearchableManager.contribute_to_class | def contribute_to_class(self, model, name):
"""
Newer versions of Django explicitly prevent managers being
accessed from abstract classes, which is behaviour the search
API has always relied on. Here we reinstate it.
"""
super(SearchableManager, self).contribute_to_class(model, name)
setattr(model, name, ManagerDescriptor(self)) | python | def contribute_to_class(self, model, name):
"""
Newer versions of Django explicitly prevent managers being
accessed from abstract classes, which is behaviour the search
API has always relied on. Here we reinstate it.
"""
super(SearchableManager, self).contribute_to_class(model, name)
setattr(model, name, ManagerDescriptor(self)) | [
"def",
"contribute_to_class",
"(",
"self",
",",
"model",
",",
"name",
")",
":",
"super",
"(",
"SearchableManager",
",",
"self",
")",
".",
"contribute_to_class",
"(",
"model",
",",
"name",
")",
"setattr",
"(",
"model",
",",
"name",
",",
"ManagerDescriptor",
"(",
"self",
")",
")"
] | Newer versions of Django explicitly prevent managers being
accessed from abstract classes, which is behaviour the search
API has always relied on. Here we reinstate it. | [
"Newer",
"versions",
"of",
"Django",
"explicitly",
"prevent",
"managers",
"being",
"accessed",
"from",
"abstract",
"classes",
"which",
"is",
"behaviour",
"the",
"search",
"API",
"has",
"always",
"relied",
"on",
".",
"Here",
"we",
"reinstate",
"it",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L275-L282 |
251,764 | minhhoit/yacms | yacms/core/managers.py | SearchableManager.search | def search(self, *args, **kwargs):
"""
Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract.
"""
if not settings.SEARCH_MODEL_CHOICES:
# No choices defined - build a list of leaf models (those
# without subclasses) that inherit from Displayable.
models = [m for m in apps.get_models()
if issubclass(m, self.model)]
parents = reduce(ior, [set(m._meta.get_parent_list())
for m in models])
models = [m for m in models if m not in parents]
elif getattr(self.model._meta, "abstract", False):
# When we're combining model subclasses for an abstract
# model (eg Displayable), we only want to use models that
# are represented by the ``SEARCH_MODEL_CHOICES`` setting.
# Now this setting won't contain an exact list of models
# we should use, since it can define superclass models such
# as ``Page``, so we check the parent class list of each
# model when determining whether a model falls within the
# ``SEARCH_MODEL_CHOICES`` setting.
search_choices = set()
models = set()
parents = set()
errors = []
for name in settings.SEARCH_MODEL_CHOICES:
try:
model = apps.get_model(*name.split(".", 1))
except LookupError:
errors.append(name)
else:
search_choices.add(model)
if errors:
raise ImproperlyConfigured("Could not load the model(s) "
"%s defined in the 'SEARCH_MODEL_CHOICES' setting."
% ", ".join(errors))
for model in apps.get_models():
# Model is actually a subclass of what we're
# searching (eg Displayable)
is_subclass = issubclass(model, self.model)
# Model satisfies the search choices list - either
# there are no search choices, model is directly in
# search choices, or its parent is.
this_parents = set(model._meta.get_parent_list())
in_choices = not search_choices or model in search_choices
in_choices = in_choices or this_parents & search_choices
if is_subclass and (in_choices or not search_choices):
# Add to models we'll search. Also maintain a parent
# set, used below for further refinement of models
# list to search.
models.add(model)
parents.update(this_parents)
# Strip out any models that are superclasses of models,
# specifically the Page model which will generally be the
# superclass for all custom content types, since if we
# query the Page model as well, we will get duplicate
# results.
models -= parents
else:
models = [self.model]
all_results = []
user = kwargs.pop("for_user", None)
for model in models:
try:
queryset = model.objects.published(for_user=user)
except AttributeError:
queryset = model.objects.get_queryset()
all_results.extend(queryset.search(*args, **kwargs))
return sorted(all_results, key=lambda r: r.result_count, reverse=True) | python | def search(self, *args, **kwargs):
"""
Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract.
"""
if not settings.SEARCH_MODEL_CHOICES:
# No choices defined - build a list of leaf models (those
# without subclasses) that inherit from Displayable.
models = [m for m in apps.get_models()
if issubclass(m, self.model)]
parents = reduce(ior, [set(m._meta.get_parent_list())
for m in models])
models = [m for m in models if m not in parents]
elif getattr(self.model._meta, "abstract", False):
# When we're combining model subclasses for an abstract
# model (eg Displayable), we only want to use models that
# are represented by the ``SEARCH_MODEL_CHOICES`` setting.
# Now this setting won't contain an exact list of models
# we should use, since it can define superclass models such
# as ``Page``, so we check the parent class list of each
# model when determining whether a model falls within the
# ``SEARCH_MODEL_CHOICES`` setting.
search_choices = set()
models = set()
parents = set()
errors = []
for name in settings.SEARCH_MODEL_CHOICES:
try:
model = apps.get_model(*name.split(".", 1))
except LookupError:
errors.append(name)
else:
search_choices.add(model)
if errors:
raise ImproperlyConfigured("Could not load the model(s) "
"%s defined in the 'SEARCH_MODEL_CHOICES' setting."
% ", ".join(errors))
for model in apps.get_models():
# Model is actually a subclass of what we're
# searching (eg Displayable)
is_subclass = issubclass(model, self.model)
# Model satisfies the search choices list - either
# there are no search choices, model is directly in
# search choices, or its parent is.
this_parents = set(model._meta.get_parent_list())
in_choices = not search_choices or model in search_choices
in_choices = in_choices or this_parents & search_choices
if is_subclass and (in_choices or not search_choices):
# Add to models we'll search. Also maintain a parent
# set, used below for further refinement of models
# list to search.
models.add(model)
parents.update(this_parents)
# Strip out any models that are superclasses of models,
# specifically the Page model which will generally be the
# superclass for all custom content types, since if we
# query the Page model as well, we will get duplicate
# results.
models -= parents
else:
models = [self.model]
all_results = []
user = kwargs.pop("for_user", None)
for model in models:
try:
queryset = model.objects.published(for_user=user)
except AttributeError:
queryset = model.objects.get_queryset()
all_results.extend(queryset.search(*args, **kwargs))
return sorted(all_results, key=lambda r: r.result_count, reverse=True) | [
"def",
"search",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"settings",
".",
"SEARCH_MODEL_CHOICES",
":",
"# No choices defined - build a list of leaf models (those",
"# without subclasses) that inherit from Displayable.",
"models",
"=",
"[",
"m",
"for",
"m",
"in",
"apps",
".",
"get_models",
"(",
")",
"if",
"issubclass",
"(",
"m",
",",
"self",
".",
"model",
")",
"]",
"parents",
"=",
"reduce",
"(",
"ior",
",",
"[",
"set",
"(",
"m",
".",
"_meta",
".",
"get_parent_list",
"(",
")",
")",
"for",
"m",
"in",
"models",
"]",
")",
"models",
"=",
"[",
"m",
"for",
"m",
"in",
"models",
"if",
"m",
"not",
"in",
"parents",
"]",
"elif",
"getattr",
"(",
"self",
".",
"model",
".",
"_meta",
",",
"\"abstract\"",
",",
"False",
")",
":",
"# When we're combining model subclasses for an abstract",
"# model (eg Displayable), we only want to use models that",
"# are represented by the ``SEARCH_MODEL_CHOICES`` setting.",
"# Now this setting won't contain an exact list of models",
"# we should use, since it can define superclass models such",
"# as ``Page``, so we check the parent class list of each",
"# model when determining whether a model falls within the",
"# ``SEARCH_MODEL_CHOICES`` setting.",
"search_choices",
"=",
"set",
"(",
")",
"models",
"=",
"set",
"(",
")",
"parents",
"=",
"set",
"(",
")",
"errors",
"=",
"[",
"]",
"for",
"name",
"in",
"settings",
".",
"SEARCH_MODEL_CHOICES",
":",
"try",
":",
"model",
"=",
"apps",
".",
"get_model",
"(",
"*",
"name",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
")",
"except",
"LookupError",
":",
"errors",
".",
"append",
"(",
"name",
")",
"else",
":",
"search_choices",
".",
"add",
"(",
"model",
")",
"if",
"errors",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"Could not load the model(s) \"",
"\"%s defined in the 'SEARCH_MODEL_CHOICES' setting.\"",
"%",
"\", \"",
".",
"join",
"(",
"errors",
")",
")",
"for",
"model",
"in",
"apps",
".",
"get_models",
"(",
")",
":",
"# Model is actually a subclasses of what we're",
"# searching (eg Displayabale)",
"is_subclass",
"=",
"issubclass",
"(",
"model",
",",
"self",
".",
"model",
")",
"# Model satisfies the search choices list - either",
"# there are no search choices, model is directly in",
"# search choices, or its parent is.",
"this_parents",
"=",
"set",
"(",
"model",
".",
"_meta",
".",
"get_parent_list",
"(",
")",
")",
"in_choices",
"=",
"not",
"search_choices",
"or",
"model",
"in",
"search_choices",
"in_choices",
"=",
"in_choices",
"or",
"this_parents",
"&",
"search_choices",
"if",
"is_subclass",
"and",
"(",
"in_choices",
"or",
"not",
"search_choices",
")",
":",
"# Add to models we'll seach. Also maintain a parent",
"# set, used below for further refinement of models",
"# list to search.",
"models",
".",
"add",
"(",
"model",
")",
"parents",
".",
"update",
"(",
"this_parents",
")",
"# Strip out any models that are superclasses of models,",
"# specifically the Page model which will generally be the",
"# superclass for all custom content types, since if we",
"# query the Page model as well, we will get duplicate",
"# results.",
"models",
"-=",
"parents",
"else",
":",
"models",
"=",
"[",
"self",
".",
"model",
"]",
"all_results",
"=",
"[",
"]",
"user",
"=",
"kwargs",
".",
"pop",
"(",
"\"for_user\"",
",",
"None",
")",
"for",
"model",
"in",
"models",
":",
"try",
":",
"queryset",
"=",
"model",
".",
"objects",
".",
"published",
"(",
"for_user",
"=",
"user",
")",
"except",
"AttributeError",
":",
"queryset",
"=",
"model",
".",
"objects",
".",
"get_queryset",
"(",
")",
"all_results",
".",
"extend",
"(",
"queryset",
".",
"search",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"sorted",
"(",
"all_results",
",",
"key",
"=",
"lambda",
"r",
":",
"r",
".",
"result_count",
",",
"reverse",
"=",
"True",
")"
] | Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract. | [
"Proxy",
"to",
"queryset",
"s",
"search",
"method",
"for",
"the",
"manager",
"s",
"model",
"and",
"any",
"models",
"that",
"subclass",
"from",
"this",
"manager",
"s",
"model",
"if",
"the",
"model",
"is",
"abstract",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L284-L355 |
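A minimal usage sketch for the search manager above — the setting value, model labels, and query string are illustrative, and a configured Django project with yacms installed is assumed:

# settings.py (illustrative)
SEARCH_MODEL_CHOICES = ("pages.Page", "blog.BlogPost")

# in a view -- import path follows the yacms layout
from yacms.core.models import Displayable

def search_view(request):
    # Searches every concrete subclass allowed by SEARCH_MODEL_CHOICES,
    # restricted to items visible to this user, and returns the combined
    # results sorted by result_count (best match first).
    results = Displayable.objects.search(request.GET.get("q", ""),
                                         for_user=request.user)
    return results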
251,765 | minhhoit/yacms | yacms/core/managers.py | DisplayableManager.url_map | def url_map(self, for_user=None, **kwargs):
"""
Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``yacms.core.sitemaps``.
"""
class Home:
title = _("Home")
home = Home()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in apps.get_models():
if issubclass(model, self.model):
for item in (model.objects.published(for_user=for_user)
.filter(**kwargs)
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items | python | def url_map(self, for_user=None, **kwargs):
"""
Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``yacms.core.sitemaps``.
"""
class Home:
title = _("Home")
home = Home()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in apps.get_models():
if issubclass(model, self.model):
for item in (model.objects.published(for_user=for_user)
.filter(**kwargs)
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items | [
"def",
"url_map",
"(",
"self",
",",
"for_user",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"class",
"Home",
":",
"title",
"=",
"_",
"(",
"\"Home\"",
")",
"home",
"=",
"Home",
"(",
")",
"setattr",
"(",
"home",
",",
"\"get_absolute_url\"",
",",
"home_slug",
")",
"items",
"=",
"{",
"home",
".",
"get_absolute_url",
"(",
")",
":",
"home",
"}",
"for",
"model",
"in",
"apps",
".",
"get_models",
"(",
")",
":",
"if",
"issubclass",
"(",
"model",
",",
"self",
".",
"model",
")",
":",
"for",
"item",
"in",
"(",
"model",
".",
"objects",
".",
"published",
"(",
"for_user",
"=",
"for_user",
")",
".",
"filter",
"(",
"*",
"*",
"kwargs",
")",
".",
"exclude",
"(",
"slug__startswith",
"=",
"\"http://\"",
")",
".",
"exclude",
"(",
"slug__startswith",
"=",
"\"https://\"",
")",
")",
":",
"items",
"[",
"item",
".",
"get_absolute_url",
"(",
")",
"]",
"=",
"item",
"return",
"items"
] | Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``yacms.core.sitemaps``. | [
"Returns",
"a",
"dictionary",
"of",
"urls",
"mapped",
"to",
"Displayable",
"subclass",
"instances",
"including",
"a",
"fake",
"homepage",
"instance",
"if",
"none",
"exists",
".",
"Used",
"in",
"yacms",
".",
"core",
".",
"sitemaps",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L390-L408 |
251,766 | vecnet/vecnet.simulation | vecnet/simulation/sim_model.py | get_name | def get_name(model_id):
"""
Get the name for a model.
:returns str: The model's name. If the id has no associated name, then "id = {ID} (no name)" is returned.
"""
name = _names.get(model_id)
if name is None:
name = 'id = %s (no name)' % str(model_id)
return name | python | def get_name(model_id):
"""
Get the name for a model.
:returns str: The model's name. If the id has no associated name, then "id = {ID} (no name)" is returned.
"""
name = _names.get(model_id)
if name is None:
name = 'id = %s (no name)' % str(model_id)
return name | [
"def",
"get_name",
"(",
"model_id",
")",
":",
"name",
"=",
"_names",
".",
"get",
"(",
"model_id",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"'id = %s (no name)'",
"%",
"str",
"(",
"model_id",
")",
"return",
"name"
] | Get the name for a model.
:returns str: The model's name. If the id has no associated name, then "id = {ID} (no name)" is returned. | [
"Get",
"the",
"name",
"for",
"a",
"model",
"."
] | 3a4b3df7b12418c6fa8a7d9cd49656a1c031fc0e | https://github.com/vecnet/vecnet.simulation/blob/3a4b3df7b12418c6fa8a7d9cd49656a1c031fc0e/vecnet/simulation/sim_model.py#L51-L60 |
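A quick behavioural sketch of the fallback above; the module-level _names mapping and its contents are illustrative:

_names = {1: "EMOD", 2: "OpenMalaria"}  # illustrative id-to-name table

print(get_name(1))   # -> EMOD
print(get_name(99))  # -> id = 99 (no name)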
251,767 | eisensheng/kaviar | kaviar/adapter.py | KvLoggerAdapter.define_logger_func | def define_logger_func(self, level, field_names, default=NO_DEFAULT,
filters=None, include_exc_info=False):
"""Define a new logger function that will log the given arguments
with the given predefined keys.
:param level:
The log level to use for each call.
:param field_names:
Set of predefined keys.
:param default:
A default value for each key.
:param filters:
Additional filters for treating given arguments.
:param include_exc_info:
Include a stack trace with the log. Useful for the ``ERROR``
log level.
:return:
A function that will log given values with the predefined
keys and the given log level.
"""
kv_formatter = KvFormatter(field_names, default, filters)
return lambda *a, **kw: self._log(level, kv_formatter(*a, **kw),
include_exc_info) | python | def define_logger_func(self, level, field_names, default=NO_DEFAULT,
filters=None, include_exc_info=False):
"""Define a new logger function that will log the given arguments
with the given predefined keys.
:param level:
The log level to use for each call.
:param field_names:
Set of predefined keys.
:param default:
A default value for each key.
:param filters:
Additional filters for treating given arguments.
:param include_exc_info:
Include a stack trace with the log. Useful for the ``ERROR``
log level.
:return:
A function that will log given values with the predefined
keys and the given log level.
"""
kv_formatter = KvFormatter(field_names, default, filters)
return lambda *a, **kw: self._log(level, kv_formatter(*a, **kw),
include_exc_info) | [
"def",
"define_logger_func",
"(",
"self",
",",
"level",
",",
"field_names",
",",
"default",
"=",
"NO_DEFAULT",
",",
"filters",
"=",
"None",
",",
"include_exc_info",
"=",
"False",
")",
":",
"kv_formatter",
"=",
"KvFormatter",
"(",
"field_names",
",",
"default",
",",
"filters",
")",
"return",
"lambda",
"*",
"a",
",",
"*",
"*",
"kw",
":",
"self",
".",
"_log",
"(",
"level",
",",
"kv_formatter",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
",",
"include_exc_info",
")"
] | Define a new logger function that will log the given arguments
with the given predefined keys.
:param level:
The log level to use for each call.
:param field_names:
Set of predefined keys.
:param default:
A default value for each key.
:param filters:
Additional filters for treating given arguments.
:param include_exc_info:
Include a stack trace with the log. Useful for the ``ERROR``
log level.
:return:
A function that will log given values with the predefined
keys and the given log level. | [
"Define",
"a",
"new",
"logger",
"function",
"that",
"will",
"log",
"the",
"given",
"arguments",
"with",
"the",
"given",
"predefined",
"keys",
"."
] | 77ab934a3dd7b1cfabc0ec96acc0b8ed26edcb3f | https://github.com/eisensheng/kaviar/blob/77ab934a3dd7b1cfabc0ec96acc0b8ed26edcb3f/kaviar/adapter.py#L71-L93 |
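A usage sketch for the factory above. The adapter construction is assumed to follow the stdlib logging.LoggerAdapter convention; field names and values are illustrative:

import logging

logging.basicConfig(level=logging.INFO)
adapter = KvLoggerAdapter(logging.getLogger(__name__), {})  # assumed ctor

# Bind the keys once, then log positionally with matching arguments.
log_request = adapter.define_logger_func(
    logging.INFO, ['method', 'path', 'status'])
log_request('GET', '/health', 200)
# emits something like: method='GET' path='/health' status=200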
251,768 | eisensheng/kaviar | kaviar/adapter.py | KvLoggerAdapter.log | def log(self, level, *args, **kwargs):
"""Delegate a log call to the underlying logger."""
return self._log_kw(level, args, kwargs) | python | def log(self, level, *args, **kwargs):
"""Delegate a log call to the underlying logger."""
return self._log_kw(level, args, kwargs) | [
"def",
"log",
"(",
"self",
",",
"level",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_log_kw",
"(",
"level",
",",
"args",
",",
"kwargs",
")"
] | Delegate a log call to the underlying logger. | [
"Delegate",
"a",
"log",
"call",
"to",
"the",
"underlying",
"logger",
"."
] | 77ab934a3dd7b1cfabc0ec96acc0b8ed26edcb3f | https://github.com/eisensheng/kaviar/blob/77ab934a3dd7b1cfabc0ec96acc0b8ed26edcb3f/kaviar/adapter.py#L95-L97 |
251,769 | eisensheng/kaviar | kaviar/adapter.py | KvLoggerAdapter.exception | def exception(self, *args, **kwargs):
"""Delegate a exception call to the underlying logger."""
return self._log_kw(ERROR, args, kwargs, exc_info=True) | python | def exception(self, *args, **kwargs):
"""Delegate a exception call to the underlying logger."""
return self._log_kw(ERROR, args, kwargs, exc_info=True) | [
"def",
"exception",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_log_kw",
"(",
"ERROR",
",",
"args",
",",
"kwargs",
",",
"exc_info",
"=",
"True",
")"
] | Delegate an exception call to the underlying logger. | [
"Delegate",
"a",
"exception",
"call",
"to",
"the",
"underlying",
"logger",
"."
] | 77ab934a3dd7b1cfabc0ec96acc0b8ed26edcb3f | https://github.com/eisensheng/kaviar/blob/77ab934a3dd7b1cfabc0ec96acc0b8ed26edcb3f/kaviar/adapter.py#L115-L117 |
251,770 | eagleamon/pynetio | pynetio.py | Netio.update | def update(self):
""" Update all the switch values """
self.states = [bool(int(x)) for x in self.get('port list') or '0000'] | python | def update(self):
""" Update all the switch values """
self.states = [bool(int(x)) for x in self.get('port list') or '0000'] | [
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"states",
"=",
"[",
"bool",
"(",
"int",
"(",
"x",
")",
")",
"for",
"x",
"in",
"self",
".",
"get",
"(",
"'port list'",
")",
"or",
"'0000'",
"]"
] | Update all the switch values | [
"Update",
"all",
"the",
"switch",
"values"
] | 3bc212cae18608de0214b964e395877d3ca4aa7b | https://github.com/eagleamon/pynetio/blob/3bc212cae18608de0214b964e395877d3ca4aa7b/pynetio.py#L46-L49 |
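The parsing step above in isolation — the raw response string is illustrative of what the 'port list' command returns:

raw = '1010'  # illustrative reply from self.get('port list')
states = [bool(int(x)) for x in raw]
# states == [True, False, True, False]: one boolean flag per outlet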
251,771 | hmpf/dataporten-auth | src/dataporten/psa.py | DataportenOAuth2.check_correct_audience | def check_correct_audience(self, audience):
"Assert that Dataporten sends back our own client id as audience"
client_id, _ = self.get_key_and_secret()
if audience != client_id:
raise AuthException('Wrong audience') | python | def check_correct_audience(self, audience):
"Assert that Dataporten sends back our own client id as audience"
client_id, _ = self.get_key_and_secret()
if audience != client_id:
raise AuthException('Wrong audience') | [
"def",
"check_correct_audience",
"(",
"self",
",",
"audience",
")",
":",
"client_id",
",",
"_",
"=",
"self",
".",
"get_key_and_secret",
"(",
")",
"if",
"audience",
"!=",
"client_id",
":",
"raise",
"AuthException",
"(",
"'Wrong audience'",
")"
] | Assert that Dataporten sends back our own client id as audience | [
"Assert",
"that",
"Dataporten",
"sends",
"back",
"our",
"own",
"client",
"id",
"as",
"audience"
] | bc2ff5e11a1fce2c3d7bffe3f2b513bd7e2c0fcc | https://github.com/hmpf/dataporten-auth/blob/bc2ff5e11a1fce2c3d7bffe3f2b513bd7e2c0fcc/src/dataporten/psa.py#L52-L56 |
251,772 | alexhayes/django-toolkit | django_toolkit/fields.py | ChoiceHumanReadable | def ChoiceHumanReadable(choices, choice):
"""
Return the human readable representation for a list of choices.
@see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices
"""
if choice == None: raise NoChoiceError()
for _choice in choices:
if _choice[0] == choice:
return _choice[1]
raise NoChoiceMatchError("The choice '%s' does not exist in '%s'" % (choice, ", ".join([choice[0] for choice in choices]))) | python | def ChoiceHumanReadable(choices, choice):
"""
Return the human readable representation for a list of choices.
@see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices
"""
if choice == None: raise NoChoiceError()
for _choice in choices:
if _choice[0] == choice:
return _choice[1]
raise NoChoiceMatchError("The choice '%s' does not exist in '%s'" % (choice, ", ".join([choice[0] for choice in choices]))) | [
"def",
"ChoiceHumanReadable",
"(",
"choices",
",",
"choice",
")",
":",
"if",
"choice",
"==",
"None",
":",
"raise",
"NoChoiceError",
"(",
")",
"for",
"_choice",
"in",
"choices",
":",
"if",
"_choice",
"[",
"0",
"]",
"==",
"choice",
":",
"return",
"_choice",
"[",
"1",
"]",
"raise",
"NoChoiceMatchError",
"(",
"\"The choice '%s' does not exist in '%s'\"",
"%",
"(",
"choice",
",",
"\", \"",
".",
"join",
"(",
"[",
"choice",
"[",
"0",
"]",
"for",
"choice",
"in",
"choices",
"]",
")",
")",
")"
] | Return the human readable representation for a list of choices.
@see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices | [
"Return",
"the",
"human",
"readable",
"representation",
"for",
"a",
"list",
"of",
"choices",
"."
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/fields.py#L15-L25 |
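A behavioural sketch with an illustrative Django-style choices tuple:

STATUS_CHOICES = (
    ('D', 'Draft'),
    ('P', 'Published'),
)

ChoiceHumanReadable(STATUS_CHOICES, 'P')   # -> 'Published'
# ChoiceHumanReadable(STATUS_CHOICES, None) raises NoChoiceError
# ChoiceHumanReadable(STATUS_CHOICES, 'X') raises NoChoiceMatchError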
251,773 | alexhayes/django-toolkit | django_toolkit/fields.py | SeparatedValuesField.get_db_prep_value | def get_db_prep_value(self, value, connection=None, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save``and
`get_db_prep_lookup```
"""
if not value:
return
if prepared:
return value
else:
assert(isinstance(value, list) or isinstance(value, tuple))
return self.separator.join([unicode(s) for s in value]) | python | def get_db_prep_value(self, value, connection=None, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save``and
`get_db_prep_lookup```
"""
if not value:
return
if prepared:
return value
else:
assert(isinstance(value, list) or isinstance(value, tuple))
return self.separator.join([unicode(s) for s in value]) | [
"def",
"get_db_prep_value",
"(",
"self",
",",
"value",
",",
"connection",
"=",
"None",
",",
"prepared",
"=",
"False",
")",
":",
"if",
"not",
"value",
":",
"return",
"if",
"prepared",
":",
"return",
"value",
"else",
":",
"assert",
"(",
"isinstance",
"(",
"value",
",",
"list",
")",
"or",
"isinstance",
"(",
"value",
",",
"tuple",
")",
")",
"return",
"self",
".",
"separator",
".",
"join",
"(",
"[",
"unicode",
"(",
"s",
")",
"for",
"s",
"in",
"value",
"]",
")"
] | Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save``and
`get_db_prep_lookup``` | [
"Returns",
"field",
"s",
"value",
"prepared",
"for",
"interacting",
"with",
"the",
"database",
"backend",
"."
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/fields.py#L58-L71 |
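The serialisation step in isolation (Python 2, matching the source); the separator value is an assumption and the list contents are illustrative:

separator = ','  # assumed default separator
values = ['red', 'green', 'blue']
stored = separator.join([unicode(s) for s in values])
# stored == u'red,green,blue' -- the string handed to the database backend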
251,774 | jmgilman/Neolib | neolib/user/SDB.py | SDB.load | def load(self):
""" Loads the user's SDB inventory
Raises
parseException
"""
self.inventory = SDBInventory(self.usr)
self.forms = self.inventory.forms | python | def load(self):
""" Loads the user's SDB inventory
Raises
parseException
"""
self.inventory = SDBInventory(self.usr)
self.forms = self.inventory.forms | [
"def",
"load",
"(",
"self",
")",
":",
"self",
".",
"inventory",
"=",
"SDBInventory",
"(",
"self",
".",
"usr",
")",
"self",
".",
"forms",
"=",
"self",
".",
"inventory",
".",
"forms"
] | Loads the user's SDB inventory
Raises
parseException | [
"Loads",
"the",
"user",
"s",
"SDB",
"inventory",
"Raises",
"parseException"
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/user/SDB.py#L43-L50 |
251,775 | jmgilman/Neolib | neolib/user/SDB.py | SDB.update | def update(self):
""" Upates the user's SDB inventory
Loops through all items on a page and checks for an item
that has changed. A changed item is identified as the remove
attribute being set to anything greater than 0. It will then
update each page accordingly with the changed items.
Returns
bool - True if successful, False otherwise
"""
for x in range(1, self.inventory.pages + 1):
if self._hasPageChanged(x):
form = self._updateForm(x)
form.usePin = True
pg = form.submit()
# Success redirects to SDB page
if "Your Safety Deposit Box" in pg.content:
return True
else:
logging.getLogger("neolib.shop").exception("Could not verify if SDB inventory was updated.", {'pg': pg})
return False | python | def update(self):
""" Upates the user's SDB inventory
Loops through all items on a page and checks for an item
that has changed. A changed item is identified as the remove
attribute being set to anything greater than 0. It will then
update each page accordingly with the changed items.
Returns
bool - True if successful, False otherwise
"""
for x in range(1, self.inventory.pages + 1):
if self._hasPageChanged(x):
form = self._updateForm(x)
form.usePin = True
pg = form.submit()
# Success redirects to SDB page
if "Your Safety Deposit Box" in pg.content:
return True
else:
logging.getLogger("neolib.shop").exception("Could not verify if SDB inventory was updated.", {'pg': pg})
return False | [
"def",
"update",
"(",
"self",
")",
":",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"self",
".",
"inventory",
".",
"pages",
"+",
"1",
")",
":",
"if",
"self",
".",
"_hasPageChanged",
"(",
"x",
")",
":",
"form",
"=",
"self",
".",
"_updateForm",
"(",
"x",
")",
"form",
".",
"usePin",
"=",
"True",
"pg",
"=",
"form",
".",
"submit",
"(",
")",
"# Success redirects to SDB page",
"if",
"\"Your Safety Deposit Box\"",
"in",
"pg",
".",
"content",
":",
"return",
"True",
"else",
":",
"logging",
".",
"getLogger",
"(",
"\"neolib.shop\"",
")",
".",
"exception",
"(",
"\"Could not verify if SDB inventory was updated.\"",
",",
"{",
"'pg'",
":",
"pg",
"}",
")",
"return",
"False"
] | Updates the user's SDB inventory
Loops through all items on a page and checks for an item
that has changed. A changed item is identified as the remove
attribute being set to anything greater than 0. It will then
update each page accordingly with the changed items.
Returns
bool - True if successful, False otherwise | [
"Upates",
"the",
"user",
"s",
"SDB",
"inventory",
"Loops",
"through",
"all",
"items",
"on",
"a",
"page",
"and",
"checks",
"for",
"an",
"item",
"that",
"has",
"changed",
".",
"A",
"changed",
"item",
"is",
"identified",
"as",
"the",
"remove",
"attribute",
"being",
"set",
"to",
"anything",
"greater",
"than",
"0",
".",
"It",
"will",
"then",
"update",
"each",
"page",
"accordingly",
"with",
"the",
"changed",
"items",
".",
"Returns",
"bool",
"-",
"True",
"if",
"successful",
"False",
"otherwise"
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/user/SDB.py#L52-L74 |
251,776 | tempodb/tempodb-python | tempodb/client.py | make_series_url | def make_series_url(key):
"""For internal use. Given a series key, generate a valid URL to the series
endpoint for that key.
:param string key: the series key
:rtype: string"""
url = urlparse.urljoin(endpoint.SERIES_ENDPOINT, 'key/')
url = urlparse.urljoin(url, urllib.quote(key))
return url | python | def make_series_url(key):
"""For internal use. Given a series key, generate a valid URL to the series
endpoint for that key.
:param string key: the series key
:rtype: string"""
url = urlparse.urljoin(endpoint.SERIES_ENDPOINT, 'key/')
url = urlparse.urljoin(url, urllib.quote(key))
return url | [
"def",
"make_series_url",
"(",
"key",
")",
":",
"url",
"=",
"urlparse",
".",
"urljoin",
"(",
"endpoint",
".",
"SERIES_ENDPOINT",
",",
"'key/'",
")",
"url",
"=",
"urlparse",
".",
"urljoin",
"(",
"url",
",",
"urllib",
".",
"quote",
"(",
"key",
")",
")",
"return",
"url"
] | For internal use. Given a series key, generate a valid URL to the series
endpoint for that key.
:param string key: the series key
:rtype: string | [
"For",
"internal",
"use",
".",
"Given",
"a",
"series",
"key",
"generate",
"a",
"valid",
"URL",
"to",
"the",
"series",
"endpoint",
"for",
"that",
"key",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L11-L20 |
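A sketch of the quoting behaviour above, using an illustrative absolute endpoint (Python 2 urllib/urlparse, as in the source):

import urllib
import urlparse

SERIES_ENDPOINT = 'https://api.tempo-db.com/v1/series/'  # illustrative
url = urlparse.urljoin(SERIES_ENDPOINT, 'key/')
url = urlparse.urljoin(url, urllib.quote('temp sensor/1'))
# -> https://api.tempo-db.com/v1/series/key/temp%20sensor/1
# (spaces are escaped; '/' stays, being in quote()'s default safe set)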
251,777 | tempodb/tempodb-python | tempodb/client.py | Client.create_series | def create_series(self, key=None, tags=[], attrs={}):
"""Create a new series with an optional string key. A list of tags
and a map of attributes can also be optionally supplied.
:param string key: (optional) a string key for the series
:param list tags: (optional) the tags to create the series with
:param dict attrs: (optional) the attributes to the create the series
with
:rtype: :class:`tempodb.response.Response` object"""
body = protocol.make_series_key(key, tags, attrs)
resp = self.session.post(endpoint.SERIES_ENDPOINT, body)
return resp | python | def create_series(self, key=None, tags=[], attrs={}):
"""Create a new series with an optional string key. A list of tags
and a map of attributes can also be optionally supplied.
:param string key: (optional) a string key for the series
:param list tags: (optional) the tags to create the series with
:param dict attrs: (optional) the attributes to the create the series
with
:rtype: :class:`tempodb.response.Response` object"""
body = protocol.make_series_key(key, tags, attrs)
resp = self.session.post(endpoint.SERIES_ENDPOINT, body)
return resp | [
"def",
"create_series",
"(",
"self",
",",
"key",
"=",
"None",
",",
"tags",
"=",
"[",
"]",
",",
"attrs",
"=",
"{",
"}",
")",
":",
"body",
"=",
"protocol",
".",
"make_series_key",
"(",
"key",
",",
"tags",
",",
"attrs",
")",
"resp",
"=",
"self",
".",
"session",
".",
"post",
"(",
"endpoint",
".",
"SERIES_ENDPOINT",
",",
"body",
")",
"return",
"resp"
] | Create a new series with an optional string key. A list of tags
and a map of attributes can also be optionally supplied.
:param string key: (optional) a string key for the series
:param list tags: (optional) the tags to create the series with
:param dict attrs: (optional) the attributes to the create the series
with
:rtype: :class:`tempodb.response.Response` object | [
"Create",
"a",
"new",
"series",
"with",
"an",
"optional",
"string",
"key",
".",
"A",
"list",
"of",
"tags",
"and",
"a",
"map",
"of",
"attributes",
"can",
"also",
"be",
"optionally",
"supplied",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L133-L145 |
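A usage sketch; the Client constructor arguments are placeholders and their order follows the library's README convention (an assumption):

from tempodb.client import Client

client = Client('database_id', 'api_key', 'api_secret')  # placeholders
resp = client.create_series(key='sensor.1.temperature',
                            tags=['temperature'],
                            attrs={'building': 'HQ'})
# resp.status should be 200 on success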
251,778 | tempodb/tempodb-python | tempodb/client.py | Client.delete_series | def delete_series(self, keys=None, tags=None, attrs=None,
allow_truncation=False):
"""Delete a series according to the given criteria.
**Note:** for the key argument, the filter will return the *union* of
those values. For the tag and attr arguments, the filter will return
the *intersection* of those values.
:param keys: filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:param bool allow_truncation: whether to allow full deletion of a
database. Default is False.
:rtype: :class:`tempodb.response.Response` object"""
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'allow_truncation': str(allow_truncation).lower()
}
url_args = endpoint.make_url_args(params)
url = '?'.join([endpoint.SERIES_ENDPOINT, url_args])
resp = self.session.delete(url)
return resp | python | def delete_series(self, keys=None, tags=None, attrs=None,
allow_truncation=False):
"""Delete a series according to the given criteria.
**Note:** for the key argument, the filter will return the *union* of
those values. For the tag and attr arguments, the filter will return
the *intersection* of those values.
:param keys: filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:param bool allow_truncation: whether to allow full deletion of a
database. Default is False.
:rtype: :class:`tempodb.response.Response` object"""
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'allow_truncation': str(allow_truncation).lower()
}
url_args = endpoint.make_url_args(params)
url = '?'.join([endpoint.SERIES_ENDPOINT, url_args])
resp = self.session.delete(url)
return resp | [
"def",
"delete_series",
"(",
"self",
",",
"keys",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"allow_truncation",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'key'",
":",
"keys",
",",
"'tag'",
":",
"tags",
",",
"'attr'",
":",
"attrs",
",",
"'allow_truncation'",
":",
"str",
"(",
"allow_truncation",
")",
".",
"lower",
"(",
")",
"}",
"url_args",
"=",
"endpoint",
".",
"make_url_args",
"(",
"params",
")",
"url",
"=",
"'?'",
".",
"join",
"(",
"[",
"endpoint",
".",
"SERIES_ENDPOINT",
",",
"url_args",
"]",
")",
"resp",
"=",
"self",
".",
"session",
".",
"delete",
"(",
"url",
")",
"return",
"resp"
] | Delete a series according to the given criteria.
**Note:** for the key argument, the filter will return the *union* of
those values. For the tag and attr arguments, the filter will return
the *intersection* of those values.
:param keys: filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:param bool allow_truncation: whether to allow full deletion of a
database. Default is False.
:rtype: :class:`tempodb.response.Response` object | [
"Delete",
"a",
"series",
"according",
"to",
"the",
"given",
"criteria",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L148-L174 |
251,779 | tempodb/tempodb-python | tempodb/client.py | Client.get_series | def get_series(self, key):
"""Get a series object from TempoDB given its key.
:param string key: a string name for the series
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.Series` data payload"""
url = make_series_url(key)
resp = self.session.get(url)
return resp | python | def get_series(self, key):
"""Get a series object from TempoDB given its key.
:param string key: a string name for the series
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.Series` data payload"""
url = make_series_url(key)
resp = self.session.get(url)
return resp | [
"def",
"get_series",
"(",
"self",
",",
"key",
")",
":",
"url",
"=",
"make_series_url",
"(",
"key",
")",
"resp",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
"return",
"resp"
] | Get a series object from TempoDB given its key.
:param string key: a string name for the series
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.Series` data payload | [
"Get",
"a",
"series",
"object",
"from",
"TempoDB",
"given",
"its",
"key",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L177-L186 |
251,780 | tempodb/tempodb-python | tempodb/client.py | Client.list_series | def list_series(self, keys=None, tags=None, attrs=None,
limit=1000):
"""Get a list of all series matching the given criteria.
**Note:** for the key argument, the filter will return the *union* of
those values. For the tag and attr arguments, the filter will return
the *intersection* of those values.
:param keys: filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:rtype: :class:`tempodb.protocol.cursor.SeriesCursor` with an
iterator over :class:`tempodb.protocol.objects.Series`
objects"""
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'limit': limit
}
url_args = endpoint.make_url_args(params)
url = '?'.join([endpoint.SERIES_ENDPOINT, url_args])
resp = self.session.get(url)
return resp | python | def list_series(self, keys=None, tags=None, attrs=None,
limit=1000):
"""Get a list of all series matching the given criteria.
**Note:** for the key argument, the filter will return the *union* of
those values. For the tag and attr arguments, the filter will return
the *intersection* of those values.
:param keys: filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:rtype: :class:`tempodb.protocol.cursor.SeriesCursor` with an
iterator over :class:`tempodb.protocol.objects.Series`
objects"""
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'limit': limit
}
url_args = endpoint.make_url_args(params)
url = '?'.join([endpoint.SERIES_ENDPOINT, url_args])
resp = self.session.get(url)
return resp | [
"def",
"list_series",
"(",
"self",
",",
"keys",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"limit",
"=",
"1000",
")",
":",
"params",
"=",
"{",
"'key'",
":",
"keys",
",",
"'tag'",
":",
"tags",
",",
"'attr'",
":",
"attrs",
",",
"'limit'",
":",
"limit",
"}",
"url_args",
"=",
"endpoint",
".",
"make_url_args",
"(",
"params",
")",
"url",
"=",
"'?'",
".",
"join",
"(",
"[",
"endpoint",
".",
"SERIES_ENDPOINT",
",",
"url_args",
"]",
")",
"resp",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
"return",
"resp"
] | Get a list of all series matching the given criteria.
**Note:** for the key argument, the filter will return the *union* of
those values. For the tag and attr arguments, the filter will return
the *intersection* of those values.
:param keys: filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:rtype: :class:`tempodb.protocol.cursor.SeriesCursor` with an
iterator over :class:`tempodb.protocol.objects.Series`
objects | [
"Get",
"a",
"list",
"of",
"all",
"series",
"matching",
"the",
"given",
"criteria",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L189-L215 |
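A filtering sketch reusing the client above — keys are unioned while tags/attrs are intersected, as the docstring notes:

cursor = client.list_series(tags=['temperature'],
                            attrs={'building': 'HQ'},
                            limit=100)
for series in cursor:  # the SeriesCursor iterates across result pages
    print(series.key)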
251,781 | tempodb/tempodb-python | tempodb/client.py | Client.aggregate_data | def aggregate_data(self, start, end, aggregation, keys=[], tags=[],
attrs={}, rollup=None, period=None, interpolationf=None,
interpolation_period=None, tz=None, limit=1000):
"""Read data from multiple series according to a filter and apply a
function across all the returned series to put the datapoints together
into one aggregate series.
See the :meth:`list_series` method for a description of how the filter
criteria are applied, and the :meth:`read_data` method for how to
work with the start, end, and tz parameters.
Valid aggregation functions are the same as valid rollup functions.
:param string aggregation: the aggregation to perform
:param keys: (optional) filter by one or more series keys
:type keys: list or string
:param tags: (optional) filter by one or more tags
:type tags: list or string
:param dict attrs: (optional) filter by one or more key-value
attributes
:param start: the start time for the data points
:type start: string or Datetime
:param end: the end time for the data points
:type end: string or Datetime
:param string rollup: (optional) the name of a rollup function to use
:param string period: (optional) downsampling rate for the data
:param string interpolationf: (optional) an interpolation function
to run over the series
:param string interpolation_period: (optional) the period to
interpolate data into
:param string tz: (optional) the timezone to place the data into
:rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
iterator over :class:`tempodb.protocol.objects.DataPoint`
objects"""
url = 'segment'
vstart = check_time_param(start)
vend = check_time_param(end)
params = {
'start': vstart,
'end': vend,
'key': keys,
'tag': tags,
'attr': attrs,
'aggregation.fold': aggregation,
'rollup.fold': rollup,
'rollup.period': period,
'interpolation.function': interpolationf,
'interpolation.period': interpolation_period,
'tz': tz,
'limit': limit
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp | python | def aggregate_data(self, start, end, aggregation, keys=[], tags=[],
attrs={}, rollup=None, period=None, interpolationf=None,
interpolation_period=None, tz=None, limit=1000):
"""Read data from multiple series according to a filter and apply a
function across all the returned series to put the datapoints together
into one aggregate series.
See the :meth:`list_series` method for a description of how the filter
criteria are applied, and the :meth:`read_data` method for how to
work with the start, end, and tz parameters.
Valid aggregation functions are the same as valid rollup functions.
:param string aggregation: the aggregation to perform
:param keys: (optional) filter by one or more series keys
:type keys: list or string
:param tags: (optional) filter by one or more tags
:type tags: list or string
:param dict attrs: (optional) filter by one or more key-value
attributes
:param start: the start time for the data points
:type start: string or Datetime
:param end: the end time for the data points
:type end: string or Datetime
:param string rollup: (optional) the name of a rollup function to use
:param string period: (optional) downsampling rate for the data
:param string interpolationf: (optional) an interpolation function
to run over the series
:param string interpolation_period: (optional) the period to
interpolate data into
:param string tz: (optional) the timezone to place the data into
:rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
iterator over :class:`tempodb.protocol.objects.DataPoint`
objects"""
url = 'segment'
vstart = check_time_param(start)
vend = check_time_param(end)
params = {
'start': vstart,
'end': vend,
'key': keys,
'tag': tags,
'attr': attrs,
'aggregation.fold': aggregation,
'rollup.fold': rollup,
'rollup.period': period,
'interpolation.function': interpolationf,
'interpolation.period': interpolation_period,
'tz': tz,
'limit': limit
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp | [
"def",
"aggregate_data",
"(",
"self",
",",
"start",
",",
"end",
",",
"aggregation",
",",
"keys",
"=",
"[",
"]",
",",
"tags",
"=",
"[",
"]",
",",
"attrs",
"=",
"{",
"}",
",",
"rollup",
"=",
"None",
",",
"period",
"=",
"None",
",",
"interpolationf",
"=",
"None",
",",
"interpolation_period",
"=",
"None",
",",
"tz",
"=",
"None",
",",
"limit",
"=",
"1000",
")",
":",
"url",
"=",
"'segment'",
"vstart",
"=",
"check_time_param",
"(",
"start",
")",
"vend",
"=",
"check_time_param",
"(",
"end",
")",
"params",
"=",
"{",
"'start'",
":",
"vstart",
",",
"'end'",
":",
"vend",
",",
"'key'",
":",
"keys",
",",
"'tag'",
":",
"tags",
",",
"'attr'",
":",
"attrs",
",",
"'aggregation.fold'",
":",
"aggregation",
",",
"'rollup.fold'",
":",
"rollup",
",",
"'rollup.period'",
":",
"period",
",",
"'interpolation.function'",
":",
"interpolationf",
",",
"'interpolation.period'",
":",
"interpolation_period",
",",
"'tz'",
":",
"tz",
",",
"'limit'",
":",
"limit",
"}",
"url_args",
"=",
"endpoint",
".",
"make_url_args",
"(",
"params",
")",
"url",
"=",
"'?'",
".",
"join",
"(",
"[",
"url",
",",
"url_args",
"]",
")",
"resp",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
"return",
"resp"
] | Read data from multiple series according to a filter and apply a
function across all the returned series to put the datapoints together
into one aggregate series.
See the :meth:`list_series` method for a description of how the filter
criteria are applied, and the :meth:`read_data` method for how to
work with the start, end, and tz parameters.
Valid aggregation functions are the same as valid rollup functions.
:param string aggregation: the aggregation to perform
:param keys: (optional) filter by one or more series keys
:type keys: list or string
:param tags: (optional) filter by one or more tags
:type tags: list or string
:param dict attrs: (optional) filter by one or more key-value
attributes
:param start: the start time for the data points
:type start: string or Datetime
:param end: the end time for the data points
:type end: string or Datetime
:param string rollup: (optional) the name of a rollup function to use
:param string period: (optional) downsampling rate for the data
:param string interpolationf: (optional) an interpolation function
to run over the series
:param string interpolation_period: (optional) the period to
interpolate data into
:param string tz: (optional) the timezone to place the data into
:rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
iterator over :class:`tempodb.protocol.objects.DataPoint`
objects | [
"Read",
"data",
"from",
"multiple",
"series",
"according",
"to",
"a",
"filter",
"and",
"apply",
"a",
"function",
"across",
"all",
"the",
"returned",
"series",
"to",
"put",
"the",
"datapoints",
"together",
"into",
"one",
"aggregrate",
"series",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L433-L489 |
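A usage sketch reusing the client above; 'sum' and 'mean' are standard fold-function names, the period is an ISO 8601 duration, and the DataPoint attribute names are assumptions:

import datetime

start = datetime.datetime(2014, 1, 1)
end = datetime.datetime(2014, 1, 2)

# Sum matching series into one stream, then roll it up hour by hour.
cursor = client.aggregate_data(start, end, 'sum',
                               tags=['temperature'],
                               rollup='mean', period='PT1H', tz='UTC')
for dp in cursor:
    print(dp.t, dp.v)  # timestamp/value attribute names are assumed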
251,782 | tempodb/tempodb-python | tempodb/client.py | Client.write_data | def write_data(self, key, data, tags=[], attrs={}):
"""Write a set a datapoints into a series by its key. For now,
the tags and attributes arguments are ignored.
:param string key: the series to write data into
:param list data: a list of DataPoints to write
:rtype: :class:`tempodb.response.Response` object"""
url = make_series_url(key)
url = urlparse.urljoin(url + '/', 'data')
#revisit later if there are server changes to take these into
#account
#params = {
# 'tag': tag,
# 'attr': attr,
#}
#url_args = endpoint.make_url_args(params)
#url = '?'.join([url, url_args])
dlist = [d.to_dictionary() for d in data]
body = json.dumps(dlist)
resp = self.session.post(url, body)
return resp | python | def write_data(self, key, data, tags=[], attrs={}):
"""Write a set a datapoints into a series by its key. For now,
the tags and attributes arguments are ignored.
:param string key: the series to write data into
:param list data: a list of DataPoints to write
:rtype: :class:`tempodb.response.Response` object"""
url = make_series_url(key)
url = urlparse.urljoin(url + '/', 'data')
#revisit later if there are server changes to take these into
#account
#params = {
# 'tag': tag,
# 'attr': attr,
#}
#url_args = endpoint.make_url_args(params)
#url = '?'.join([url, url_args])
dlist = [d.to_dictionary() for d in data]
body = json.dumps(dlist)
resp = self.session.post(url, body)
return resp | [
"def",
"write_data",
"(",
"self",
",",
"key",
",",
"data",
",",
"tags",
"=",
"[",
"]",
",",
"attrs",
"=",
"{",
"}",
")",
":",
"url",
"=",
"make_series_url",
"(",
"key",
")",
"url",
"=",
"urlparse",
".",
"urljoin",
"(",
"url",
"+",
"'/'",
",",
"'data'",
")",
"#revisit later if there are server changes to take these into",
"#account",
"#params = {",
"# 'tag': tag,",
"# 'attr': attr,",
"#}",
"#url_args = endpoint.make_url_args(params)",
"#url = '?'.join([url, url_args])",
"dlist",
"=",
"[",
"d",
".",
"to_dictionary",
"(",
")",
"for",
"d",
"in",
"data",
"]",
"body",
"=",
"json",
".",
"dumps",
"(",
"dlist",
")",
"resp",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
",",
"body",
")",
"return",
"resp"
] | Write a set of datapoints into a series by its key. For now,
the tags and attributes arguments are ignored.
:param string key: the series to write data into
:param list data: a list of DataPoints to write
:rtype: :class:`tempodb.response.Response` object | [
"Write",
"a",
"set",
"a",
"datapoints",
"into",
"a",
"series",
"by",
"its",
"key",
".",
"For",
"now",
"the",
"tags",
"and",
"attributes",
"arguments",
"are",
"ignored",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L545-L568 |
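A write sketch reusing the client above. The DataPoint import path and the from_data constructor are assumptions based on this library's protocol module:

import datetime
from tempodb.protocol import DataPoint  # assumed import path

points = [
    DataPoint.from_data(datetime.datetime(2014, 1, 1, 0, 0), 12.34),
    DataPoint.from_data(datetime.datetime(2014, 1, 1, 0, 1), 12.37),
]
resp = client.write_data('sensor.1.temperature', points)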
251,783 | tempodb/tempodb-python | tempodb/client.py | Client.single_value | def single_value(self, key, ts=None, direction=None):
"""Return a single value for a series. You can supply a timestamp
as the ts argument, otherwise the search defaults to the current
time.
The direction argument can be one of "exact", "before", "after", or
"nearest".
:param string key: the key for the series to use
:param ts: (optional) the time to begin searching from
:type ts: ISO8601 string or Datetime object
:param string direction: criterion for the search
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.SingleValue` object as the
data payload"""
url = make_series_url(key)
url = urlparse.urljoin(url + '/', 'single')
if ts is not None:
vts = check_time_param(ts)
else:
vts = None
params = {
'ts': vts,
'direction': direction
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp | python | def single_value(self, key, ts=None, direction=None):
"""Return a single value for a series. You can supply a timestamp
as the ts argument, otherwise the search defaults to the current
time.
The direction argument can be one of "exact", "before", "after", or
"nearest".
:param string key: the key for the series to use
:param ts: (optional) the time to begin searching from
:type ts: ISO8601 string or Datetime object
:param string direction: criterion for the search
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.SingleValue` object as the
data payload"""
url = make_series_url(key)
url = urlparse.urljoin(url + '/', 'single')
if ts is not None:
vts = check_time_param(ts)
else:
vts = None
params = {
'ts': vts,
'direction': direction
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp | [
"def",
"single_value",
"(",
"self",
",",
"key",
",",
"ts",
"=",
"None",
",",
"direction",
"=",
"None",
")",
":",
"url",
"=",
"make_series_url",
"(",
"key",
")",
"url",
"=",
"urlparse",
".",
"urljoin",
"(",
"url",
"+",
"'/'",
",",
"'single'",
")",
"if",
"ts",
"is",
"not",
"None",
":",
"vts",
"=",
"check_time_param",
"(",
"ts",
")",
"else",
":",
"vts",
"=",
"None",
"params",
"=",
"{",
"'ts'",
":",
"vts",
",",
"'direction'",
":",
"direction",
"}",
"url_args",
"=",
"endpoint",
".",
"make_url_args",
"(",
"params",
")",
"url",
"=",
"'?'",
".",
"join",
"(",
"[",
"url",
",",
"url_args",
"]",
")",
"resp",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
"return",
"resp"
] | Return a single value for a series. You can supply a timestamp
as the ts argument, otherwise the search defaults to the current
time.
The direction argument can be one of "exact", "before", "after", or
"nearest".
:param string key: the key for the series to use
:param ts: (optional) the time to begin searching from
:type ts: ISO8601 string or Datetime object
:param string direction: criterion for the search
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.SingleValue` object as the
data payload | [
"Return",
"a",
"single",
"value",
"for",
"a",
"series",
".",
"You",
"can",
"supply",
"a",
"timestamp",
"as",
"the",
"ts",
"argument",
"otherwise",
"the",
"search",
"defaults",
"to",
"the",
"current",
"time",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L621-L653 |
251,784 | tempodb/tempodb-python | tempodb/client.py | Client.multi_series_single_value | def multi_series_single_value(self, keys=None, ts=None, direction=None,
attrs={}, tags=[]):
"""Return a single value for multiple series. You can supply a
timestamp as the ts argument, otherwise the search defaults to the
current time.
The direction argument can be one of "exact", "before", "after", or
"nearest".
The id, key, tag, and attr arguments allow you to filter for series.
See the :meth:`list_series` method for an explanation of their use.
:param string keys: (optional) a list of keys for the series to use
:param ts: (optional) the time to begin searching from
:type ts: ISO8601 string or Datetime object
:param string direction: criterion for the search
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:rtype: :class:`tempodb.protocol.cursor.SingleValueCursor` with an
iterator over :class:`tempodb.protocol.objects.SingleValue`
objects"""
url = 'single/'
if ts is not None:
vts = check_time_param(ts)
else:
vts = None
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'ts': vts,
'direction': direction
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp | python | def multi_series_single_value(self, keys=None, ts=None, direction=None,
attrs={}, tags=[]):
"""Return a single value for multiple series. You can supply a
timestamp as the ts argument, otherwise the search defaults to the
current time.
The direction argument can be one of "exact", "before", "after", or
"nearest".
The id, key, tag, and attr arguments allow you to filter for series.
See the :meth:`list_series` method for an explanation of their use.
:param string keys: (optional) a list of keys for the series to use
:param ts: (optional) the time to begin searching from
:type ts: ISO8601 string or Datetime object
:param string direction: criterion for the search
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:rtype: :class:`tempodb.protocol.cursor.SingleValueCursor` with an
iterator over :class:`tempodb.protocol.objects.SingleValue`
objects"""
url = 'single/'
if ts is not None:
vts = check_time_param(ts)
else:
vts = None
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'ts': vts,
'direction': direction
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp | [
"def",
"multi_series_single_value",
"(",
"self",
",",
"keys",
"=",
"None",
",",
"ts",
"=",
"None",
",",
"direction",
"=",
"None",
",",
"attrs",
"=",
"{",
"}",
",",
"tags",
"=",
"[",
"]",
")",
":",
"url",
"=",
"'single/'",
"if",
"ts",
"is",
"not",
"None",
":",
"vts",
"=",
"check_time_param",
"(",
"ts",
")",
"else",
":",
"vts",
"=",
"None",
"params",
"=",
"{",
"'key'",
":",
"keys",
",",
"'tag'",
":",
"tags",
",",
"'attr'",
":",
"attrs",
",",
"'ts'",
":",
"vts",
",",
"'direction'",
":",
"direction",
"}",
"url_args",
"=",
"endpoint",
".",
"make_url_args",
"(",
"params",
")",
"url",
"=",
"'?'",
".",
"join",
"(",
"[",
"url",
",",
"url_args",
"]",
")",
"resp",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
"return",
"resp"
] | Return a single value for multiple series. You can supply a
timestamp as the ts argument, otherwise the search defaults to the
current time.
The direction argument can be one of "exact", "before", "after", or
"nearest".
The id, key, tag, and attr arguments allow you to filter for series.
See the :meth:`list_series` method for an explanation of their use.
:param string keys: (optional) a list of keys for the series to use
:param ts: (optional) the time to begin searching from
:type ts: ISO8601 string or Datetime object
:param string direction: criterion for the search
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:rtype: :class:`tempodb.protocol.cursor.SingleValueCursor` with an
iterator over :class:`tempodb.protocol.objects.SingleValue`
objects | [
"Return",
"a",
"single",
"value",
"for",
"multiple",
"series",
".",
"You",
"can",
"supply",
"a",
"timestamp",
"as",
"the",
"ts",
"argument",
"otherwise",
"the",
"search",
"defaults",
"to",
"the",
"current",
"time",
"."
] | 8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3 | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L656-L696 |
251,785 | rorr73/LifeSOSpy | lifesospy/client.py | Client.async_open | async def async_open(self) -> None:
"""Opens connection to the LifeSOS ethernet interface."""
await self._loop.create_connection(
lambda: self,
self._host,
self._port) | python | async def async_open(self) -> None:
"""Opens connection to the LifeSOS ethernet interface."""
await self._loop.create_connection(
lambda: self,
self._host,
self._port) | [
"async",
"def",
"async_open",
"(",
"self",
")",
"->",
"None",
":",
"await",
"self",
".",
"_loop",
".",
"create_connection",
"(",
"lambda",
":",
"self",
",",
"self",
".",
"_host",
",",
"self",
".",
"_port",
")"
] | Opens connection to the LifeSOS ethernet interface. | [
"Opens",
"connection",
"to",
"the",
"LifeSOS",
"ethernet",
"interface",
"."
] | 62360fbab2e90bf04d52b547093bdab2d4e389b4 | https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/client.py#L44-L50 |
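A minimal driver for the coroutine above; host and port are placeholders, and the Client constructor is assumed to take them directly:

import asyncio
from lifesospy.client import Client  # path as in this record

async def main():
    client = Client('192.168.1.100', 1680)  # placeholder host/port
    await client.async_open()  # connects to the LifeSOS ethernet interface

asyncio.get_event_loop().run_until_complete(main())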
251,786 | jeroyang/cateye | cateye/cateye.py | load_abbr | def load_abbr(abbr_file=ABBREVIATION_FILE):
"""
Load the abbr2long from file
"""
abbr2long = dict()
with open(abbr_file) as f:
lines = f.read().split('\n')
for line in lines:
m = re.match(r'(\w+)\t(.+)', line)
if m:
abbr2long[m.group(1)] = m.group(2)
return abbr2long | python | def load_abbr(abbr_file=ABBREVIATION_FILE):
"""
Load the abbr2long from file
"""
abbr2long = dict()
with open(abbr_file) as f:
lines = f.read().split('\n')
for line in lines:
m = re.match(r'(\w+)\t(.+)', line)
if m:
abbr2long[m.group(1)] = m.group(2)
return abbr2long | [
"def",
"load_abbr",
"(",
"abbr_file",
"=",
"ABBREVIATION_FILE",
")",
":",
"abbr2long",
"=",
"dict",
"(",
")",
"with",
"open",
"(",
"abbr_file",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'(\\w+)\\t(.+)'",
",",
"line",
")",
"if",
"m",
":",
"abbr2long",
"[",
"m",
".",
"group",
"(",
"1",
")",
"]",
"=",
"m",
".",
"group",
"(",
"2",
")",
"return",
"abbr2long"
] | Load the abbr2long from file | [
"Load",
"the",
"abbr2long",
"from",
"file"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L17-L28 |
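load_abbr expects one ABBR<TAB>long-form pair per line, matching the (\w+)\t(.+) pattern. A runnable sketch with an illustrative demo file:

    from cateye.cateye import load_abbr

    with open('abbr_demo.txt', 'w') as f:   # demo file, not the shipped one
        f.write('IOP\tintraocular pressure\nVA\tvisual acuity')

    abbr2long = load_abbr('abbr_demo.txt')
    assert abbr2long == {'IOP': 'intraocular pressure', 'VA': 'visual acuity'}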
251,787 | jeroyang/cateye | cateye/cateye.py | load_spelling | def load_spelling(spell_file=SPELLING_FILE):
"""
Load the term_freq from spell_file
"""
with open(spell_file) as f:
tokens = f.read().split('\n')
size = len(tokens)
term_freq = {token: size - i for i, token in enumerate(tokens)}
return term_freq | python | def load_spelling(spell_file=SPELLING_FILE):
"""
Load the term_freq from spell_file
"""
with open(spell_file) as f:
tokens = f.read().split('\n')
size = len(tokens)
term_freq = {token: size - i for i, token in enumerate(tokens)}
return term_freq | [
"def",
"load_spelling",
"(",
"spell_file",
"=",
"SPELLING_FILE",
")",
":",
"with",
"open",
"(",
"spell_file",
")",
"as",
"f",
":",
"tokens",
"=",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"size",
"=",
"len",
"(",
"tokens",
")",
"term_freq",
"=",
"{",
"token",
":",
"size",
"-",
"i",
"for",
"i",
",",
"token",
"in",
"enumerate",
"(",
"tokens",
")",
"}",
"return",
"term_freq"
] | Load the term_freq from spell_file | [
"Load",
"the",
"term_freq",
"from",
"spell_file"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L31-L39 |
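The spelling file holds one token per line, most frequent first, and earlier lines receive higher scores (size minus line index). A quick check of the ranking scheme with an illustrative file:

    from cateye.cateye import load_spelling

    with open('spelling_demo.txt', 'w') as f:   # illustrative file name
        f.write('glaucoma\ncataract\nmyopia')

    term_freq = load_spelling('spelling_demo.txt')
    assert term_freq == {'glaucoma': 3, 'cataract': 2, 'myopia': 1}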
251,788 | jeroyang/cateye | cateye/cateye.py | load_search_freq | def load_search_freq(fp=SEARCH_FREQ_JSON):
"""
Load the search_freq from JSON file
"""
try:
with open(fp) as f:
return Counter(json.load(f))
except FileNotFoundError:
return Counter() | python | def load_search_freq(fp=SEARCH_FREQ_JSON):
"""
Load the search_freq from JSON file
"""
try:
with open(fp) as f:
return Counter(json.load(f))
except FileNotFoundError:
return Counter() | [
"def",
"load_search_freq",
"(",
"fp",
"=",
"SEARCH_FREQ_JSON",
")",
":",
"try",
":",
"with",
"open",
"(",
"fp",
")",
"as",
"f",
":",
"return",
"Counter",
"(",
"json",
".",
"load",
"(",
"f",
")",
")",
"except",
"FileNotFoundError",
":",
"return",
"Counter",
"(",
")"
] | Load the search_freq from JSON file | [
"Load",
"the",
"search_freq",
"from",
"JSON",
"file"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L41-L49 |
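Because a missing file falls back to an empty Counter, load_search_freq is safe to call on a first run, before any searches have been recorded:

    from collections import Counter
    from cateye.cateye import load_search_freq

    freq = load_search_freq('no_such_file.json')   # illustrative path
    assert freq == Counter()
    # a file containing {"H40": 3} would yield Counter({'H40': 3})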
251,789 | jeroyang/cateye | cateye/cateye.py | tokenize | def tokenize(s):
"""
A simple tokenizer
"""
s = re.sub(r'(?a)(\w+)\'s', r'\1', s) # clean the 's from Crohn's disease
#s = re.sub(r'(?a)\b', ' ', s) # split the borders of chinese and english chars
split_pattern = r'[{} ]+'.format(re.escape(STOPCHARS))
tokens = [token for token in re.split(split_pattern, s) if not set(token) <= set(string.punctuation)]
return tokens | python | def tokenize(s):
"""
A simple tokenizer
"""
s = re.sub(r'(?a)(\w+)\'s', r'\1', s) # clean the 's from Crohn's disease
#s = re.sub(r'(?a)\b', ' ', s) # split the borders of chinese and english chars
split_pattern = r'[{} ]+'.format(re.escape(STOPCHARS))
tokens = [token for token in re.split(split_pattern, s) if not set(token) <= set(string.punctuation)]
return tokens | [
"def",
"tokenize",
"(",
"s",
")",
":",
"s",
"=",
"re",
".",
"sub",
"(",
"r'(?a)(\\w+)\\'s'",
",",
"r'\\1'",
",",
"s",
")",
"# clean the 's from Crohn's disease",
"#s = re.sub(r'(?a)\\b', ' ', s) # split the borders of chinese and english chars",
"split_pattern",
"=",
"r'[{} ]+'",
".",
"format",
"(",
"re",
".",
"escape",
"(",
"STOPCHARS",
")",
")",
"tokens",
"=",
"[",
"token",
"for",
"token",
"in",
"re",
".",
"split",
"(",
"split_pattern",
",",
"s",
")",
"if",
"not",
"set",
"(",
"token",
")",
"<=",
"set",
"(",
"string",
".",
"punctuation",
")",
"]",
"return",
"tokens"
] | A simple tokenizer | [
"A",
"simple",
"tokneizer"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L71-L80 |
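A worked example of tokenize, assuming ',' is among STOPCHARS: the possessive 's is stripped first, STOPCHARS and spaces act as separators, and tokens made purely of punctuation are dropped:

    from cateye.cateye import tokenize

    tokenize("Crohn's disease, unspecified")
    # -> ['Crohn', 'disease', 'unspecified']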
251,790 | jeroyang/cateye | cateye/cateye.py | write_spelling | def write_spelling(token_folder, spelling_file):
"""
Generate the spelling correction file from token_folder and save to spelling_file
"""
token_pattern = r'[a-z]{3,}'
tokens = []
for base, dirlist, fnlist in os.walk(token_folder):
for fn in fnlist:
fp = os.path.join(base, fn)
with open(fp) as f:
toks = re.findall(token_pattern, f.read())
tokens.extend(toks)
token_ranked, _ = zip(*Counter(tokens).most_common())
with open(spelling_file, 'w') as f:
f.write('\n'.join(token_ranked)) | python | def write_spelling(token_folder, spelling_file):
"""
Generate the spelling correction file from token_folder and save to spelling_file
"""
token_pattern = r'[a-z]{3,}'
tokens = []
for base, dirlist, fnlist in os.walk(token_folder):
for fn in fnlist:
fp = os.path.join(base, fn)
with open(fp) as f:
toks = re.findall(token_pattern, f.read())
tokens.extend(toks)
token_ranked, _ = zip(*Counter(tokens).most_common())
with open(spelling_file, 'w') as f:
f.write('\n'.join(token_ranked)) | [
"def",
"write_spelling",
"(",
"token_folder",
",",
"spelling_file",
")",
":",
"token_pattern",
"=",
"r'[a-z]{3,}'",
"tokens",
"=",
"[",
"]",
"for",
"base",
",",
"dirlist",
",",
"fnlist",
"in",
"os",
".",
"walk",
"(",
"token_folder",
")",
":",
"for",
"fn",
"in",
"fnlist",
":",
"fp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"fn",
")",
"with",
"open",
"(",
"fp",
")",
"as",
"f",
":",
"toks",
"=",
"re",
".",
"findall",
"(",
"token_pattern",
",",
"f",
".",
"read",
"(",
")",
")",
"tokens",
".",
"extend",
"(",
"toks",
")",
"token_ranked",
",",
"_",
"=",
"zip",
"(",
"*",
"Counter",
"(",
"tokens",
")",
".",
"most_common",
"(",
")",
")",
"with",
"open",
"(",
"spelling_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"token_ranked",
")",
")"
] | Generate the spelling correction file from token_folder and save to spelling_file | [
"Generate",
"the",
"spelling",
"correction",
"file",
"form",
"token_folder",
"and",
"save",
"to",
"spelling_file"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L121-L136 |
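Note the token pattern r'[a-z]{3,}': only lowercase runs of three or more letters are counted, so uppercase acronyms and short words never reach the spelling list. A sketch of regenerating the file (paths are illustrative):

    from cateye.cateye import write_spelling, load_spelling

    write_spelling('tokens/', 'spelling.txt')
    term_freq = load_spelling('spelling.txt')   # ready for the corrector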
251,791 | jeroyang/cateye | cateye/cateye.py | get_hints | def get_hints(code_list, k=10, hint_folder=HINT_FOLDER, current_tokens=None):
"""
Fetch first k hints for given code_list
"""
def hint_score(v, size):
"""
The formula for hint score
"""
return 1.0 - abs(v / (size + 1) - 0.5)
if len(code_list) <= 1:
return [], []
if current_tokens is None:
current_tokens = []
size = min(len(code_list), MAX_HINT_SMAPLING_SIZE)
sample = random.sample(code_list, size)
hint_list = []
capital_dict = {}
for code in sample:
path = gen_path(hint_folder, code)
fp = os.path.join(path, code)
try:
with open(fp) as f:
hints = set(f.read().strip().split('\n'))
hint_list.extend([h.lower() for h in hints])
capital_dict.update({hint.lower(): hint for hint in hints})
except FileNotFoundError:
logging.warning("FileNotFoundError: No such file: %r" % fp )
document_freq = Counter(hint_list)
score = [(capital_dict[k], hint_score(v, size)) \
for k, v in document_freq.items() if k not in current_tokens]
if len(score) == 0:
return [], []
score.sort(key=lambda x: x[1], reverse=True)
hints, scores = tuple(list(zip(*score[:k])))
return hints, scores | python | def get_hints(code_list, k=10, hint_folder=HINT_FOLDER, current_tokens=None):
"""
Fetch first k hints for given code_list
"""
def hint_score(v, size):
"""
The formula for hint score
"""
return 1.0 - abs(v / (size + 1) - 0.5)
if len(code_list) <= 1:
return [], []
if current_tokens is None:
current_tokens = []
size = min(len(code_list), MAX_HINT_SMAPLING_SIZE)
sample = random.sample(code_list, size)
hint_list = []
capital_dict = {}
for code in sample:
path = gen_path(hint_folder, code)
fp = os.path.join(path, code)
try:
with open(fp) as f:
hints = set(f.read().strip().split('\n'))
hint_list.extend([h.lower() for h in hints])
capital_dict.update({hint.lower(): hint for hint in hints})
except FileNotFoundError:
logging.warning("FileNotFoundError: No such file: %r" % fp )
document_freq = Counter(hint_list)
score = [(capital_dict[k], hint_score(v, size)) \
for k, v in document_freq.items() if k not in current_tokens]
if len(score) == 0:
return [], []
score.sort(key=lambda x: x[1], reverse=True)
hints, scores = tuple(list(zip(*score[:k])))
return hints, scores | [
"def",
"get_hints",
"(",
"code_list",
",",
"k",
"=",
"10",
",",
"hint_folder",
"=",
"HINT_FOLDER",
",",
"current_tokens",
"=",
"None",
")",
":",
"def",
"hint_score",
"(",
"v",
",",
"size",
")",
":",
"\"\"\"\n The formula for hint score\n \"\"\"",
"return",
"1.0",
"-",
"abs",
"(",
"v",
"/",
"(",
"size",
"+",
"1",
")",
"-",
"0.5",
")",
"if",
"len",
"(",
"code_list",
")",
"<=",
"1",
":",
"return",
"[",
"]",
",",
"[",
"]",
"if",
"current_tokens",
"is",
"None",
":",
"current_tokens",
"=",
"[",
"]",
"size",
"=",
"min",
"(",
"len",
"(",
"code_list",
")",
",",
"MAX_HINT_SMAPLING_SIZE",
")",
"sample",
"=",
"random",
".",
"sample",
"(",
"code_list",
",",
"size",
")",
"hint_list",
"=",
"[",
"]",
"capital_dict",
"=",
"{",
"}",
"for",
"code",
"in",
"sample",
":",
"path",
"=",
"gen_path",
"(",
"hint_folder",
",",
"code",
")",
"fp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"code",
")",
"try",
":",
"with",
"open",
"(",
"fp",
")",
"as",
"f",
":",
"hints",
"=",
"set",
"(",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
")",
"hint_list",
".",
"extend",
"(",
"[",
"h",
".",
"lower",
"(",
")",
"for",
"h",
"in",
"hints",
"]",
")",
"capital_dict",
".",
"update",
"(",
"{",
"hint",
".",
"lower",
"(",
")",
":",
"hint",
"for",
"hint",
"in",
"hints",
"}",
")",
"except",
"FileNotFoundError",
":",
"logging",
".",
"warning",
"(",
"\"FileNotFoundError: No such file: %r\"",
"%",
"fp",
")",
"document_freq",
"=",
"Counter",
"(",
"hint_list",
")",
"score",
"=",
"[",
"(",
"capital_dict",
"[",
"k",
"]",
",",
"hint_score",
"(",
"v",
",",
"size",
")",
")",
"for",
"k",
",",
"v",
"in",
"document_freq",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"current_tokens",
"]",
"if",
"len",
"(",
"score",
")",
"==",
"0",
":",
"return",
"[",
"]",
",",
"[",
"]",
"score",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"hints",
",",
"scores",
"=",
"tuple",
"(",
"list",
"(",
"zip",
"(",
"*",
"score",
"[",
":",
"k",
"]",
")",
")",
")",
"return",
"hints",
",",
"scores"
] | Fetch first k hints for given code_list | [
"Fetch",
"first",
"k",
"hints",
"for",
"given",
"code_list"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L138-L177 |
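hint_score rewards hints that split the sampled codes roughly in half, which is what makes a hint useful for narrowing a search: with size=10, a hint seen in 5 of the sampled codes scores 1 - |5/11 - 0.5| (about 0.95), while one seen in all 10 scores only about 0.59. A calling sketch, with code_list and tokens as produced by an earlier fetch; note that random.sample needs a sequence, so pass a list rather than a set:

    hints, scores = get_hints(list(code_list), k=5, current_tokens=tokens)
    for hint, score in zip(hints, scores):
        print('%s: %.2f' % (hint, score))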
251,792 | jeroyang/cateye | cateye/cateye.py | fetch | def fetch(index, tokens):
"""
Fetch the codes from given tokens
"""
if len(tokens) == 0:
return set()
return set.intersection(*[set(index.get(token, [])) for token in tokens]) | python | def fetch(index, tokens):
"""
Fetch the codes from given tokens
"""
if len(tokens) == 0:
return set()
return set.intersection(*[set(index.get(token, [])) for token in tokens]) | [
"def",
"fetch",
"(",
"index",
",",
"tokens",
")",
":",
"if",
"len",
"(",
"tokens",
")",
"==",
"0",
":",
"return",
"set",
"(",
")",
"return",
"set",
".",
"intersection",
"(",
"*",
"[",
"set",
"(",
"index",
".",
"get",
"(",
"token",
",",
"[",
"]",
")",
")",
"for",
"token",
"in",
"tokens",
"]",
")"
] | Fetch the codes from given tokens | [
"Fetch",
"the",
"codes",
"from",
"given",
"tokens"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L179-L185 |
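fetch is a plain inverted-index intersection over the query tokens. A self-contained illustration (index contents and codes are made up):

    from cateye.cateye import fetch

    index = {'corneal': ['H16.0', 'H18.6'],
             'ulcer':   ['H16.0', 'K25.9']}
    assert fetch(index, ['corneal', 'ulcer']) == {'H16.0'}
    assert fetch(index, []) == set()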
251,793 | jeroyang/cateye | cateye/cateye.py | get_snippets | def get_snippets(code_list, base=SNIPPET_FOLDER):
"""
Get the snippets
"""
output = []
for code in code_list:
path = gen_path(base, code)
fp = os.path.join(path, code)
try:
with open(fp) as f:
output.append(f.read())
except FileNotFoundError:
output.append('')
logging.warning("FileNotFoundError: No such file: %r" % fp )
return output | python | def get_snippets(code_list, base=SNIPPET_FOLDER):
"""
Get the snippets
"""
output = []
for code in code_list:
path = gen_path(base, code)
fp = os.path.join(path, code)
try:
with open(fp) as f:
output.append(f.read())
except FileNotFoundError:
output.append('')
logging.warning("FileNotFoundError: No such file: %r" % fp )
return output | [
"def",
"get_snippets",
"(",
"code_list",
",",
"base",
"=",
"SNIPPET_FOLDER",
")",
":",
"output",
"=",
"[",
"]",
"for",
"code",
"in",
"code_list",
":",
"path",
"=",
"gen_path",
"(",
"base",
",",
"code",
")",
"fp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"code",
")",
"try",
":",
"with",
"open",
"(",
"fp",
")",
"as",
"f",
":",
"output",
".",
"append",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
"FileNotFoundError",
":",
"output",
".",
"append",
"(",
"''",
")",
"logging",
".",
"warning",
"(",
"\"FileNotFoundError: No such file: %r\"",
"%",
"fp",
")",
"return",
"output"
] | Get the snippets | [
"Get",
"the",
"snippets"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L187-L202 |
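Missing snippet files are tolerated: the affected slot becomes an empty string and a warning is logged, so the result always lines up one-to-one with code_list:

    from cateye.cateye import get_snippets

    snippets = get_snippets(['H40.9', 'NO_SUCH_CODE'])   # illustrative codes
    assert len(snippets) == 2    # the second entry is '' if its file is absent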
251,794 | jeroyang/cateye | cateye/cateye.py | _ed1 | def _ed1(token):
"""
Return tokens the edit distance of which is one from the given token
"""
insertion = {letter.join([token[:i], token[i:]]) for letter in string.ascii_lowercase for i in range(1, len(token) + 1)}
deletion = {''.join([token[:i], token[i+1:]]) for i in range(1, len(token) + 1)}
substitution = {letter.join([token[:i], token[i+1:]]) for letter in string.ascii_lowercase for i in range(1, len(token) + 1)}
transposition = {''.join([token[:i], token[i+1:i+2], token[i:i+1], token[i+2:]]) for i in range(1, len(token)-1)}
return set.union(insertion, deletion, substitution, transposition) | python | def _ed1(token):
"""
Return tokens the edit distance of which is one from the given token
"""
insertion = {letter.join([token[:i], token[i:]]) for letter in string.ascii_lowercase for i in range(1, len(token) + 1)}
deletion = {''.join([token[:i], token[i+1:]]) for i in range(1, len(token) + 1)}
substitution = {letter.join([token[:i], token[i+1:]]) for letter in string.ascii_lowercase for i in range(1, len(token) + 1)}
transposition = {''.join([token[:i], token[i+1:i+2], token[i:i+1], token[i+2:]]) for i in range(1, len(token)-1)}
return set.union(insertion, deletion, substitution, transposition) | [
"def",
"_ed1",
"(",
"token",
")",
":",
"insertion",
"=",
"{",
"letter",
".",
"join",
"(",
"[",
"token",
"[",
":",
"i",
"]",
",",
"token",
"[",
"i",
":",
"]",
"]",
")",
"for",
"letter",
"in",
"string",
".",
"ascii_lowercase",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"token",
")",
"+",
"1",
")",
"}",
"deletion",
"=",
"{",
"''",
".",
"join",
"(",
"[",
"token",
"[",
":",
"i",
"]",
",",
"token",
"[",
"i",
"+",
"1",
":",
"]",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"token",
")",
"+",
"1",
")",
"}",
"substitution",
"=",
"{",
"letter",
".",
"join",
"(",
"[",
"token",
"[",
":",
"i",
"]",
",",
"token",
"[",
"i",
"+",
"1",
":",
"]",
"]",
")",
"for",
"letter",
"in",
"string",
".",
"ascii_lowercase",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"token",
")",
"+",
"1",
")",
"}",
"transposition",
"=",
"{",
"''",
".",
"join",
"(",
"[",
"token",
"[",
":",
"i",
"]",
",",
"token",
"[",
"i",
"+",
"1",
":",
"i",
"+",
"2",
"]",
",",
"token",
"[",
"i",
":",
"i",
"+",
"1",
"]",
",",
"token",
"[",
"i",
"+",
"2",
":",
"]",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"token",
")",
"-",
"1",
")",
"}",
"return",
"set",
".",
"union",
"(",
"insertion",
",",
"deletion",
",",
"substitution",
",",
"transposition",
")"
] | Return tokens the edit distance of which is one from the given token | [
"Return",
"tokens",
"the",
"edit",
"distance",
"of",
"which",
"is",
"one",
"from",
"the",
"given",
"token"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L216-L224 |
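One detail worth noticing in _ed1: every range starts at 1, so the first letter is never inserted before, deleted, substituted, or transposed; the corrector effectively trusts the user's first keystroke. A quick check:

    from cateye.cateye import _ed1

    cands = _ed1('glacoma')
    assert 'glaucoma' in cands    # insertion restores the missing 'u'
    assert 'lacoma' not in cands  # first-letter deletion is never generated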
251,795 | jeroyang/cateye | cateye/cateye.py | _correct | def _correct(token, term_freq):
"""
Correct a single token according to the term_freq
"""
if token.lower() in term_freq:
return token
e1 = [t for t in _ed1(token) if t in term_freq]
if len(e1) > 0:
e1.sort(key=term_freq.get)
return e1[0]
e2 = [t for t in _ed2(token) if t in term_freq]
if len(e2) > 0:
e2.sort(key=term_freq.get)
return e2[0]
return token | python | def _correct(token, term_freq):
"""
Correct a single token according to the term_freq
"""
if token.lower() in term_freq:
return token
e1 = [t for t in _ed1(token) if t in term_freq]
if len(e1) > 0:
e1.sort(key=term_freq.get)
return e1[0]
e2 = [t for t in _ed2(token) if t in term_freq]
if len(e2) > 0:
e2.sort(key=term_freq.get)
return e2[0]
return token | [
"def",
"_correct",
"(",
"token",
",",
"term_freq",
")",
":",
"if",
"token",
".",
"lower",
"(",
")",
"in",
"term_freq",
":",
"return",
"token",
"e1",
"=",
"[",
"t",
"for",
"t",
"in",
"_ed1",
"(",
"token",
")",
"if",
"t",
"in",
"term_freq",
"]",
"if",
"len",
"(",
"e1",
")",
">",
"0",
":",
"e1",
".",
"sort",
"(",
"key",
"=",
"term_freq",
".",
"get",
")",
"return",
"e1",
"[",
"0",
"]",
"e2",
"=",
"[",
"t",
"for",
"t",
"in",
"_ed2",
"(",
"token",
")",
"if",
"t",
"in",
"term_freq",
"]",
"if",
"len",
"(",
"e2",
")",
">",
"0",
":",
"e2",
".",
"sort",
"(",
"key",
"=",
"term_freq",
".",
"get",
")",
"return",
"e2",
"[",
"0",
"]",
"return",
"token"
] | Correct a single token according to the term_freq | [
"Correct",
"a",
"single",
"token",
"according",
"to",
"the",
"term_freq"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L232-L246 |
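A minimal run of _correct. Note that e1.sort(key=term_freq.get) sorts ascending, so when several candidates exist the least frequent one wins; a descending sort (reverse=True) was probably intended. With a single candidate the order is moot:

    from cateye.cateye import _correct

    term_freq = {'glaucoma': 100, 'cataract': 80}
    assert _correct('glacoma', term_freq) == 'glaucoma'  # one ed-1 candidate
    assert _correct('xyzzy', term_freq) == 'xyzzy'       # nothing within ed-2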
251,796 | jeroyang/cateye | cateye/cateye.py | correct | def correct(tokens, term_freq):
"""
Correct a list of tokens, according to the term_freq
"""
log = []
output = []
for token in tokens:
corrected = _correct(token, term_freq)
if corrected != token:
log.append((token, corrected))
output.append(corrected)
return output, log | python | def correct(tokens, term_freq):
"""
Correct a list of tokens, according to the term_freq
"""
log = []
output = []
for token in tokens:
corrected = _correct(token, term_freq)
if corrected != token:
log.append((token, corrected))
output.append(corrected)
return output, log | [
"def",
"correct",
"(",
"tokens",
",",
"term_freq",
")",
":",
"log",
"=",
"[",
"]",
"output",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"corrected",
"=",
"_correct",
"(",
"token",
",",
"term_freq",
")",
"if",
"corrected",
"!=",
"token",
":",
"log",
".",
"append",
"(",
"(",
"token",
",",
"corrected",
")",
")",
"output",
".",
"append",
"(",
"corrected",
")",
"return",
"output",
",",
"log"
] | Correct a list of tokens, according to the term_freq | [
"Correct",
"a",
"list",
"of",
"tokens",
"according",
"to",
"the",
"term_freq"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L248-L259 |
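correct applies _correct token by token and records every change as an (original, corrected) pair:

    from cateye.cateye import correct

    term_freq = {'glaucoma': 10, 'screening': 8}
    tokens, log = correct(['galucoma', 'screening'], term_freq)
    assert tokens == ['glaucoma', 'screening']
    assert log == [('galucoma', 'glaucoma')]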
251,797 | jeroyang/cateye | cateye/cateye.py | search | def search(index, query, snippet_folder=SNIPPET_FOLDER, term_freq=term_freq):
"""
The highest level of search function
"""
fallback_log = []
code_list = []
tokens = tokenize(query)
tokens, abbr_log = abbr_expand(tokens)
tokens, correct_log = correct(tokens, term_freq)
tokens = lemmatize(tokens)
tokens = filterout(tokens)
while len(tokens) > 0: # Fallback mechanism
code_list = fetch(index, tokens)
if len(code_list) > 0:
break
tokens.sort(key=lambda tk:len(index.get(tk, [])))
remove = tokens.pop()
fallback_log.append(remove)
snippets = get_snippets(code_list, snippet_folder)
hints, hint_scores = get_hints(code_list, current_tokens=tokens)
response = list(zip(code_list, snippets))
response.sort(key=result_sort_key, reverse=True)
# Count search_frequency
if len(response) <= MAX_RESULT: # the response can be shown in one page
search_freq.update(code_list)
with open(SEARCH_FREQ_JSON, 'w') as f:
json.dump(search_freq, f, indent=2)
return response, tokens, hints, hint_scores, \
abbr_log, correct_log, fallback_log | python | def search(index, query, snippet_folder=SNIPPET_FOLDER, term_freq=term_freq):
"""
The highest level of search function
"""
fallback_log = []
code_list = []
tokens = tokenize(query)
tokens, abbr_log = abbr_expand(tokens)
tokens, correct_log = correct(tokens, term_freq)
tokens = lemmatize(tokens)
tokens = filterout(tokens)
while len(tokens) > 0: # Fallback mechanism
code_list = fetch(index, tokens)
if len(code_list) > 0:
break
tokens.sort(key=lambda tk:len(index.get(tk, [])))
remove = tokens.pop()
fallback_log.append(remove)
snippets = get_snippets(code_list, snippet_folder)
hints, hint_scores = get_hints(code_list, current_tokens=tokens)
response = list(zip(code_list, snippets))
response.sort(key=result_sort_key, reverse=True)
# Count search_frequency
if len(response) <= MAX_RESULT: # the response can be shown in one page
search_freq.update(code_list)
with open(SEARCH_FREQ_JSON, 'w') as f:
json.dump(search_freq, f, indent=2)
return response, tokens, hints, hint_scores, \
abbr_log, correct_log, fallback_log | [
"def",
"search",
"(",
"index",
",",
"query",
",",
"snippet_folder",
"=",
"SNIPPET_FOLDER",
",",
"term_freq",
"=",
"term_freq",
")",
":",
"fallback_log",
"=",
"[",
"]",
"code_list",
"=",
"[",
"]",
"tokens",
"=",
"tokenize",
"(",
"query",
")",
"tokens",
",",
"abbr_log",
"=",
"abbr_expand",
"(",
"tokens",
")",
"tokens",
",",
"correct_log",
"=",
"correct",
"(",
"tokens",
",",
"term_freq",
")",
"tokens",
"=",
"lemmatize",
"(",
"tokens",
")",
"tokens",
"=",
"filterout",
"(",
"tokens",
")",
"while",
"len",
"(",
"tokens",
")",
">",
"0",
":",
"# Fallback mechanism",
"code_list",
"=",
"fetch",
"(",
"index",
",",
"tokens",
")",
"if",
"len",
"(",
"code_list",
")",
">",
"0",
":",
"break",
"tokens",
".",
"sort",
"(",
"key",
"=",
"lambda",
"tk",
":",
"len",
"(",
"index",
".",
"get",
"(",
"tk",
",",
"[",
"]",
")",
")",
")",
"remove",
"=",
"tokens",
".",
"pop",
"(",
")",
"fallback_log",
".",
"append",
"(",
"remove",
")",
"snippets",
"=",
"get_snippets",
"(",
"code_list",
",",
"snippet_folder",
")",
"hints",
",",
"hint_scores",
"=",
"get_hints",
"(",
"code_list",
",",
"current_tokens",
"=",
"tokens",
")",
"response",
"=",
"list",
"(",
"zip",
"(",
"code_list",
",",
"snippets",
")",
")",
"response",
".",
"sort",
"(",
"key",
"=",
"result_sort_key",
",",
"reverse",
"=",
"True",
")",
"# Count search_frequency",
"if",
"len",
"(",
"response",
")",
"<=",
"MAX_RESULT",
":",
"# the respone can be shown in one page",
"search_freq",
".",
"update",
"(",
"code_list",
")",
"with",
"open",
"(",
"SEARCH_FREQ_JSON",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"search_freq",
",",
"f",
",",
"indent",
"=",
"2",
")",
"return",
"response",
",",
"tokens",
",",
"hints",
",",
"hint_scores",
",",
"abbr_log",
",",
"correct_log",
",",
"fallback_log"
] | The highest level of search function | [
"The",
"highest",
"level",
"of",
"search",
"function"
] | 8f181d6428d113d2928e3eb31703705ce0779eae | https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L278-L308 |
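End to end, search runs tokenize, abbreviation expansion, spelling correction, lemmatization, and stop-token filtering, then intersects the index; whenever the intersection comes back empty, the fallback loop drops the surviving token that matches the most codes and retries. A driving sketch, with index being the module's loaded inverted index:

    (response, tokens, hints, hint_scores,
     abbr_log, correct_log, fallback_log) = search(index, 'glacoma suspect')

    for code, snippet in response[:10]:
        print(code, snippet)
    if correct_log:
        print('corrected:', correct_log)   # e.g. [('glacoma', 'glaucoma')]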
251,798 | xgvargas/smartside | smartside/signal.py | SmartSignal._do_connection | def _do_connection(self, wgt, sig, func):
"""
Make a connection between a GUI widget and a callable.
wgt and sig are strings with widget and signal name
func is a callable for that signal
"""
#new style (we use this)
#self.btn_name.clicked.connect(self.on_btn_name_clicked)
#old style
#self.connect(self.btn_name, SIGNAL('clicked()'), self.on_btn_name_clicked)
if hasattr(self, wgt):
wgtobj = getattr(self, wgt)
if hasattr(wgtobj, sig):
sigobj = getattr(wgtobj, sig)
if isinstance(sigobj, Signal):
sigobj.connect(func)
return 0
return 1 | python | def _do_connection(self, wgt, sig, func):
"""
Make a connection between a GUI widget and a callable.
wgt and sig are strings with widget and signal name
func is a callable for that signal
"""
#new style (we use this)
#self.btn_name.clicked.connect(self.on_btn_name_clicked)
#old style
#self.connect(self.btn_name, SIGNAL('clicked()'), self.on_btn_name_clicked)
if hasattr(self, wgt):
wgtobj = getattr(self, wgt)
if hasattr(wgtobj, sig):
sigobj = getattr(wgtobj, sig)
if isinstance(sigobj, Signal):
sigobj.connect(func)
return 0
return 1 | [
"def",
"_do_connection",
"(",
"self",
",",
"wgt",
",",
"sig",
",",
"func",
")",
":",
"#new style (we use this)",
"#self.btn_name.clicked.connect(self.on_btn_name_clicked)",
"#old style",
"#self.connect(self.btn_name, SIGNAL('clicked()'), self.on_btn_name_clicked)",
"if",
"hasattr",
"(",
"self",
",",
"wgt",
")",
":",
"wgtobj",
"=",
"getattr",
"(",
"self",
",",
"wgt",
")",
"if",
"hasattr",
"(",
"wgtobj",
",",
"sig",
")",
":",
"sigobj",
"=",
"getattr",
"(",
"wgtobj",
",",
"sig",
")",
"if",
"isinstance",
"(",
"sigobj",
",",
"Signal",
")",
":",
"sigobj",
".",
"connect",
"(",
"func",
")",
"return",
"0",
"return",
"1"
] | Make a connection between a GUI widget and a callable.
wgt and sig are strings with widget and signal name
func is a callable for that signal | [
"Make",
"a",
"connection",
"between",
"a",
"GUI",
"widget",
"and",
"a",
"callable",
"."
] | c63acb7d628b161f438e877eca12d550647de34d | https://github.com/xgvargas/smartside/blob/c63acb7d628b161f438e877eca12d550647de34d/smartside/signal.py#L17-L36 |
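_do_connection is defensive: it returns 0 only when the named widget attribute exists, exposes the named attribute, and that attribute really is a Signal; anything else returns 1. A sketch from inside a SmartSignal subclass, assuming a btn_save widget and a matching handler method:

    err = self._do_connection('btn_save', 'clicked', self.on_btn_save_clicked)
    if err:
        print('could not connect btn_save.clicked')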
251,799 | xgvargas/smartside | smartside/signal.py | SmartSignal._process_list | def _process_list(self, l):
"""
Processes a list of widget names.
If any name is between `` then it is supposed to be a regex.
"""
if hasattr(self, l):
t = getattr(self, l)
def proc(inp):
w = inp.strip()
if w.startswith('`'):
r = re.compile(w[1:-1])
return [u for u in [m.group() for m in [r.match(x) for x in dir(self)] if m] if isinstance(getattr(self, u), QObject)]
else:
return [w]
return list(set([y for x in map(proc, t.split(',')) for y in x]))
return [] | python | def _process_list(self, l):
"""
Processes a list of widget names.
If any name is between `` then it is supposed to be a regex.
"""
if hasattr(self, l):
t = getattr(self, l)
def proc(inp):
w = inp.strip()
if w.startswith('`'):
r = re.compile(w[1:-1])
return [u for u in [m.group() for m in [r.match(x) for x in dir(self)] if m] if isinstance(getattr(self, u), QObject)]
else:
return [w]
return list(set([y for x in map(proc, t.split(',')) for y in x]))
return [] | [
"def",
"_process_list",
"(",
"self",
",",
"l",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"l",
")",
":",
"t",
"=",
"getattr",
"(",
"self",
",",
"l",
")",
"def",
"proc",
"(",
"inp",
")",
":",
"w",
"=",
"inp",
".",
"strip",
"(",
")",
"if",
"w",
".",
"startswith",
"(",
"'`'",
")",
":",
"r",
"=",
"re",
".",
"compile",
"(",
"w",
"[",
"1",
":",
"-",
"1",
"]",
")",
"return",
"[",
"u",
"for",
"u",
"in",
"[",
"m",
".",
"group",
"(",
")",
"for",
"m",
"in",
"[",
"r",
".",
"match",
"(",
"x",
")",
"for",
"x",
"in",
"dir",
"(",
"self",
")",
"]",
"if",
"m",
"]",
"if",
"isinstance",
"(",
"getattr",
"(",
"self",
",",
"u",
")",
",",
"QObject",
")",
"]",
"else",
":",
"return",
"[",
"w",
"]",
"return",
"list",
"(",
"set",
"(",
"[",
"y",
"for",
"x",
"in",
"map",
"(",
"proc",
",",
"t",
".",
"split",
"(",
"','",
")",
")",
"for",
"y",
"in",
"x",
"]",
")",
")",
"return",
"[",
"]"
] | Processes a list of widget names.
If any name is between `` then it is supposed to be a regex. | [
"Processes",
"a",
"list",
"of",
"widget",
"names",
"."
] | c63acb7d628b161f438e877eca12d550647de34d | https://github.com/xgvargas/smartside/blob/c63acb7d628b161f438e877eca12d550647de34d/smartside/signal.py#L38-L58 |
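A sketch of the list syntax _process_list parses; the attribute name _mylist and the widget names are hypothetical. Plain entries pass through unchanged, while a backtick-quoted entry is treated as a regex matched against dir(self) and kept only when the matching attribute is a QObject. The result is de-duplicated through a set, so ordering is not guaranteed:

    # assuming btn_ok, btn_cancel and btn_help are QObject attributes of self
    self._mylist = 'btn_ok, `btn_(cancel|help)`'
    names = self._process_list('_mylist')
    # -> ['btn_ok', 'btn_cancel', 'btn_help'] in some order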