max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 string classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5) |
---|---|---|---|---|---|---|---|---|---|---|
pyntcloud/geometry/__init__.py | BrianPugh/pyntcloud | 5 | 6632351 |
"""
HAKUNA MATATA
"""
from .models.plane import Plane
from .models.sphere import Sphere
__all__ = ["Plane", "Sphere"]  # __all__ must list importable names, not dotted paths
|
"""
HAKUNA MATATA
"""
from .models.plane import Plane
from .models.sphere import Sphere
__all__ = ["Plane", "Sphere"]  # __all__ must list importable names, not dotted paths
| ru | 0.14232 | HAKUNA MATATA | 1.248899 | 1 |
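A brief, hypothetical usage sketch of the package initializer above (assuming `pyntcloud` is installed with the layout shown in this row): explicit imports always work, while a wildcard import only exposes the names listed in `__all__`, which is why `__all__` should hold plain class names.

```python
# Hypothetical sketch, not part of the dataset row: assumes pyntcloud is installed
# and laid out as in pyntcloud/geometry/__init__.py above.
from pyntcloud.geometry import Plane, Sphere  # explicit imports of the re-exported models

# A wildcard import (`from pyntcloud.geometry import *`) would only expose the
# names listed in __all__.
print(Plane.__name__, Sphere.__name__)
```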
var/spack/repos/builtin/packages/r-makecdfenv/package.py | HaochengLIU/spack | 2 | 6632352 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMakecdfenv(RPackage):
"""This package has two functions. One reads a Affymetrix
chip description file (CDF) and creates a hash table environment
containing the location/probe set membership mapping.
The other creates a package that automatically loads
that environment."""
homepage = "https://www.bioconductor.org/packages/makecdfenv/"
git = "https://git.bioconductor.org/packages/makecdfenv.git"
version('1.52.0', commit='<PASSWORD>')
depends_on('r@3.4.0:3.4.9', when='@1.52.0')
depends_on('r-affyio', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
| # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMakecdfenv(RPackage):
"""This package has two functions. One reads a Affymetrix
chip description file (CDF) and creates a hash table environment
containing the location/probe set membership mapping.
The other creates a package that automatically loads
that environment."""
homepage = "https://www.bioconductor.org/packages/makecdfenv/"
git = "https://git.bioconductor.org/packages/makecdfenv.git"
version('1.52.0', commit='<PASSWORD>')
depends_on('r@3.4.0:3.4.9', when='@1.52.0')
depends_on('r-affyio', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
| en | 0.783656 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) This package has two functions. One reads a Affymetrix chip description file (CDF) and creates a hash table environment containing the location/probe set membership mapping. The other creates a package that automatically loads that environment. | 1.676272 | 2 |
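The `depends_on` lines above encode conditional version constraints: when this package is at 1.52.0, R must fall in the inclusive range 3.4.0 to 3.4.9. The sketch below is a simplified, hypothetical illustration of that range check in plain Python; it is not Spack's implementation.

```python
# Simplified illustration of an inclusive version-range constraint such as
# 'r@3.4.0:3.4.9'. This is a hypothetical sketch, not Spack's own logic.
def in_range(version: str, low: str, high: str) -> bool:
    parse = lambda v: tuple(int(part) for part in v.split("."))
    return parse(low) <= parse(version) <= parse(high)

assert in_range("3.4.3", "3.4.0", "3.4.9")
assert not in_range("3.5.0", "3.4.0", "3.4.9")
```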
datasets/dealconll.py | anonymous-summa/MLT-ABSum | 0 | 6632353 | # _*_coding: utf-8_*_
import json
import pyconll
total_pos = []
total_relation = []
for file in ["train","valid","test"]:
data = pyconll.load_from_file(file + ".conll")
article = []
for sents in data:
text, head, pos, relations = [], [], [], []
for token in sents:
text.append(token._form.lower())
head.append(int(token.head))
pos.append(token.xpos)
relations.append(token.deprel)
total_pos.append(token.xpos)
total_relation.append(token.deprel)
article.append({
"text": [text],
"pos": [pos],
"head": [head],
"relation":[relations]
}
)
with open(file + "-conll.json", "w+", encoding="utf-8") as f:
for text in article:
json.dump(text, f, ensure_ascii=False)
f.write("\n")
total_pos = list(set(total_pos))
total_relation = list(set(total_relation))
posid = {k:i for i,k in enumerate(total_pos)}
relation2id = {k:i for i,k in enumerate(total_relation)}
print(posid)
print(relation2id)
| # _*_coding: utf-8_*_
import json
import pyconll
total_pos = []
total_relation = []
for file in ["train","valid","test"]:
data = pyconll.load_from_file(file + ".conll")
article = []
for sents in data:
text, head, pos, relations = [], [], [], []
for token in sents:
text.append(token._form.lower())
head.append(int(token.head))
pos.append(token.xpos)
relations.append(token.deprel)
total_pos.append(token.xpos)
total_relation.append(token.deprel)
article.append({
"text": [text],
"pos": [pos],
"head": [head],
"relation":[relations]
}
)
with open(file + "-conll.json", "w+", encoding="utf-8") as f:
for text in article:
json.dump(text, f, ensure_ascii=False)
f.write("\n")
total_pos = list(set(total_pos))
total_relation = list(set(total_relation))
posid = {k:i for i,k in enumerate(total_pos)}
relation2id = {k:i for i,k in enumerate(total_relation)}
print(posid)
print(relation2id)
| en | 0.740028 | # _*_coding: utf-8_*_ | 2.530401 | 3 |
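A hypothetical follow-up to the script above: once `posid` and `relation2id` have been built, a parsed sentence's POS tags and dependency relations can be mapped to integer IDs for a downstream model. The tag and relation names below are made up for illustration.

```python
# Hypothetical continuation: map tags/relations to IDs using the dictionaries
# built by the script above. The example values are invented.
posid = {"DT": 0, "NN": 1, "VB": 2}
relation2id = {"det": 0, "nsubj": 1, "root": 2}

pos_tags = ["DT", "NN", "VB"]
relations = ["det", "nsubj", "root"]

pos_ids = [posid[tag] for tag in pos_tags]         # [0, 1, 2]
rel_ids = [relation2id[rel] for rel in relations]  # [0, 1, 2]
print(pos_ids, rel_ids)
```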
challenges/linked_list/add_two_numbers_list.py | lukasmartinelli/sharpen | 13 | 6632354 | def zip_longest_linked_lists(a, b):
default = ListNode(0)
while a or b:
if a and b:
yield a, b
a = a.next
b = b.next
elif a:
yield a, default
a = a.next
elif b:
yield default, b
b = b.next
def add_numbers(term_a, term_b):
results = AdditionLinkedList()
for node_a, node_b in zip_longest_linked_lists(term_a, term_b):
results.add(node_a, node_b)
results.add_carryover()
return results.head
class AdditionLinkedList():
def __init__(self):
self.head = None
self.tail = self.head
self.carryover = 0
def add(self, term_a, term_b):
result = term_a.val + term_b.val + self.carryover
self.carryover = result // 10  # integer division so the carry stays an int in Python 3
digit = result % 10
new_node = ListNode(digit)
self.append(new_node)
def add_carryover(self):
if self.carryover > 0:
self.append(ListNode(self.carryover))
self.carryover = 0
def append(self, new_node):
if self.tail:
self.tail.next = new_node
self.tail = self.tail.next
else:
self.head = new_node
self.tail = self.head
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __eq__(self, other):
return self.val == other.val and self.next == other.next
def __repr__(self):
return '<Node {} {}>'.format(self.val, self.next)
class LinkedList(ListNode):
def __init__(self, arr):
nodes = [ListNode(v) for v in arr]
for i in range(1, len(nodes)):
nodes[i-1].next = nodes[i]
head = nodes[0]
self.val = head.val
self.next = head.next
def test_simple_addition():
a = LinkedList([2, 4, 3])
b = LinkedList([3, 5, 2])
assert add_numbers(a, b) == LinkedList([5, 9, 5])
def test_addition_carryover():
a = LinkedList([2, 4, 3])
b = LinkedList([5, 6, 4])
assert add_numbers(a, b) == LinkedList([7, 0, 8])
def test_addition_multi_carryover():
a = LinkedList([9, 3, 5])
b = LinkedList([2, 8, 9])
assert add_numbers(a, b) == LinkedList([1, 2, 5, 1])
def test_add_unequal_numbers():
a = LinkedList([9, 9, 1])
b = LinkedList([1])
assert add_numbers(a, b) == LinkedList([0, 0, 2])
c = LinkedList([1, 5])
d = LinkedList([5, 5, 9, 9])
assert add_numbers(c, d) == LinkedList([6, 0, 0, 0, 1])
| def zip_longest_linked_lists(a, b):
default = ListNode(0)
while a or b:
if a and b:
yield a, b
a = a.next
b = b.next
elif a:
yield a, default
a = a.next
elif b:
yield default, b
b = b.next
def add_numbers(term_a, term_b):
results = AdditionLinkedList()
for node_a, node_b in zip_longest_linked_lists(term_a, term_b):
results.add(node_a, node_b)
results.add_carryover()
return results.head
class AdditionLinkedList():
def __init__(self):
self.head = None
self.tail = self.head
self.carryover = 0
def add(self, term_a, term_b):
result = term_a.val + term_b.val + self.carryover
self.carryover = result // 10  # integer division so the carry stays an int in Python 3
digit = result % 10
new_node = ListNode(digit)
self.append(new_node)
def add_carryover(self):
if self.carryover > 0:
self.append(ListNode(self.carryover))
self.carryover = 0
def append(self, new_node):
if self.tail:
self.tail.next = new_node
self.tail = self.tail.next
else:
self.head = new_node
self.tail = self.head
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __eq__(self, other):
return self.val == other.val and self.next == other.next
def __repr__(self):
return '<Node {} {}>'.format(self.val, self.next)
class LinkedList(ListNode):
def __init__(self, arr):
nodes = [ListNode(v) for v in arr]
for i in range(1, len(nodes)):
nodes[i-1].next = nodes[i]
head = nodes[0]
self.val = head.val
self.next = head.next
def test_simple_addition():
a = LinkedList([2, 4, 3])
b = LinkedList([3, 5, 2])
assert add_numbers(a, b) == LinkedList([5, 9, 5])
def test_addition_carryover():
a = LinkedList([2, 4, 3])
b = LinkedList([5, 6, 4])
assert add_numbers(a, b) == LinkedList([7, 0, 8])
def test_addition_multi_carryover():
a = LinkedList([9, 3, 5])
b = LinkedList([2, 8, 9])
assert add_numbers(a, b) == LinkedList([1, 2, 5, 1])
def test_add_unequal_numbers():
a = LinkedList([9, 9, 1])
b = LinkedList([1])
assert add_numbers(a, b) == LinkedList([0, 0, 2])
c = LinkedList([1, 5])
d = LinkedList([5, 5, 9, 9])
assert add_numbers(c, d) == LinkedList([6, 0, 0, 0, 1])
| none | 1 | 3.565694 | 4 |
|
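In the solution above the digits are stored least-significant first, so 342 + 253 arrives as [2, 4, 3] and [3, 5, 2] and comes back as [5, 9, 5]. The helper below is a hypothetical addition (not part of the original file) that converts a result chain back into a plain integer for inspection.

```python
# Hypothetical helper, not in the original file: collapse a least-significant-first
# ListNode chain back into an integer.
def to_int(node):
    value, place = 0, 1
    while node:
        value += node.val * place
        place *= 10
        node = node.next
    return value

# Usage sketch, reusing the classes defined above:
# result = add_numbers(LinkedList([2, 4, 3]), LinkedList([3, 5, 2]))
# to_int(result)  # 595, i.e. 342 + 253
```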
rdflib/util.py | donbowman/rdflib | 0 | 6632355 | """
Some utility functions.
Miscellaneous utilities
* list2set
* first
* uniq
* more_than
Term characterisation and generation
* to_term
* from_n3
Date/time utilities
* date_time
* parse_date_time
Statement and component type checkers
* check_context
* check_subject
* check_predicate
* check_object
* check_statement
* check_pattern
"""
from calendar import timegm
from time import altzone
# from time import daylight
from time import gmtime
from time import localtime
from time import time
from time import timezone
from os.path import splitext
from rdflib.exceptions import ContextTypeError
from rdflib.exceptions import ObjectTypeError
from rdflib.exceptions import PredicateTypeError
from rdflib.exceptions import SubjectTypeError
import rdflib.graph # avoid circular dependency
from rdflib.namespace import Namespace
from rdflib.namespace import NamespaceManager
from rdflib.term import BNode
from rdflib.term import Literal
from rdflib.term import URIRef
from rdflib.compat import sign
__all__ = [
"list2set",
"first",
"uniq",
"more_than",
"to_term",
"from_n3",
"date_time",
"parse_date_time",
"check_context",
"check_subject",
"check_predicate",
"check_object",
"check_statement",
"check_pattern",
"guess_format",
"find_roots",
"get_tree",
]
def list2set(seq):
"""
Return a new list without duplicates.
Preserves the order, unlike set(seq)
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def first(seq):
"""
return the first element in a python sequence
for graphs, use graph.value instead
"""
for result in seq:
return result
return None
def uniq(sequence, strip=0):
"""removes duplicate strings from the sequence."""
if strip:
return set(s.strip() for s in sequence)
else:
return set(sequence)
def more_than(sequence, number):
"Returns 1 if sequence has more items than number and 0 if not."
i = 0
for item in sequence:
i += 1
if i > number:
return 1
return 0
def to_term(s, default=None):
"""
Creates and returns an Identifier of type corresponding
to the pattern of the given positional argument string ``s``:
'' returns the ``default`` keyword argument value or ``None``
'<s>' returns ``URIRef(s)`` (i.e. without angle brackets)
'"s"' returns ``Literal(s)`` (i.e. without doublequotes)
'_s' returns ``BNode(s)`` (i.e. without leading underscore)
"""
if not s:
return default
elif s.startswith("<") and s.endswith(">"):
return URIRef(s[1:-1])
elif s.startswith('"') and s.endswith('"'):
return Literal(s[1:-1])
elif s.startswith("_"):
return BNode(s)
else:
msg = "Unrecognised term syntax: '%s'" % s
raise Exception(msg)
def from_n3(s: str, default=None, backend=None, nsm=None):
r'''
Creates the Identifier corresponding to the given n3 string.
>>> from_n3('<http://ex.com/foo>') == URIRef('http://ex.com/foo')
True
>>> from_n3('"foo"@de') == Literal('foo', lang='de')
True
>>> from_n3('"""multi\nline\nstring"""@en') == Literal(
... 'multi\nline\nstring', lang='en')
True
>>> from_n3('42') == Literal(42)
True
>>> from_n3(Literal(42).n3()) == Literal(42)
True
>>> from_n3('"42"^^xsd:integer') == Literal(42)
True
>>> from rdflib import RDFS
>>> from_n3('rdfs:label') == RDFS['label']
True
>>> nsm = NamespaceManager(rdflib.graph.Graph())
>>> nsm.bind('dbpedia', 'http://dbpedia.org/resource/')
>>> berlin = URIRef('http://dbpedia.org/resource/Berlin')
>>> from_n3('dbpedia:Berlin', nsm=nsm) == berlin
True
'''
if not s:
return default
if s.startswith("<"):
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
return URIRef(s[1:-1].encode("raw-unicode-escape").decode("unicode-escape"))
elif s.startswith('"'):
if s.startswith('"""'):
quotes = '"""'
else:
quotes = '"'
value, rest = s.rsplit(quotes, 1)
value = value[len(quotes) :] # strip leading quotes
datatype = None
language = None
# as a given datatype overrules lang-tag check for it first
dtoffset = rest.rfind("^^")
if dtoffset >= 0:
# found a datatype
# datatype has to come after lang-tag so ignore everything before
# see: http://www.w3.org/TR/2011/WD-turtle-20110809/
# #prod-turtle2-RDFLiteral
datatype = from_n3(rest[dtoffset + 2 :], default, backend, nsm)
else:
if rest.startswith("@"):
language = rest[1:] # strip leading at sign
value = value.replace(r"\"", '"')
# unicode-escape interprets \xhh as an escape sequence,
# but n3 does not define it as such.
value = value.replace(r"\x", r"\\x")
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
value = value.encode("raw-unicode-escape").decode("unicode-escape")
return Literal(value, language, datatype)
elif s == "true" or s == "false":
return Literal(s == "true")
elif s.isdigit():
return Literal(int(s))
elif s.startswith("{"):
identifier = from_n3(s[1:-1])
return rdflib.graph.QuotedGraph(backend, identifier)
elif s.startswith("["):
identifier = from_n3(s[1:-1])
return rdflib.graph.Graph(backend, identifier)
elif s.startswith("_:"):
return BNode(s[2:])
elif ":" in s:
if nsm is None:
# instantiate default NamespaceManager and rely on its defaults
nsm = NamespaceManager(rdflib.graph.Graph())
prefix, last_part = s.split(":", 1)
ns = dict(nsm.namespaces())[prefix]
return Namespace(ns)[last_part]
else:
return BNode(s)
def check_context(c):
if not (isinstance(c, URIRef) or isinstance(c, BNode)):
raise ContextTypeError("%s:%s" % (c, type(c)))
def check_subject(s):
"""Test that s is a valid subject identifier."""
if not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
def check_predicate(p):
"""Test that p is a valid predicate identifier."""
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
def check_object(o):
"""Test that o is a valid object identifier."""
if not (isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)):
raise ObjectTypeError(o)
def check_statement(triple):
(s, p, o) = triple
if not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
if not (isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)):
raise ObjectTypeError(o)
def check_pattern(triple):
(s, p, o) = triple
if s and not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
if p and not isinstance(p, URIRef):
raise PredicateTypeError(p)
if o and not (
isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)
):
raise ObjectTypeError(o)
def date_time(t=None, local_time_zone=False):
"""http://www.w3.org/TR/NOTE-datetime ex: 1997-07-16T19:20:30Z
>>> date_time(1126482850)
'2005-09-11T23:54:10Z'
@@ this will change depending on where it is run
#>>> date_time(1126482850, local_time_zone=True)
#'2005-09-11T19:54:10-04:00'
>>> date_time(1)
'1970-01-01T00:00:01Z'
>>> date_time(0)
'1970-01-01T00:00:00Z'
"""
if t is None:
t = time()
if local_time_zone:
time_tuple = localtime(t)
if time_tuple[8]:
tz_mins = altzone // 60
else:
tz_mins = timezone // 60
tzd = "-%02d:%02d" % (tz_mins // 60, tz_mins % 60)
else:
time_tuple = gmtime(t)
tzd = "Z"
year, month, day, hh, mm, ss, wd, y, z = time_tuple
s = "%0004d-%02d-%02dT%02d:%02d:%02d%s" % (year, month, day, hh, mm, ss, tzd)
return s
def parse_date_time(val):
"""always returns seconds in UTC
# tests are written like this to make any errors easier to understand
>>> parse_date_time('2005-09-11T23:54:10Z') - 1126482850.0
0.0
>>> parse_date_time('2005-09-11T16:54:10-07:00') - 1126482850.0
0.0
>>> parse_date_time('1970-01-01T00:00:01Z') - 1.0
0.0
>>> parse_date_time('1970-01-01T00:00:00Z') - 0.0
0.0
>>> parse_date_time("2005-09-05T10:42:00") - 1125916920.0
0.0
"""
if "T" not in val:
val += "T00:00:00Z"
ymd, time = val.split("T")
hms, tz_str = time[0:8], time[8:]
if not tz_str or tz_str == "Z":
time = time[:-1]
tz_offset = 0
else:
signed_hrs = int(tz_str[:3])
mins = int(tz_str[4:6])
secs = (sign(signed_hrs) * mins + signed_hrs * 60) * 60
tz_offset = -secs
year, month, day = ymd.split("-")
hour, minute, second = hms.split(":")
t = timegm(
(int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, 0)
)
t = t + tz_offset
return t
SUFFIX_FORMAT_MAP = {
"xml": "xml",
"rdf": "xml",
"owl": "xml",
"n3": "n3",
"ttl": "turtle",
"nt": "nt",
"trix": "trix",
"xhtml": "rdfa",
"html": "rdfa",
"svg": "rdfa",
"nq": "nquads",
"trig": "trig",
}
def guess_format(fpath, fmap=None):
"""
Guess RDF serialization based on file suffix. Uses
``SUFFIX_FORMAT_MAP`` unless ``fmap`` is provided. Examples:
>>> guess_format('path/to/file.rdf')
'xml'
>>> guess_format('path/to/file.owl')
'xml'
>>> guess_format('path/to/file.ttl')
'turtle'
>>> guess_format('path/to/file.xhtml')
'rdfa'
>>> guess_format('path/to/file.svg')
'rdfa'
>>> guess_format('path/to/file.xhtml', {'xhtml': 'grddl'})
'grddl'
This also works with just the suffixes, with or without leading dot, and
regardless of letter case::
>>> guess_format('.rdf')
'xml'
>>> guess_format('rdf')
'xml'
>>> guess_format('RDF')
'xml'
"""
fmap = fmap or SUFFIX_FORMAT_MAP
return fmap.get(_get_ext(fpath)) or fmap.get(fpath.lower())
def _get_ext(fpath, lower=True):
"""
Gets the file extension from a file(path); stripped of leading '.' and in
lower case. Examples:
>>> _get_ext("path/to/file.txt")
'txt'
>>> _get_ext("OTHER.PDF")
'pdf'
>>> _get_ext("noext")
''
>>> _get_ext(".rdf")
'rdf'
"""
ext = splitext(fpath)[-1]
if ext == "" and fpath.startswith("."):
ext = fpath
if lower:
ext = ext.lower()
if ext.startswith("."):
ext = ext[1:]
return ext
def find_roots(graph, prop, roots=None):
"""
Find the roots in some sort of transitive hierarchy.
find_roots(graph, rdflib.RDFS.subClassOf)
will return a set of all roots of the sub-class hierarchy
Assumes triple of the form (child, prop, parent), i.e. the direction of
RDFS.subClassOf or SKOS.broader
"""
non_roots = set()
if roots is None:
roots = set()
for x, y in graph.subject_objects(prop):
non_roots.add(x)
if x in roots:
roots.remove(x)
if y not in non_roots:
roots.add(y)
return roots
def get_tree(
graph, root, prop, mapper=lambda x: x, sortkey=None, done=None, dir="down"
):
"""
Return a nested list/tuple structure representing the tree
built by the transitive property given, starting from the root given
i.e.
get_tree(graph,
rdflib.URIRef("http://xmlns.com/foaf/0.1/Person"),
rdflib.RDFS.subClassOf)
will return the structure for the subClassTree below person.
dir='down' assumes triple of the form (child, prop, parent),
i.e. the direction of RDFS.subClassOf or SKOS.broader
Any other dir traverses in the other direction
"""
if done is None:
done = set()
if root in done:
return
done.add(root)
tree = []
if dir == "down":
branches = graph.subjects(prop, root)
else:
branches = graph.objects(root, prop)
for branch in branches:
t = get_tree(graph, branch, prop, mapper, sortkey, done, dir)
if t:
tree.append(t)
return (mapper(root), sorted(tree, key=sortkey))
def test():
import doctest
doctest.testmod()
if __name__ == "__main__":
# try to make the tests work outside of the time zone they were written in
# import os, time
# os.environ['TZ'] = 'US/Pacific'
# try:
# time.tzset()
# except AttributeError, e:
# print e
# pass
# tzset missing! see
# http://mail.python.org/pipermail/python-dev/2003-April/034480.html
test() # pragma: no cover
| """
Some utility functions.
Miscellaneous utilities
* list2set
* first
* uniq
* more_than
Term characterisation and generation
* to_term
* from_n3
Date/time utilities
* date_time
* parse_date_time
Statement and component type checkers
* check_context
* check_subject
* check_predicate
* check_object
* check_statement
* check_pattern
"""
from calendar import timegm
from time import altzone
# from time import daylight
from time import gmtime
from time import localtime
from time import time
from time import timezone
from os.path import splitext
from rdflib.exceptions import ContextTypeError
from rdflib.exceptions import ObjectTypeError
from rdflib.exceptions import PredicateTypeError
from rdflib.exceptions import SubjectTypeError
import rdflib.graph # avoid circular dependency
from rdflib.namespace import Namespace
from rdflib.namespace import NamespaceManager
from rdflib.term import BNode
from rdflib.term import Literal
from rdflib.term import URIRef
from rdflib.compat import sign
__all__ = [
"list2set",
"first",
"uniq",
"more_than",
"to_term",
"from_n3",
"date_time",
"parse_date_time",
"check_context",
"check_subject",
"check_predicate",
"check_object",
"check_statement",
"check_pattern",
"guess_format",
"find_roots",
"get_tree",
]
def list2set(seq):
"""
Return a new list without duplicates.
Preserves the order, unlike set(seq)
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def first(seq):
"""
return the first element in a python sequence
for graphs, use graph.value instead
"""
for result in seq:
return result
return None
def uniq(sequence, strip=0):
"""removes duplicate strings from the sequence."""
if strip:
return set(s.strip() for s in sequence)
else:
return set(sequence)
def more_than(sequence, number):
"Returns 1 if sequence has more items than number and 0 if not."
i = 0
for item in sequence:
i += 1
if i > number:
return 1
return 0
def to_term(s, default=None):
"""
Creates and returns an Identifier of type corresponding
to the pattern of the given positional argument string ``s``:
'' returns the ``default`` keyword argument value or ``None``
'<s>' returns ``URIRef(s)`` (i.e. without angle brackets)
'"s"' returns ``Literal(s)`` (i.e. without doublequotes)
'_s' returns ``BNode(s)`` (i.e. without leading underscore)
"""
if not s:
return default
elif s.startswith("<") and s.endswith(">"):
return URIRef(s[1:-1])
elif s.startswith('"') and s.endswith('"'):
return Literal(s[1:-1])
elif s.startswith("_"):
return BNode(s)
else:
msg = "Unrecognised term syntax: '%s'" % s
raise Exception(msg)
def from_n3(s: str, default=None, backend=None, nsm=None):
r'''
Creates the Identifier corresponding to the given n3 string.
>>> from_n3('<http://ex.com/foo>') == URIRef('http://ex.com/foo')
True
>>> from_n3('"foo"@de') == Literal('foo', lang='de')
True
>>> from_n3('"""multi\nline\nstring"""@en') == Literal(
... 'multi\nline\nstring', lang='en')
True
>>> from_n3('42') == Literal(42)
True
>>> from_n3(Literal(42).n3()) == Literal(42)
True
>>> from_n3('"42"^^xsd:integer') == Literal(42)
True
>>> from rdflib import RDFS
>>> from_n3('rdfs:label') == RDFS['label']
True
>>> nsm = NamespaceManager(rdflib.graph.Graph())
>>> nsm.bind('dbpedia', 'http://dbpedia.org/resource/')
>>> berlin = URIRef('http://dbpedia.org/resource/Berlin')
>>> from_n3('dbpedia:Berlin', nsm=nsm) == berlin
True
'''
if not s:
return default
if s.startswith("<"):
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
return URIRef(s[1:-1].encode("raw-unicode-escape").decode("unicode-escape"))
elif s.startswith('"'):
if s.startswith('"""'):
quotes = '"""'
else:
quotes = '"'
value, rest = s.rsplit(quotes, 1)
value = value[len(quotes) :] # strip leading quotes
datatype = None
language = None
# as a given datatype overrules lang-tag check for it first
dtoffset = rest.rfind("^^")
if dtoffset >= 0:
# found a datatype
# datatype has to come after lang-tag so ignore everything before
# see: http://www.w3.org/TR/2011/WD-turtle-20110809/
# #prod-turtle2-RDFLiteral
datatype = from_n3(rest[dtoffset + 2 :], default, backend, nsm)
else:
if rest.startswith("@"):
language = rest[1:] # strip leading at sign
value = value.replace(r"\"", '"')
# unicode-escape interprets \xhh as an escape sequence,
# but n3 does not define it as such.
value = value.replace(r"\x", r"\\x")
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
value = value.encode("raw-unicode-escape").decode("unicode-escape")
return Literal(value, language, datatype)
elif s == "true" or s == "false":
return Literal(s == "true")
elif s.isdigit():
return Literal(int(s))
elif s.startswith("{"):
identifier = from_n3(s[1:-1])
return rdflib.graph.QuotedGraph(backend, identifier)
elif s.startswith("["):
identifier = from_n3(s[1:-1])
return rdflib.graph.Graph(backend, identifier)
elif s.startswith("_:"):
return BNode(s[2:])
elif ":" in s:
if nsm is None:
# instantiate default NamespaceManager and rely on its defaults
nsm = NamespaceManager(rdflib.graph.Graph())
prefix, last_part = s.split(":", 1)
ns = dict(nsm.namespaces())[prefix]
return Namespace(ns)[last_part]
else:
return BNode(s)
def check_context(c):
if not (isinstance(c, URIRef) or isinstance(c, BNode)):
raise ContextTypeError("%s:%s" % (c, type(c)))
def check_subject(s):
"""Test that s is a valid subject identifier."""
if not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
def check_predicate(p):
"""Test that p is a valid predicate identifier."""
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
def check_object(o):
"""Test that o is a valid object identifier."""
if not (isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)):
raise ObjectTypeError(o)
def check_statement(triple):
(s, p, o) = triple
if not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
if not (isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)):
raise ObjectTypeError(o)
def check_pattern(triple):
(s, p, o) = triple
if s and not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
if p and not isinstance(p, URIRef):
raise PredicateTypeError(p)
if o and not (
isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)
):
raise ObjectTypeError(o)
def date_time(t=None, local_time_zone=False):
"""http://www.w3.org/TR/NOTE-datetime ex: 1997-07-16T19:20:30Z
>>> date_time(1126482850)
'2005-09-11T23:54:10Z'
@@ this will change depending on where it is run
#>>> date_time(1126482850, local_time_zone=True)
#'2005-09-11T19:54:10-04:00'
>>> date_time(1)
'1970-01-01T00:00:01Z'
>>> date_time(0)
'1970-01-01T00:00:00Z'
"""
if t is None:
t = time()
if local_time_zone:
time_tuple = localtime(t)
if time_tuple[8]:
tz_mins = altzone // 60
else:
tz_mins = timezone // 60
tzd = "-%02d:%02d" % (tz_mins // 60, tz_mins % 60)
else:
time_tuple = gmtime(t)
tzd = "Z"
year, month, day, hh, mm, ss, wd, y, z = time_tuple
s = "%0004d-%02d-%02dT%02d:%02d:%02d%s" % (year, month, day, hh, mm, ss, tzd)
return s
def parse_date_time(val):
"""always returns seconds in UTC
# tests are written like this to make any errors easier to understand
>>> parse_date_time('2005-09-11T23:54:10Z') - 1126482850.0
0.0
>>> parse_date_time('2005-09-11T16:54:10-07:00') - 1126482850.0
0.0
>>> parse_date_time('1970-01-01T00:00:01Z') - 1.0
0.0
>>> parse_date_time('1970-01-01T00:00:00Z') - 0.0
0.0
>>> parse_date_time("2005-09-05T10:42:00") - 1125916920.0
0.0
"""
if "T" not in val:
val += "T00:00:00Z"
ymd, time = val.split("T")
hms, tz_str = time[0:8], time[8:]
if not tz_str or tz_str == "Z":
time = time[:-1]
tz_offset = 0
else:
signed_hrs = int(tz_str[:3])
mins = int(tz_str[4:6])
secs = (sign(signed_hrs) * mins + signed_hrs * 60) * 60
tz_offset = -secs
year, month, day = ymd.split("-")
hour, minute, second = hms.split(":")
t = timegm(
(int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, 0)
)
t = t + tz_offset
return t
SUFFIX_FORMAT_MAP = {
"xml": "xml",
"rdf": "xml",
"owl": "xml",
"n3": "n3",
"ttl": "turtle",
"nt": "nt",
"trix": "trix",
"xhtml": "rdfa",
"html": "rdfa",
"svg": "rdfa",
"nq": "nquads",
"trig": "trig",
}
def guess_format(fpath, fmap=None):
"""
Guess RDF serialization based on file suffix. Uses
``SUFFIX_FORMAT_MAP`` unless ``fmap`` is provided. Examples:
>>> guess_format('path/to/file.rdf')
'xml'
>>> guess_format('path/to/file.owl')
'xml'
>>> guess_format('path/to/file.ttl')
'turtle'
>>> guess_format('path/to/file.xhtml')
'rdfa'
>>> guess_format('path/to/file.svg')
'rdfa'
>>> guess_format('path/to/file.xhtml', {'xhtml': 'grddl'})
'grddl'
This also works with just the suffixes, with or without leading dot, and
regardless of letter case::
>>> guess_format('.rdf')
'xml'
>>> guess_format('rdf')
'xml'
>>> guess_format('RDF')
'xml'
"""
fmap = fmap or SUFFIX_FORMAT_MAP
return fmap.get(_get_ext(fpath)) or fmap.get(fpath.lower())
def _get_ext(fpath, lower=True):
"""
Gets the file extension from a file(path); stripped of leading '.' and in
lower case. Examples:
>>> _get_ext("path/to/file.txt")
'txt'
>>> _get_ext("OTHER.PDF")
'pdf'
>>> _get_ext("noext")
''
>>> _get_ext(".rdf")
'rdf'
"""
ext = splitext(fpath)[-1]
if ext == "" and fpath.startswith("."):
ext = fpath
if lower:
ext = ext.lower()
if ext.startswith("."):
ext = ext[1:]
return ext
def find_roots(graph, prop, roots=None):
"""
Find the roots in some sort of transitive hierarchy.
find_roots(graph, rdflib.RDFS.subClassOf)
will return a set of all roots of the sub-class hierarchy
Assumes triple of the form (child, prop, parent), i.e. the direction of
RDFS.subClassOf or SKOS.broader
"""
non_roots = set()
if roots is None:
roots = set()
for x, y in graph.subject_objects(prop):
non_roots.add(x)
if x in roots:
roots.remove(x)
if y not in non_roots:
roots.add(y)
return roots
def get_tree(
graph, root, prop, mapper=lambda x: x, sortkey=None, done=None, dir="down"
):
"""
Return a nested list/tuple structure representing the tree
built by the transitive property given, starting from the root given
i.e.
get_tree(graph,
rdflib.URIRef("http://xmlns.com/foaf/0.1/Person"),
rdflib.RDFS.subClassOf)
will return the structure for the subClassTree below person.
dir='down' assumes triple of the form (child, prop, parent),
i.e. the direction of RDFS.subClassOf or SKOS.broader
Any other dir traverses in the other direction
"""
if done is None:
done = set()
if root in done:
return
done.add(root)
tree = []
if dir == "down":
branches = graph.subjects(prop, root)
else:
branches = graph.objects(root, prop)
for branch in branches:
t = get_tree(graph, branch, prop, mapper, sortkey, done, dir)
if t:
tree.append(t)
return (mapper(root), sorted(tree, key=sortkey))
def test():
import doctest
doctest.testmod()
if __name__ == "__main__":
# try to make the tests work outside of the time zone they were written in
# import os, time
# os.environ['TZ'] = 'US/Pacific'
# try:
# time.tzset()
# except AttributeError, e:
# print e
# pass
# tzset missing! see
# http://mail.python.org/pipermail/python-dev/2003-April/034480.html
test() # pragma: no cover
| en | 0.549657 | Some utility functions. Miscellaneous utilities * list2set * first * uniq * more_than Term characterisation and generation * to_term * from_n3 Date/time utilities * date_time * parse_date_time Statement and component type checkers * check_context * check_subject * check_predicate * check_object * check_statement * check_pattern # from time import daylight # avoid circular dependency Return a new list without duplicates. Preserves the order, unlike set(seq) return the first element in a python sequence for graphs, use graph.value instead removes duplicate strings from the sequence. Creates and returns an Identifier of type corresponding to the pattern of the given positional argument string ``s``: '' returns the ``default`` keyword argument value or ``None`` '<s>' returns ``URIRef(s)`` (i.e. without angle brackets) '"s"' returns ``Literal(s)`` (i.e. without doublequotes) '_s' returns ``BNode(s)`` (i.e. without leading underscore) Creates the Identifier corresponding to the given n3 string. >>> from_n3('<http://ex.com/foo>') == URIRef('http://ex.com/foo') True >>> from_n3('"foo"@de') == Literal('foo', lang='de') True >>> from_n3('"""multi\nline\nstring"""@en') == Literal( ... 'multi\nline\nstring', lang='en') True >>> from_n3('42') == Literal(42) True >>> from_n3(Literal(42).n3()) == Literal(42) True >>> from_n3('"42"^^xsd:integer') == Literal(42) True >>> from rdflib import RDFS >>> from_n3('rdfs:label') == RDFS['label'] True >>> nsm = NamespaceManager(rdflib.graph.Graph()) >>> nsm.bind('dbpedia', 'http://dbpedia.org/resource/') >>> berlin = URIRef('http://dbpedia.org/resource/Berlin') >>> from_n3('dbpedia:Berlin', nsm=nsm) == berlin True # Hack: this should correctly handle strings with either native unicode # characters, or \u1234 unicode escapes. '): quotes = ' # strip leading quotes # as a given datatype overrules lang-tag check for it first # found a datatype # datatype has to come after lang-tag so ignore everything before # see: http://www.w3.org/TR/2011/WD-turtle-20110809/ # #prod-turtle2-RDFLiteral # strip leading at sign # unicode-escape interprets \xhh as an escape sequence, # but n3 does not define it as such. # Hack: this should correctly handle strings with either native unicode # characters, or \u1234 unicode escapes. # instantiate default NamespaceManager and rely on its defaults Test that s is a valid subject identifier. Test that p is a valid predicate identifier. Test that o is a valid object identifier. http://www.w3.org/TR/NOTE-datetime ex: 1997-07-16T19:20:30Z >>> date_time(1126482850) '2005-09-11T23:54:10Z' @@ this will change depending on where it is run #>>> date_time(1126482850, local_time_zone=True) #'2005-09-11T19:54:10-04:00' >>> date_time(1) '1970-01-01T00:00:01Z' >>> date_time(0) '1970-01-01T00:00:00Z' always returns seconds in UTC # tests are written like this to make any errors easier to understand >>> parse_date_time('2005-09-11T23:54:10Z') - 1126482850.0 0.0 >>> parse_date_time('2005-09-11T16:54:10-07:00') - 1126482850.0 0.0 >>> parse_date_time('1970-01-01T00:00:01Z') - 1.0 0.0 >>> parse_date_time('1970-01-01T00:00:00Z') - 0.0 0.0 >>> parse_date_time("2005-09-05T10:42:00") - 1125916920.0 0.0 Guess RDF serialization based on file suffix. Uses ``SUFFIX_FORMAT_MAP`` unless ``fmap`` is provided. 
Examples: >>> guess_format('path/to/file.rdf') 'xml' >>> guess_format('path/to/file.owl') 'xml' >>> guess_format('path/to/file.ttl') 'turtle' >>> guess_format('path/to/file.xhtml') 'rdfa' >>> guess_format('path/to/file.svg') 'rdfa' >>> guess_format('path/to/file.xhtml', {'xhtml': 'grddl'}) 'grddl' This also works with just the suffixes, with or without leading dot, and regardless of letter case:: >>> guess_format('.rdf') 'xml' >>> guess_format('rdf') 'xml' >>> guess_format('RDF') 'xml' Gets the file extension from a file(path); stripped of leading '.' and in lower case. Examples: >>> _get_ext("path/to/file.txt") 'txt' >>> _get_ext("OTHER.PDF") 'pdf' >>> _get_ext("noext") '' >>> _get_ext(".rdf") 'rdf' Find the roots in some sort of transitive hierarchy. find_roots(graph, rdflib.RDFS.subClassOf) will return a set of all roots of the sub-class hierarchy Assumes triple of the form (child, prop, parent), i.e. the direction of RDFS.subClassOf or SKOS.broader Return a nested list/tuple structure representing the tree built by the transitive property given, starting from the root given i.e. get_tree(graph, rdflib.URIRef("http://xmlns.com/foaf/0.1/Person"), rdflib.RDFS.subClassOf) will return the structure for the subClassTree below person. dir='down' assumes triple of the form (child, prop, parent), i.e. the direction of RDFS.subClassOf or SKOS.broader Any other dir traverses in the other direction # try to make the tests work outside of the time zone they were written in # import os, time # os.environ['TZ'] = 'US/Pacific' # try: # time.tzset() # except AttributeError, e: # print e # pass # tzset missing! see # http://mail.python.org/pipermail/python-dev/2003-April/034480.html # pragma: no cover | 2.785078 | 3 |
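A minimal sketch of the hierarchy helpers defined above (`find_roots` and `get_tree`), assuming a recent rdflib is installed; the class URIs in the toy hierarchy are made up.

```python
# Minimal sketch of find_roots / get_tree from rdflib.util, assuming rdflib is
# installed. The URIs below are invented for illustration.
import rdflib
from rdflib import RDFS, URIRef
from rdflib.util import find_roots, get_tree

g = rdflib.Graph()
animal, dog, cat = URIRef("ex:Animal"), URIRef("ex:Dog"), URIRef("ex:Cat")
g.add((dog, RDFS.subClassOf, animal))  # triples are (child, prop, parent)
g.add((cat, RDFS.subClassOf, animal))

print(find_roots(g, RDFS.subClassOf))        # {URIRef('ex:Animal')}
print(get_tree(g, animal, RDFS.subClassOf))  # roughly (ex:Animal, [(ex:Cat, []), (ex:Dog, [])])
```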
parkings/tests/api/conftest.py | dvainio/parkkihubi | 0 | 6632356 | <filename>parkings/tests/api/conftest.py
import pytest
from rest_framework.test import APIClient
from .utils import token_authenticate
@pytest.fixture(autouse=True)
def no_more_mark_django_db(transactional_db):
pass
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def monitoring_api_client(user_factory):
api_client = APIClient()
user = user_factory()
user.groups.get_or_create(name='monitoring')
api_client.force_authenticate(user)
return api_client
@pytest.fixture
def user_api_client(user_factory):
api_client = APIClient()
user = user_factory() # don't use the same user as operator_api_client
token_authenticate(api_client, user)
return api_client
@pytest.fixture
def staff_api_client(staff_user):
api_client = APIClient()
token_authenticate(api_client, staff_user)
return api_client
@pytest.fixture
def operator_api_client(operator):
api_client = APIClient()
token_authenticate(api_client, operator.user)
api_client.operator = operator
return api_client
@pytest.fixture
def operator_2(operator, operator_factory):
return operator_factory()
@pytest.fixture
def operator_2_api_client(operator_2):
api_client = APIClient()
token_authenticate(api_client, operator_2.user)
api_client.operator = operator_2
return api_client
| <filename>parkings/tests/api/conftest.py
import pytest
from rest_framework.test import APIClient
from .utils import token_authenticate
@pytest.fixture(autouse=True)
def no_more_mark_django_db(transactional_db):
pass
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def monitoring_api_client(user_factory):
api_client = APIClient()
user = user_factory()
user.groups.get_or_create(name='monitoring')
api_client.force_authenticate(user)
return api_client
@pytest.fixture
def user_api_client(user_factory):
api_client = APIClient()
user = user_factory() # don't use the same user as operator_api_client
token_authenticate(api_client, user)
return api_client
@pytest.fixture
def staff_api_client(staff_user):
api_client = APIClient()
token_authenticate(api_client, staff_user)
return api_client
@pytest.fixture
def operator_api_client(operator):
api_client = APIClient()
token_authenticate(api_client, operator.user)
api_client.operator = operator
return api_client
@pytest.fixture
def operator_2(operator, operator_factory):
return operator_factory()
@pytest.fixture
def operator_2_api_client(operator_2):
api_client = APIClient()
token_authenticate(api_client, operator_2.user)
api_client.operator = operator_2
return api_client
| en | 0.969988 | # don't use the same user as operator_api_client | 1.903156 | 2 |
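A hypothetical test showing how the fixtures above are consumed by pytest; the endpoint path is an assumption for illustration and is not taken from the project.

```python
# Hypothetical test sketch: pytest injects the fixture defined in conftest.py above.
def test_operator_client_is_authenticated(operator_api_client):
    # "/v1/parkings/" is an assumed URL used only for illustration.
    response = operator_api_client.get("/v1/parkings/")
    assert response.status_code != 401  # the token-authenticated client should not be rejected
```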
LTSpiceGenerator.py | sventhijssen/compact | 2 | 6632357 | from MemristorCrossbar import MemristorCrossbar
class LTSpiceGenerator:
@staticmethod
def write_circuit(crossbar: MemristorCrossbar):
file_name = 'circuit.cir'
r_out = 100
with open(file_name, 'w') as f:
f.write('* Circuit analysis based on Yakopcic\n')
f.write('.subckt mem_dev TE BE xo=0.5 XSV=40\n')
f.write('.PARAM a1=0.2 a2=0.2 b=0.05 Vp=1.1 Vn=1.1 Ap=1.9e9 An=1.9e9 xp=0.675 xn=0.675 alphap=0.01 alphan=0.01 eta=1\n')
f.write('.FUNC wp(V) = \'(xp-V)/(1-xp)+1\'\n')
f.write('.FUNC wn(V) = \'V/(1-xn)\'\n')
f.write('.FUNC G(V) = \'IF(V <= Vp, IF(V >= -1*Vn, 0, -1*An*(exp(-1*V)+(-1*exp(Vn)))), Ap*(exp(V)+(-1*exp(Vp))))\'\n')
f.write('.FUNC F(V1,V2) = \'IF(eta*V1 >= 0, IF(V2 >= xp, exp(-1*alphap*(V2-xp))*wp(V2) ,1), IF(V2 <= (1-xn), exp(alphan*(V2+xn-1))*wn(V2) ,1))\'\n')
f.write('.FUNC IVRel(V1,V2) = \'IF(V1 >= 0, a1*V2*sinh(b*V1), a2*V2*sinh(b*V1))\'\n')
f.write('Cx XSV 0 \'1\'\n')
f.write('.ic V(XSV) = xo\n')
f.write('Gx 0 XSV value=\'eta*F(V(TE,BE),V(XSV,0))*G(V(TE,BE))\'\n')
f.write('Gm TE BE value = \'IVRel(V(TE,BE),V(XSV,0))\'\n')
f.write('.ends mem_dev\n')
f.write('\n')
# Begin memristor crossbar
for i in range(crossbar.rows):
for j in range(crossbar.columns):
if crossbar.get_memristor(i, j).literal.positive:
xo = '1.0'
else:
xo = '0.00001'
f.write('X_{}_{} m{} n{} mem_dev xo={}\n'.format(i, j, i, j, xo))
# End memristor crossbar
f.write('V1 m{} 0 10\n'.format(crossbar.rows-1))
for (output_variable, (row, _)) in crossbar.get_output_nanowires().items():
f.write('R_{} m{} 0 {}\n'.format(output_variable, row, r_out))
# f.write('.tran 0.1ms startup\n')
f.write('.end\n')
f.close()
| from MemristorCrossbar import MemristorCrossbar
class LTSpiceGenerator:
@staticmethod
def write_circuit(crossbar: MemristorCrossbar):
file_name = 'circuit.cir'
r_out = 100
with open(file_name, 'w') as f:
f.write('* Circuit analysis based on Yakopcic\n')
f.write('.subckt mem_dev TE BE xo=0.5 XSV=40\n')
f.write('.PARAM a1=0.2 a2=0.2 b=0.05 Vp=1.1 Vn=1.1 Ap=1.9e9 An=1.9e9 xp=0.675 xn=0.675 alphap=0.01 alphan=0.01 eta=1\n')
f.write('.FUNC wp(V) = \'(xp-V)/(1-xp)+1\'\n')
f.write('.FUNC wn(V) = \'V/(1-xn)\'\n')
f.write('.FUNC G(V) = \'IF(V <= Vp, IF(V >= -1*Vn, 0, -1*An*(exp(-1*V)+(-1*exp(Vn)))), Ap*(exp(V)+(-1*exp(Vp))))\'\n')
f.write('.FUNC F(V1,V2) = \'IF(eta*V1 >= 0, IF(V2 >= xp, exp(-1*alphap*(V2-xp))*wp(V2) ,1), IF(V2 <= (1-xn), exp(alphan*(V2+xn-1))*wn(V2) ,1))\'\n')
f.write('.FUNC IVRel(V1,V2) = \'IF(V1 >= 0, a1*V2*sinh(b*V1), a2*V2*sinh(b*V1))\'\n')
f.write('Cx XSV 0 \'1\'\n')
f.write('.ic V(XSV) = xo\n')
f.write('Gx 0 XSV value=\'eta*F(V(TE,BE),V(XSV,0))*G(V(TE,BE))\'\n')
f.write('Gm TE BE value = \'IVRel(V(TE,BE),V(XSV,0))\'\n')
f.write('.ends mem_dev\n')
f.write('\n')
# Begin memristor crossbar
for i in range(crossbar.rows):
for j in range(crossbar.columns):
if crossbar.get_memristor(i, j).literal.positive:
xo = '1.0'
else:
xo = '0.00001'
f.write('X_{}_{} m{} n{} mem_dev xo={}\n'.format(i, j, i, j, xo))
# End memristor crossbar
f.write('V1 m{} 0 10\n'.format(crossbar.rows-1))
for (output_variable, (row, _)) in crossbar.get_output_nanowires().items():
f.write('R_{} m{} 0 {}\n'.format(output_variable, row, r_out))
# f.write('.tran 0.1ms startup\n')
f.write('.end\n')
f.close()
| en | 0.163387 | # Begin memristor crossbar # End memristor crossbar # f.write('.tran 0.1ms startup\n') | 2.689058 | 3 |
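A hedged usage sketch of the generator above: the `MemristorCrossbar` constructor is not shown in this row, so the crossbar is assumed to come from elsewhere in the project.

```python
# Hypothetical driver for the generator above; `crossbar` is assumed to be a
# MemristorCrossbar instance built elsewhere in the project.
from LTSpiceGenerator import LTSpiceGenerator

def export_netlist(crossbar):
    LTSpiceGenerator.write_circuit(crossbar)  # writes circuit.cir into the working directory
```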
application.py | rahulbansal16/vop | 0 | 6632358 | import os
from flask import Flask, url_for
from flask_restplus import Api, Resource, fields
from services.reviewService import insert_review
from services.userService import UserNotFoundException, signup_user, UserAlreadyExistException, \
InvalidLoginDetailsException, login_user
app = Flask(__name__)
if os.environ.get('AZURE'):
@property
def specs_url(self):
return url_for(self.endpoint('specs'), _external=True, _scheme='https')
Api.specs_url = specs_url
api = Api(app, version='1.0', title='VOP Api',
description='A Basic VOP API',
)
error_model = api.model('ErrorModel', {
"success": fields.String,
"errorType": fields.String,
"errorMessage": fields.String
})
# @app.errorhandler(Exception)
# @app.errorhandler(RequiredParametersMissingException)
# @app.errorhandler(ReviewAlreadyExistException)
@api.errorhandler(UserAlreadyExistException)
@api.errorhandler(UserNotFoundException)
@api.errorhandler(InvalidLoginDetailsException)
# @marshal_with(ErrorSchema, 500)
@api.marshal_with(error_model)
def handle_error_util(error):
code = error.code
success = False
response = {
'success': success,
'errorType': error.name,
'errorMessage': error.message
}
return response, code
ns_conf = api.namespace('reviews', description='Info about Review api')
# model = api.model('ReviewModel', {
# 'name': fields.String,
# 'address': fields.String,
# 'date_updated': fields.DateTime(dt_format='rfc822'),
# })
review_model = api.model('ReviewModel', {
"title": fields.String,
"description": fields.String,
"videoUrl": fields.String,
"thumbnailUrl": fields.String,
"afLink": fields.String,
"username": fields.String
})
@ns_conf.route("")
class Reviews(Resource):
@api.marshal_with(review_model)
def get(self):
pass
@api.marshal_with(review_model)
@api.expect(review_model)
def post(self):
return insert_review(api.payload)
user_model = api.model('UserModel', {
"username": fields.String,
"password": fields.String
})
ns_conf = api.namespace('users', description='Info about Review api')
# @ns_conf.route("")
class Users(Resource):
# @api.marshal_with()
def get(self):
pass
# @api.marshal_with()
@api.expect(user_model)
def post(self):
pass
login_model = api.model('LoginModel', {
'username': fields.String,
'password': fields.String
})
ns_conf = api.namespace('login', description='Login Api')
@ns_conf.route('')
# @api.expect(login_model)
# @api.marshal_with()
class Login(Resource):
# @api.marshal_with()
@api.expect(login_model)
def post(self):
return login_user(api.payload)
ns_conf = api.namespace('signup', description='Signup Api')
signup_model = api.model('SignupModel', {
'name': fields.String,
'username': fields.String,
'password': fields.String
})
@ns_conf.route('')
# @api.expect(login_model)
# @api.marshal_with()
class Signup(Resource):
# @api.marshal_with()
@api.expect(signup_model)
def post(self):
return signup_user(api.payload)
# print(api.payload)
# # review_params = ["title", "description", "videoUrl", "thumbnailUrl", "afLink", "username"]
# review_params = {
# "title": fields.Str(required=True),
# "description": fields.Str(required=True),
# "videoUrl": fields.Str(required=True),
# "thumbnailUrl": fields.Str(required=True),
# "afLink": fields.Str(required=True),
# "username": fields.Str(required=True)
# }
#
#
# @app.route("/review", methods=['POST'])
# @parser.use_args(review_params, locations=("json"))
# def review():
# return insert_review(request.json)
# docs.register(review)
#
# # signup_params = ["username", "password", "name"]
# signup_params = {
# "username": fields.Str(required=True),
# "password": fields.Str(required=True),
# "name": fields.Str(required=True)
# }
#
# # docs.register("review")
# @app.route("/signup", methods=['POST'])
# @parser.use_args(signup_params, locations=('json'))
# # @marshal_with( code=200)
# def signup(args):
# return signup_user(args)
#
#
# # login_params = ["username", "password"]
#
# login_params = {
# "username": fields.Str(required=True),
# "password": fields.Str(required=True)
# }
#
#
# @app.route("/login", methods=['POST'])
# @parser.use_args(login_params, locations=('json'))
# def login(args):
# return login_user(args)
#
#
# @app.route("/feed")
# def feed():
# pass
# # return json.dumps(get_video_feed())
if __name__ == "__main__":
app.run(debug=True)
| import os
from flask import Flask, url_for
from flask_restplus import Api, Resource, fields
from services.reviewService import insert_review
from services.userService import UserNotFoundException, signup_user, UserAlreadyExistException, \
InvalidLoginDetailsException, login_user
app = Flask(__name__)
if os.environ.get('AZURE'):
@property
def specs_url(self):
return url_for(self.endpoint('specs'), _external=True, _scheme='https')
Api.specs_url = specs_url
api = Api(app, version='1.0', title='VOP Api',
description='A Basic VOP API',
)
error_model = api.model('ErrorModel', {
"success": fields.String,
"errorType": fields.String,
"errorMessage": fields.String
})
# @app.errorhandler(Exception)
# @app.errorhandler(RequiredParametersMissingException)
# @app.errorhandler(ReviewAlreadyExistException)
@api.errorhandler(UserAlreadyExistException)
@api.errorhandler(UserNotFoundException)
@api.errorhandler(InvalidLoginDetailsException)
# @marshal_with(ErrorSchema, 500)
@api.marshal_with(error_model)
def handle_error_util(error):
code = error.code
success = False
response = {
'success': success,
'errorType': error.name,
'errorMessage': error.message
}
return response, code
ns_conf = api.namespace('reviews', description='Info about Review api')
# model = api.model('ReviewModel', {
# 'name': fields.String,
# 'address': fields.String,
# 'date_updated': fields.DateTime(dt_format='rfc822'),
# })
review_model = api.model('ReviewModel', {
"title": fields.String,
"description": fields.String,
"videoUrl": fields.String,
"thumbnailUrl": fields.String,
"afLink": fields.String,
"username": fields.String
})
@ns_conf.route("")
class Reviews(Resource):
@api.marshal_with(review_model)
def get(self):
pass
@api.marshal_with(review_model)
@api.expect(review_model)
def post(self):
return insert_review(api.payload)
user_model = api.model('UserModel', {
"username": fields.String,
"password": fields.String
})
ns_conf = api.namespace('users', description='Info about Review api')
# @ns_conf.route("")
class Users(Resource):
# @api.marshal_with()
def get(self):
pass
# @api.marshal_with()
@api.expect(user_model)
def post(self):
pass
login_model = api.model('LoginModel', {
'username': fields.String,
'password': fields.String
})
ns_conf = api.namespace('login', description='Login Api')
@ns_conf.route('')
# @api.expect(login_model)
# @api.marshal_with()
class Login(Resource):
# @api.marshal_with()
@api.expect(login_model)
def post(self):
return login_user(api.payload)
ns_conf = api.namespace('signup', description='Signup Api')
signup_model = api.model('SignupModel', {
'name': fields.String,
'username': fields.String,
'password': fields.String
})
@ns_conf.route('')
# @api.expect(login_model)
# @api.marshal_with()
class Signup(Resource):
# @api.marshal_with()
@api.expect(signup_model)
def post(self):
return signup_user(api.payload)
# print(api.payload)
# # review_params = ["title", "description", "videoUrl", "thumbnailUrl", "afLink", "username"]
# review_params = {
# "title": fields.Str(required=True),
# "description": fields.Str(required=True),
# "videoUrl": fields.Str(required=True),
# "thumbnailUrl": fields.Str(required=True),
# "afLink": fields.Str(required=True),
# "username": fields.Str(required=True)
# }
#
#
# @app.route("/review", methods=['POST'])
# @parser.use_args(review_params, locations=("json"))
# def review():
# return insert_review(request.json)
# docs.register(review)
#
# # signup_params = ["username", "password", "name"]
# signup_params = {
# "username": fields.Str(required=True),
# "password": fields.Str(required=True),
# "name": fields.Str(required=True)
# }
#
# # docs.register("review")
# @app.route("/signup", methods=['POST'])
# @parser.use_args(signup_params, locations=('json'))
# # @marshal_with( code=200)
# def signup(args):
# return signup_user(args)
#
#
# # login_params = ["username", "password"]
#
# login_params = {
# "username": fields.Str(required=True),
# "password": fields.Str(required=True)
# }
#
#
# @app.route("/login", methods=['POST'])
# @parser.use_args(login_params, locations=('json'))
# def login(args):
# return login_user(args)
#
#
# @app.route("/feed")
# def feed():
# pass
# # return json.dumps(get_video_feed())
if __name__ == "__main__":
app.run(debug=True)
| en | 0.322763 | # @app.errorhandler(Exception) # @app.errorhandler(RequiredParametersMissingException) # @app.errorhandler(ReviewAlreadyExistException) # @marshal_with(ErrorSchema, 500) # model = api.model('ReviewModel', { # 'name': fields.String, # 'address': fields.String, # 'date_updated': fields.DateTime(dt_format='rfc822'), # }) # @ns_conf.route("") # @api.marshal_with() # @api.marshal_with() # @api.expect(login_model) # @api.marshal_with() # @api.marshal_with() # @api.expect(login_model) # @api.marshal_with() # @api.marshal_with() # print(api.payload) # # review_params = ["title", "description", "videoUrl", "thumbnailUrl", "afLink", "username"] # review_params = { # "title": fields.Str(required=True), # "description": fields.Str(required=True), # "videoUrl": fields.Str(required=True), # "thumbnailUrl": fields.Str(required=True), # "afLink": fields.Str(required=True), # "username": fields.Str(required=True) # } # # # @app.route("/review", methods=['POST']) # @parser.use_args(review_params, locations=("json")) # def review(): # return insert_review(request.json) # docs.register(review) # # # signup_params = ["username", "password", "name"] # signup_params = { # "username": fields.Str(required=True), # "password": fields.Str(required=True), # "name": fields.Str(required=True) # } # # # docs.register("review") # @app.route("/signup", methods=['POST']) # @parser.use_args(signup_params, locations=('json')) # # @marshal_with( code=200) # def signup(args): # return signup_user(args) # # # # login_params = ["username", "password"] # # login_params = { # "username": fields.Str(required=True), # "password": fields.Str(required=True) # } # # # @app.route("/login", methods=['POST']) # @parser.use_args(login_params, locations=('json')) # def login(args): # return login_user(args) # # # @app.route("/feed") # def feed(): # pass # # return json.dumps(get_video_feed()) | 2.342144 | 2 |
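A hedged sketch of exercising the API above with Flask's built-in test client, assuming `flask_restplus` and the project's `services` modules are importable; the payload values are made up to match the `SignupModel` fields.

```python
# Hedged sketch: drive the signup endpoint with Flask's test client.
# Assumes application.py and its service dependencies import cleanly.
from application import app

client = app.test_client()
resp = client.post("/signup", json={
    "name": "Test User",      # made-up payload matching the SignupModel fields
    "username": "testuser",
    "password": "secret",
})
print(resp.status_code, resp.get_json())
```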
views/__init__.py | Andrerodrigues0018/LGPD-compliant-website | 0 | 6632359 | <reponame>Andrerodrigues0018/LGPD-compliant-website
from .account import *
from .eventhooks import *
| from .account import *
from .eventhooks import * | none | 1 | 1.080734 | 1 |
|
simulation/models/env_db/onboarding_simple.py | LeonardII/KitCarFork | 13 | 6632360 | """A simple road for the onboarding task."""
from simulation.utils.road.road import Road # Definition of the road class
from simulation.utils.road.sections import Intersection, StraightRoad
road = Road()
road.append(StraightRoad())
road.append(StraightRoad(length=2))
road.append(Intersection())
road.append(StraightRoad())
| """A simple road for the onboarding task."""
from simulation.utils.road.road import Road # Definition of the road class
from simulation.utils.road.sections import Intersection, StraightRoad
road = Road()
road.append(StraightRoad())
road.append(StraightRoad(length=2))
road.append(Intersection())
road.append(StraightRoad())
| en | 0.752869 | A simple road for the onboarding task. # Definition of the road class | 2.583316 | 3 |
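A hedged variation on the onboarding road above, using only the section types the file already imports; the lengths are arbitrary example values.

```python
# Hedged variation of the onboarding road, reusing only the imports shown above.
from simulation.utils.road.road import Road
from simulation.utils.road.sections import Intersection, StraightRoad

road = Road()
road.append(StraightRoad(length=4))  # example length, chosen arbitrarily
road.append(Intersection())
road.append(StraightRoad(length=1))
```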
core/utils/help.py | Thirio27/Pemgu-Bot | 1 | 6632361 | <filename>core/utils/help.py
import discord, contextlib
from discord.ext import commands
import core.views.helpview as hv
class MinimalHelp(commands.MinimalHelpCommand):
def __init__(self):
self.emojis = {
"Anime": "🍘",
"Fun": "😹",
"Game": "🎮",
"Internet": "🌎",
"Math": "🧮",
"Meta": "🔧",
"Moderation": "🎩",
"Owner": "👑",
"Utility": "🧰",
"No": "❓"
}
super().__init__(
command_attrs={
"help": "The help command for this bot",
"aliases": ["h", "commands"]
}
)
async def send_pages(self):
mhmbed = discord.Embed(
color=self.context.bot.color,
title=F"{self.context.me.name}'s Help",
timestamp=self.context.message.created_at
)
mhmbed.set_thumbnail(url=self.context.me.display_avatar.url)
mhmbed.set_author(name=self.context.author, icon_url=self.context.author.display_avatar.url)
mhmbed.set_footer(text="[] means the argument is optional. | <> means the argument is required.")
for page in self.paginator.pages:
mhmbed.description = page
await self.context.send(embed=mhmbed)
class CustomHelp(commands.HelpCommand):
def __init__(self):
super().__init__(
command_attrs={
"help": "The help command for this bot",
"aliases": ["h", "commands"]
}
)
self.emojis = {
"Anime": "🍘",
"Fun": "😹",
"Game": "🎮",
"Images": "📷",
"Information": "🔎",
"Moderation": "🎩",
"Owner": "👑",
"Settings": "🔧",
"Utility": "🧰",
"Jishaku": "🤿",
"Alone": "🔮"
}
# Help Main
async def send_bot_help(self, mapping):
view = hv.SelectView(self, mapping)
view.homepage.add_field(name="Prefix:", value=self.context.prefix or "In DM you don't need to use prefix")
view.homepage.add_field(name="Arguments:", value="[] means the argument is optional.\n<> means the argument is required.\n***DO NOT USE THESE WHEN DOING A COMMAND***")
view.homepage.set_thumbnail(url=self.context.me.display_avatar.url)
view.homepage.set_author(name=self.context.author, icon_url=self.context.author.display_avatar.url)
view.message = await self.context.send(content="Are you lost ?", embed=view.homepage, view=view)
return
# Help Cog
async def send_cog_help(self, cog):
hcogmbed = discord.Embed(
color=self.context.bot.color,
title=F"{self.emojis.get(cog.qualified_name) if self.emojis.get(cog.qualified_name) else '❓'} {cog.qualified_name} Category [{len(cog.get_commands())}]",
description=F"{cog.description}\n\n",
timestamp=self.context.message.created_at
)
for command in cog.walk_commands():
hcogmbed.description += F"• **{self.get_command_signature(command)}** - {command.help or 'No help found...'}\n"
hcogmbed.set_thumbnail(url=self.context.me.display_avatar.url)
hcogmbed.set_author(name=self.context.author, icon_url=self.context.author.display_avatar.url)
hcogmbed.set_footer(text="<> is required | [] is optional")
await self.context.send(embed=hcogmbed)
return
# Help Command
async def send_command_help(self, command):
hcmdmbed = discord.Embed(
color=self.context.bot.color,
title=self.get_command_signature(command),
description=command.help or "No help found...",
timestamp=self.context.message.created_at
)
hcmdmbed.set_thumbnail(url=self.context.me.display_avatar.url)
hcmdmbed.set_author(name=self.context.author, icon_url=self.context.author.display_avatar.url)
hcmdmbed.set_footer(text="<> is required | [] is optional")
if cog := command.cog:
hcmdmbed.add_field(name="Category:", value=F"{self.emojis.get(cog.qualified_name) if self.emojis.get(cog.qualified_name) else '❓'} {cog.qualified_name}")
can_run = "No"
with contextlib.suppress(commands.CommandError):
if await command.can_run(self.context):
can_run = "Yes"
hcmdmbed.add_field(name="Usable", value=can_run)
if command._buckets and (cooldown := command._buckets._cooldown):
hcmdmbed.add_field(name="Cooldown", value=F"{cooldown.rate} per {cooldown.per:.0f} seconds")
await self.context.send(embed=hcmdmbed)
return
# Help Group
async def send_group_help(self, group):
hgroupmbed = discord.Embed(
color=self.context.bot.color,
title=self.get_command_signature(group),
description=F"{group.help or 'No help found...'}\n\n",
timestamp=self.context.message.created_at
)
hgroupmbed.set_thumbnail(url=self.context.me.display_avatar.url)
hgroupmbed.set_author(name=self.context.author, icon_url=self.context.author.display_avatar.url)
hgroupmbed.set_footer(text="<> is required | [] is optional")
for command in group.commands:
hgroupmbed.description += F"• **{self.get_command_signature(command)}** - {command.help or 'No help found...'}\n"
if cog := command.cog:
hgroupmbed.add_field(name="Category", value=F"{self.emojis.get(cog.qualified_name) if self.emojis.get(cog.qualified_name) else '❓'} {cog.qualified_name}")
can_run = "No"
with contextlib.suppress(commands.CommandError):
if await command.can_run(self.context):
can_run = "Yes"
hgroupmbed.add_field(name="Usable", value=can_run)
if command._buckets and (cooldown := command._buckets._cooldown):
hgroupmbed.add_field(name="Cooldown", value=F"{cooldown.rate} per {cooldown.per:.0f} seconds")
await self.context.send(embed=hgroupmbed)
return
# Help Error
async def send_error_message(self, error):
herrormbed = discord.Embed(
color=self.context.bot.color,
title=error,
timestamp=self.context.message.created_at
)
herrormbed.set_thumbnail(url=self.context.me.display_avatar.url)
herrormbed.set_author(name=self.context.author, icon_url=self.context.author.display_avatar.url)
await self.context.send(embed=herrormbed)
        return | en | 0.67164 | # Help Main # Help Cog # Help Command # Help Group # Help Error | 2.442163 | 2
tests/scanner/audit/iap_rules_engine_test.py | mcunha/forseti-security | 1 | 6632362 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the IapRulesEngine."""
import copy
import itertools
import mock
import unittest
import yaml
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.common.gcp_type import backend_service
from google.cloud.forseti.common.gcp_type import resource as resource_mod
from google.cloud.forseti.common.gcp_type.organization import Organization
from google.cloud.forseti.common.gcp_type.project import Project
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
from google.cloud.forseti.scanner.audit import iap_rules_engine as ire
from google.cloud.forseti.scanner.scanners import iap_scanner
from tests.unittest_utils import get_datafile_path
from tests.scanner.audit.data import test_iap_rules
class IapRulesEngineTest(ForsetiTestCase):
"""Tests for the IapRulesEngine."""
def setUp(self):
"""Set up."""
self.maxDiff = None
self.fake_timestamp = '12345'
self.org789 = Organization('778899', display_name='My org')
self.project1 = Project(
'my-project-1', 12345,
display_name='My project 1',
parent=self.org789)
self.project2 = Project('my-project-2', 12346,
display_name='My project 2')
def test_build_rule_book_from_local_yaml_file_works(self):
"""Test that a RuleBook is built correctly with a yaml file."""
rules_local_path = get_datafile_path(__file__, 'iap_test_rules_1.yaml')
rules_engine = ire.IapRulesEngine(rules_file_path=rules_local_path)
rules_engine.build_rule_book({})
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
def test_build_rule_book_from_local_json_file_works(self):
"""Test that a RuleBook is built correctly with a json file."""
rules_local_path = get_datafile_path(__file__, 'iap_test_rules_1.json')
rules_engine = ire.IapRulesEngine(rules_file_path=rules_local_path)
rules_engine.build_rule_book({})
self.assertEqual(2, len(rules_engine.rule_book.resource_rules_map))
@mock.patch.object(file_loader,
'_read_file_from_gcs', autospec=True)
def test_build_rule_book_from_gcs_works(self, mock_load_rules_from_gcs):
"""Test that a RuleBook is built correctly with a mocked gcs file.
Setup:
* Create a mocked GCS object from a test yaml file.
* Get the yaml file content.
Expected results:
            There is 1 resource that has rules in the rule book.
"""
bucket_name = 'bucket-name'
rules_path = 'input/test_rules_1.yaml'
full_rules_path = 'gs://{}/{}'.format(bucket_name, rules_path)
rules_engine = ire.IapRulesEngine(rules_file_path=full_rules_path)
# Read in the rules file
file_content = None
with open(get_datafile_path(__file__, 'iap_test_rules_1.yaml'),
'r') as rules_local_file:
try:
file_content = yaml.safe_load(rules_local_file)
except yaml.YAMLError:
raise
mock_load_rules_from_gcs.return_value = file_content
rules_engine.build_rule_book({})
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
def test_build_rule_book_no_resource_type_fails(self):
"""Test that a rule without a resource type cannot be created."""
rules_local_path = get_datafile_path(__file__, 'iap_test_rules_2.yaml')
rules_engine = ire.IapRulesEngine(rules_file_path=rules_local_path)
with self.assertRaises(InvalidRulesSchemaError):
rules_engine.build_rule_book({})
def test_add_single_rule_builds_correct_map(self):
"""Test that adding a single rule builds the correct map."""
rule_book = ire.IapRuleBook(
{}, test_iap_rules.RULES1, self.fake_timestamp)
actual_rules = rule_book.resource_rules_map
rule = ire.Rule('my rule', 0, [], [], '^.*$')
expected_org_rules = ire.ResourceRules(self.org789,
rules=set([rule]),
applies_to='self_and_children')
expected_proj1_rules = ire.ResourceRules(self.project1,
rules=set([rule]),
applies_to='self')
expected_proj2_rules = ire.ResourceRules(self.project2,
rules=set([rule]),
applies_to='self')
expected_rules = {
(self.org789, 'self_and_children'): expected_org_rules,
(self.project1, 'self'): expected_proj1_rules,
(self.project2, 'self'): expected_proj2_rules
}
self.assertEqual(expected_rules, actual_rules)
def test_no_violations(self):
rule = ire.Rule('my rule', 0, [], [], '^.*$')
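        # The trailing '^.*$' regex accepts any iap_enabled value, so an IAP-enabled service with no bypass paths should yield no violations.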
resource_rule = ire.ResourceRules(self.org789,
rules=set([rule]),
applies_to='self_and_children')
service = backend_service.BackendService(
project_id=self.project1.id,
name='bs1')
iap_resource = iap_scanner.IapResource(
project_full_name='',
backend_service=service,
alternate_services=set(),
direct_access_sources=set(),
iap_enabled=True)
results = list(resource_rule.find_mismatches(service,
iap_resource))
self.assertEquals([], results)
def test_enabled_violation(self):
rule = ire.Rule('my rule', 0, [], [], '^True')
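        # '^True' requires IAP to be enabled; iap_enabled is False below, so exactly one violation is expected.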
resource_rule = ire.ResourceRules(self.org789,
rules=set([rule]),
applies_to='self_and_children')
service = backend_service.BackendService(
full_name='fake_full_name111',
project_id=self.project1.id,
name='bs1')
iap_resource = iap_scanner.IapResource(
project_full_name='',
backend_service=service,
alternate_services=set(),
direct_access_sources=set(),
iap_enabled=False)
results = list(resource_rule.find_mismatches(service,
iap_resource))
expected_violations = [
ire.RuleViolation(
resource_type=resource_mod.ResourceType.BACKEND_SERVICE,
resource_name='bs1',
resource_id=service.resource_id,
full_name='fake_full_name111',
rule_name=rule.rule_name,
rule_index=rule.rule_index,
violation_type='IAP_VIOLATION',
alternate_services_violations=[],
direct_access_sources_violations=[],
iap_enabled_violation=True,
resource_data='{"name": "bs1", "full_name": "fake_full_name111", "id": "None"}'),
]
self.assertEquals(expected_violations, results)
def test_alternate_service_violation(self):
rule = ire.Rule('my rule', 0, [], [], '^True')
resource_rule = ire.ResourceRules(self.org789,
rules=set([rule]),
applies_to='self_and_children')
service = backend_service.BackendService(
full_name='fake_full_name111',
project_id=self.project1.id,
name='bs1')
alternate_service = backend_service.Key.from_args(
project_id=self.project1.id,
name='bs2')
iap_resource = iap_scanner.IapResource(
project_full_name='',
backend_service=service,
alternate_services=set([alternate_service]),
direct_access_sources=set(),
iap_enabled=True)
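        # IAP is on for bs1, but bs2 offers another path to the same backends, so it should surface as an alternate-service violation.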
results = list(resource_rule.find_mismatches(service,
iap_resource))
expected_violations = [
ire.RuleViolation(
resource_type=resource_mod.ResourceType.BACKEND_SERVICE,
resource_name='bs1',
resource_id=service.resource_id,
full_name='fake_full_name111',
rule_name=rule.rule_name,
rule_index=rule.rule_index,
violation_type='IAP_VIOLATION',
alternate_services_violations=[alternate_service],
direct_access_sources_violations=[],
iap_enabled_violation=False,
resource_data='{"name": "bs1", "full_name": "fake_full_name111", "id": "None"}'),
]
self.assertEquals(expected_violations, results)
def test_direct_access_violation(self):
rule = ire.Rule('my rule', 0, [], [], '^.*')
resource_rule = ire.ResourceRules(self.org789,
rules=set([rule]),
applies_to='self_and_children')
direct_source = 'some-tag'
service = backend_service.BackendService(
full_name='fake_full_name111',
project_id=self.project1.id,
name='bs1')
iap_resource = iap_scanner.IapResource(
project_full_name='',
backend_service=service,
alternate_services=set(),
direct_access_sources=set([direct_source]),
iap_enabled=True)
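        # 'some-tag' can reach the backends directly without going through IAP, so it should be reported as a direct-access violation.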
results = list(resource_rule.find_mismatches(service,
iap_resource))
expected_violations = [
ire.RuleViolation(
resource_type=resource_mod.ResourceType.BACKEND_SERVICE,
resource_name='bs1',
resource_id=service.resource_id,
full_name='fake_full_name111',
rule_name=rule.rule_name,
rule_index=rule.rule_index,
violation_type='IAP_VIOLATION',
alternate_services_violations=[],
direct_access_sources_violations=[direct_source],
iap_enabled_violation=False,
resource_data = '{"name": "bs1", "full_name": "fake_full_name111", "id": "None"}')
]
self.assertEquals(expected_violations, results)
def test_violations_iap_disabled(self):
"""If IAP is disabled, don't report other violations."""
rule = ire.Rule('my rule', 0, [], [], '^.*')
resource_rule = ire.ResourceRules(self.org789,
rules=set([rule]),
applies_to='self_and_children')
service = backend_service.BackendService(
full_name='fake_full_name111',
project_id=self.project1.id,
name='bs1')
alternate_service = backend_service.Key.from_args(
project_id=self.project1.id,
name='bs2')
iap_resource = iap_scanner.IapResource(
project_full_name='',
backend_service=service,
alternate_services=set([alternate_service]),
direct_access_sources=set(['some-tag']),
iap_enabled=False)
results = list(resource_rule.find_mismatches(service,
iap_resource))
expected_violations = []
self.assertEquals(expected_violations, results)
if __name__ == '__main__':
unittest.main()
| en | 0.897965 | # Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests the IapRulesEngine. Tests for the IapRulesEngine. Set up. Test that a RuleBook is built correctly with a yaml file. Test that a RuleBook is built correctly with a json file. Test that a RuleBook is built correctly with a mocked gcs file. Setup: * Create a mocked GCS object from a test yaml file. * Get the yaml file content. Expected results: There are 4 resources that have rules, in the rule book. # Read in the rules file Test that a rule without a resource type cannot be created. Test that adding a single rule builds the correct map. If IAP is disabled, don't report other violations. | 1.722324 | 2 |
app/src/utils/visualizations.py | Sayar1106/Heart-Disease-Web-Application | 4 | 6632363 | import streamlit as st
import plotly.express as px
def plot_single_feature(df, feature):
"""
This function will be used to plot a single feature.
Every feature's type will be first evaluated and then the
feature's distribution will be graphed accordingly.
Rules for single variable visualizations:
* Numerical variables will be represented by histograms.
* The visualizations for numerical variables will have "Frequency" as the y-axis label.
* Categorical variables will be represented by bar charts.
* The visualizations for categorical variables will have "Count" as the y-axis label.
Parameters
----------
df: DataFrame
A dataframe containing the heart disease data.
feature: str
The feature whose data needs to be plotted.
Returns
-------
None
"""
fig = None
xaxis_type=None
yaxis_title=""
# Switching int features with low cardinality to object:
df["num_major_vessels"] = df["num_major_vessels"].astype("object")
df["target"] = df["target"].astype("object")
# Check feature type and plot appropriately:
if df[feature].dtype == 'int64' or df[feature].dtype == 'float64':
#TODO(Sayar) Add slider widget here:
fig = px.histogram(x=df[feature].values, nbins=0)
yaxis_title = "Frequency"
elif df[feature].dtype == 'object':
fig = px.bar(y=df[feature].value_counts(),
x=df[feature].value_counts().index.astype(str),
color=df[feature].value_counts().index.astype(str),
text=df[feature].value_counts())
xaxis_type = "category"
yaxis_title = "Count"
fig.update_xaxes(title=feature)
fig.update_yaxes(title=yaxis_title)
fig.update_layout(showlegend=False,
height=500,
width=500,
title="Distribution of {}".format(feature),
xaxis_type=xaxis_type)
st.plotly_chart(fig)
return
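# Example usage (illustrative; "target" exists in this dataset, the numeric column name below is hypothetical):
#   plot_single_feature(df, "max_heart_rate")  # numeric dtype -> histogram
#   plot_single_feature(df, "target")          # object dtype  -> bar chart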
def plot_numerical_numerical(df, feature_1, feature_2):
"""Plots numerical vs numerical features"""
fig = px.scatter(df, feature_1, feature_2)
fig.update_layout(title="Plot of {} vs. {}".format(feature_1,
feature_2))
st.plotly_chart(fig)
def plot_numerical_categorical(df, feature_1, feature_2):
"""Plots numerical vs categorical features"""
x_var,y_var = feature_1, feature_2
# feature_1 is passed into x_var. If it is not categorical,
# we switch it with y_var:
if df[feature_1].dtypes == "int64" or df[feature_1].dtypes == "float64":
x_var,y_var = y_var,x_var
fig = px.box(df,
x=x_var,
y=y_var,
color=x_var)
fig.update_layout(title="Plot of {} vs. {}".format(x_var, y_var))
st.plotly_chart(fig)
def plot_categorical_categorical(df, feature_1, feature_2):
"""Plots categorical vs categorical features"""
fig = px.parallel_categories(df,
dimensions=[feature_1, feature_2],
)
fig.update_layout(title="Plot of {} vs. {}".format(feature_1, feature_2))
st.plotly_chart(fig)
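    # A parallel categories diagram shows how the levels of the two variables co-occur, which is why it is used here instead of a scatter plot.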
def plot_dual_features(df, feature_1, feature_2):
"""
This function will be used to plot feature interactions between
two features.
Rules for feature interaction visualization:
* Only two variables can be used for this visualization.
* Both variables have to be different.
* For numerical vs numerical visuals, we will be using scatter plots.
* For numerical vs categorical visuals, we will be using box plots.
    * For categorical vs categorical visuals, we will be using parallel categories plots.
Parameters
----------
df: DataFrame
A dataframe containing the heart disease data.
feature_1: str
The first feature to be used in the plot.
feature_2: str
The second feature to be used in the plot.
Returns
-------
None
"""
# Cannot allow same feature plots:
if feature_1 == feature_2:
raise ValueError("Please select two different features.")
# Changed to object type because of low cardinality:
df["num_major_vessels"] = df["num_major_vessels"].astype("object")
df["target"] = df["target"].astype("object")
feature_1_type = str(df[feature_1].dtype)
feature_2_type = str(df[feature_2].dtype)
# Dictionary to hash the appropriate function object:
switch_dict = {
("int64", "float64"): plot_numerical_numerical,
("float64", "int64"): plot_numerical_numerical,
("float64", "float64"): plot_numerical_numerical,
("int64", "int64"): plot_numerical_numerical,
("int64", "object"): plot_numerical_categorical,
("float64", "object"): plot_numerical_categorical,
("object", "int64"): plot_numerical_categorical,
("object", "float64"): plot_numerical_categorical,
("object", "object"): plot_categorical_categorical
}
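    # Dispatching on the (dtype, dtype) pair avoids a long if/elif chain; an unhandled dtype combination would raise KeyError.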
# Calling function object:
switch_dict[(feature_1_type, feature_2_type)](df, feature_1, feature_2)
return
def visualizations(df):
"""Function for the visualizations page in the web app."""
st.header("Visualizing our data")
column_list = df.columns.to_list()
st.markdown("""
This section will have visualizations which will be created automatically
based on rules assigned for the type of variable being visualized.
Rules for single variable visualizations:
* Numerical variables will be represented by histograms.
* The visualizations for numerical variables will have "Frequency" as the y-axis label.
* Categorical variables will be represented by bar charts.
* The visualizations for categorical variables will have "Count" as the y-axis label.
""")
st.subheader("Single feature visualization")
# Dropdown style box to select feature:
feature = st.selectbox(label="Select the feature", options=column_list)
plot_single_feature(df, feature)
st.markdown("""
Feature interaction visualizations will have two variables
and will plot the relationship between them.
Rules for feature interaction visualization:
* Only two variables can be used for this visualization.
* Both variables have to be different.
* For numerical vs numerical visuals, we will be using scatter plots.
* For numerical vs categorical visuals, we will be using box plots.
* For categorical vs categorical visuals, we will be using scatter plots.
""")
st.subheader("Feature interaction visualization")
# Multiselect for selecting two features for interaction plots:
features = st.multiselect(label="Select any two distinct features",
options=column_list)
# Check for number of features selected:
if len(features) == 2:
plot_dual_features(df, features[0], features[1])
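# Minimal wiring sketch for this page (assumes a pandas DataFrame is loaded elsewhere in the app;
# the file name and entry point below are illustrative):
#   df = pd.read_csv("heart.csv")
#   visualizations(df)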
| en | 0.801653 | This function will be used to plot a single feature. Every feature's type will be first evaluated and then the feature's distribution will be graphed accordingly. Rules for single variable visualizations: * Numerical variables will be represented by histograms. * The visualizations for numerical variables will have "Frequency" as the y-axis label. * Categorical variables will be represented by bar charts. * The visualizations for categorical variables will have "Count" as the y-axis label. Parameters ---------- df: DataFrame A dataframe containing the heart disease data. feature: str The feature whose data needs to be plotted. Returns ------- None # Switching int features with low cardinality to object: # Check feature type and plot appropriately: #TODO(Sayar) Add slider widget here: Plots numerical vs numerical features Plots numerical vs categorical features # feature_1 is passed into x_var. If it is not categorical, # we switch it with y_var: Plots categorical vs categorical features This function will be used to plot feature interactions between two features. Rules for feature interaction visualization: * Only two variables can be used for this visualization. * Both variables have to be different. * For numerical vs numerical visuals, we will be using scatter plots. * For numerical vs categorical visuals, we will be using box plots. * For categorical vs categorical visuals, we will be using scatter plots. Parameters ---------- df: DataFrame A dataframe containing the heart disease data. feature_1: str The first feature to be used in the plot. feature_2: str The second feature to be used in the plot. Returns ------- None # Cannot allow same feature plots: # Changed to object type because of low cardinality: # Dictionary to hash the appropriate function object: # Calling function object: Function for the visualizations page in the web app. This section will have visualizations which will be created automatically based on rules assigned for the type of variable being visualized. Rules for single variable visualizations: * Numerical variables will be represented by histograms. * The visualizations for numerical variables will have "Frequency" as the y-axis label. * Categorical variables will be represented by bar charts. * The visualizations for categorical variables will have "Count" as the y-axis label. # Dropdown style box to select feature: Feature interaction visualizations will have two variables and will plot the relationship between them. Rules for feature interaction visualization: * Only two variables can be used for this visualization. * Both variables have to be different. * For numerical vs numerical visuals, we will be using scatter plots. * For numerical vs categorical visuals, we will be using box plots. * For categorical vs categorical visuals, we will be using scatter plots. # Multiselect for selecting two features for interaction plots: # Check for number of features selected: | 4.28752 | 4 |
cbuildbot/cbuildbot_run_unittest.py | hustwei/chromite | 0 | 6632364 |
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the cbuildbot_run module."""
from __future__ import print_function
import cPickle
import os
import mock
import time
from chromite.cbuildbot import chromeos_config
from chromite.cbuildbot import cbuildbot_run
from chromite.lib import config_lib
from chromite.lib import config_lib_unittest
from chromite.lib import constants
from chromite.lib import cros_test_lib
from chromite.lib import parallel
DEFAULT_ARCHIVE_GS_PATH = 'bogus_bucket/TheArchiveBase'
DEFAULT_ARCHIVE_BASE = 'gs://%s' % DEFAULT_ARCHIVE_GS_PATH
DEFAULT_BUILDROOT = '/tmp/foo/bar/buildroot'
DEFAULT_BUILDNUMBER = 12345
DEFAULT_BRANCH = 'TheBranch'
DEFAULT_CHROME_BRANCH = 'TheChromeBranch'
DEFAULT_VERSION_STRING = 'TheVersionString'
DEFAULT_BOARD = 'TheBoard'
DEFAULT_BOT_NAME = 'TheCoolBot'
# pylint: disable=protected-access
DEFAULT_OPTIONS = cros_test_lib.EasyAttr(
archive_base=DEFAULT_ARCHIVE_BASE,
buildroot=DEFAULT_BUILDROOT,
buildnumber=DEFAULT_BUILDNUMBER,
buildbot=True,
branch=DEFAULT_BRANCH,
remote_trybot=False,
debug=False,
postsync_patch=True,
)
DEFAULT_CONFIG = config_lib.BuildConfig(
name=DEFAULT_BOT_NAME,
master=True,
boards=[DEFAULT_BOARD],
postsync_patch=True,
child_configs=[
config_lib.BuildConfig(
name='foo', postsync_patch=False, boards=[]),
config_lib.BuildConfig(
name='bar', postsync_patch=False, boards=[]),
],
)
DEFAULT_VERSION = '6543.2.1'
def _ExtendDefaultOptions(**kwargs):
"""Extend DEFAULT_OPTIONS with keys/values in kwargs."""
options_kwargs = DEFAULT_OPTIONS.copy()
options_kwargs.update(kwargs)
return cros_test_lib.EasyAttr(**options_kwargs)
def _ExtendDefaultConfig(**kwargs):
"""Extend DEFAULT_CONFIG with keys/values in kwargs."""
config_kwargs = DEFAULT_CONFIG.copy()
config_kwargs.update(kwargs)
return config_lib.BuildConfig(**config_kwargs)
class ExceptionsTest(cros_test_lib.TestCase):
"""Test that the exceptions in the module are sane."""
def _TestException(self, err, expected_startswith):
"""Test that str and pickle behavior of |err| are as expected."""
err2 = cPickle.loads(cPickle.dumps(err, cPickle.HIGHEST_PROTOCOL))
self.assertTrue(str(err).startswith(expected_startswith))
self.assertEqual(str(err), str(err2))
def testParallelAttributeError(self):
"""Test ParallelAttributeError message and pickle behavior."""
err1 = cbuildbot_run.ParallelAttributeError('SomeAttr')
self._TestException(err1, 'No such parallel run attribute')
err2 = cbuildbot_run.ParallelAttributeError('SomeAttr', 'SomeBoard',
'SomeTarget')
self._TestException(err2, 'No such board-specific parallel run attribute')
def testAttrSepCountError(self):
"""Test AttrSepCountError message and pickle behavior."""
err1 = cbuildbot_run.AttrSepCountError('SomeAttr')
self._TestException(err1, 'Attribute name has an unexpected number')
def testAttrNotPickleableError(self):
"""Test AttrNotPickleableError message and pickle behavior."""
err1 = cbuildbot_run.AttrNotPickleableError('SomeAttr', 'SomeValue')
self._TestException(err1, 'Run attribute "SomeAttr" value cannot')
# TODO(mtennant): Turn this into a PartialMock.
class _BuilderRunTestCase(cros_test_lib.MockTestCase):
"""Provide methods for creating BuilderRun or ChildBuilderRun."""
def setUp(self):
self._manager = parallel.Manager()
# Mimic entering a 'with' statement.
self._manager.__enter__()
def tearDown(self):
# Mimic exiting a 'with' statement.
self._manager.__exit__(None, None, None)
def _NewRunAttributes(self):
return cbuildbot_run.RunAttributes(self._manager)
def _NewBuilderRun(self, options=None, config=None):
"""Create a BuilderRun objection from options and config values.
Args:
options: Specify options or default to DEFAULT_OPTIONS.
config: Specify build config or default to DEFAULT_CONFIG.
Returns:
BuilderRun object.
"""
options = options or DEFAULT_OPTIONS
config = config or DEFAULT_CONFIG
site_config = config_lib_unittest.MockSiteConfig()
site_config[config.name] = config
return cbuildbot_run.BuilderRun(options, site_config, config, self._manager)
def _NewChildBuilderRun(self, child_index, options=None, config=None):
"""Create a ChildBuilderRun objection from options and config values.
Args:
child_index: Index of child config to use within config.
options: Specify options or default to DEFAULT_OPTIONS.
config: Specify build config or default to DEFAULT_CONFIG.
Returns:
ChildBuilderRun object.
"""
run = self._NewBuilderRun(options, config)
return cbuildbot_run.ChildBuilderRun(run, child_index)
class BuilderRunPickleTest(_BuilderRunTestCase):
"""Make sure BuilderRun objects can be pickled."""
def setUp(self):
self.real_config = chromeos_config.GetConfig()['test-ap-group']
self.PatchObject(cbuildbot_run._BuilderRunBase, 'GetVersion',
return_value=DEFAULT_VERSION)
def _TestPickle(self, run1):
self.assertEquals(DEFAULT_VERSION, run1.GetVersion())
run1.attrs.release_tag = 'TheReleaseTag'
# Accessing a method on BuilderRun has special behavior, so access and
# use one before pickling.
patch_after_sync = run1.ShouldPatchAfterSync()
# Access the archive object before pickling, too.
upload_url = run1.GetArchive().upload_url
# Pickle and unpickle run1 into run2.
run2 = cPickle.loads(cPickle.dumps(run1, cPickle.HIGHEST_PROTOCOL))
self.assertEquals(run1.buildnumber, run2.buildnumber)
self.assertEquals(run1.config.boards, run2.config.boards)
self.assertEquals(run1.options.branch, run2.options.branch)
self.assertEquals(run1.attrs.release_tag, run2.attrs.release_tag)
self.assertRaises(AttributeError, getattr, run1.attrs, 'manifest_manager')
self.assertRaises(AttributeError, getattr, run2.attrs, 'manifest_manager')
self.assertEquals(patch_after_sync, run2.ShouldPatchAfterSync())
self.assertEquals(upload_url, run2.GetArchive().upload_url)
# The attrs objects should be identical.
self.assertIs(run1.attrs, run2.attrs)
# And the run objects themselves are different.
self.assertIsNot(run1, run2)
def testPickleBuilderRun(self):
self._TestPickle(self._NewBuilderRun(config=self.real_config))
def testPickleChildBuilderRun(self):
self._TestPickle(self._NewChildBuilderRun(0, config=self.real_config))
class BuilderRunTest(_BuilderRunTestCase):
"""Test the BuilderRun class."""
def testInit(self):
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
run = self._NewBuilderRun()
self.assertEquals(DEFAULT_BUILDROOT, run.buildroot)
self.assertEquals(DEFAULT_BUILDNUMBER, run.buildnumber)
self.assertEquals(DEFAULT_BRANCH, run.manifest_branch)
self.assertEquals(DEFAULT_OPTIONS, run.options)
self.assertEquals(DEFAULT_CONFIG, run.config)
self.assertTrue(isinstance(run.attrs, cbuildbot_run.RunAttributes))
self.assertTrue(isinstance(run.GetArchive(),
cbuildbot_run.archive_lib.Archive))
# Make sure methods behave normally, since BuilderRun messes with them.
meth1 = run.GetVersionInfo
meth2 = run.GetVersionInfo
self.assertEqual(meth1.__name__, meth2.__name__)
# We actually do not support identity and equality checks right now.
self.assertNotEqual(meth1, meth2)
self.assertIsNot(meth1, meth2)
def testOptions(self):
options = _ExtendDefaultOptions(foo=True, bar=10)
run = self._NewBuilderRun(options=options)
self.assertEquals(True, run.options.foo)
self.assertEquals(10, run.options.__getattr__('bar'))
self.assertRaises(AttributeError, run.options.__getattr__, 'baz')
def testConfig(self):
config = _ExtendDefaultConfig(foo=True, bar=10)
run = self._NewBuilderRun(config=config)
self.assertEquals(True, run.config.foo)
self.assertEquals(10, run.config.__getattr__('bar'))
self.assertRaises(AttributeError, run.config.__getattr__, 'baz')
def testAttrs(self):
run = self._NewBuilderRun()
    # manifest_manager is a valid run attribute.  It gives AttributeError
# if accessed before being set, but thereafter works fine.
self.assertRaises(AttributeError, run.attrs.__getattribute__,
'manifest_manager')
run.attrs.manifest_manager = 'foo'
self.assertEquals('foo', run.attrs.manifest_manager)
self.assertEquals('foo', run.attrs.__getattribute__('manifest_manager'))
# foobar is not a valid run attribute. It gives AttributeError when
# accessed or changed.
self.assertRaises(AttributeError, run.attrs.__getattribute__, 'foobar')
self.assertRaises(AttributeError, run.attrs.__setattr__, 'foobar', 'foo')
def testArchive(self):
run = self._NewBuilderRun()
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
archive = run.GetArchive()
# Check archive.archive_path.
expected = ('%s/%s/%s/%s' %
(DEFAULT_BUILDROOT,
cbuildbot_run.archive_lib.Archive._BUILDBOT_ARCHIVE,
DEFAULT_BOT_NAME, DEFAULT_VERSION))
self.assertEqual(expected, archive.archive_path)
# Check archive.upload_url.
expected = '%s/%s/%s' % (DEFAULT_ARCHIVE_BASE, DEFAULT_BOT_NAME,
DEFAULT_VERSION)
self.assertEqual(expected, archive.upload_url)
# Check archive.download_url.
expected = '%s%s/%s/%s' % (
cbuildbot_run.archive_lib.gs.PRIVATE_BASE_HTTPS_DOWNLOAD_URL,
DEFAULT_ARCHIVE_GS_PATH, DEFAULT_BOT_NAME, DEFAULT_VERSION)
self.assertEqual(expected, archive.download_url)
def _RunAccessor(self, method_name, options_dict, config_dict):
"""Run the given accessor method of the BuilderRun class.
Create a BuilderRun object with the options and config provided and
then return the result of calling the given method on it.
Args:
method_name: A BuilderRun method to call, specified by name.
options_dict: Extend default options with this.
config_dict: Extend default config with this.
Returns:
Result of calling the given method.
"""
options = _ExtendDefaultOptions(**options_dict)
config = _ExtendDefaultConfig(**config_dict)
run = self._NewBuilderRun(options=options, config=config)
method = getattr(run, method_name)
self.assertEqual(method.__name__, method_name)
return method()
def testDualEnableSetting(self):
settings = {
'prebuilts': 'ShouldUploadPrebuilts',
'postsync_patch': 'ShouldPatchAfterSync',
}
# Both option and config enabled should result in True.
    # Create truth table with two variables in this order:
# <key> option value, <key> config value (e.g. <key> == 'prebuilts').
truth_table = cros_test_lib.TruthTable(inputs=[(True, True)])
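    # TruthTable enumerates every (option, config) combination; GetOutput returns True only for the listed (True, True) input.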
for inputs in truth_table:
option_val, config_val = inputs
for key, accessor in settings.iteritems():
self.assertEquals(
self._RunAccessor(accessor, {key: option_val}, {key: config_val}),
truth_table.GetOutput(inputs))
def testShouldReexecAfterSync(self):
# If option and config have postsync_reexec enabled, and this file is not
# in the build root, then we expect ShouldReexecAfterSync to return True.
# Construct a truth table across three variables in this order:
# postsync_reexec option value, postsync_reexec config value, same_root.
truth_table = cros_test_lib.TruthTable(inputs=[(True, True, False)])
for inputs in truth_table:
option_val, config_val, same_root = inputs
if same_root:
build_root = os.path.dirname(os.path.dirname(__file__))
else:
build_root = DEFAULT_BUILDROOT
result = self._RunAccessor(
'ShouldReexecAfterSync',
{'postsync_reexec': option_val, 'buildroot': build_root},
{'postsync_reexec': config_val})
self.assertEquals(result, truth_table.GetOutput(inputs))
def testInProduction(self):
run = self._NewBuilderRun()
self.assertFalse(run.InProduction())
def testInEmailReportingEnvironment(self):
run = self._NewBuilderRun()
self.assertFalse(run.InEmailReportingEnvironment())
run.attrs.metadata.UpdateWithDict(
{'buildbot-master-name': constants.WATERFALL_BRILLO})
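    # Tagging the run's metadata with a recognized waterfall master name is what should enable email reporting.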
self.assertTrue(run.InEmailReportingEnvironment())
class GetVersionTest(_BuilderRunTestCase):
"""Test the GetVersion and GetVersionInfo methods of BuilderRun class."""
# pylint: disable=protected-access
def testGetVersionInfoNotSet(self):
"""Verify we throw an error when the version hasn't been set."""
run = self._NewBuilderRun()
self.assertRaises(RuntimeError, run.GetVersionInfo)
def testGetVersionInfo(self):
"""Verify we return the right version info value."""
# Prepare a real BuilderRun object with a version_info tag.
run = self._NewBuilderRun()
verinfo = object()
run.attrs.version_info = verinfo
result = run.GetVersionInfo()
self.assertEquals(verinfo, result)
def _TestGetVersionReleaseTag(self, release_tag):
with mock.patch.object(cbuildbot_run._BuilderRunBase,
'GetVersionInfo') as m:
verinfo_mock = mock.Mock()
verinfo_mock.chrome_branch = DEFAULT_CHROME_BRANCH
verinfo_mock.VersionString = mock.Mock(return_value='VS')
m.return_value = verinfo_mock
# Prepare a real BuilderRun object with a release tag.
run = self._NewBuilderRun()
run.attrs.release_tag = release_tag
# Run the test return the result.
result = run.GetVersion()
m.assert_called_once_with()
if release_tag is None:
verinfo_mock.VersionString.assert_called_once()
return result
def testGetVersionReleaseTag(self):
result = self._TestGetVersionReleaseTag('RT')
self.assertEquals('R%s-%s' % (DEFAULT_CHROME_BRANCH, 'RT'), result)
def testGetVersionNoReleaseTag(self):
result = self._TestGetVersionReleaseTag(None)
expected_result = ('R%s-%s-b%s' %
(DEFAULT_CHROME_BRANCH, 'VS', DEFAULT_BUILDNUMBER))
self.assertEquals(result, expected_result)
class ChildBuilderRunTest(_BuilderRunTestCase):
"""Test the ChildBuilderRun class"""
def testInit(self):
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
crun = self._NewChildBuilderRun(0)
self.assertEquals(DEFAULT_BUILDROOT, crun.buildroot)
self.assertEquals(DEFAULT_BUILDNUMBER, crun.buildnumber)
self.assertEquals(DEFAULT_BRANCH, crun.manifest_branch)
self.assertEquals(DEFAULT_OPTIONS, crun.options)
self.assertEquals(DEFAULT_CONFIG.child_configs[0], crun.config)
self.assertEquals('foo', crun.config.name)
self.assertTrue(isinstance(crun.attrs, cbuildbot_run.RunAttributes))
self.assertTrue(isinstance(crun.GetArchive(),
cbuildbot_run.archive_lib.Archive))
# Make sure methods behave normally, since BuilderRun messes with them.
meth1 = crun.GetVersionInfo
meth2 = crun.GetVersionInfo
self.assertEqual(meth1.__name__, meth2.__name__)
# We actually do not support identity and equality checks right now.
self.assertNotEqual(meth1, meth2)
self.assertIsNot(meth1, meth2)
class RunAttributesTest(_BuilderRunTestCase):
"""Test the RunAttributes class."""
BOARD = 'SomeBoard'
TARGET = 'SomeConfigName'
VALUE = 'AnyValueWillDo'
# Any valid board-specific attribute will work here.
BATTR = 'breakpad_symbols_generated'
def testRegisterBoardTarget(self):
"""Test behavior of attributes before and after registering board target."""
ra = self._NewRunAttributes()
with self.assertRaises(AssertionError):
ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET)
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
self.assertFalse(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
ra.SetBoardParallel(self.BATTR, 'TheValue', self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
def testSetGet(self):
"""Test simple set/get of regular and parallel run attributes."""
ra = self._NewRunAttributes()
value = 'foobar'
# The __slots__ logic above confuses pylint.
# https://bitbucket.org/logilab/pylint/issue/380/
# pylint: disable=assigning-non-slot
# Set/Get a regular run attribute using direct access.
ra.release_tag = value
self.assertEqual(value, ra.release_tag)
# Set/Get of a parallel run attribute using direct access fails.
self.assertRaises(AttributeError, setattr, ra, 'unittest_value', value)
self.assertRaises(AttributeError, getattr, ra, 'unittest_value')
# Set/Get of a parallel run attribute with supported interface.
ra.SetParallel('unittest_value', value)
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Set/Get a board parallel run attribute, testing both the encouraged
# interface and the underlying interface.
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
ra.SetBoardParallel(self.BATTR, value, self.BOARD, self.TARGET)
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
def testSetDefault(self):
"""Test setting default value of parallel run attributes."""
ra = self._NewRunAttributes()
value = 'foobar'
# Attribute starts off not set.
self.assertFalse(ra.HasParallel('unittest_value'))
# Use SetParallelDefault to set it.
ra.SetParallelDefault('unittest_value', value)
self.assertTrue(ra.HasParallel('unittest_value'))
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Calling SetParallelDefault again has no effect.
ra.SetParallelDefault('unittest_value', 'junk')
self.assertTrue(ra.HasParallel('unittest_value'))
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Run through same sequence for a board-specific attribute.
with self.assertRaises(AssertionError):
ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET)
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
self.assertFalse(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
# Use SetBoardParallelDefault to set it.
ra.SetBoardParallelDefault(self.BATTR, value, self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
# Calling SetBoardParallelDefault again has no effect.
ra.SetBoardParallelDefault(self.BATTR, 'junk', self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
def testAttributeError(self):
"""Test accessing run attributes that do not exist."""
ra = self._NewRunAttributes()
value = 'foobar'
# Set/Get on made up attribute name.
self.assertRaises(AttributeError, setattr, ra, 'foo', value)
self.assertRaises(AttributeError, getattr, ra, 'foo')
# A board/target value is valid, but only if it is registered first.
self.assertRaises(AssertionError, ra.GetBoardParallel,
self.BATTR, self.BOARD, self.TARGET)
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
self.assertRaises(AttributeError, ra.GetBoardParallel,
self.BATTR, self.BOARD, self.TARGET)
class BoardRunAttributesTest(_BuilderRunTestCase):
"""Test the BoardRunAttributes class."""
BOARD = 'SomeBoard'
TARGET = 'SomeConfigName'
VALUE = 'AnyValueWillDo'
# Any valid board-specific attribute will work here.
BATTR = 'breakpad_symbols_generated'
class _SetAttr(object):
"""Stage-like class to set attr on a BoardRunAttributes obj."""
def __init__(self, bra, attr, value, delay=1):
self.bra = bra
self.attr = attr
self.value = value
self.delay = delay
def Run(self):
if self.delay:
time.sleep(self.delay)
self.bra.SetParallel(self.attr, self.value)
class _WaitForAttr(object):
"""Stage-like class to wait for attr on BoardRunAttributes obj."""
def __init__(self, bra, attr, expected_value, timeout=10):
self.bra = bra
self.attr = attr
self.expected_value = expected_value
self.timeout = timeout
def GetParallel(self):
return self.bra.GetParallel(self.attr, timeout=self.timeout)
class _CheckWaitForAttr(_WaitForAttr):
"""Stage-like class to wait for then check attr on BoardRunAttributes."""
def Run(self):
value = self.GetParallel()
assert value == self.expected_value, \
('For run attribute %s expected value %r but got %r.' %
(self.attr, self.expected_value, value))
class _TimeoutWaitForAttr(_WaitForAttr):
"""Stage-like class to time-out waiting for attr on BoardRunAttributes."""
def Run(self):
try:
self.GetParallel()
assert False, 'Expected AttrTimeoutError'
except cbuildbot_run.AttrTimeoutError:
pass
def setUp(self):
self.ra = self._NewRunAttributes()
self.bra = self.ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
def _TestParallelSetGet(self, stage_args):
"""Helper to run "stages" in parallel, according to |stage_args|.
Args:
stage_args: List of tuples of the form (stage_object, extra_args, ...)
where stage_object has a Run method which takes a BoardRunAttributes
object as the first argument and extra_args for the remaining arguments.
"""
stages = [a[0](self.bra, *a[1:]) for a in stage_args]
steps = [stage.Run for stage in stages]
parallel.RunParallelSteps(steps)
def testParallelSetGetFast(self):
"""Pass the parallel run attribute around with no delay."""
stage_args = [
(self._CheckWaitForAttr, self.BATTR, self.VALUE),
(self._SetAttr, self.BATTR, self.VALUE),
]
self._TestParallelSetGet(stage_args)
self.assertRaises(AttributeError,
getattr, self.bra, self.BATTR)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetSlow(self):
"""Pass the parallel run attribute around with a delay."""
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE, 10),
(self._TimeoutWaitForAttr, self.BATTR, self.VALUE, 2),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetManyGets(self):
"""Set the parallel run attribute in one stage, access in many stages."""
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE, 8),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._TimeoutWaitForAttr, self.BATTR, self.VALUE, 1),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetManySets(self):
"""Set the parallel run attribute in many stages, access in one stage."""
# Three "stages" set the value, with increasing delays. The stage that
# checks the value should get the first value set.
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE + '1', 1),
(self._SetAttr, self.BATTR, self.VALUE + '2', 11),
(self._CheckWaitForAttr, self.BATTR, self.VALUE + '1', 12),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE + '2', self.bra.GetParallel(self.BATTR))
def testSetGet(self):
"""Test that board-specific attrs do not work with set/get directly."""
self.assertRaises(AttributeError, setattr,
self.bra, 'breakpad_symbols_generated', self.VALUE)
self.assertRaises(AttributeError, getattr,
self.bra, 'breakpad_symbols_generated')
def testAccessRegularRunAttr(self):
"""Test that regular attributes are not known to BoardRunAttributes."""
self.assertRaises(AttributeError, getattr, self.bra, 'release_tag')
self.assertRaises(AttributeError, setattr, self.bra, 'release_tag', 'foo')
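# Illustrative sketch (hedged): a standalone reduction of the cros_test_lib.TruthTable
# pattern the tests above rely on. Inferred only from the usage visible in this file:
# the constructor receives the input tuples that map to a truthy output, iteration
# yields input combinations, and GetOutput() gives the expected result for each.
# Assumes chromite.lib is importable, exactly as in the test module itself.
from chromite.lib import cros_test_lib

def should_upload(option_enabled, config_enabled):
  # Stand-in for the accessor under test; both flags must be enabled.
  return option_enabled and config_enabled

truth_table = cros_test_lib.TruthTable(inputs=[(True, True)])
for inputs in truth_table:
  option_val, config_val = inputs
  assert should_upload(option_val, config_val) == truth_table.GetOutput(inputs)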
| # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the cbuildbot_run module."""
from __future__ import print_function
import cPickle
import os
import mock
import time
from chromite.cbuildbot import chromeos_config
from chromite.cbuildbot import cbuildbot_run
from chromite.lib import config_lib
from chromite.lib import config_lib_unittest
from chromite.lib import constants
from chromite.lib import cros_test_lib
from chromite.lib import parallel
DEFAULT_ARCHIVE_GS_PATH = 'bogus_bucket/TheArchiveBase'
DEFAULT_ARCHIVE_BASE = 'gs://%s' % DEFAULT_ARCHIVE_GS_PATH
DEFAULT_BUILDROOT = '/tmp/foo/bar/buildroot'
DEFAULT_BUILDNUMBER = 12345
DEFAULT_BRANCH = 'TheBranch'
DEFAULT_CHROME_BRANCH = 'TheChromeBranch'
DEFAULT_VERSION_STRING = 'TheVersionString'
DEFAULT_BOARD = 'TheBoard'
DEFAULT_BOT_NAME = 'TheCoolBot'
# pylint: disable=protected-access
DEFAULT_OPTIONS = cros_test_lib.EasyAttr(
archive_base=DEFAULT_ARCHIVE_BASE,
buildroot=DEFAULT_BUILDROOT,
buildnumber=DEFAULT_BUILDNUMBER,
buildbot=True,
branch=DEFAULT_BRANCH,
remote_trybot=False,
debug=False,
postsync_patch=True,
)
DEFAULT_CONFIG = config_lib.BuildConfig(
name=DEFAULT_BOT_NAME,
master=True,
boards=[DEFAULT_BOARD],
postsync_patch=True,
child_configs=[
config_lib.BuildConfig(
name='foo', postsync_patch=False, boards=[]),
config_lib.BuildConfig(
name='bar', postsync_patch=False, boards=[]),
],
)
DEFAULT_VERSION = '6543.2.1'
def _ExtendDefaultOptions(**kwargs):
"""Extend DEFAULT_OPTIONS with keys/values in kwargs."""
options_kwargs = DEFAULT_OPTIONS.copy()
options_kwargs.update(kwargs)
return cros_test_lib.EasyAttr(**options_kwargs)
def _ExtendDefaultConfig(**kwargs):
"""Extend DEFAULT_CONFIG with keys/values in kwargs."""
config_kwargs = DEFAULT_CONFIG.copy()
config_kwargs.update(kwargs)
return config_lib.BuildConfig(**config_kwargs)
class ExceptionsTest(cros_test_lib.TestCase):
"""Test that the exceptions in the module are sane."""
def _TestException(self, err, expected_startswith):
"""Test that str and pickle behavior of |err| are as expected."""
err2 = cPickle.loads(cPickle.dumps(err, cPickle.HIGHEST_PROTOCOL))
self.assertTrue(str(err).startswith(expected_startswith))
self.assertEqual(str(err), str(err2))
def testParallelAttributeError(self):
"""Test ParallelAttributeError message and pickle behavior."""
err1 = cbuildbot_run.ParallelAttributeError('SomeAttr')
self._TestException(err1, 'No such parallel run attribute')
err2 = cbuildbot_run.ParallelAttributeError('SomeAttr', 'SomeBoard',
'SomeTarget')
self._TestException(err2, 'No such board-specific parallel run attribute')
def testAttrSepCountError(self):
"""Test AttrSepCountError message and pickle behavior."""
err1 = cbuildbot_run.AttrSepCountError('SomeAttr')
self._TestException(err1, 'Attribute name has an unexpected number')
def testAttrNotPickleableError(self):
"""Test AttrNotPickleableError message and pickle behavior."""
err1 = cbuildbot_run.AttrNotPickleableError('SomeAttr', 'SomeValue')
self._TestException(err1, 'Run attribute "SomeAttr" value cannot')
# TODO(mtennant): Turn this into a PartialMock.
class _BuilderRunTestCase(cros_test_lib.MockTestCase):
"""Provide methods for creating BuilderRun or ChildBuilderRun."""
def setUp(self):
self._manager = parallel.Manager()
# Mimic entering a 'with' statement.
self._manager.__enter__()
def tearDown(self):
# Mimic exiting a 'with' statement.
self._manager.__exit__(None, None, None)
def _NewRunAttributes(self):
return cbuildbot_run.RunAttributes(self._manager)
def _NewBuilderRun(self, options=None, config=None):
"""Create a BuilderRun objection from options and config values.
Args:
options: Specify options or default to DEFAULT_OPTIONS.
config: Specify build config or default to DEFAULT_CONFIG.
Returns:
BuilderRun object.
"""
options = options or DEFAULT_OPTIONS
config = config or DEFAULT_CONFIG
site_config = config_lib_unittest.MockSiteConfig()
site_config[config.name] = config
return cbuildbot_run.BuilderRun(options, site_config, config, self._manager)
def _NewChildBuilderRun(self, child_index, options=None, config=None):
"""Create a ChildBuilderRun objection from options and config values.
Args:
child_index: Index of child config to use within config.
options: Specify options or default to DEFAULT_OPTIONS.
config: Specify build config or default to DEFAULT_CONFIG.
Returns:
ChildBuilderRun object.
"""
run = self._NewBuilderRun(options, config)
return cbuildbot_run.ChildBuilderRun(run, child_index)
class BuilderRunPickleTest(_BuilderRunTestCase):
"""Make sure BuilderRun objects can be pickled."""
def setUp(self):
self.real_config = chromeos_config.GetConfig()['test-ap-group']
self.PatchObject(cbuildbot_run._BuilderRunBase, 'GetVersion',
return_value=DEFAULT_VERSION)
def _TestPickle(self, run1):
self.assertEquals(DEFAULT_VERSION, run1.GetVersion())
run1.attrs.release_tag = 'TheReleaseTag'
# Accessing a method on BuilderRun has special behavior, so access and
# use one before pickling.
patch_after_sync = run1.ShouldPatchAfterSync()
# Access the archive object before pickling, too.
upload_url = run1.GetArchive().upload_url
# Pickle and unpickle run1 into run2.
run2 = cPickle.loads(cPickle.dumps(run1, cPickle.HIGHEST_PROTOCOL))
self.assertEquals(run1.buildnumber, run2.buildnumber)
self.assertEquals(run1.config.boards, run2.config.boards)
self.assertEquals(run1.options.branch, run2.options.branch)
self.assertEquals(run1.attrs.release_tag, run2.attrs.release_tag)
self.assertRaises(AttributeError, getattr, run1.attrs, 'manifest_manager')
self.assertRaises(AttributeError, getattr, run2.attrs, 'manifest_manager')
self.assertEquals(patch_after_sync, run2.ShouldPatchAfterSync())
self.assertEquals(upload_url, run2.GetArchive().upload_url)
# The attrs objects should be identical.
self.assertIs(run1.attrs, run2.attrs)
# And the run objects themselves are different.
self.assertIsNot(run1, run2)
def testPickleBuilderRun(self):
self._TestPickle(self._NewBuilderRun(config=self.real_config))
def testPickleChildBuilderRun(self):
self._TestPickle(self._NewChildBuilderRun(0, config=self.real_config))
class BuilderRunTest(_BuilderRunTestCase):
"""Test the BuilderRun class."""
def testInit(self):
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
run = self._NewBuilderRun()
self.assertEquals(DEFAULT_BUILDROOT, run.buildroot)
self.assertEquals(DEFAULT_BUILDNUMBER, run.buildnumber)
self.assertEquals(DEFAULT_BRANCH, run.manifest_branch)
self.assertEquals(DEFAULT_OPTIONS, run.options)
self.assertEquals(DEFAULT_CONFIG, run.config)
self.assertTrue(isinstance(run.attrs, cbuildbot_run.RunAttributes))
self.assertTrue(isinstance(run.GetArchive(),
cbuildbot_run.archive_lib.Archive))
# Make sure methods behave normally, since BuilderRun messes with them.
meth1 = run.GetVersionInfo
meth2 = run.GetVersionInfo
self.assertEqual(meth1.__name__, meth2.__name__)
# We actually do not support identity and equality checks right now.
self.assertNotEqual(meth1, meth2)
self.assertIsNot(meth1, meth2)
def testOptions(self):
options = _ExtendDefaultOptions(foo=True, bar=10)
run = self._NewBuilderRun(options=options)
self.assertEquals(True, run.options.foo)
self.assertEquals(10, run.options.__getattr__('bar'))
self.assertRaises(AttributeError, run.options.__getattr__, 'baz')
def testConfig(self):
config = _ExtendDefaultConfig(foo=True, bar=10)
run = self._NewBuilderRun(config=config)
self.assertEquals(True, run.config.foo)
self.assertEquals(10, run.config.__getattr__('bar'))
self.assertRaises(AttributeError, run.config.__getattr__, 'baz')
def testAttrs(self):
run = self._NewBuilderRun()
# manifest_manager is a valid run attribute. It gives Attribute error
# if accessed before being set, but thereafter works fine.
self.assertRaises(AttributeError, run.attrs.__getattribute__,
'manifest_manager')
run.attrs.manifest_manager = 'foo'
self.assertEquals('foo', run.attrs.manifest_manager)
self.assertEquals('foo', run.attrs.__getattribute__('manifest_manager'))
# foobar is not a valid run attribute. It gives AttributeError when
# accessed or changed.
self.assertRaises(AttributeError, run.attrs.__getattribute__, 'foobar')
self.assertRaises(AttributeError, run.attrs.__setattr__, 'foobar', 'foo')
def testArchive(self):
run = self._NewBuilderRun()
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
archive = run.GetArchive()
# Check archive.archive_path.
expected = ('%s/%s/%s/%s' %
(DEFAULT_BUILDROOT,
cbuildbot_run.archive_lib.Archive._BUILDBOT_ARCHIVE,
DEFAULT_BOT_NAME, DEFAULT_VERSION))
self.assertEqual(expected, archive.archive_path)
# Check archive.upload_url.
expected = '%s/%s/%s' % (DEFAULT_ARCHIVE_BASE, DEFAULT_BOT_NAME,
DEFAULT_VERSION)
self.assertEqual(expected, archive.upload_url)
# Check archive.download_url.
expected = '%s%s/%s/%s' % (
cbuildbot_run.archive_lib.gs.PRIVATE_BASE_HTTPS_DOWNLOAD_URL,
DEFAULT_ARCHIVE_GS_PATH, DEFAULT_BOT_NAME, DEFAULT_VERSION)
self.assertEqual(expected, archive.download_url)
def _RunAccessor(self, method_name, options_dict, config_dict):
"""Run the given accessor method of the BuilderRun class.
Create a BuilderRun object with the options and config provided and
then return the result of calling the given method on it.
Args:
method_name: A BuilderRun method to call, specified by name.
options_dict: Extend default options with this.
config_dict: Extend default config with this.
Returns:
Result of calling the given method.
"""
options = _ExtendDefaultOptions(**options_dict)
config = _ExtendDefaultConfig(**config_dict)
run = self._NewBuilderRun(options=options, config=config)
method = getattr(run, method_name)
self.assertEqual(method.__name__, method_name)
return method()
def testDualEnableSetting(self):
settings = {
'prebuilts': 'ShouldUploadPrebuilts',
'postsync_patch': 'ShouldPatchAfterSync',
}
# Both option and config enabled should result in True.
# Create truth table with three variables in this order:
# <key> option value, <key> config value (e.g. <key> == 'prebuilts').
truth_table = cros_test_lib.TruthTable(inputs=[(True, True)])
for inputs in truth_table:
option_val, config_val = inputs
for key, accessor in settings.iteritems():
self.assertEquals(
self._RunAccessor(accessor, {key: option_val}, {key: config_val}),
truth_table.GetOutput(inputs))
def testShouldReexecAfterSync(self):
# If option and config have postsync_reexec enabled, and this file is not
# in the build root, then we expect ShouldReexecAfterSync to return True.
# Construct a truth table across three variables in this order:
# postsync_reexec option value, postsync_reexec config value, same_root.
truth_table = cros_test_lib.TruthTable(inputs=[(True, True, False)])
for inputs in truth_table:
option_val, config_val, same_root = inputs
if same_root:
build_root = os.path.dirname(os.path.dirname(__file__))
else:
build_root = DEFAULT_BUILDROOT
result = self._RunAccessor(
'ShouldReexecAfterSync',
{'postsync_reexec': option_val, 'buildroot': build_root},
{'postsync_reexec': config_val})
self.assertEquals(result, truth_table.GetOutput(inputs))
def testInProduction(self):
run = self._NewBuilderRun()
self.assertFalse(run.InProduction())
def testInEmailReportingEnvironment(self):
run = self._NewBuilderRun()
self.assertFalse(run.InEmailReportingEnvironment())
run.attrs.metadata.UpdateWithDict(
{'buildbot-master-name': constants.WATERFALL_BRILLO})
self.assertTrue(run.InEmailReportingEnvironment())
class GetVersionTest(_BuilderRunTestCase):
"""Test the GetVersion and GetVersionInfo methods of BuilderRun class."""
# pylint: disable=protected-access
def testGetVersionInfoNotSet(self):
"""Verify we throw an error when the version hasn't been set."""
run = self._NewBuilderRun()
self.assertRaises(RuntimeError, run.GetVersionInfo)
def testGetVersionInfo(self):
"""Verify we return the right version info value."""
# Prepare a real BuilderRun object with a version_info tag.
run = self._NewBuilderRun()
verinfo = object()
run.attrs.version_info = verinfo
result = run.GetVersionInfo()
self.assertEquals(verinfo, result)
def _TestGetVersionReleaseTag(self, release_tag):
with mock.patch.object(cbuildbot_run._BuilderRunBase,
'GetVersionInfo') as m:
verinfo_mock = mock.Mock()
verinfo_mock.chrome_branch = DEFAULT_CHROME_BRANCH
verinfo_mock.VersionString = mock.Mock(return_value='VS')
m.return_value = verinfo_mock
# Prepare a real BuilderRun object with a release tag.
run = self._NewBuilderRun()
run.attrs.release_tag = release_tag
      # Run the test and return the result.
result = run.GetVersion()
m.assert_called_once_with()
if release_tag is None:
verinfo_mock.VersionString.assert_called_once()
return result
def testGetVersionReleaseTag(self):
result = self._TestGetVersionReleaseTag('RT')
self.assertEquals('R%s-%s' % (DEFAULT_CHROME_BRANCH, 'RT'), result)
def testGetVersionNoReleaseTag(self):
result = self._TestGetVersionReleaseTag(None)
expected_result = ('R%s-%s-b%s' %
(DEFAULT_CHROME_BRANCH, 'VS', DEFAULT_BUILDNUMBER))
self.assertEquals(result, expected_result)
class ChildBuilderRunTest(_BuilderRunTestCase):
"""Test the ChildBuilderRun class"""
def testInit(self):
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
crun = self._NewChildBuilderRun(0)
self.assertEquals(DEFAULT_BUILDROOT, crun.buildroot)
self.assertEquals(DEFAULT_BUILDNUMBER, crun.buildnumber)
self.assertEquals(DEFAULT_BRANCH, crun.manifest_branch)
self.assertEquals(DEFAULT_OPTIONS, crun.options)
self.assertEquals(DEFAULT_CONFIG.child_configs[0], crun.config)
self.assertEquals('foo', crun.config.name)
self.assertTrue(isinstance(crun.attrs, cbuildbot_run.RunAttributes))
self.assertTrue(isinstance(crun.GetArchive(),
cbuildbot_run.archive_lib.Archive))
# Make sure methods behave normally, since BuilderRun messes with them.
meth1 = crun.GetVersionInfo
meth2 = crun.GetVersionInfo
self.assertEqual(meth1.__name__, meth2.__name__)
# We actually do not support identity and equality checks right now.
self.assertNotEqual(meth1, meth2)
self.assertIsNot(meth1, meth2)
class RunAttributesTest(_BuilderRunTestCase):
"""Test the RunAttributes class."""
BOARD = 'SomeBoard'
TARGET = 'SomeConfigName'
VALUE = 'AnyValueWillDo'
# Any valid board-specific attribute will work here.
BATTR = 'breakpad_symbols_generated'
def testRegisterBoardTarget(self):
"""Test behavior of attributes before and after registering board target."""
ra = self._NewRunAttributes()
with self.assertRaises(AssertionError):
ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET)
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
self.assertFalse(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
ra.SetBoardParallel(self.BATTR, 'TheValue', self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
def testSetGet(self):
"""Test simple set/get of regular and parallel run attributes."""
ra = self._NewRunAttributes()
value = 'foobar'
# The __slots__ logic above confuses pylint.
# https://bitbucket.org/logilab/pylint/issue/380/
# pylint: disable=assigning-non-slot
# Set/Get a regular run attribute using direct access.
ra.release_tag = value
self.assertEqual(value, ra.release_tag)
# Set/Get of a parallel run attribute using direct access fails.
self.assertRaises(AttributeError, setattr, ra, 'unittest_value', value)
self.assertRaises(AttributeError, getattr, ra, 'unittest_value')
# Set/Get of a parallel run attribute with supported interface.
ra.SetParallel('unittest_value', value)
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Set/Get a board parallel run attribute, testing both the encouraged
# interface and the underlying interface.
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
ra.SetBoardParallel(self.BATTR, value, self.BOARD, self.TARGET)
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
def testSetDefault(self):
"""Test setting default value of parallel run attributes."""
ra = self._NewRunAttributes()
value = 'foobar'
# Attribute starts off not set.
self.assertFalse(ra.HasParallel('unittest_value'))
# Use SetParallelDefault to set it.
ra.SetParallelDefault('unittest_value', value)
self.assertTrue(ra.HasParallel('unittest_value'))
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Calling SetParallelDefault again has no effect.
ra.SetParallelDefault('unittest_value', 'junk')
self.assertTrue(ra.HasParallel('unittest_value'))
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Run through same sequence for a board-specific attribute.
with self.assertRaises(AssertionError):
ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET)
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
self.assertFalse(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
# Use SetBoardParallelDefault to set it.
ra.SetBoardParallelDefault(self.BATTR, value, self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
# Calling SetBoardParallelDefault again has no effect.
ra.SetBoardParallelDefault(self.BATTR, 'junk', self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
def testAttributeError(self):
"""Test accessing run attributes that do not exist."""
ra = self._NewRunAttributes()
value = 'foobar'
# Set/Get on made up attribute name.
self.assertRaises(AttributeError, setattr, ra, 'foo', value)
self.assertRaises(AttributeError, getattr, ra, 'foo')
# A board/target value is valid, but only if it is registered first.
self.assertRaises(AssertionError, ra.GetBoardParallel,
self.BATTR, self.BOARD, self.TARGET)
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
self.assertRaises(AttributeError, ra.GetBoardParallel,
self.BATTR, self.BOARD, self.TARGET)
class BoardRunAttributesTest(_BuilderRunTestCase):
"""Test the BoardRunAttributes class."""
BOARD = 'SomeBoard'
TARGET = 'SomeConfigName'
VALUE = 'AnyValueWillDo'
# Any valid board-specific attribute will work here.
BATTR = 'breakpad_symbols_generated'
class _SetAttr(object):
"""Stage-like class to set attr on a BoardRunAttributes obj."""
def __init__(self, bra, attr, value, delay=1):
self.bra = bra
self.attr = attr
self.value = value
self.delay = delay
def Run(self):
if self.delay:
time.sleep(self.delay)
self.bra.SetParallel(self.attr, self.value)
class _WaitForAttr(object):
"""Stage-like class to wait for attr on BoardRunAttributes obj."""
def __init__(self, bra, attr, expected_value, timeout=10):
self.bra = bra
self.attr = attr
self.expected_value = expected_value
self.timeout = timeout
def GetParallel(self):
return self.bra.GetParallel(self.attr, timeout=self.timeout)
class _CheckWaitForAttr(_WaitForAttr):
"""Stage-like class to wait for then check attr on BoardRunAttributes."""
def Run(self):
value = self.GetParallel()
assert value == self.expected_value, \
('For run attribute %s expected value %r but got %r.' %
(self.attr, self.expected_value, value))
class _TimeoutWaitForAttr(_WaitForAttr):
"""Stage-like class to time-out waiting for attr on BoardRunAttributes."""
def Run(self):
try:
self.GetParallel()
assert False, 'Expected AttrTimeoutError'
except cbuildbot_run.AttrTimeoutError:
pass
def setUp(self):
self.ra = self._NewRunAttributes()
self.bra = self.ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
def _TestParallelSetGet(self, stage_args):
"""Helper to run "stages" in parallel, according to |stage_args|.
Args:
stage_args: List of tuples of the form (stage_object, extra_args, ...)
where stage_object has a Run method which takes a BoardRunAttributes
object as the first argument and extra_args for the remaining arguments.
"""
stages = [a[0](self.bra, *a[1:]) for a in stage_args]
steps = [stage.Run for stage in stages]
parallel.RunParallelSteps(steps)
def testParallelSetGetFast(self):
"""Pass the parallel run attribute around with no delay."""
stage_args = [
(self._CheckWaitForAttr, self.BATTR, self.VALUE),
(self._SetAttr, self.BATTR, self.VALUE),
]
self._TestParallelSetGet(stage_args)
self.assertRaises(AttributeError,
getattr, self.bra, self.BATTR)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetSlow(self):
"""Pass the parallel run attribute around with a delay."""
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE, 10),
(self._TimeoutWaitForAttr, self.BATTR, self.VALUE, 2),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetManyGets(self):
"""Set the parallel run attribute in one stage, access in many stages."""
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE, 8),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._TimeoutWaitForAttr, self.BATTR, self.VALUE, 1),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetManySets(self):
"""Set the parallel run attribute in many stages, access in one stage."""
# Three "stages" set the value, with increasing delays. The stage that
# checks the value should get the first value set.
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE + '1', 1),
(self._SetAttr, self.BATTR, self.VALUE + '2', 11),
(self._CheckWaitForAttr, self.BATTR, self.VALUE + '1', 12),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE + '2', self.bra.GetParallel(self.BATTR))
def testSetGet(self):
"""Test that board-specific attrs do not work with set/get directly."""
self.assertRaises(AttributeError, setattr,
self.bra, 'breakpad_symbols_generated', self.VALUE)
self.assertRaises(AttributeError, getattr,
self.bra, 'breakpad_symbols_generated')
def testAccessRegularRunAttr(self):
"""Test that regular attributes are not known to BoardRunAttributes."""
self.assertRaises(AttributeError, getattr, self.bra, 'release_tag')
self.assertRaises(AttributeError, setattr, self.bra, 'release_tag', 'foo') | en | 0.7447 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Test the cbuildbot_run module. # pylint: disable=protected-access Extend DEFAULT_OPTIONS with keys/values in kwargs. Extend DEFAULT_CONFIG with keys/values in kwargs. Test that the exceptions in the module are sane. Test that str and pickle behavior of |err| are as expected. Test ParallelAttributeError message and pickle behavior. Test AttrSepCountError message and pickle behavior. Test AttrNotPickleableError message and pickle behavior. # TODO(mtennant): Turn this into a PartialMock. Provide methods for creating BuilderRun or ChildBuilderRun. # Mimic entering a 'with' statement. # Mimic exiting a 'with' statement. Create a BuilderRun objection from options and config values. Args: options: Specify options or default to DEFAULT_OPTIONS. config: Specify build config or default to DEFAULT_CONFIG. Returns: BuilderRun object. Create a ChildBuilderRun objection from options and config values. Args: child_index: Index of child config to use within config. options: Specify options or default to DEFAULT_OPTIONS. config: Specify build config or default to DEFAULT_CONFIG. Returns: ChildBuilderRun object. Make sure BuilderRun objects can be pickled. # Accessing a method on BuilderRun has special behavior, so access and # use one before pickling. # Access the archive object before pickling, too. # Pickle and unpickle run1 into run2. # The attrs objects should be identical. # And the run objects themselves are different. Test the BuilderRun class. # Make sure methods behave normally, since BuilderRun messes with them. # We actually do not support identity and equality checks right now. # manifest_manager is a valid run attribute. It gives Attribute error # if accessed before being set, but thereafter works fine. # foobar is not a valid run attribute. It gives AttributeError when # accessed or changed. # Check archive.archive_path. # Check archive.upload_url. # Check archive.download_url. Run the given accessor method of the BuilderRun class. Create a BuilderRun object with the options and config provided and then return the result of calling the given method on it. Args: method_name: A BuilderRun method to call, specified by name. options_dict: Extend default options with this. config_dict: Extend default config with this. Returns: Result of calling the given method. # Both option and config enabled should result in True. # Create truth table with three variables in this order: # <key> option value, <key> config value (e.g. <key> == 'prebuilts'). # If option and config have postsync_reexec enabled, and this file is not # in the build root, then we expect ShouldReexecAfterSync to return True. # Construct a truth table across three variables in this order: # postsync_reexec option value, postsync_reexec config value, same_root. Test the GetVersion and GetVersionInfo methods of BuilderRun class. # pylint: disable=protected-access Verify we throw an error when the version hasn't been set. Verify we return the right version info value. # Prepare a real BuilderRun object with a version_info tag. # Prepare a real BuilderRun object with a release tag. # Run the test return the result. Test the ChildBuilderRun class # Make sure methods behave normally, since BuilderRun messes with them. # We actually do not support identity and equality checks right now. 
Test the RunAttributes class. # Any valid board-specific attribute will work here. Test behavior of attributes before and after registering board target. Test simple set/get of regular and parallel run attributes. # The __slots__ logic above confuses pylint. # https://bitbucket.org/logilab/pylint/issue/380/ # pylint: disable=assigning-non-slot # Set/Get a regular run attribute using direct access. # Set/Get of a parallel run attribute using direct access fails. # Set/Get of a parallel run attribute with supported interface. # Set/Get a board parallel run attribute, testing both the encouraged # interface and the underlying interface. Test setting default value of parallel run attributes. # Attribute starts off not set. # Use SetParallelDefault to set it. # Calling SetParallelDefault again has no effect. # Run through same sequence for a board-specific attribute. # Use SetBoardParallelDefault to set it. # Calling SetBoardParallelDefault again has no effect. Test accessing run attributes that do not exist. # Set/Get on made up attribute name. # A board/target value is valid, but only if it is registered first. Test the BoardRunAttributes class. # Any valid board-specific attribute will work here. Stage-like class to set attr on a BoardRunAttributes obj. Stage-like class to wait for attr on BoardRunAttributes obj. Stage-like class to wait for then check attr on BoardRunAttributes. Stage-like class to time-out waiting for attr on BoardRunAttributes. Helper to run "stages" in parallel, according to |stage_args|. Args: stage_args: List of tuples of the form (stage_object, extra_args, ...) where stage_object has a Run method which takes a BoardRunAttributes object as the first argument and extra_args for the remaining arguments. Pass the parallel run attribute around with no delay. Pass the parallel run attribute around with a delay. Set the parallel run attribute in one stage, access in many stages. Set the parallel run attribute in many stages, access in one stage. # Three "stages" set the value, with increasing delays. The stage that # checks the value should get the first value set. Test that board-specific attrs do not work with set/get directly. Test that regular attributes are not known to BoardRunAttributes. | 1.783946 | 2 |
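# Illustrative sketch (hedged): a minimal reproduction of the parallel run-attribute
# pattern exercised by the unittest above. Every call used here (parallel.Manager,
# cbuildbot_run.RunAttributes, SetParallel/GetParallel and the board-specific
# variants, plus the 'unittest_value' attribute name) appears verbatim in that test
# module; a working chromite checkout is assumed and nothing beyond that is verified.
from chromite.cbuildbot import cbuildbot_run
from chromite.lib import parallel

with parallel.Manager() as manager:
  attrs = cbuildbot_run.RunAttributes(manager)

  # Process-safe ("parallel") attributes go through SetParallel/GetParallel.
  attrs.SetParallel('unittest_value', 'hello')
  assert attrs.GetParallel('unittest_value') == 'hello'

  # Board-specific attributes must first be registered for a (board, target) pair.
  attrs.RegisterBoardAttrs('SomeBoard', 'SomeConfigName')
  attrs.SetBoardParallel('breakpad_symbols_generated', 'TheValue',
                         'SomeBoard', 'SomeConfigName')
  assert attrs.GetBoardParallel('breakpad_symbols_generated',
                                'SomeBoard', 'SomeConfigName') == 'TheValue'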
code.py | lekhabajpai/nlp-intro | 0 | 6632365 | # --------------
# Importing Necessary libraries
from sklearn.datasets import fetch_20newsgroups
from pprint import pprint
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score , f1_score
from sklearn.feature_extraction.text import TfidfVectorizer
# Load the 20newsgroups dataset
df = fetch_20newsgroups(subset = 'train')
#pprint((list(df.target_names)))
#Create a list of 4 newsgroups and fetch them using the function fetch_20newsgroups
categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
newsgroup_train = fetch_20newsgroups(subset = 'train', categories=categories)
newsgroup_test = fetch_20newsgroups(subset = 'test', categories=categories)
#Use TfidfVectorizer on train data and find out the Number of Non-Zero components per sample.
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(newsgroup_train.data)
print(vectors.shape)
#Use TfidfVectorizer on test data and apply Naive Bayes model and calculate f1_score.
vectors_test = vectorizer.transform(newsgroup_test.data)
clf = MultinomialNB(alpha=0.01)
clf.fit(vectors,newsgroup_train.target)
pred = clf.predict(vectors_test)
print("f1 score: ", f1_score(newsgroup_test.target, pred, average='macro'))
#Print the top 20 news category and top 20 words for every news category
def show_top20(classifier, vectorizer, categories):
feature_names = np.asarray(vectorizer.get_feature_names())
for i, category in enumerate(categories):
top20 = np.argsort(classifier.coef_[i])[-20:]
print('%s: %s' %(category, " ".join(feature_names[top20])))
print('\n')
show_top20(clf, vectorizer, newsgroup_train.target_names)
| # --------------
# Importing Necessary libraries
from sklearn.datasets import fetch_20newsgroups
from pprint import pprint
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score , f1_score
from sklearn.feature_extraction.text import TfidfVectorizer
# Load the 20newsgroups dataset
df = fetch_20newsgroups(subset = 'train')
#pprint((list(df.target_names)))
#Create a list of 4 newsgroups and fetch them using the function fetch_20newsgroups
categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
newsgroup_train = fetch_20newsgroups(subset = 'train', categories=categories)
newsgroup_test = fetch_20newsgroups(subset = 'test', categories=categories)
#Use TfidfVectorizer on train data and find out the Number of Non-Zero components per sample.
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(newsgroup_train.data)
print(vectors.shape)
#Use TfidfVectorizer on test data and apply Naive Bayes model and calculate f1_score.
vectors_test = vectorizer.transform(newsgroup_test.data)
clf = MultinomialNB(alpha=0.01)
clf.fit(vectors,newsgroup_train.target)
pred = clf.predict(vectors_test)
print("f1 score: ", f1_score(newsgroup_test.target, pred, average='macro'))
#Print the top 20 news category and top 20 words for every news category
def show_top20(classifier, vectorizer, categories):
feature_names = np.asarray(vectorizer.get_feature_names())
for i, category in enumerate(categories):
top20 = np.argsort(classifier.coef_[i])[-20:]
print('%s: %s' %(category, " ".join(feature_names[top20])))
print('\n')
show_top20(clf, vectorizer, newsgroup_train.target_names)
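#A hedged, more compact variant of the classification flow above: the same
#TfidfVectorizer + MultinomialNB combination, but wrapped in a scikit-learn
#Pipeline so vectorization and fitting happen through one object. The category
#list and alpha come from the snippet above; the Pipeline wrapping is an
#editorial alternative, not part of the original script.
from sklearn.datasets import fetch_20newsgroups
from sklearn.pipeline import make_pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import f1_score

categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
train = fetch_20newsgroups(subset='train', categories=categories)
test = fetch_20newsgroups(subset='test', categories=categories)

#Vectorize and fit in one step, then score on the held-out test split.
model = make_pipeline(TfidfVectorizer(), MultinomialNB(alpha=0.01))
model.fit(train.data, train.target)
pred = model.predict(test.data)
print("f1 score:", f1_score(test.target, pred, average='macro'))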
| en | 0.622246 | # -------------- # Importing Necessary libraries # Load the 20newsgroups dataset #pprint((list(df.target_names))) #Create a list of 4 newsgroup and fetch it using function fetch_20newsgroups #Use TfidfVectorizer on train data and find out the Number of Non-Zero components per sample. #Use TfidfVectorizer on test data and apply Naive Bayes model and calculate f1_score. #Print the top 20 news category and top 20 words for every news category | 3.028801 | 3 |
mytb/importlib/__init__.py | quentinql/mytb | 0 | 6632366 | #!/usr/bin/env python
# #############################################################################
# Copyright : (C) 2017-2021 by Teledomic.eu All rights reserved
#
# Name: mytb.importlib
#
# Description: helper for locating / importing modules
#
# #############################################################################
import importlib
def module_exists(modulename):
""" tries to find out whether a module exists """
import pkgutil
moduleparts = modulename.split('.')
mod = ""
for part in moduleparts:
mod += '.' + part if mod else part
if pkgutil.find_loader(mod) is None:
return False
return True
def import_obj(obj_str):
"""
handles input string as module.object
and imports the module and returns the object
"""
if ':' in obj_str:
modname, objname = obj_str.split(':')
else:
modname, objname = obj_str.rsplit('.', 1)
mod = importlib.import_module(modname)
return getattr(mod, objname)
def import_if_mod_exists(mod_or_func_str):
""" attempts to import a module or an object if the
        module exists
        returns None if the module does not exist and should
raise an exception if there's another problem
during the module's import
:param mod_or_func_str: if str contains ":", then the last
part is considered to be an object within
the module
"""
mod_name = mod_or_func_str.split(":")[0]
get_obj = ":" in mod_or_func_str
rslt = None
try:
if get_obj:
rslt = import_obj(mod_or_func_str)
else:
rslt = importlib.import_module(mod_or_func_str)
except ModuleNotFoundError as exc:
msg = str(exc)
if "'%s'" % mod_name not in msg:
raise
return rslt
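# Hedged usage sketch for the helpers defined above, exercised against standard
# library modules only; it assumes nothing beyond mytb itself being importable.
from mytb.importlib import module_exists, import_obj, import_if_mod_exists

assert module_exists('os.path') is True
assert module_exists('no.such.module') is False

# Both "pkg.mod:obj" and "pkg.mod.obj" spellings resolve to the same object.
assert import_obj('os.path:join') is import_obj('os.path.join')

# Returns None when the module is missing instead of raising.
assert import_if_mod_exists('no_such_module') is None
assert import_if_mod_exists('json:dumps') is not None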
| #!/usr/bin/env python
# #############################################################################
# Copyright : (C) 2017-2021 by Teledomic.eu All rights reserved
#
# Name: mytb.importlib
#
# Description: helper for locating / importing modules
#
# #############################################################################
import importlib
def module_exists(modulename):
""" tries to find out whether a module exists """
import pkgutil
moduleparts = modulename.split('.')
mod = ""
for part in moduleparts:
mod += '.' + part if mod else part
if pkgutil.find_loader(mod) is None:
return False
return True
def import_obj(obj_str):
"""
handles input string as module.object
and imports the module and returns the object
"""
if ':' in obj_str:
modname, objname = obj_str.split(':')
else:
modname, objname = obj_str.rsplit('.', 1)
mod = importlib.import_module(modname)
return getattr(mod, objname)
def import_if_mod_exists(mod_or_func_str):
""" attempts to import a module or an object if the
        module exists
        returns None if the module does not exist and should
raise an exception if there's another problem
during the module's import
:param mod_or_func_str: if str contains ":", then the last
part is considered to be an object within
the module
"""
mod_name = mod_or_func_str.split(":")[0]
get_obj = ":" in mod_or_func_str
rslt = None
try:
if get_obj:
rslt = import_obj(mod_or_func_str)
else:
rslt = importlib.import_module(mod_or_func_str)
except ModuleNotFoundError as exc:
msg = str(exc)
if "'%s'" % mod_name not in msg:
raise
return rslt
| en | 0.372383 | #!/usr/bin/env python # ############################################################################# # Copyright : (C) 2017-2021 by Teledomic.eu All rights reserved # # Name: mytb.importlib # # Description: helper for locating / importing modules # # ############################################################################# tries to find out whether a module exists handles input string as module.object and imports the module and returns the object attempts to import a module or an object if the nodule exists returns None if module is not existing and should raise an exception if there's another problem during the module's import :param mod_or_func_str: if str contains ":", then the last part is considered to be an object within the module | 3.030757 | 3 |
GNU-Radio/am_receiver.py | IgrikXD/SDR-Exp | 7 | 6632367 | <gh_stars>1-10
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: AM Receiver
# Author: <NAME>
# GNU Radio version: 3.7.13.5
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import pmt
import sip
import sys
from gnuradio import qtgui
class am_receiver(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "AM Receiver")
Qt.QWidget.__init__(self)
self.setWindowTitle("AM Receiver")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "am_receiver")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.lo_freq = lo_freq = 0
self.center_freq = center_freq = 710000
self.station_freq = station_freq = center_freq-lo_freq
self.samp_rate = samp_rate = 256000
self.resamp_factor = resamp_factor = 4
##################################################
# Blocks
##################################################
self._lo_freq_range = Range(-120000, 120000, 5000, 0, 200)
self._lo_freq_win = RangeWidget(self._lo_freq_range, self.set_lo_freq, 'Frequency shift', "counter_slider", float)
self.top_grid_layout.addWidget(self._lo_freq_win)
self._station_freq_tool_bar = Qt.QToolBar(self)
if None:
self._station_freq_formatter = None
else:
self._station_freq_formatter = lambda x: str(x)
self._station_freq_tool_bar.addWidget(Qt.QLabel('Station frequency'+": "))
self._station_freq_label = Qt.QLabel(str(self._station_freq_formatter(self.station_freq)))
self._station_freq_tool_bar.addWidget(self._station_freq_label)
self.top_grid_layout.addWidget(self._station_freq_tool_bar)
self.rational_resampler_xxx_1 = filter.rational_resampler_fff(
interpolation=3,
decimation=4,
taps=None,
fractional_bw=None,
)
self.qtgui_freq_sink_x_0_0_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
center_freq-lo_freq, #fc
samp_rate, #bw
'Signal spectrum', #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0_0.set_update_time(0.05)
self.qtgui_freq_sink_x_0_0_0.set_y_axis(-60, 50)
self.qtgui_freq_sink_x_0_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_0.enable_grid(True)
self.qtgui_freq_sink_x_0_0_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0_0.enable_control_panel(True)
if not True:
self.qtgui_freq_sink_x_0_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_0_0_win)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
center_freq-lo_freq, #fc
samp_rate/resamp_factor, #bw
'Station spectrum', #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.05)
self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(True)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_win)
self.low_pass_filter_0 = filter.fir_filter_ccf(resamp_factor, firdes.low_pass(
1, 256000, 5000, 100, firdes.WIN_HAMMING, 6.76))
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, 'D:\\am_usrp710.dat', True)
self.blocks_file_source_0.set_begin_tag(pmt.PMT_NIL)
self.audio_sink_0 = audio.sink(48000, '', True)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, lo_freq, 1, 0)
self.analog_am_demod_cf_0 = analog.am_demod_cf(
channel_rate=samp_rate/resamp_factor,
audio_decim=1,
audio_pass=<PASSWORD>,
audio_stop=5500,
)
self.analog_agc2_xx_0 = analog.agc2_cc(6.25e-4, 1e-5, 0.2, 1.0)
self.analog_agc2_xx_0.set_max_gain(65536)
##################################################
# Connections
##################################################
self.connect((self.analog_agc2_xx_0, 0), (self.analog_am_demod_cf_0, 0))
self.connect((self.analog_am_demod_cf_0, 0), (self.rational_resampler_xxx_1, 0))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.blocks_file_source_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.low_pass_filter_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.qtgui_freq_sink_x_0_0_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.analog_agc2_xx_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.audio_sink_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.qtgui_freq_sink_x_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "am_receiver")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_lo_freq(self):
return self.lo_freq
def set_lo_freq(self, lo_freq):
self.lo_freq = lo_freq
self.set_station_freq(self._station_freq_formatter(self.center_freq-self.lo_freq))
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate/self.resamp_factor)
self.analog_sig_source_x_0.set_frequency(self.lo_freq)
def get_center_freq(self):
return self.center_freq
def set_center_freq(self, center_freq):
self.center_freq = center_freq
self.set_station_freq(self._station_freq_formatter(self.center_freq-self.lo_freq))
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate/self.resamp_factor)
def get_station_freq(self):
return self.station_freq
def set_station_freq(self, station_freq):
self.station_freq = station_freq
Qt.QMetaObject.invokeMethod(self._station_freq_label, "setText", Qt.Q_ARG("QString", self.station_freq))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate/self.resamp_factor)
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
def get_resamp_factor(self):
return self.resamp_factor
def set_resamp_factor(self, resamp_factor):
self.resamp_factor = resamp_factor
self.qtgui_freq_sink_x_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate/self.resamp_factor)
def main(top_block_cls=am_receiver, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
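# Hedged sanity-check sketch for the sample-rate chain of the flow graph above:
# the channel filter decimates 256 kS/s by resamp_factor (4) to the 64 kS/s rate
# handed to the AM demodulator, and the 3/4 rational resampler then brings that
# to the 48 kHz rate expected by the audio sink. Pure arithmetic on the constants
# used in the flow graph; no GNU Radio calls are involved.
samp_rate = 256000
resamp_factor = 4
channel_rate = samp_rate // resamp_factor   # rate after low_pass_filter_0
audio_rate = channel_rate * 3 // 4          # after rational_resampler (interp=3, decim=4)

assert channel_rate == 64000
assert audio_rate == 48000                  # matches audio.sink(48000, ...)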
| #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: AM Receiver
# Author: <NAME>
# GNU Radio version: 3.7.13.5
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import pmt
import sip
import sys
from gnuradio import qtgui
class am_receiver(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "AM Receiver")
Qt.QWidget.__init__(self)
self.setWindowTitle("AM Receiver")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "am_receiver")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.lo_freq = lo_freq = 0
self.center_freq = center_freq = 710000
self.station_freq = station_freq = center_freq-lo_freq
self.samp_rate = samp_rate = 256000
self.resamp_factor = resamp_factor = 4
##################################################
# Blocks
##################################################
self._lo_freq_range = Range(-120000, 120000, 5000, 0, 200)
self._lo_freq_win = RangeWidget(self._lo_freq_range, self.set_lo_freq, 'Frequency shift', "counter_slider", float)
self.top_grid_layout.addWidget(self._lo_freq_win)
self._station_freq_tool_bar = Qt.QToolBar(self)
if None:
self._station_freq_formatter = None
else:
self._station_freq_formatter = lambda x: str(x)
self._station_freq_tool_bar.addWidget(Qt.QLabel('Station frequency'+": "))
self._station_freq_label = Qt.QLabel(str(self._station_freq_formatter(self.station_freq)))
self._station_freq_tool_bar.addWidget(self._station_freq_label)
self.top_grid_layout.addWidget(self._station_freq_tool_bar)
self.rational_resampler_xxx_1 = filter.rational_resampler_fff(
interpolation=3,
decimation=4,
taps=None,
fractional_bw=None,
)
self.qtgui_freq_sink_x_0_0_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
center_freq-lo_freq, #fc
samp_rate, #bw
'Signal spectrum', #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0_0.set_update_time(0.05)
self.qtgui_freq_sink_x_0_0_0.set_y_axis(-60, 50)
self.qtgui_freq_sink_x_0_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_0.enable_grid(True)
self.qtgui_freq_sink_x_0_0_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0_0.enable_control_panel(True)
if not True:
self.qtgui_freq_sink_x_0_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_0_0_win)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
center_freq-lo_freq, #fc
samp_rate/resamp_factor, #bw
'Station spectrum', #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.05)
self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(True)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_win)
self.low_pass_filter_0 = filter.fir_filter_ccf(resamp_factor, firdes.low_pass(
1, 256000, 5000, 100, firdes.WIN_HAMMING, 6.76))
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, 'D:\\am_usrp710.dat', True)
self.blocks_file_source_0.set_begin_tag(pmt.PMT_NIL)
self.audio_sink_0 = audio.sink(48000, '', True)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, lo_freq, 1, 0)
self.analog_am_demod_cf_0 = analog.am_demod_cf(
channel_rate=samp_rate/resamp_factor,
audio_decim=1,
audio_pass=<PASSWORD>,
audio_stop=5500,
)
self.analog_agc2_xx_0 = analog.agc2_cc(6.25e-4, 1e-5, 0.2, 1.0)
self.analog_agc2_xx_0.set_max_gain(65536)
##################################################
# Connections
##################################################
self.connect((self.analog_agc2_xx_0, 0), (self.analog_am_demod_cf_0, 0))
self.connect((self.analog_am_demod_cf_0, 0), (self.rational_resampler_xxx_1, 0))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.blocks_file_source_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.low_pass_filter_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.qtgui_freq_sink_x_0_0_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.analog_agc2_xx_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.audio_sink_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.qtgui_freq_sink_x_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "am_receiver")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_lo_freq(self):
return self.lo_freq
def set_lo_freq(self, lo_freq):
self.lo_freq = lo_freq
self.set_station_freq(self._station_freq_formatter(self.center_freq-self.lo_freq))
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate/self.resamp_factor)
self.analog_sig_source_x_0.set_frequency(self.lo_freq)
def get_center_freq(self):
return self.center_freq
def set_center_freq(self, center_freq):
self.center_freq = center_freq
self.set_station_freq(self._station_freq_formatter(self.center_freq-self.lo_freq))
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate/self.resamp_factor)
def get_station_freq(self):
return self.station_freq
def set_station_freq(self, station_freq):
self.station_freq = station_freq
Qt.QMetaObject.invokeMethod(self._station_freq_label, "setText", Qt.Q_ARG("QString", self.station_freq))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate/self.resamp_factor)
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
def get_resamp_factor(self):
return self.resamp_factor
def set_resamp_factor(self, resamp_factor):
self.resamp_factor = resamp_factor
self.qtgui_freq_sink_x_0.set_frequency_range(self.center_freq-self.lo_freq, self.samp_rate/self.resamp_factor)
def main(top_block_cls=am_receiver, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main() | de | 0.678998 | #!/usr/bin/env python2 # -*- coding: utf-8 -*- ################################################## # GNU Radio Python Flow Graph # Title: AM Receiver # Author: <NAME> # GNU Radio version: 3.7.13.5 ################################################## ################################################## # Variables ################################################## ################################################## # Blocks ################################################## #size #wintype #fc #bw #name #number of inputs #size #wintype #fc #bw #name #number of inputs ################################################## # Connections ################################################## | 2.141239 | 2 |
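# The flowgraph above tunes a station by mixing: blocks_multiply_xx_0 multiplies the
# recorded IQ samples with analog_sig_source_x_0 (a complex sinusoid at lo_freq),
# which shifts every spectral component by lo_freq before the low-pass filter, AGC
# and AM envelope detector. A minimal NumPy sketch of that mixing step follows;
# the tone frequency and capture length are invented for illustration only.
import numpy as np
samp_rate = 256000
lo_freq = 5000
t = np.arange(0, 0.01, 1.0 / samp_rate)
signal = np.exp(2j * np.pi * 12000 * t)      # pretend station component at +12 kHz
lo = np.exp(2j * np.pi * lo_freq * t)        # equivalent of the analog.GR_COS_WAVE source
mixed = signal * lo                          # equivalent of blocks_multiply_xx_0
spectrum = np.fft.fftshift(np.abs(np.fft.fft(mixed)))
freqs = np.fft.fftshift(np.fft.fftfreq(t.size, 1.0 / samp_rate))
print(freqs[np.argmax(spectrum)])            # peak near 12000 + lo_freq = 17000 Hz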
pytext/models/embeddings/char_embedding.py | baronrustamov/pytext | 1 | 6632368 | <gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytext.config.field_config import CharFeatConfig
from pytext.data.utils import Vocabulary
from pytext.fields import FieldMeta
from pytext.utils.usage import log_class_usage
from .embedding_base import EmbeddingBase
class CharacterEmbedding(EmbeddingBase):
"""
Module for character aware CNN embeddings for tokens. It uses convolution
followed by max-pooling over character embeddings to obtain an embedding
vector for each token.
Implementation is loosely based on https://arxiv.org/abs/1508.06615.
Args:
num_embeddings (int): Total number of characters (vocabulary size).
embed_dim (int): Size of character embeddings to be passed to convolutions.
out_channels (int): Number of output channels.
        kernel_sizes (List[int]): Sizes of the convolution kernels applied
            over the character embeddings.
highway_layers (int): Number of highway layers applied to pooled output.
projection_dim (int): If specified, size of output embedding for token, via
a linear projection from convolution output.
Attributes:
char_embed (nn.Embedding): Character embedding table.
convs (nn.ModuleList): Convolution layers that operate on character
embeddings.
highway_layers (nn.Module): Highway layers on top of convolution output.
projection (nn.Module): Final linear layer to token embedding.
embedding_dim (int): Dimension of the final token embedding produced.
"""
Config = CharFeatConfig
@classmethod
def from_config(
cls,
config: CharFeatConfig,
metadata: Optional[FieldMeta] = None,
vocab_size: Optional[int] = None,
):
"""Factory method to construct an instance of CharacterEmbedding from
the module's config object and the field's metadata object.
Args:
config (CharFeatConfig): Configuration object specifying all the
parameters of CharacterEmbedding.
metadata (FieldMeta): Object containing this field's metadata.
Returns:
type: An instance of CharacterEmbedding.
"""
if vocab_size is None:
vocab_size = metadata.vocab_size
return cls(
vocab_size,
config.embed_dim,
config.cnn.kernel_num,
config.cnn.kernel_sizes,
config.highway_layers,
config.projection_dim,
)
def __init__(
self,
num_embeddings: int,
embed_dim: int,
out_channels: int,
kernel_sizes: List[int],
highway_layers: int,
projection_dim: Optional[int],
*args,
**kwargs,
) -> None:
conv_out_dim = len(kernel_sizes) * out_channels
output_dim = projection_dim or conv_out_dim
super().__init__(output_dim)
self.char_embed = nn.Embedding(num_embeddings, embed_dim)
self.convs = nn.ModuleList(
[
# in_channels = embed_dim because input is treated as sequence
# of dim [max_word_length] with embed_dim channels
# Adding padding to provide robustness in cases where input
# length is less than conv filter width
nn.Conv1d(embed_dim, out_channels, K, padding=K // 2)
for K in kernel_sizes
]
)
self.highway = None
if highway_layers > 0:
self.highway = Highway(conv_out_dim, highway_layers)
self.projection = None
if projection_dim:
self.projection = nn.Linear(conv_out_dim, projection_dim)
log_class_usage(__class__)
def forward(self, chars: torch.Tensor) -> torch.Tensor:
"""
Given a batch of sentences such that tokens are broken into character ids,
produce token embedding vectors for each sentence in the batch.
Args:
chars (torch.Tensor): Batch of sentences where each token is broken
into characters.
Dimension: batch size X maximum sentence length X maximum word length
Returns:
torch.Tensor: Embedded batch of sentences. Dimension:
batch size X maximum sentence length, token embedding size.
Token embedding size = `out_channels * len(self.convs))`
"""
batch_size = chars.size(0)
max_sent_length = chars.size(1)
max_word_length = chars.size(2)
chars = chars.view(batch_size * max_sent_length, max_word_length)
# char_embedding: (bsize * max_sent_length, max_word_length, embed_dim)
char_embedding = self.char_embed(chars)
# conv_inp dim: (bsize * max_sent_length, emb_size, max_word_length)
conv_inp = char_embedding.transpose(1, 2)
char_conv_outs = [F.relu(conv(conv_inp)) for conv in self.convs]
# Apply max pooling
# char_pool_out[i] dims: (bsize * max_sent_length, out_channels)
char_pool_outs = [torch.max(out, dim=2)[0] for out in char_conv_outs]
# Concat different feature maps together
# char_pool_out dim: (bsize * max_sent_length, out_channel * num_kernels)
char_out = torch.cat(char_pool_outs, 1)
# Highway layers, preserves dims
if self.highway is not None:
char_out = self.highway(char_out)
if self.projection is not None:
# Linear map back to final embedding size:
# (bsize * max_sent_length, projection_dim)
char_out = self.projection(char_out)
# Reshape to (bsize, max_sent_length, "output_dim")
return char_out.view(batch_size, max_sent_length, -1)
class Highway(nn.Module):
"""
A `Highway layer <https://arxiv.org/abs/1505.00387>`.
Adopted from the AllenNLP implementation.
"""
def __init__(self, input_dim: int, num_layers: int = 1):
super().__init__()
self.input_dim = input_dim
self.layers = nn.ModuleList(
[nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]
)
self.activation = nn.ReLU()
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
# As per comment in AllenNLP:
# We should bias the highway layer to just carry its input forward. We do
# that by setting the bias on `B(x)` to be positive, because that means `g`
# will be biased to be high, so we will carry the input forward. The bias
# on `B(x)` is the second half of the bias vector in each Linear layer.
nn.init.constant_(layer.bias[self.input_dim :], 1)
nn.init.constant_(layer.bias[: self.input_dim], 0)
nn.init.xavier_normal_(layer.weight)
def forward(self, x: torch.Tensor):
for layer in self.layers:
projection = layer(x)
proj_x, gate = projection.chunk(2, dim=-1)
proj_x = self.activation(proj_x)
gate = F.sigmoid(gate)
x = gate * x + (gate.new_tensor([1]) - gate) * proj_x
return x
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytext.config.field_config import CharFeatConfig
from pytext.data.utils import Vocabulary
from pytext.fields import FieldMeta
from pytext.utils.usage import log_class_usage
from .embedding_base import EmbeddingBase
class CharacterEmbedding(EmbeddingBase):
"""
Module for character aware CNN embeddings for tokens. It uses convolution
followed by max-pooling over character embeddings to obtain an embedding
vector for each token.
Implementation is loosely based on https://arxiv.org/abs/1508.06615.
Args:
num_embeddings (int): Total number of characters (vocabulary size).
embed_dim (int): Size of character embeddings to be passed to convolutions.
out_channels (int): Number of output channels.
        kernel_sizes (List[int]): Sizes of the convolution kernels applied
            over the character embeddings.
highway_layers (int): Number of highway layers applied to pooled output.
projection_dim (int): If specified, size of output embedding for token, via
a linear projection from convolution output.
Attributes:
char_embed (nn.Embedding): Character embedding table.
convs (nn.ModuleList): Convolution layers that operate on character
embeddings.
highway_layers (nn.Module): Highway layers on top of convolution output.
projection (nn.Module): Final linear layer to token embedding.
embedding_dim (int): Dimension of the final token embedding produced.
"""
Config = CharFeatConfig
@classmethod
def from_config(
cls,
config: CharFeatConfig,
metadata: Optional[FieldMeta] = None,
vocab_size: Optional[int] = None,
):
"""Factory method to construct an instance of CharacterEmbedding from
the module's config object and the field's metadata object.
Args:
config (CharFeatConfig): Configuration object specifying all the
parameters of CharacterEmbedding.
metadata (FieldMeta): Object containing this field's metadata.
Returns:
type: An instance of CharacterEmbedding.
"""
if vocab_size is None:
vocab_size = metadata.vocab_size
return cls(
vocab_size,
config.embed_dim,
config.cnn.kernel_num,
config.cnn.kernel_sizes,
config.highway_layers,
config.projection_dim,
)
def __init__(
self,
num_embeddings: int,
embed_dim: int,
out_channels: int,
kernel_sizes: List[int],
highway_layers: int,
projection_dim: Optional[int],
*args,
**kwargs,
) -> None:
conv_out_dim = len(kernel_sizes) * out_channels
output_dim = projection_dim or conv_out_dim
super().__init__(output_dim)
self.char_embed = nn.Embedding(num_embeddings, embed_dim)
self.convs = nn.ModuleList(
[
# in_channels = embed_dim because input is treated as sequence
# of dim [max_word_length] with embed_dim channels
# Adding padding to provide robustness in cases where input
# length is less than conv filter width
nn.Conv1d(embed_dim, out_channels, K, padding=K // 2)
for K in kernel_sizes
]
)
self.highway = None
if highway_layers > 0:
self.highway = Highway(conv_out_dim, highway_layers)
self.projection = None
if projection_dim:
self.projection = nn.Linear(conv_out_dim, projection_dim)
log_class_usage(__class__)
def forward(self, chars: torch.Tensor) -> torch.Tensor:
"""
Given a batch of sentences such that tokens are broken into character ids,
produce token embedding vectors for each sentence in the batch.
Args:
chars (torch.Tensor): Batch of sentences where each token is broken
into characters.
Dimension: batch size X maximum sentence length X maximum word length
Returns:
torch.Tensor: Embedded batch of sentences. Dimension:
batch size X maximum sentence length, token embedding size.
Token embedding size = `out_channels * len(self.convs))`
"""
batch_size = chars.size(0)
max_sent_length = chars.size(1)
max_word_length = chars.size(2)
chars = chars.view(batch_size * max_sent_length, max_word_length)
# char_embedding: (bsize * max_sent_length, max_word_length, embed_dim)
char_embedding = self.char_embed(chars)
# conv_inp dim: (bsize * max_sent_length, emb_size, max_word_length)
conv_inp = char_embedding.transpose(1, 2)
char_conv_outs = [F.relu(conv(conv_inp)) for conv in self.convs]
# Apply max pooling
# char_pool_out[i] dims: (bsize * max_sent_length, out_channels)
char_pool_outs = [torch.max(out, dim=2)[0] for out in char_conv_outs]
# Concat different feature maps together
# char_pool_out dim: (bsize * max_sent_length, out_channel * num_kernels)
char_out = torch.cat(char_pool_outs, 1)
# Highway layers, preserves dims
if self.highway is not None:
char_out = self.highway(char_out)
if self.projection is not None:
# Linear map back to final embedding size:
# (bsize * max_sent_length, projection_dim)
char_out = self.projection(char_out)
# Reshape to (bsize, max_sent_length, "output_dim")
return char_out.view(batch_size, max_sent_length, -1)
class Highway(nn.Module):
"""
A `Highway layer <https://arxiv.org/abs/1505.00387>`.
Adopted from the AllenNLP implementation.
"""
def __init__(self, input_dim: int, num_layers: int = 1):
super().__init__()
self.input_dim = input_dim
self.layers = nn.ModuleList(
[nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]
)
self.activation = nn.ReLU()
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
# As per comment in AllenNLP:
# We should bias the highway layer to just carry its input forward. We do
# that by setting the bias on `B(x)` to be positive, because that means `g`
# will be biased to be high, so we will carry the input forward. The bias
# on `B(x)` is the second half of the bias vector in each Linear layer.
nn.init.constant_(layer.bias[self.input_dim :], 1)
nn.init.constant_(layer.bias[: self.input_dim], 0)
nn.init.xavier_normal_(layer.weight)
def forward(self, x: torch.Tensor):
for layer in self.layers:
projection = layer(x)
proj_x, gate = projection.chunk(2, dim=-1)
proj_x = self.activation(proj_x)
gate = F.sigmoid(gate)
x = gate * x + (gate.new_tensor([1]) - gate) * proj_x
return x | en | 0.726061 | #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved Module for character aware CNN embeddings for tokens. It uses convolution followed by max-pooling over character embeddings to obtain an embedding vector for each token. Implementation is loosely based on https://arxiv.org/abs/1508.06615. Args: num_embeddings (int): Total number of characters (vocabulary size). embed_dim (int): Size of character embeddings to be passed to convolutions. out_channels (int): Number of output channels. kernel_sizes (List[int]): Dimension of input Tensor passed to MLP. highway_layers (int): Number of highway layers applied to pooled output. projection_dim (int): If specified, size of output embedding for token, via a linear projection from convolution output. Attributes: char_embed (nn.Embedding): Character embedding table. convs (nn.ModuleList): Convolution layers that operate on character embeddings. highway_layers (nn.Module): Highway layers on top of convolution output. projection (nn.Module): Final linear layer to token embedding. embedding_dim (int): Dimension of the final token embedding produced. Factory method to construct an instance of CharacterEmbedding from the module's config object and the field's metadata object. Args: config (CharFeatConfig): Configuration object specifying all the parameters of CharacterEmbedding. metadata (FieldMeta): Object containing this field's metadata. Returns: type: An instance of CharacterEmbedding. # in_channels = embed_dim because input is treated as sequence # of dim [max_word_length] with embed_dim channels # Adding padding to provide robustness in cases where input # length is less than conv filter width Given a batch of sentences such that tokens are broken into character ids, produce token embedding vectors for each sentence in the batch. Args: chars (torch.Tensor): Batch of sentences where each token is broken into characters. Dimension: batch size X maximum sentence length X maximum word length Returns: torch.Tensor: Embedded batch of sentences. Dimension: batch size X maximum sentence length, token embedding size. Token embedding size = `out_channels * len(self.convs))` # char_embedding: (bsize * max_sent_length, max_word_length, embed_dim) # conv_inp dim: (bsize * max_sent_length, emb_size, max_word_length) # Apply max pooling # char_pool_out[i] dims: (bsize * max_sent_length, out_channels) # Concat different feature maps together # char_pool_out dim: (bsize * max_sent_length, out_channel * num_kernels) # Highway layers, preserves dims # Linear map back to final embedding size: # (bsize * max_sent_length, projection_dim) # Reshape to (bsize, max_sent_length, "output_dim") A `Highway layer <https://arxiv.org/abs/1505.00387>`. Adopted from the AllenNLP implementation. # As per comment in AllenNLP: # We should bias the highway layer to just carry its input forward. We do # that by setting the bias on `B(x)` to be positive, because that means `g` # will be biased to be high, so we will carry the input forward. The bias # on `B(x)` is the second half of the bias vector in each Linear layer. | 2.562251 | 3 |
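# A short usage sketch for the CharacterEmbedding module above. The vocabulary
# size, kernel sizes and tensor shapes are illustrative assumptions rather than
# values taken from a real PyText configuration.
import torch
char_emb = CharacterEmbedding(
    num_embeddings=262,        # e.g. a byte vocabulary plus a few special ids
    embed_dim=20,
    out_channels=100,
    kernel_sizes=[2, 3, 4],
    highway_layers=1,
    projection_dim=None,       # output stays at len(kernel_sizes) * out_channels
)
chars = torch.randint(0, 262, (8, 15, 12))   # batch x max tokens x max chars per token
tokens = char_emb(chars)
print(tokens.shape)                          # torch.Size([8, 15, 300])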
orquestador/nyc_ccci_etl/orchestrator_tasks/load_bias_fairness_metadata.py | gemathus/dpa-2020 | 1 | 6632369 | import json
import luigi
from datetime import datetime
from luigi.contrib.postgres import CopyToTable
from nyc_ccci_etl.commons.configuration import get_database_connection_parameters
from nyc_ccci_etl.utils.get_os_user import get_os_user
from nyc_ccci_etl.utils.get_current_ip import get_current_ip
from nyc_ccci_etl.orchestrator_tasks.load_aequitas_groups import LoadAequitasGroups
from nyc_ccci_etl.orchestrator_tasks.load_aequitas_bias import LoadAequitasBias
from nyc_ccci_etl.orchestrator_tasks.load_aequitas_fairness import LoadAequitasFairness
from nyc_ccci_etl.metadata_helper.metadata_helper import MetadataHelper
class LoadBiasFairnessMetadata(CopyToTable):
year = luigi.IntParameter()
month = luigi.IntParameter()
day = luigi.IntParameter()
pipeline_type = luigi.Parameter()
def requires(self):
return (
LoadAequitasGroups(self.year, self.month, self.day, self.pipeline_type),
LoadAequitasBias(self.year, self.month, self.day, self.pipeline_type),
LoadAequitasFairness(self.year, self.month, self.day, self.pipeline_type)
)
host, database, user, password = get_database_connection_parameters()
table = "aequitas.metadata"
schema = "aequitas"
columns = [
("executed_at", "timestamp"),
("task_params", "varchar"),
("bias_records", "integer"),
("fairness_records", "integer"),
("groups_records", "integer"),
("execution_user", "varchar"),
("source_ip", "varchar"),
]
def run(self):
helper = MetadataHelper(self.year, self.month, self.day)
self.inserted_bias_records = helper.get_inserted_aequitas_bias()
self.inserted_fairness_records = helper.get_inserted_aequitas_fairness()
self.inserted_groups_records = helper.get_inserted_aequitas_groups()
super().run()
def rows(self):
params_string = "year={} month={} day={}".format(str(self.year), str(self.month), str(self.day))
row = (
str(datetime.now(tz=None)),
params_string,
self.inserted_bias_records,
self.inserted_fairness_records,
self.inserted_groups_records,
get_os_user(),
get_current_ip()
)
yield row | import json
import luigi
from datetime import datetime
from luigi.contrib.postgres import CopyToTable
from nyc_ccci_etl.commons.configuration import get_database_connection_parameters
from nyc_ccci_etl.utils.get_os_user import get_os_user
from nyc_ccci_etl.utils.get_current_ip import get_current_ip
from nyc_ccci_etl.orchestrator_tasks.load_aequitas_groups import LoadAequitasGroups
from nyc_ccci_etl.orchestrator_tasks.load_aequitas_bias import LoadAequitasBias
from nyc_ccci_etl.orchestrator_tasks.load_aequitas_fairness import LoadAequitasFairness
from nyc_ccci_etl.metadata_helper.metadata_helper import MetadataHelper
class LoadBiasFairnessMetadata(CopyToTable):
year = luigi.IntParameter()
month = luigi.IntParameter()
day = luigi.IntParameter()
pipeline_type = luigi.Parameter()
def requires(self):
return (
LoadAequitasGroups(self.year, self.month, self.day, self.pipeline_type),
LoadAequitasBias(self.year, self.month, self.day, self.pipeline_type),
LoadAequitasFairness(self.year, self.month, self.day, self.pipeline_type)
)
host, database, user, password = get_database_connection_parameters()
table = "aequitas.metadata"
schema = "aequitas"
columns = [
("executed_at", "timestamp"),
("task_params", "varchar"),
("bias_records", "integer"),
("fairness_records", "integer"),
("groups_records", "integer"),
("execution_user", "varchar"),
("source_ip", "varchar"),
]
def run(self):
helper = MetadataHelper(self.year, self.month, self.day)
self.inserted_bias_records = helper.get_inserted_aequitas_bias()
self.inserted_fairness_records = helper.get_inserted_aequitas_fairness()
self.inserted_groups_records = helper.get_inserted_aequitas_groups()
super().run()
def rows(self):
params_string = "year={} month={} day={}".format(str(self.year), str(self.month), str(self.day))
row = (
str(datetime.now(tz=None)),
params_string,
self.inserted_bias_records,
self.inserted_fairness_records,
self.inserted_groups_records,
get_os_user(),
get_current_ip()
)
yield row | none | 1 | 1.995291 | 2 |
|
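# A hedged sketch of launching the metadata task defined above from Python. The
# parameter values and the use of the local scheduler are assumptions for a quick
# manual run; the real pipeline presumably drives this task through its orchestrator.
import luigi
if __name__ == "__main__":
    luigi.build(
        [LoadBiasFairnessMetadata(year=2020, month=4, day=15, pipeline_type="bias_fairness")],
        local_scheduler=True,
    )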
ramp_experiment/__main__.py | Deric-W/Supratix_experiment | 0 | 6632370 | #!/usr/bin/python3
# run server (dont forget to set the pins 3 and 2 to GPIO mode and enable PWM)
import logging
import signal
import time
from argparse import ArgumentParser
from typing import NamedTuple, Optional
from configparser import ConfigParser, NoOptionError
from contextlib import ExitStack
from enum import Enum
from queue import Empty, SimpleQueue
from threading import Event
import paho.mqtt.client as mqtt
from onionGpio import Direction, Edge, OnionGpio, Value
from onionPwm import OnionPwm
from . import __version__
from .A4988 import A4988
from .motor import WormMotor
from .mqtt import ANGLE_STRUCT, TIMESTAMP_STRUCT, Status
from .ramp import Ramp
class Topics(NamedTuple):
"""named tuple containing the mqtt topics"""
status: str
last_timestamp: str
current_angle: str
target_angle: str
class LogLevel(Enum):
"""logelevels as strings"""
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
FATAL = "fatal"
def to_level(self) -> int:
"""convert variant to log level"""
return getattr(logging, self.name)
class Waiter:
"""class enforcing a certain time period between actions"""
def __init__(self, period: float) -> None:
"""init with period in seconds"""
self.period = period
self.timestamp = time.monotonic() - period
def __enter__(self) -> float:
return self.wait()
def __exit__(self, type, value, traceback) -> bool:
self.reset()
return False
def wait(self) -> float:
"""wait remaining time, return time spend sleeping"""
to_wait = self.timestamp + self.period - time.monotonic()
if to_wait > 0:
time.sleep(to_wait)
return to_wait
return 0
def is_waiting(self) -> bool:
"""check if wait() would block"""
return self.timestamp + self.period > time.monotonic()
def reset(self) -> None:
"""reset waiter to waiting state"""
self.timestamp = time.monotonic()
class RampServer:
def __init__(self,
host: str,
port: int,
mqtt_client: mqtt.Client,
ramp: Ramp,
step_size: float,
elevator: OnionPwm,
landing_zone: OnionGpio,
landing_zone_timeout: float,
swing_time: float,
logger: logging.Logger,
topics: Topics,
qos: int = 0
):
self.mqtt_client = mqtt_client
self.ramp = ramp
self.step_size = step_size
self.elevator = elevator
self.landing_zone = landing_zone
self.landing_zone_timeout = landing_zone_timeout
self.landing_zone_waiter = Waiter(swing_time) # use waiter to prevent pointless sleeping
self.logger = logger
self.topics = topics
self.qos = qos
self.target_queue = SimpleQueue() # type: SimpleQueue[Optional[float]]
self.stop_scheduled = False
self.is_stopped = Event()
self.mqtt_client.will_set(topics.status, Status.OFFLINE.to_bytes(), qos, retain=True) # if the onion crashes, set status to offline
self.mqtt_client.message_callback_add(topics.target_angle, self.submit_target)
self.mqtt_client.connect(host, port)
if self.mqtt_client.subscribe(topics.target_angle, qos)[0] != mqtt.MQTT_ERR_SUCCESS:
self.logger.critical(f"failed to subscribe to {topics.target_angle}")
raise ValueError(f"failed to subscribe to '{topics.target_angle}'")
self.logger.info(f"connecting to {host} on port {port}")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.shutdown()
return False
def submit_target(self, client: mqtt.Client, userdata, message: mqtt.MQTTMessage) -> None:
"""submit target to be processed"""
try:
angle = ANGLE_STRUCT.unpack(message.payload)[0]
except Exception as e:
self.logger.critical(f"failed to unpack {message.payload}, signaling main loop to crash", exc_info=e)
self.target_queue.put(None)
else:
self.logger.info(f"received angle {angle}")
self.target_queue.put(angle)
def handle_target(self, angle: float) -> None:
"""process a single target angle"""
self.logger.info(f"handling angle {angle}")
self.logger.debug(f"moving to angle {angle}")
self.update_status(Status.BUSY)
for _ in self.ramp.iter_angle(angle, self.step_size):
self.update_current(self.ramp.get_angle()) # update for each step
current_angle = self.ramp.get_angle()
self.update_current(current_angle) # update final angle
self.logger.debug(f"reached angle {current_angle}")
with self.landing_zone_waiter as waited: # wait for oscillations to stop
self.logger.debug(f"waited {waited} seconds for landing zone to stop swinging")
self.logger.debug("enable elevator")
self.elevator.enable()
try:
self.logger.debug("waiting for edge")
self.landing_zone.waitForEdge(self.landing_zone_timeout)
except TimeoutError: # marble left the experiment
self.logger.error("landing zone timeout expired")
self.update_status(Status.ERROR)
else: # marble landed
self.update_timestamp(time.time())
self.update_status(Status.READY)
finally: # make sure to disable the elevator no matter what happens
self.elevator.disable()
self.logger.debug("disabled elevator")
def update_timestamp(self, timestamp: float) -> mqtt.MQTTMessageInfo:
"""update last timestamp"""
self.logger.info(f"updating timestamp to {timestamp}")
return self.mqtt_client.publish(
self.topics.last_timestamp,
TIMESTAMP_STRUCT.pack(timestamp),
self.qos
)
def update_status(self, status: Status) -> mqtt.MQTTMessageInfo:
"""update status"""
self.logger.info(f"updating status to {status.name}")
return self.mqtt_client.publish(
self.topics.status,
status.to_bytes(),
self.qos,
retain=True
)
    def update_current(self, angle: float) -> mqtt.MQTTMessageInfo:
"""update current angle"""
self.logger.info(f"updating current angle to {angle}")
return self.mqtt_client.publish(
self.topics.current_angle,
ANGLE_STRUCT.pack(angle),
self.qos
)
def loop_forever(self) -> None:
"""process targets until stop() is called"""
self.is_stopped.clear() # make stop() block
try:
self.update_status(Status.READY)
self.mqtt_client.loop_start()
while not self.stop_scheduled:
try:
target = self.target_queue.get(timeout=0.5)
if target is not None:
self.handle_target(target)
else:
raise ValueError("encountered an error during unpacking")
except Empty: # no targets to process, check for shutdown
pass
                except Exception as e:  # something else happened, log exception and crash
self.logger.critical("Exception occured while handling target", exc_info=e)
raise
        finally:  # prevent stop() from hanging if an exception occurs
self.update_status(Status.OFFLINE).wait_for_publish() # update status and wait for messages to be send
self.mqtt_client.loop_stop()
self.stop_scheduled = False # reset to allow calling loop_forever() again
self.is_stopped.set()
def schedule_stop(self) -> None:
"""tell loop_forever() to stop processing targets"""
self.stop_scheduled = True
def stop(self) -> None:
"""wait for loop_forever() to stop processing targets"""
self.logger.info("stopping loop")
self.schedule_stop()
self.is_stopped.wait() # wait for it to stop
self.stop_scheduled = False # reset to allow calling loop_forever() again in case it was not running
self.logger.info("stopped server")
def shutdown(self) -> None:
"""shutdown server and devices"""
self.logger.info("shutting down server and devices")
try:
self.stop()
self.mqtt_client.disconnect() # disconnect after .stop() to allow queued messages to be send
except Exception as e:
self.logger.critical("server shutdown failed", exc_info=e)
raise # dont hide exception
finally: # dont abort on error
self.ramp.shutdown() # shutdown devices after server to make sure they are not in use
self.elevator.release()
self.landing_zone.release()
self.logger.info("shutdown server")
# parse args
parser = ArgumentParser(description="Server awaiting experiment requests for a marble ramp")
parser.add_argument(
"-v",
"--version",
help="display version number",
action="version",
version="%(prog)s {version}".format(version=__version__)
)
parser.add_argument(
"-c",
"--config",
type=str,
help="path to custom config file",
default="/etc/rampserver.conf"
)
args = parser.parse_args()
# parse config file
config = ConfigParser(inline_comment_prefixes=("#",))
with open(args.config, "r") as fd:
config.read_file(fd)
# setup logging
logging.basicConfig(
format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
filename=config.get("logging", "file", fallback=None) # fallback is stdout
)
server_logger = logging.getLogger("ramp_server")
mqtt_logger = logging.getLogger("paho-mqtt")
server_logger.setLevel(
LogLevel(config.get("logging", "server_level", fallback="info")).to_level()
)
mqtt_logger.setLevel(
LogLevel(config.get("logging", "mqtt_level", fallback="info")).to_level()
)
# setup mqtt
mqtt_client = mqtt.Client(
config.get("mqtt", "id"),
clean_session=config.getboolean("mqtt", "clean_session")
)
mqtt_client.enable_logger(mqtt_logger)
if config.getboolean("mqtt", "tls"):
mqtt_client.tls_set()
try:
username = config.get("mqtt", "username")
except NoOptionError:
pass
else:
mqtt_client.username_pw_set(username, config.get("mqtt", "password"))
with ExitStack() as stack:
# setup elevator
elevator = stack.enter_context(OnionPwm(
config.getint("elevator", "channel"),
config.getint("elevator", "chip"),
))
elevator.set_frequency(config.getint("elevator", "frequency"))
elevator.set_duty_cycle(config.getfloat("elevator", "duty_cycle"))
# setup landing_zone
landing_zone = stack.enter_context(OnionGpio(config.getint("landing_zone", "gpio")))
landing_zone.setDirection(Direction.INPUT)
landing_zone.setEdge(Edge.FALLING) # prepare for edge
# setup driver
driver = stack.enter_context(A4988(
config.getint("driver", "enable"),
config.getint("driver", "sleep"),
config.getint("driver", "step"),
config.getint("driver", "dir")
))
# setup ramp
ramp = Ramp(
WormMotor(
driver,
Value(config.get("motor", "direction")),
config.getfloat("motor", "step_width"),
config.getfloat("motor", "pps"),
config.getfloat("motor", "limit_lower"),
config.getfloat("motor", "limit_upper")
),
config.getfloat("ramp", "base_length"),
config.getfloat("ramp", "offset")
)
# setup server
server = RampServer(
host=config.get("mqtt", "host"),
port=config.getint("mqtt", "port"),
mqtt_client=mqtt_client,
ramp=ramp,
step_size=config.getfloat("ramp", "step_size"),
elevator=elevator,
landing_zone=landing_zone,
landing_zone_timeout=config.getfloat("landing_zone", "timeout"),
swing_time=config.getfloat("landing_zone", "swing_time"),
logger=server_logger,
topics=Topics(
config.get("topics", "status"),
config.get("topics", "timestamp"),
config.get("topics", "current"),
config.get("topics", "target")
),
qos=config.getint("mqtt", "qos")
)
signal.signal(signal.SIGTERM, lambda signum, frame: server.schedule_stop()) # schedule shutdown of server loop, cleanup is handled by with statement
    stack.pop_all()  # setup successful, shutdown is handled by server
with server:
server.loop_forever()
| #!/usr/bin/python3
# run server (dont forget to set the pins 3 and 2 to GPIO mode and enable PWM)
import logging
import signal
import time
from argparse import ArgumentParser
from typing import NamedTuple, Optional
from configparser import ConfigParser, NoOptionError
from contextlib import ExitStack
from enum import Enum
from queue import Empty, SimpleQueue
from threading import Event
import paho.mqtt.client as mqtt
from onionGpio import Direction, Edge, OnionGpio, Value
from onionPwm import OnionPwm
from . import __version__
from .A4988 import A4988
from .motor import WormMotor
from .mqtt import ANGLE_STRUCT, TIMESTAMP_STRUCT, Status
from .ramp import Ramp
class Topics(NamedTuple):
"""named tuple containing the mqtt topics"""
status: str
last_timestamp: str
current_angle: str
target_angle: str
class LogLevel(Enum):
"""logelevels as strings"""
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
FATAL = "fatal"
def to_level(self) -> int:
"""convert variant to log level"""
return getattr(logging, self.name)
class Waiter:
"""class enforcing a certain time period between actions"""
def __init__(self, period: float) -> None:
"""init with period in seconds"""
self.period = period
self.timestamp = time.monotonic() - period
def __enter__(self) -> float:
return self.wait()
def __exit__(self, type, value, traceback) -> bool:
self.reset()
return False
def wait(self) -> float:
"""wait remaining time, return time spend sleeping"""
to_wait = self.timestamp + self.period - time.monotonic()
if to_wait > 0:
time.sleep(to_wait)
return to_wait
return 0
def is_waiting(self) -> bool:
"""check if wait() would block"""
return self.timestamp + self.period > time.monotonic()
def reset(self) -> None:
"""reset waiter to waiting state"""
self.timestamp = time.monotonic()
class RampServer:
def __init__(self,
host: str,
port: int,
mqtt_client: mqtt.Client,
ramp: Ramp,
step_size: float,
elevator: OnionPwm,
landing_zone: OnionGpio,
landing_zone_timeout: float,
swing_time: float,
logger: logging.Logger,
topics: Topics,
qos: int = 0
):
self.mqtt_client = mqtt_client
self.ramp = ramp
self.step_size = step_size
self.elevator = elevator
self.landing_zone = landing_zone
self.landing_zone_timeout = landing_zone_timeout
self.landing_zone_waiter = Waiter(swing_time) # use waiter to prevent pointless sleeping
self.logger = logger
self.topics = topics
self.qos = qos
self.target_queue = SimpleQueue() # type: SimpleQueue[Optional[float]]
self.stop_scheduled = False
self.is_stopped = Event()
self.mqtt_client.will_set(topics.status, Status.OFFLINE.to_bytes(), qos, retain=True) # if the onion crashes, set status to offline
self.mqtt_client.message_callback_add(topics.target_angle, self.submit_target)
self.mqtt_client.connect(host, port)
if self.mqtt_client.subscribe(topics.target_angle, qos)[0] != mqtt.MQTT_ERR_SUCCESS:
self.logger.critical(f"failed to subscribe to {topics.target_angle}")
raise ValueError(f"failed to subscribe to '{topics.target_angle}'")
self.logger.info(f"connecting to {host} on port {port}")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.shutdown()
return False
def submit_target(self, client: mqtt.Client, userdata, message: mqtt.MQTTMessage) -> None:
"""submit target to be processed"""
try:
angle = ANGLE_STRUCT.unpack(message.payload)[0]
except Exception as e:
self.logger.critical(f"failed to unpack {message.payload}, signaling main loop to crash", exc_info=e)
self.target_queue.put(None)
else:
self.logger.info(f"received angle {angle}")
self.target_queue.put(angle)
def handle_target(self, angle: float) -> None:
"""process a single target angle"""
self.logger.info(f"handling angle {angle}")
self.logger.debug(f"moving to angle {angle}")
self.update_status(Status.BUSY)
for _ in self.ramp.iter_angle(angle, self.step_size):
self.update_current(self.ramp.get_angle()) # update for each step
current_angle = self.ramp.get_angle()
self.update_current(current_angle) # update final angle
self.logger.debug(f"reached angle {current_angle}")
with self.landing_zone_waiter as waited: # wait for oscillations to stop
self.logger.debug(f"waited {waited} seconds for landing zone to stop swinging")
self.logger.debug("enable elevator")
self.elevator.enable()
try:
self.logger.debug("waiting for edge")
self.landing_zone.waitForEdge(self.landing_zone_timeout)
except TimeoutError: # marble left the experiment
self.logger.error("landing zone timeout expired")
self.update_status(Status.ERROR)
else: # marble landed
self.update_timestamp(time.time())
self.update_status(Status.READY)
finally: # make sure to disable the elevator no matter what happens
self.elevator.disable()
self.logger.debug("disabled elevator")
def update_timestamp(self, timestamp: float) -> mqtt.MQTTMessageInfo:
"""update last timestamp"""
self.logger.info(f"updating timestamp to {timestamp}")
return self.mqtt_client.publish(
self.topics.last_timestamp,
TIMESTAMP_STRUCT.pack(timestamp),
self.qos
)
def update_status(self, status: Status) -> mqtt.MQTTMessageInfo:
"""update status"""
self.logger.info(f"updating status to {status.name}")
return self.mqtt_client.publish(
self.topics.status,
status.to_bytes(),
self.qos,
retain=True
)
    def update_current(self, angle: float) -> mqtt.MQTTMessageInfo:
"""update current angle"""
self.logger.info(f"updating current angle to {angle}")
return self.mqtt_client.publish(
self.topics.current_angle,
ANGLE_STRUCT.pack(angle),
self.qos
)
def loop_forever(self) -> None:
"""process targets until stop() is called"""
self.is_stopped.clear() # make stop() block
try:
self.update_status(Status.READY)
self.mqtt_client.loop_start()
while not self.stop_scheduled:
try:
target = self.target_queue.get(timeout=0.5)
if target is not None:
self.handle_target(target)
else:
raise ValueError("encountered an error during unpacking")
except Empty: # no targets to process, check for shutdown
pass
                except Exception as e:  # something else happened, log exception and crash
self.logger.critical("Exception occured while handling target", exc_info=e)
raise
        finally:  # prevent stop() from hanging if an exception occurs
self.update_status(Status.OFFLINE).wait_for_publish() # update status and wait for messages to be send
self.mqtt_client.loop_stop()
self.stop_scheduled = False # reset to allow calling loop_forever() again
self.is_stopped.set()
def schedule_stop(self) -> None:
"""tell loop_forever() to stop processing targets"""
self.stop_scheduled = True
def stop(self) -> None:
"""wait for loop_forever() to stop processing targets"""
self.logger.info("stopping loop")
self.schedule_stop()
self.is_stopped.wait() # wait for it to stop
self.stop_scheduled = False # reset to allow calling loop_forever() again in case it was not running
self.logger.info("stopped server")
def shutdown(self) -> None:
"""shutdown server and devices"""
self.logger.info("shutting down server and devices")
try:
self.stop()
self.mqtt_client.disconnect() # disconnect after .stop() to allow queued messages to be send
except Exception as e:
self.logger.critical("server shutdown failed", exc_info=e)
raise # dont hide exception
finally: # dont abort on error
self.ramp.shutdown() # shutdown devices after server to make sure they are not in use
self.elevator.release()
self.landing_zone.release()
self.logger.info("shutdown server")
# parse args
parser = ArgumentParser(description="Server awaiting experiment requests for a marble ramp")
parser.add_argument(
"-v",
"--version",
help="display version number",
action="version",
version="%(prog)s {version}".format(version=__version__)
)
parser.add_argument(
"-c",
"--config",
type=str,
help="path to custom config file",
default="/etc/rampserver.conf"
)
args = parser.parse_args()
# parse config file
config = ConfigParser(inline_comment_prefixes=("#",))
with open(args.config, "r") as fd:
config.read_file(fd)
# setup logging
logging.basicConfig(
format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
filename=config.get("logging", "file", fallback=None) # fallback is stdout
)
server_logger = logging.getLogger("ramp_server")
mqtt_logger = logging.getLogger("paho-mqtt")
server_logger.setLevel(
LogLevel(config.get("logging", "server_level", fallback="info")).to_level()
)
mqtt_logger.setLevel(
LogLevel(config.get("logging", "mqtt_level", fallback="info")).to_level()
)
# setup mqtt
mqtt_client = mqtt.Client(
config.get("mqtt", "id"),
clean_session=config.getboolean("mqtt", "clean_session")
)
mqtt_client.enable_logger(mqtt_logger)
if config.getboolean("mqtt", "tls"):
mqtt_client.tls_set()
try:
username = config.get("mqtt", "username")
except NoOptionError:
pass
else:
mqtt_client.username_pw_set(username, config.get("mqtt", "password"))
with ExitStack() as stack:
# setup elevator
elevator = stack.enter_context(OnionPwm(
config.getint("elevator", "channel"),
config.getint("elevator", "chip"),
))
elevator.set_frequency(config.getint("elevator", "frequency"))
elevator.set_duty_cycle(config.getfloat("elevator", "duty_cycle"))
# setup landing_zone
landing_zone = stack.enter_context(OnionGpio(config.getint("landing_zone", "gpio")))
landing_zone.setDirection(Direction.INPUT)
landing_zone.setEdge(Edge.FALLING) # prepare for edge
# setup driver
driver = stack.enter_context(A4988(
config.getint("driver", "enable"),
config.getint("driver", "sleep"),
config.getint("driver", "step"),
config.getint("driver", "dir")
))
# setup ramp
ramp = Ramp(
WormMotor(
driver,
Value(config.get("motor", "direction")),
config.getfloat("motor", "step_width"),
config.getfloat("motor", "pps"),
config.getfloat("motor", "limit_lower"),
config.getfloat("motor", "limit_upper")
),
config.getfloat("ramp", "base_length"),
config.getfloat("ramp", "offset")
)
# setup server
server = RampServer(
host=config.get("mqtt", "host"),
port=config.getint("mqtt", "port"),
mqtt_client=mqtt_client,
ramp=ramp,
step_size=config.getfloat("ramp", "step_size"),
elevator=elevator,
landing_zone=landing_zone,
landing_zone_timeout=config.getfloat("landing_zone", "timeout"),
swing_time=config.getfloat("landing_zone", "swing_time"),
logger=server_logger,
topics=Topics(
config.get("topics", "status"),
config.get("topics", "timestamp"),
config.get("topics", "current"),
config.get("topics", "target")
),
qos=config.getint("mqtt", "qos")
)
signal.signal(signal.SIGTERM, lambda signum, frame: server.schedule_stop()) # schedule shutdown of server loop, cleanup is handled by with statement
    stack.pop_all()  # setup successful, shutdown is handled by server
with server:
server.loop_forever()
| en | 0.809468 | #!/usr/bin/python3 # run server (dont forget to set the pins 3 and 2 to GPIO mode and enable PWM) named tuple containing the mqtt topics logelevels as strings convert variant to log level class enforcing a certain time period between actions init with period in seconds wait remaining time, return time spend sleeping check if wait() would block reset waiter to waiting state # use waiter to prevent pointless sleeping # type: SimpleQueue[Optional[float]] # if the onion crashes, set status to offline submit target to be processed process a single target angle # update for each step # update final angle # wait for oscillations to stop # marble left the experiment # marble landed # make sure to disable the elevator no matter what happens update last timestamp update status update current angle process targets until stop() is called # make stop() block # no targets to process, check for shutdown # something else happend, log exception and crash # prevent stop() from hanging if an exception occures # update status and wait for messages to be send # reset to allow calling loop_forever() again tell loop_forever() to stop processing targets wait for loop_forever() to stop processing targets # wait for it to stop # reset to allow calling loop_forever() again in case it was not running shutdown server and devices # disconnect after .stop() to allow queued messages to be send # dont hide exception # dont abort on error # shutdown devices after server to make sure they are not in use # parse args # parse config file # setup logging # fallback is stdout # setup mqtt # setup elevator # setup landing_zone # prepare for edge # setup driver # setup ramp # setup server # schedule shutdown of server loop, cleanup is handled by with statement # setup successfull, shutdown is handled by server | 2.637961 | 3 |
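# A small sketch of the Waiter helper used by RampServer above; the 2-second
# period is an arbitrary value chosen for the example.
w = Waiter(2.0)
w.reset()                      # start a fresh 2 s quiet period
print(w.is_waiting())          # True right after reset()
with w as slept:               # __enter__ blocks for the remaining time
    print("slept %.2f s before acting" % slept)
# __exit__ calls reset(), so the next `with w:` waits again until 2 s have
# passed since the previous block finished; this is how the server leaves
# swing_time between consecutive elevator runs.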
flambe/cluster/ssh.py | axel-sirota/flambe | 148 | 6632371 | """Implementation of the Manager for SSH hosts"""
import logging
from typing import List, TypeVar, Union, Optional
from flambe.cluster import instance
from flambe.cluster.cluster import Cluster, FactoryInsT
import os
logger = logging.getLogger(__name__)
FactoryT = TypeVar("FactoryT", instance.CPUFactoryInstance, instance.GPUFactoryInstance)
class SSHCluster(Cluster):
"""The SSH Manager needs to be used when having running instances.
For example when having on-prem hardware or just a couple
of AWS EC2 instances running.
When using this cluster, the user needs to specify the IPs of
the machines to use, both the public one and private one.
"""
def __init__(self,
name: str,
orchestrator_ip: Union[str, List[str]],
factories_ips: Union[List[str], List[List[str]]],
key: str,
username: str,
remote_context=None,
use_public: bool = True,
setup_cmds: Optional[List[str]] = None) -> None:
"""Initialize the SSHCluster."""
super().__init__(name, len(factories_ips), key, username, setup_cmds)
self.orchestrator_ip = orchestrator_ip
self.factories_ips = factories_ips
self.remote_context = remote_context
self.use_public = use_public
if remote_context:
self.cluster_id = self.remote_context.cluster_id
def load_all_instances(self, exp_name: str = None, force: bool = False) -> None:
"""This manager assumed that instances are running.
This method loads the Python objects to the manager's variables.
Parameters
----------
exp_name: str
The name of the experiment
force: bool
Whether to override the current experiment of the same name
"""
if isinstance(self.orchestrator_ip, list):
self.orchestrator = self.get_orchestrator(self.orchestrator_ip[0],
self.orchestrator_ip[1],
use_public=self.use_public)
else:
self.orchestrator = self.get_orchestrator(self.orchestrator_ip,
use_public=self.use_public)
aux: FactoryInsT
for each in self.factories_ips:
if isinstance(each, list):
factory = self.get_factory(each[0], each[1], use_public=self.use_public)
if factory.contains_gpu():
factory = self.get_gpu_factory(each[0], each[1], use_public=self.use_public)
else:
factory = self.get_factory(each, use_public=self.use_public)
if factory.contains_gpu():
factory = self.get_gpu_factory(each, use_public=self.use_public)
self.factories.append(factory)
def rollback_env(self) -> None:
pass
def rsync_hosts(self):
"""Rsyncs the host's result folders.
First, it rsyncs all worker folders to the orchestrator main
folder. After that, so that every worker gets the last changes,
        the orchestrator rsyncs with all of them.
"""
if not self.remote_context:
logger.error("Can't rsyn without a remote context")
return
exclude = ["state.pkl"]
orch = self.orchestrator
orch_save_path = os.path.join(f"{orch.get_home_path()}", self.remote_context.save_folder)
orch_loc = f"{orch_save_path}"
for f in self.factories:
f_save_path = os.path.join(f"{orch.get_home_path()}", self.remote_context.save_folder)
f_loc = f"{f.username}@{f.private_host}:{f_save_path}"
orch.rsync_folder(f_loc, orch_loc, exclude)
for f in self.factories:
f_save_path = os.path.join(f"{f.get_home_path()}", self.remote_context.save_folder)
f_loc = f"{f.username}@{f.private_host}:{f_save_path}"
orch.rsync_folder(orch_loc, f_loc, exclude)
| """Implementation of the Manager for SSH hosts"""
import logging
from typing import List, TypeVar, Union, Optional
from flambe.cluster import instance
from flambe.cluster.cluster import Cluster, FactoryInsT
import os
logger = logging.getLogger(__name__)
FactoryT = TypeVar("FactoryT", instance.CPUFactoryInstance, instance.GPUFactoryInstance)
class SSHCluster(Cluster):
"""The SSH Manager needs to be used when having running instances.
For example when having on-prem hardware or just a couple
of AWS EC2 instances running.
When using this cluster, the user needs to specify the IPs of
the machines to use, both the public one and private one.
"""
def __init__(self,
name: str,
orchestrator_ip: Union[str, List[str]],
factories_ips: Union[List[str], List[List[str]]],
key: str,
username: str,
remote_context=None,
use_public: bool = True,
setup_cmds: Optional[List[str]] = None) -> None:
"""Initialize the SSHCluster."""
super().__init__(name, len(factories_ips), key, username, setup_cmds)
self.orchestrator_ip = orchestrator_ip
self.factories_ips = factories_ips
self.remote_context = remote_context
self.use_public = use_public
if remote_context:
self.cluster_id = self.remote_context.cluster_id
def load_all_instances(self, exp_name: str = None, force: bool = False) -> None:
"""This manager assumed that instances are running.
This method loads the Python objects to the manager's variables.
Parameters
----------
exp_name: str
The name of the experiment
force: bool
Whether to override the current experiment of the same name
"""
if isinstance(self.orchestrator_ip, list):
self.orchestrator = self.get_orchestrator(self.orchestrator_ip[0],
self.orchestrator_ip[1],
use_public=self.use_public)
else:
self.orchestrator = self.get_orchestrator(self.orchestrator_ip,
use_public=self.use_public)
aux: FactoryInsT
for each in self.factories_ips:
if isinstance(each, list):
factory = self.get_factory(each[0], each[1], use_public=self.use_public)
if factory.contains_gpu():
factory = self.get_gpu_factory(each[0], each[1], use_public=self.use_public)
else:
factory = self.get_factory(each, use_public=self.use_public)
if factory.contains_gpu():
factory = self.get_gpu_factory(each, use_public=self.use_public)
self.factories.append(factory)
def rollback_env(self) -> None:
pass
def rsync_hosts(self):
"""Rsyncs the host's result folders.
First, it rsyncs all worker folders to the orchestrator main
folder. After that, so that every worker gets the last changes,
        the orchestrator rsyncs with all of them.
"""
if not self.remote_context:
logger.error("Can't rsyn without a remote context")
return
exclude = ["state.pkl"]
orch = self.orchestrator
orch_save_path = os.path.join(f"{orch.get_home_path()}", self.remote_context.save_folder)
orch_loc = f"{orch_save_path}"
for f in self.factories:
f_save_path = os.path.join(f"{orch.get_home_path()}", self.remote_context.save_folder)
f_loc = f"{f.username}@{f.private_host}:{f_save_path}"
orch.rsync_folder(f_loc, orch_loc, exclude)
for f in self.factories:
f_save_path = os.path.join(f"{f.get_home_path()}", self.remote_context.save_folder)
f_loc = f"{f.username}@{f.private_host}:{f_save_path}"
orch.rsync_folder(orch_loc, f_loc, exclude)
| en | 0.886695 | Implementation of the Manager for SSH hosts The SSH Manager needs to be used when having running instances. For example when having on-prem hardware or just a couple of AWS EC2 instances running. When using this cluster, the user needs to specify the IPs of the machines to use, both the public one and private one. Initialize the SSHCluster. This manager assumed that instances are running. This method loads the Python objects to the manager's variables. Parameters ---------- exp_name: str The name of the experiment force: bool Whether to override the current experiment of the same name Rsyncs the host's result folders. First, it rsyncs all worker folders to the orchestrator main folder. After that, so that every worker gets the last changes, the orchestrator rsync with all of them. | 2.936145 | 3 |
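# A hedged example of building the SSHCluster defined above by hand. The host
# addresses, key path and username are placeholders, and the [public, private]
# ordering of each address pair is an assumption based on how
# load_all_instances() indexes the lists; flambe normally constructs this
# object from a YAML config instead of direct Python calls.
cluster = SSHCluster(
    name="on-prem",
    orchestrator_ip=["203.0.113.10", "10.0.0.10"],
    factories_ips=[["203.0.113.11", "10.0.0.11"],
                   ["203.0.113.12", "10.0.0.12"]],
    key="~/.ssh/id_rsa",
    username="ubuntu",
    use_public=True,
)
cluster.load_all_instances()   # wrap the already running machines in instance objects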
nerds/examples/GMB/read_data.py | elsevierlabs-os/nerds | 19 | 6632372 | <gh_stars>10-100
import csv
from nerds.core.model.input.document import Document
from nerds.util.convert import transform_bio_tags_to_annotated_document
PATH_TO_FILE = "train.csv"
def read_kaggle_data():
sentences = []
pos = []
tag = []
tmp_sentence = []
tmp_pos = []
tmp_tag = []
with open(PATH_TO_FILE, "rt") as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
# Ignore the header
next(csv_reader)
for row in csv_reader:
if row[0].startswith("Sentence: "):
if len(tmp_sentence) != 0:
sentences.append(tmp_sentence)
pos.append(tmp_pos)
tag.append(tmp_tag)
tmp_sentence = []
tmp_pos = []
tmp_tag = []
tmp_sentence.append(row[1])
tmp_pos.append(row[2])
tmp_tag.append(row[3].replace("-", "_"))
if len(tmp_sentence) != 0:
sentences.append(tmp_sentence)
pos.append(tmp_pos)
tag.append(tmp_tag)  # also flush the tags collected for the final sentence
return sentences, pos, tag
def data_to_annotated_docs():
sentences, pos, tags = read_kaggle_data()
documents = [Document(u" ".join(sentence).encode("utf-8"))
for sentence in sentences]
ann_docs = []
for i in range(len(documents)):
try:
sentence = sentences[i]
tag = tags[i]
document = documents[i]
ann_docs.append(transform_bio_tags_to_annotated_document(sentence,
tag,
document))
except IndexError:
continue
return ann_docs
| import csv
from nerds.core.model.input.document import Document
from nerds.util.convert import transform_bio_tags_to_annotated_document
PATH_TO_FILE = "train.csv"
def read_kaggle_data():
sentences = []
pos = []
tag = []
tmp_sentence = []
tmp_pos = []
tmp_tag = []
with open(PATH_TO_FILE, "rt") as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
# Ignore the header
next(csv_reader)
for row in csv_reader:
if row[0].startswith("Sentence: "):
if len(tmp_sentence) != 0:
sentences.append(tmp_sentence)
pos.append(tmp_pos)
tag.append(tmp_tag)
tmp_sentence = []
tmp_pos = []
tmp_tag = []
tmp_sentence.append(row[1])
tmp_pos.append(row[2])
tmp_tag.append(row[3].replace("-", "_"))
if len(tmp_sentence) != 0:
sentences.append(tmp_sentence)
pos.append(tmp_pos)
tag.append(tmp_tag)  # also flush the tags collected for the final sentence
return sentences, pos, tag
def data_to_annotated_docs():
sentences, pos, tags = read_kaggle_data()
documents = [Document(u" ".join(sentence).encode("utf-8"))
for sentence in sentences]
ann_docs = []
for i in range(len(documents)):
try:
sentence = sentences[i]
tag = tags[i]
document = documents[i]
ann_docs.append(transform_bio_tags_to_annotated_document(sentence,
tag,
document))
except IndexError:
continue
return ann_docs | en | 0.162013 | # Ignore the header | 2.864253 | 3 |
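For context, a small driver for the reader above. It assumes it runs next to read_data.py with train.csv present and the nerds package installed; attribute access on the returned objects is deliberately avoided, since the AnnotatedDocument API is not shown in this file.

from read_data import data_to_annotated_docs  # the module above

ann_docs = data_to_annotated_docs()
print("parsed", len(ann_docs), "annotated documents")
print(ann_docs[0])  # a nerds AnnotatedDocument built from the BIO tags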
mc_states/tests/unit/modules/memcached_tests.py | makinacorpus/makina-states | 18 | 6632373 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from .. import base
from mc_states.api import invalidate_memoize_cache
class TestCase(base.ModuleCase):
def test_settings(self):
invalidate_memoize_cache('localreg_memcached_settings')
with self.patch(
grains={'os': 'Ubuntu',
'oscodename': 'precise',
'os_family': 'Debian'},
filtered=['mc.*'],
kinds=['modules']
):
data = self._('mc_memcached.settings')()
self.assertEquals(data['conf']['unitcachesize'], '10M')
if __name__ == '__main__':
unittest.main()
# vim:set et sts=4 ts=4 tw=80:
| #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from .. import base
from mc_states.api import invalidate_memoize_cache
class TestCase(base.ModuleCase):
def test_settings(self):
invalidate_memoize_cache('localreg_memcached_settings')
with self.patch(
grains={'os': 'Ubuntu',
'oscodename': 'precise',
'os_family': 'Debian'},
filtered=['mc.*'],
kinds=['modules']
):
data = self._('mc_memcached.settings')()
self.assertEquals(data['conf']['unitcachesize'], '10M')
if __name__ == '__main__':
unittest.main()
# vim:set et sts=4 ts=4 tw=80:
| fr | 0.181584 | #!/usr/bin/env python # vim:set et sts=4 ts=4 tw=80: | 1.978644 | 2 |
setup.py | nuga99/tsudo | 6 | 6632374 | import setuptools
with open('README.md') as readme_file:
readme = readme_file.read()
setuptools.setup(
name='tsudo',
python_requires='>3',
version='0.0.5',
author='<NAME>',
author_email='<EMAIL>',
description='Tsundere wrapper for sudo command.',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/giovanism/tsudo',
license='WTFPL',
packages=setuptools.find_packages(),
scripts=('scripts/tsudo',),
install_requires=(
'pexpect',
),
classifiers=(
'Programming Language :: Python :: 3',
'Operating System :: Unix',
'Environment :: Console',
'Environment :: Plugins',
'Topic :: System :: Systems Administration',
),
)
| import setuptools
with open('README.md') as readme_file:
readme = readme_file.read()
setuptools.setup(
name='tsudo',
python_requires='>3',
version='0.0.5',
author='<NAME>',
author_email='<EMAIL>',
description='Tsundere wrapper for sudo command.',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/giovanism/tsudo',
license='WTFPL',
packages=setuptools.find_packages(),
scripts=('scripts/tsudo',),
install_requires=(
'pexpect',
),
classifiers=(
'Programming Language :: Python :: 3',
'Operating System :: Unix',
'Environment :: Console',
'Environment :: Plugins',
'Topic :: System :: Systems Administration',
),
)
| none | 1 | 1.356857 | 1 |
|
pdms/qtum_bridge/R8Blockchain/qtumblockchain.py | chris0203/pmes | 0 | 6632375 | from bitcoinrpc.authproxy import AuthServiceProxy
from hashlib import sha256
from R8Blockchain.blockchain_handler import BlockchainHandler
import codecs
import logging
class QtumBlockchain(BlockchainHandler):
def __init__(self, qtum_rpc):
self.qtum_rpc = qtum_rpc
self.decode_hex = codecs.getdecoder("hex_codec")
self.encode_hex = codecs.getencoder("hex_codec")
@classmethod
def from_http_provider(cls, http_provider):
return cls(AuthServiceProxy(http_provider))
def get_block_count(self):
return self.qtum_rpc.getblockcount()
def get_balance(self):
return self.qtum_rpc.getbalance()
def get_last_block_hash(self):
return self.qtum_rpc.getbestblockhash()
def get_second_last_block_hash(self):
return self.get_block_hash(self.get_block_count()-1)
def get_block_hash(self, height):
return self.qtum_rpc.getblockhash(height)
def get_block_id(self, height):
block_hash = self.get_block_hash(height)
l = sha256(self.decode_hex(block_hash)[0]).hexdigest()
r = hex(height)
return l[0:10] + r[2:].rjust(10, '0')
def get_last_block_id(self):
last_block_height = self.get_block_count()
return self.get_block_id(last_block_height)
def get_second_last_block_id(self):
last_block_height = self.get_block_count() - 1
return self.get_block_id(last_block_height)
def get_accounts(self):
unspent = self.qtum_rpc.listunspent()
res = [tx['address'] for tx in unspent]
return res
def get_unspent(self):
unspent = self.qtum_rpc.listunspent()
res = {tx['address']: tx['amount'] for tx in unspent}
return res
def from_hex_address(self, address):
return self.qtum_rpc.fromhexaddress(address)
| from bitcoinrpc.authproxy import AuthServiceProxy
from hashlib import sha256
from R8Blockchain.blockchain_handler import BlockchainHandler
import codecs
import logging
class QtumBlockchain(BlockchainHandler):
def __init__(self, qtum_rpc):
self.qtum_rpc = qtum_rpc
self.decode_hex = codecs.getdecoder("hex_codec")
self.encode_hex = codecs.getencoder("hex_codec")
@classmethod
def from_http_provider(cls, http_provider):
return cls(AuthServiceProxy(http_provider))
def get_block_count(self):
return self.qtum_rpc.getblockcount()
def get_balance(self):
return self.qtum_rpc.getbalance()
def get_last_block_hash(self):
return self.qtum_rpc.getbestblockhash()
def get_second_last_block_hash(self):
return self.get_block_hash(self.get_block_count()-1)
def get_block_hash(self, height):
return self.qtum_rpc.getblockhash(height)
def get_block_id(self, height):
block_hash = self.get_block_hash(height)
l = sha256(self.decode_hex(block_hash)[0]).hexdigest()
r = hex(height)
return l[0:10] + r[2:].rjust(10, '0')
def get_last_block_id(self):
last_block_height = self.get_block_count()
return self.get_block_id(last_block_height)
def get_second_last_block_id(self):
last_block_height = self.get_block_count() - 1
return self.get_block_id(last_block_height)
def get_accounts(self):
unspent = self.qtum_rpc.listunspent()
res = [tx['address'] for tx in unspent]
return res
def get_unspent(self):
unspent = self.qtum_rpc.listunspent()
res = {tx['address']: tx['amount'] for tx in unspent}
return res
def from_hex_address(self, address):
return self.qtum_rpc.fromhexaddress(address)
| none | 1 | 2.380917 | 2 |
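A hedged connection sketch for the handler above. The RPC URL follows the usual python-bitcoinrpc user:password@host:port form; the credentials, host and port are placeholders, and the import path is inferred from this row's file path.

from R8Blockchain.qtumblockchain import QtumBlockchain  # assumed import path

handler = QtumBlockchain.from_http_provider("http://user:password@127.0.0.1:3889")
print("height:", handler.get_block_count())
print("best block:", handler.get_last_block_hash())
print("derived block id:", handler.get_last_block_id())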
|
backend/bot/modules/tosurnament/bracket/qualifiers_spreadsheet.py | SpartanPlume/Tosurnament | 7 | 6632376 | """Contains all qualifiers spreadsheet settings commands related to Tosurnament."""
from discord.ext import commands
from bot.modules.tosurnament import module as tosurnament
from common.databases.tosurnament.spreadsheets.qualifiers_spreadsheet import QualifiersSpreadsheet
from common.api import spreadsheet as spreadsheet_api
from common.api import tosurnament as tosurnament_api
class TosurnamentQualifiersCog(tosurnament.TosurnamentBaseModule, name="qualifiers_spreadsheet"):
"""Tosurnament qualifiers spreadsheet settings commands."""
def __init__(self, bot):
super().__init__(bot)
self.bot = bot
def cog_check(self, ctx):
"""Check function called before any command of the cog."""
return self.admin_cog_check(ctx)
@commands.command(aliases=["sqs"])
async def set_qualifiers_spreadsheet(self, ctx, spreadsheet_id: str, *, sheet_name: str = ""):
"""Sets the qualifiers spreadsheet."""
tournament = self.get_tournament(ctx.guild.id)
bracket = tournament.current_bracket
spreadsheet_id = spreadsheet_api.extract_spreadsheet_id(spreadsheet_id)
qualifiers_spreadsheet = bracket._qualifiers_spreadsheet
if not qualifiers_spreadsheet:
qualifiers_spreadsheet = tosurnament_api.create_qualifiers_spreadsheet(
tournament.id, bracket.id, QualifiersSpreadsheet(spreadsheet_id=spreadsheet_id, sheet_name=sheet_name)
)
else:
qualifiers_spreadsheet.spreadsheet_id = spreadsheet_id
if sheet_name:
qualifiers_spreadsheet.sheet_name = sheet_name
tosurnament_api.update_qualifiers_spreadsheet(tournament.id, bracket.id, qualifiers_spreadsheet)
await self.send_reply(ctx, "success", qualifiers_spreadsheet.spreadsheet_id)
async def set_qualifiers_spreadsheet_values(self, ctx, values):
"""Puts the input values into the corresponding bracket."""
tournament = self.get_tournament(ctx.guild.id)
qualifiers_spreadsheet = tournament.current_bracket.get_spreadsheet_from_type("qualifiers")
if not qualifiers_spreadsheet:
raise tosurnament.NoSpreadsheet("qualifiers")
for key, value in values.items():
setattr(qualifiers_spreadsheet, key, value)
tosurnament_api.update_qualifiers_spreadsheet(
tournament.id, tournament.current_bracket.id, qualifiers_spreadsheet
)
await self.send_reply(ctx, "success", value)
async def set_qualifiers_spreadsheet_range_value(self, ctx, range_name, range_value):
"""Puts the input values into the corresponding bracket."""
if not spreadsheet_api.check_range(range_value):
raise commands.UserInputError()
await self.set_qualifiers_spreadsheet_values(ctx, {range_name: range_value})
@commands.command(aliases=["sqssn"])
async def set_qualifiers_spreadsheet_sheet_name(self, ctx, *, sheet_name: str = ""):
"""Sets the sheet name of the qualifiers spreadsheet."""
await self.set_qualifiers_spreadsheet_values(ctx, {"sheet_name": sheet_name})
@commands.command(aliases=["sqsrli"])
async def set_qualifiers_spreadsheet_range_lobby_id(self, ctx, *, cell_range: str):
"""Sets the qualifiers spreadsheet range lobby id."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_lobby_id", cell_range)
@commands.command(aliases=["sqsrts"])
async def set_qualifiers_spreadsheet_range_teams(self, ctx, *, cell_range: str):
"""Sets the qualifiers spreadsheet range teams."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_teams", cell_range)
@commands.command(aliases=["sqsrr"])
async def set_qualifiers_spreadsheet_range_referee(self, ctx, *, cell_range: str):
"""Sets the qualifiers spreadsheet range referee."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_referee", cell_range)
@commands.command(aliases=["sqsrd"])
async def set_qualifiers_spreadsheet_range_date(self, ctx, *, cell_range: str):
"""Sets the qualifiers spreadsheet range date."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_date", cell_range)
@commands.command(aliases=["sqsrt"])
async def set_qualifiers_spreadsheet_range_time(self, ctx, *, cell_range: str = ""):
"""Sets the qualifiers spreadsheet range time."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_time", cell_range)
@commands.command(aliases=["sqsmtir"])
async def set_qualifiers_spreadsheet_max_teams_in_row(self, ctx, *, max_teams_in_row: int):
"""Sets the qualifiers spreadsheet range time."""
await self.set_qualifiers_spreadsheet_values(ctx, {"max_teams_in_row": max_teams_in_row})
@commands.command(aliases=["sqss"])
async def show_qualifiers_spreadsheet_settings(self, ctx):
"""Shows the qualifiers spreadsheet settings."""
await self.show_spreadsheet_settings(ctx, "qualifiers")
def get_class(bot):
"""Returns the main class of the module"""
return TosurnamentQualifiersCog(bot)
def setup(bot):
"""Setups the cog"""
bot.add_cog(TosurnamentQualifiersCog(bot))
| """Contains all qualifiers spreadsheet settings commands related to Tosurnament."""
from discord.ext import commands
from bot.modules.tosurnament import module as tosurnament
from common.databases.tosurnament.spreadsheets.qualifiers_spreadsheet import QualifiersSpreadsheet
from common.api import spreadsheet as spreadsheet_api
from common.api import tosurnament as tosurnament_api
class TosurnamentQualifiersCog(tosurnament.TosurnamentBaseModule, name="qualifiers_spreadsheet"):
"""Tosurnament qualifiers spreadsheet settings commands."""
def __init__(self, bot):
super().__init__(bot)
self.bot = bot
def cog_check(self, ctx):
"""Check function called before any command of the cog."""
return self.admin_cog_check(ctx)
@commands.command(aliases=["sqs"])
async def set_qualifiers_spreadsheet(self, ctx, spreadsheet_id: str, *, sheet_name: str = ""):
"""Sets the qualifiers spreadsheet."""
tournament = self.get_tournament(ctx.guild.id)
bracket = tournament.current_bracket
spreadsheet_id = spreadsheet_api.extract_spreadsheet_id(spreadsheet_id)
qualifiers_spreadsheet = bracket._qualifiers_spreadsheet
if not qualifiers_spreadsheet:
qualifiers_spreadsheet = tosurnament_api.create_qualifiers_spreadsheet(
tournament.id, bracket.id, QualifiersSpreadsheet(spreadsheet_id=spreadsheet_id, sheet_name=sheet_name)
)
else:
qualifiers_spreadsheet.spreadsheet_id = spreadsheet_id
if sheet_name:
qualifiers_spreadsheet.sheet_name = sheet_name
tosurnament_api.update_qualifiers_spreadsheet(tournament.id, bracket.id, qualifiers_spreadsheet)
await self.send_reply(ctx, "success", qualifiers_spreadsheet.spreadsheet_id)
async def set_qualifiers_spreadsheet_values(self, ctx, values):
"""Puts the input values into the corresponding bracket."""
tournament = self.get_tournament(ctx.guild.id)
qualifiers_spreadsheet = tournament.current_bracket.get_spreadsheet_from_type("qualifiers")
if not qualifiers_spreadsheet:
raise tosurnament.NoSpreadsheet("qualifiers")
for key, value in values.items():
setattr(qualifiers_spreadsheet, key, value)
tosurnament_api.update_qualifiers_spreadsheet(
tournament.id, tournament.current_bracket.id, qualifiers_spreadsheet
)
await self.send_reply(ctx, "success", value)
async def set_qualifiers_spreadsheet_range_value(self, ctx, range_name, range_value):
"""Puts the input values into the corresponding bracket."""
if not spreadsheet_api.check_range(range_value):
raise commands.UserInputError()
await self.set_qualifiers_spreadsheet_values(ctx, {range_name: range_value})
@commands.command(aliases=["sqssn"])
async def set_qualifiers_spreadsheet_sheet_name(self, ctx, *, sheet_name: str = ""):
"""Sets the sheet name of the qualifiers spreadsheet."""
await self.set_qualifiers_spreadsheet_values(ctx, {"sheet_name": sheet_name})
@commands.command(aliases=["sqsrli"])
async def set_qualifiers_spreadsheet_range_lobby_id(self, ctx, *, cell_range: str):
"""Sets the qualifiers spreadsheet range lobby id."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_lobby_id", cell_range)
@commands.command(aliases=["sqsrts"])
async def set_qualifiers_spreadsheet_range_teams(self, ctx, *, cell_range: str):
"""Sets the qualifiers spreadsheet range teams."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_teams", cell_range)
@commands.command(aliases=["sqsrr"])
async def set_qualifiers_spreadsheet_range_referee(self, ctx, *, cell_range: str):
"""Sets the qualifiers spreadsheet range referee."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_referee", cell_range)
@commands.command(aliases=["sqsrd"])
async def set_qualifiers_spreadsheet_range_date(self, ctx, *, cell_range: str):
"""Sets the qualifiers spreadsheet range date."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_date", cell_range)
@commands.command(aliases=["sqsrt"])
async def set_qualifiers_spreadsheet_range_time(self, ctx, *, cell_range: str = ""):
"""Sets the qualifiers spreadsheet range time."""
await self.set_qualifiers_spreadsheet_range_value(ctx, "range_time", cell_range)
@commands.command(aliases=["sqsmtir"])
async def set_qualifiers_spreadsheet_max_teams_in_row(self, ctx, *, max_teams_in_row: int):
"""Sets the qualifiers spreadsheet range time."""
await self.set_qualifiers_spreadsheet_values(ctx, {"max_teams_in_row": max_teams_in_row})
@commands.command(aliases=["sqss"])
async def show_qualifiers_spreadsheet_settings(self, ctx):
"""Shows the qualifiers spreadsheet settings."""
await self.show_spreadsheet_settings(ctx, "qualifiers")
def get_class(bot):
"""Returns the main class of the module"""
return TosurnamentQualifiersCog(bot)
def setup(bot):
"""Setups the cog"""
bot.add_cog(TosurnamentQualifiersCog(bot))
| en | 0.733109 | Contains all qualifiers spreadsheet settings commands related to Tosurnament. Tosurnament qualifiers spreadsheet settings commands. Check function called before any command of the cog. Sets the qualifiers spreadsheet. Puts the input values into the corresponding bracket. Puts the input values into the corresponding bracket. Sets the sheet name of the qualifiers spreadsheet. Sets the qualifiers spreadsheet range lobby id. Sets the qualifiers spreadsheet range teams. Sets the qualifiers spreadsheet range referee. Sets the qualifiers spreadsheet range date. Sets the qualifiers spreadsheet range time. Sets the qualifiers spreadsheet range time. Shows the qualifiers spreadsheet settings. Returns the main class of the module Setups the cog | 2.454282 | 2 |
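A sketch of how such a cog is typically wired into a discord.py bot (1.x-style synchronous load_extension is assumed); the extension path is an assumption based on this row's file path and the token is a placeholder.

from discord.ext import commands

bot = commands.Bot(command_prefix=";")
# Calls the setup(bot) hook defined above; path assumed from the repository layout.
bot.load_extension("bot.modules.tosurnament.bracket.qualifiers_spreadsheet")
# An admin could then run, for example:
#   ;set_qualifiers_spreadsheet <spreadsheet_id_or_url> <sheet name>
bot.run("DISCORD_BOT_TOKEN")  # placeholder token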
cvpy42.py | L3ndry/guanabara-python | 0 | 6632377 | <filename>cvpy42.py
segmento1 = float(input("Primeiro segmento: "))
segmento2 = float(input("Segundo segmento: "))
segmento3 = float(input("Terceiro segmento: "))
if segmento1 < segmento2 + segmento3 and segmento2 < segmento1 + segmento3 and segmento3 < segmento1 + segmento2:
if segmento1 == segmento2 == segmento3:
print("Esses segmentos podem formar um triângulo EQUILÁTERO.")
elif segmento1 == segmento2 or segmento1 == segmento3 or segmento2 == segmento3:
print("Esses segmentos podem formar um triângulo ISÓSCELES.")
else:
print("Esses segmentos podem formar um triângulo ESCALENO.")
else:
print("Esses segmentos NÃO PODEM formar um triângulo.")
| <filename>cvpy42.py
segmento1 = float(input("Primeiro segmento: "))
segmento2 = float(input("Segundo segmento: "))
segmento3 = float(input("Terceiro segmento: "))
if segmento1 < segmento2 + segmento3 and segmento2 < segmento1 + segmento3 and segmento3 < segmento1 + segmento2:
if segmento1 == segmento2 == segmento3:
print("Esses segmentos podem formar um triângulo EQUILÁTERO.")
elif segmento1 == segmento2 or segmento1 == segmento3 or segmento2 == segmento3:
print("Esses segmentos podem formar um triângulo ISÓSCELES.")
else:
print("Esses segmentos podem formar um triângulo ESCALENO.")
else:
print("Esses segmentos NÃO PODEM formar um triângulo.")
| none | 1 | 3.757558 | 4 |
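The script above (prompts and messages are in Portuguese) reads three segment lengths, applies the triangle inequality, and classifies the triangle as equilateral, isosceles or scalene. The same logic, restated in English as a function with a couple of worked calls:

def classify_triangle(a, b, c):
    if a < b + c and b < a + c and c < a + b:   # triangle inequality
        if a == b == c:
            return "equilateral"
        if a == b or a == c or b == c:
            return "isosceles"
        return "scalene"
    return "not a triangle"

print(classify_triangle(3.0, 3.0, 3.0))  # equilateral
print(classify_triangle(3.0, 4.0, 8.0))  # not a triangle (8 is not less than 3 + 4)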
|
setup.py | kentsanggds/pivotalclient | 0 | 6632378 | from distutils.core import setup
VERSION = '0.4'
setup(
name='pivotalclient',
packages=['pivotalclient'],
version=VERSION,
description='A Python pivotal tracker client.',
author='<NAME>, CloudBolt Software',
author_email='<EMAIL>',
url='https://github.com/CloudBoltSoftware/pivotalclient',
download_url='https://github.com/CloudBoltSoftware/pivotalclient/tarball/{version}'.format(version=VERSION),
keywords=['pivotal', 'api', 'rest', 'client'],
license='MIT',
long_description=open('README').read(),
classifiers=[],
py_modules=['pivotalclient'],
)
| from distutils.core import setup
VERSION = '0.4'
setup(
name='pivotalclient',
packages=['pivotalclient'],
version=VERSION,
description='A Python pivotal tracker client.',
author='<NAME>, CloudBolt Software',
author_email='<EMAIL>',
url='https://github.com/CloudBoltSoftware/pivotalclient',
download_url='https://github.com/CloudBoltSoftware/pivotalclient/tarball/{version}'.format(version=VERSION),
keywords=['pivotal', 'api', 'rest', 'client'],
license='MIT',
long_description=open('README').read(),
classifiers=[],
py_modules=['pivotalclient'],
)
| none | 1 | 1.195679 | 1 |
|
sdk/python/core/tests/test_sanity_filters.py | xulleon/ydk-gen | 0 | 6632379 | <reponame>xulleon/ydk-gen
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""test_sanity_levels.py
sanity test for ydktest-sanity.yang
"""
from __future__ import absolute_import
import sys
import unittest
from ydk.providers import NetconfServiceProvider
from ydk.services import CRUDService
from ydk.filters import YFilter
from ydk.models.ydktest import ydktest_sanity as ysanity
from ydk.models.ydktest import openconfig_interfaces
from ydk.models.ydktest import iana_if_type
from test_utils import ParametrizedTestCase
from test_utils import get_device_info
class SanityYang(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ncc = NetconfServiceProvider(
cls.hostname,
cls.username,
cls.password,
cls.port,
cls.protocol,
cls.on_demand,
cls.common_cache,
cls.timeout)
cls.crud = CRUDService()
def setUp(self):
runner = ysanity.Runner()
self.crud.delete(self.ncc, runner)
def tearDown(self):
runner = ysanity.Runner()
self.crud.delete(self.ncc, runner)
def test_read_on_ref_class(self):
one = ysanity.Runner.One()
one.number, one.name = 1, 'runner:one:name'
self.crud.create(self.ncc, one)
one_filter = ysanity.Runner.One()
one_read = self.crud.read(self.ncc, one_filter)
self.assertIsNotNone(one_read)
self.assertEqual(one, one_read)
def test_read_on_leaf(self):
one = ysanity.Runner.One()
one.number, one.name = 1, 'runner:one:name'
self.crud.create(self.ncc, one)
r = ysanity.Runner()
one_filter = r.one
one_filter.name = YFilter.read
one_filter.number = YFilter.read
one_read = self.crud.read(self.ncc, one_filter)
self.assertIsNotNone(one_read)
self.assertEqual(one, one_read)
# no such value, will return empty data
one_filter = ysanity.Runner.One()
one_filter.number = 2
one_read = self.crud.read(self.ncc, one_filter)
self.assertIsNone(one_read)
def test_read_on_ref_enum_class(self):
from ydk.models.ydktest.ydktest_sanity import YdkEnumTest
r_1 = ysanity.Runner.Ytypes.BuiltInT()
r_1.enum_value = YdkEnumTest.local
self.crud.create(self.ncc, r_1)
r = ysanity.Runner()
r_2 = r.ytypes.built_in_t
r_2.enum_value = YFilter.read
runner_read = self.crud.read(self.ncc, r_2)
self.assertIsNotNone(runner_read)
self.assertEqual(r_1.enum_value, runner_read.enum_value)
r_2 = ysanity.Runner().Ytypes.BuiltInT()
r_2.enum_value = YdkEnumTest.local
runner_read = self.crud.read(self.ncc, r_2)
self.assertEqual(r_1.enum_value, runner_read.enum_value)
# no such value, nothing returned
r_2 = ysanity.Runner.Ytypes.BuiltInT()
r_2.enum_value = YdkEnumTest.remote
runner_read = self.crud.read(self.ncc, r_2)
self.assertIsNone(runner_read)
def test_read_on_ref_list(self):
r_1 = ysanity.Runner()
l_1, l_2 = ysanity.Runner.OneList.Ldata(), ysanity.Runner.OneList.Ldata()
l_1.number, l_2.number = 1, 2
r_1.one_list.ldata.extend([l_1, l_2])
self.crud.create(self.ncc, r_1)
r_2 = ysanity.Runner()
r_2.one_list.ldata.yfilter = YFilter.read
runner_read = self.crud.read(self.ncc, r_2)
self.assertEqual(runner_read, r_1)
def test_read_on_list_with_key(self):
r_1 = ysanity.Runner.OneList()
l_1, l_2 = ysanity.Runner.OneList.Ldata(), ysanity.Runner.OneList.Ldata()
l_1.number, l_2.number = 1, 2
r_1.ldata.extend([l_1, l_2])
self.crud.create(self.ncc, r_1)
r_2 = ysanity.Runner().OneList()
r_2.ldata.extend([l_1])
runner_read = self.crud.read(self.ncc, r_2)
self.assertEqual(runner_read, r_2)
def test_read_on_leaflist(self):
r_1 = ysanity.Runner.Ytypes.BuiltInT()
r_1.llstring.extend(['1', '2', '3'])
self.crud.create(self.ncc, r_1)
r_2 = ysanity.Runner()
r_2.ytypes.built_in_t.llstring = YFilter.read
runner_read = self.crud.read(self.ncc, r_2)
self.assertEqual(runner_read.ytypes.built_in_t.llstring, r_1.llstring)
def test_read_on_identity_ref(self):
r_1 = ysanity.Runner.Ytypes.BuiltInT()
r_1.identity_ref_value = ysanity.ChildIdentity()
self.crud.create(self.ncc, r_1)
r = ysanity.Runner()
r_2 = r.ytypes.built_in_t
r_2.identity_ref_value = YFilter.read
t_read = self.crud.read(self.ncc, r_2)
self.assertIsNotNone(t_read)
self.assertEqual(r_1.identity_ref_value, t_read.identity_ref_value)
def test_read_only_config(self):
r_1 = ysanity.Runner()
r_1.one.number, r_1.one.name = 1, 'runner:one:name'
self.crud.create(self.ncc, r_1)
r_2, r_3 = ysanity.Runner(), ysanity.Runner()
r_2.one.number = YFilter.read
r_3.one.number = YFilter.read
r_2 = self.crud.read_config(self.ncc, r_2)
r_3 = self.crud.read(self.ncc, r_3)
# ysanity only have config data, ok to compare
self.assertEqual(r_2.one.number, r_3.one.number)
self.assertEqual(r_2.one.name, r_3.one.name)
def test_decoder(self):
# send payload to device
element = ysanity.Runner.OneList.Ldata()
element.number = 5
element.name = 'five'
self.crud.create(self.ncc, element)
runner_filter = ysanity.Runner().OneList.Ldata()
runner_filter.number = 5
element_read = self.crud.read(self.ncc, runner_filter)
self.assertEqual(element, element_read)
def test_iana_if_type_decode(self):
# Build some configuration
ifcs_config = openconfig_interfaces.Interfaces()
ifc_config = openconfig_interfaces.Interfaces.Interface()
ifc_config.name = "GigabitEthernet0/0/0/2"
ifc_config.config.name = "GigabitEthernet0/0/0/2"
ifc_config.config.description = "Test interface"
ifc_config.config.type = iana_if_type.EthernetCsmacd()
ifcs_config.interface.append(ifc_config)
self.assertTrue( self.crud.create(self.ncc, ifc_config) )
# Read interface type only
ifcs = openconfig_interfaces.Interfaces()
ifc = openconfig_interfaces.Interfaces.Interface()
ifc.name = 'GigabitEthernet0/0/0/2'
ifc.config.type = YFilter.read
ifcs.interface.append(ifc)
ifc_read = self.crud.read(self.ncc, ifc)
self.assertIsNotNone(ifc_read)
self.assertEqual(ifc_read.config.type, iana_if_type.EthernetCsmacd())
if __name__ == '__main__':
device, non_demand, common_cache, timeout = get_device_info()
suite = unittest.TestSuite()
suite.addTest(ParametrizedTestCase.parametrize(
SanityYang,
device=device,
non_demand=non_demand,
common_cache=common_cache,
timeout=timeout))
ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(ret)
| # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""test_sanity_levels.py
sanity test for ydktest-sanity.yang
"""
from __future__ import absolute_import
import sys
import unittest
from ydk.providers import NetconfServiceProvider
from ydk.services import CRUDService
from ydk.filters import YFilter
from ydk.models.ydktest import ydktest_sanity as ysanity
from ydk.models.ydktest import openconfig_interfaces
from ydk.models.ydktest import iana_if_type
from test_utils import ParametrizedTestCase
from test_utils import get_device_info
class SanityYang(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ncc = NetconfServiceProvider(
cls.hostname,
cls.username,
cls.password,
cls.port,
cls.protocol,
cls.on_demand,
cls.common_cache,
cls.timeout)
cls.crud = CRUDService()
def setUp(self):
runner = ysanity.Runner()
self.crud.delete(self.ncc, runner)
def tearDown(self):
runner = ysanity.Runner()
self.crud.delete(self.ncc, runner)
def test_read_on_ref_class(self):
one = ysanity.Runner.One()
one.number, one.name = 1, 'runner:one:name'
self.crud.create(self.ncc, one)
one_filter = ysanity.Runner.One()
one_read = self.crud.read(self.ncc, one_filter)
self.assertIsNotNone(one_read)
self.assertEqual(one, one_read)
def test_read_on_leaf(self):
one = ysanity.Runner.One()
one.number, one.name = 1, 'runner:one:name'
self.crud.create(self.ncc, one)
r = ysanity.Runner()
one_filter = r.one
one_filter.name = YFilter.read
one_filter.number = YFilter.read
one_read = self.crud.read(self.ncc, one_filter)
self.assertIsNotNone(one_read)
self.assertEqual(one, one_read)
# no such value, will return empty data
one_filter = ysanity.Runner.One()
one_filter.number = 2
one_read = self.crud.read(self.ncc, one_filter)
self.assertIsNone(one_read)
def test_read_on_ref_enum_class(self):
from ydk.models.ydktest.ydktest_sanity import YdkEnumTest
r_1 = ysanity.Runner.Ytypes.BuiltInT()
r_1.enum_value = YdkEnumTest.local
self.crud.create(self.ncc, r_1)
r = ysanity.Runner()
r_2 = r.ytypes.built_in_t
r_2.enum_value = YFilter.read
runner_read = self.crud.read(self.ncc, r_2)
self.assertIsNotNone(runner_read)
self.assertEqual(r_1.enum_value, runner_read.enum_value)
r_2 = ysanity.Runner().Ytypes.BuiltInT()
r_2.enum_value = YdkEnumTest.local
runner_read = self.crud.read(self.ncc, r_2)
self.assertEqual(r_1.enum_value, runner_read.enum_value)
# no such value, nothing returned
r_2 = ysanity.Runner.Ytypes.BuiltInT()
r_2.enum_value = YdkEnumTest.remote
runner_read = self.crud.read(self.ncc, r_2)
self.assertIsNone(runner_read)
def test_read_on_ref_list(self):
r_1 = ysanity.Runner()
l_1, l_2 = ysanity.Runner.OneList.Ldata(), ysanity.Runner.OneList.Ldata()
l_1.number, l_2.number = 1, 2
r_1.one_list.ldata.extend([l_1, l_2])
self.crud.create(self.ncc, r_1)
r_2 = ysanity.Runner()
r_2.one_list.ldata.yfilter = YFilter.read
runner_read = self.crud.read(self.ncc, r_2)
self.assertEqual(runner_read, r_1)
def test_read_on_list_with_key(self):
r_1 = ysanity.Runner.OneList()
l_1, l_2 = ysanity.Runner.OneList.Ldata(), ysanity.Runner.OneList.Ldata()
l_1.number, l_2.number = 1, 2
r_1.ldata.extend([l_1, l_2])
self.crud.create(self.ncc, r_1)
r_2 = ysanity.Runner().OneList()
r_2.ldata.extend([l_1])
runner_read = self.crud.read(self.ncc, r_2)
self.assertEqual(runner_read, r_2)
def test_read_on_leaflist(self):
r_1 = ysanity.Runner.Ytypes.BuiltInT()
r_1.llstring.extend(['1', '2', '3'])
self.crud.create(self.ncc, r_1)
r_2 = ysanity.Runner()
r_2.ytypes.built_in_t.llstring = YFilter.read
runner_read = self.crud.read(self.ncc, r_2)
self.assertEqual(runner_read.ytypes.built_in_t.llstring, r_1.llstring)
def test_read_on_identity_ref(self):
r_1 = ysanity.Runner.Ytypes.BuiltInT()
r_1.identity_ref_value = ysanity.ChildIdentity()
self.crud.create(self.ncc, r_1)
r = ysanity.Runner()
r_2 = r.ytypes.built_in_t
r_2.identity_ref_value = YFilter.read
t_read = self.crud.read(self.ncc, r_2)
self.assertIsNotNone(t_read)
self.assertEqual(r_1.identity_ref_value, t_read.identity_ref_value)
def test_read_only_config(self):
r_1 = ysanity.Runner()
r_1.one.number, r_1.one.name = 1, 'runner:one:name'
self.crud.create(self.ncc, r_1)
r_2, r_3 = ysanity.Runner(), ysanity.Runner()
r_2.one.number = YFilter.read
r_3.one.number = YFilter.read
r_2 = self.crud.read_config(self.ncc, r_2)
r_3 = self.crud.read(self.ncc, r_3)
# ysanity only have config data, ok to compare
self.assertEqual(r_2.one.number, r_3.one.number)
self.assertEqual(r_2.one.name, r_3.one.name)
def test_decoder(self):
# send payload to device
element = ysanity.Runner.OneList.Ldata()
element.number = 5
element.name = 'five'
self.crud.create(self.ncc, element)
runner_filter = ysanity.Runner().OneList.Ldata()
runner_filter.number = 5
element_read = self.crud.read(self.ncc, runner_filter)
self.assertEqual(element, element_read)
def test_iana_if_type_decode(self):
# Build some configuration
ifcs_config = openconfig_interfaces.Interfaces()
ifc_config = openconfig_interfaces.Interfaces.Interface()
ifc_config.name = "GigabitEthernet0/0/0/2"
ifc_config.config.name = "GigabitEthernet0/0/0/2"
ifc_config.config.description = "Test interface"
ifc_config.config.type = iana_if_type.EthernetCsmacd()
ifcs_config.interface.append(ifc_config)
self.assertTrue( self.crud.create(self.ncc, ifc_config) )
# Read interface type only
ifcs = openconfig_interfaces.Interfaces()
ifc = openconfig_interfaces.Interfaces.Interface()
ifc.name = 'GigabitEthernet0/0/0/2'
ifc.config.type = YFilter.read
ifcs.interface.append(ifc)
ifc_read = self.crud.read(self.ncc, ifc)
self.assertIsNotNone(ifc_read)
self.assertEqual(ifc_read.config.type, iana_if_type.EthernetCsmacd())
if __name__ == '__main__':
device, non_demand, common_cache, timeout = get_device_info()
suite = unittest.TestSuite()
suite.addTest(ParametrizedTestCase.parametrize(
SanityYang,
device=device,
non_demand=non_demand,
common_cache=common_cache,
timeout=timeout))
ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(ret) | en | 0.742718 | # ---------------------------------------------------------------- # Copyright 2016 Cisco Systems # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------ test_sanity_levels.py sanity test for ydktest-sanity.yang # no such value, will return empty data # no such value, nothing returned # ysanity only have config data, ok to compare # send payload to device # Build some configuration # Read interface type only | 1.765766 | 2 |
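Distilled from the tests above, the core read-with-YFilter pattern against a live NETCONF device; the device address, credentials and port are placeholders.

from ydk.providers import NetconfServiceProvider
from ydk.services import CRUDService
from ydk.filters import YFilter
from ydk.models.ydktest import ydktest_sanity as ysanity

provider = NetconfServiceProvider("10.0.0.1", "admin", "admin", 830)  # placeholder device
crud = CRUDService()

runner_filter = ysanity.Runner()
runner_filter.one.name = YFilter.read          # ask only for this leaf
one_read = crud.read(provider, runner_filter.one)
print(one_read.name if one_read is not None else "no data")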
ccapi/requests/products/productoperations.py | stcstores/ccapi | 1 | 6632380 | """ProductOperations request."""
from ..apirequest import APIRequest
class ProductOperations(APIRequest):
"""ProductOperations request."""
uri = "Handlers/Products/ProductOperations.ashx"
GET_GENERATED_SKU = "getgeneratedsku"
UPDATE_HS_CODE = "updatehscode"
PRODUCT_IDS = "ProductIDs"
HS_CODE = "HSCode"
def __new__(self, request_mode, product_IDs=[], HS_code=None):
"""Create ProductOperations request.
Args:
request_mode: requestmode header
"""
self.request_mode = request_mode
self.product_IDs = (product_IDs,)
self.HS_code = HS_code
return super().__new__(self)
def get_headers(self):
"""Get headers for request."""
return {"requestmode": self.request_mode}
def get_params(self):
"""Get parameters for get request."""
return {"d": "769"}
def get_data(self):
"""Return request data."""
data = None
if self.request_mode == self.UPDATE_HS_CODE:
data = {self.PRODUCT_IDS: self.product_IDs, self.HS_CODE: self.HS_code}
return data
def process_response(self, response):
"""Handle request response."""
self.raise_for_non_200(
self, response, "Product Operations request returned an error code."
)
result = response.json()
return ProductOperationsResult(result)
class ProductOperationsResult:
"""Response from ProductOperations request."""
def __init__(self, result):
"""Get information from ProductOperations request."""
self.success = result["Success"]
self.message = result["Message"]
self.record_count = result["RecordCount"]
self.data = result["Data"]
| """ProductOperations request."""
from ..apirequest import APIRequest
class ProductOperations(APIRequest):
"""ProductOperations request."""
uri = "Handlers/Products/ProductOperations.ashx"
GET_GENERATED_SKU = "getgeneratedsku"
UPDATE_HS_CODE = "updatehscode"
PRODUCT_IDS = "ProductIDs"
HS_CODE = "HSCode"
def __new__(self, request_mode, product_IDs=[], HS_code=None):
"""Create ProductOperations request.
Args:
request_mode: requestmode header
"""
self.request_mode = request_mode
self.product_IDs = (product_IDs,)
self.HS_code = HS_code
return super().__new__(self)
def get_headers(self):
"""Get headers for request."""
return {"requestmode": self.request_mode}
def get_params(self):
"""Get parameters for get request."""
return {"d": "769"}
def get_data(self):
"""Return request data."""
data = None
if self.request_mode == self.UPDATE_HS_CODE:
data = {self.PRODUCT_IDS: self.product_IDs, self.HS_CODE: self.HS_code}
return data
def process_response(self, response):
"""Handle request response."""
self.raise_for_non_200(
self, response, "Product Operations request returned an error code."
)
result = response.json()
return ProductOperationsResult(result)
class ProductOperationsResult:
"""Response from ProductOperations request."""
def __init__(self, result):
"""Get information from ProductOperations request."""
self.success = result["Success"]
self.message = result["Message"]
self.record_count = result["RecordCount"]
self.data = result["Data"]
| en | 0.6171 | ProductOperations request. ProductOperations request. Create ProductOperations request. Args: request_mode: requestmode header Get headers for request. Get parameters for get request. Return request data. Handle request response. Response from ProductOperations request. Get information from ProductOperations request. | 2.606495 | 3 |
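A sketch of the payload an HS-code update produces, derived only from the get_headers()/get_data() methods above. The parent APIRequest class is not shown here and may dispatch the HTTP call itself, so this is illustrative rather than something to run against a live account; the product IDs and HS code are placeholders.

from ccapi.requests.products.productoperations import ProductOperations  # assumed import path

request_mode = ProductOperations.UPDATE_HS_CODE        # "updatehscode"
headers = {"requestmode": request_mode}
data = {
    ProductOperations.PRODUCT_IDS: ["12345678"],       # placeholder product IDs
    ProductOperations.HS_CODE: "640399",               # placeholder HS code
}
print(headers)
print(data)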
host/greatfet/interfaces/spi_bus.py | hewittc/greatfet | 2 | 6632381 | #
# This file is part of GreatFET
#
from ..interface import PirateCompatibleInterface
class SPIBus(PirateCompatibleInterface):
"""
Class representing a GreatFET SPI bus.
For now, supports only the second SPI bus (SPI1), as the first controller
is being used to control the onboard flash.
"""
# Short name for this type of interface.
INTERFACE_SHORT_NAME = "i2c"
class FREQ():
"""
Set of predefined frequencies used to configure the SPI bus. It
contains tuple of clock_prescale_rate & serial_clock_rate. All of
these frequencies assume that the PCLK is set to 204 MHz
"""
C204000Hz = (100, 9)
C408000Hz = (100, 4)
C680000Hz = (100, 2)
C1020000Hz = (100, 1)
C2040000Hz = (50, 1)
C4250000Hz = (24, 1)
C8500000Hz = (12, 1)
C12750000Hz = (8, 1)
C17000000Hz = (6, 1)
C20400000Hz = (2, 4)
C25500000Hz = (4, 1)
C34000000Hz = (2, 2)
C51000000Hz = (2, 1)
C102000000Hz = (2, 0)
def __init__(self, board, chip_select_gpio, name='spi bus', buffer_size=255,
freq_preset=None, serial_clock_rate=2, clock_prescale_rate=100):
"""
Initialize a new SPI bus.
FIXME: There's no reason we shouldn't just take the frequency desired
and compute it for the user. This API should change soon.
SPI freq is set using either the freq_preset parameter or the
combination of serial_clock_rate and clock_prescale_rate parameters.
When using serial_clock_rate & clock_prescale_rate parameters, the
resulting frequency will be:
PCLK / (clock_prescale_rate * [serial_clock_rate+1]).
Args:
board -- The GreatFET board whose SPI bus we want to control.
name -- The display name for the given SPI bus.
chip_select_gpio -- The GPIOPin object that will represent the bus's default chip select
buffer_size -- The size of the SPI receive buffer on the GreatFET.
freq_preset -- Set clock_prescale_rate and serial_clock_rate using
one of the frequency presets defined by SPIBus.FREQ
clock_prescale_rate -- This even value between 2 and 254, by which
PCLK is divided to yield the prescaler output clock.
serial_clock_rate -- The number of prescaler-output clocks per bit
on the bus, minus one.
"""
# Store a reference to the parent board.
self.api = board.apis.spi
self.board = board
# Store our limitations.
# TODO: grab these from the board!
self.buffer_size = buffer_size
# Create a list that will store all connected devices.
self.devices = []
# Store our chip select.
self._chip_select = chip_select_gpio
# Apply our frequency information.
if freq_preset:
clock_prescale_rate, serial_clock_rate = freq_preset
# Set up the SPI bus for communications.
self.api.init(serial_clock_rate, clock_prescale_rate)
def attach_device(self, device):
"""
Attaches a given SPI device to this bus. Typically called
by the SPI device as it is constructed.
Arguments:
device -- The device object to attach to the given bus.
"""
# TODO: Check for select pin conflicts; and handle chip select pins.
# TODO: replace the device list with a set of weak references
self.devices.append(device)
def transmit(self, data, receive_length=None, chip_select=None, deassert_chip_select=True, spi_mode=0):
"""
Sends (and typically receives) data over the SPI bus.
Args:
data -- the data to be sent to the given device.
receive_length -- the total amount of data to be read. If longer
than the data length, the transmit will automatically be extended
with zeroes.
chip_select -- the GPIOPin object that will serve as the chip select
for this transaction, None to use the bus's default, or False to not set CS.
deassert_chip_select -- if set, the chip-select line will be left low after
communicating; this allows this transaction to be continued in the future
spi_mode -- The SPI mode number [0-3] to use for the communication. Defaults to 0.
"""
data_to_transmit = bytearray(data)
data_received = bytearray()
# If we weren't provided with a chip-select, use the bus's default.
if chip_select is None:
chip_select = self._chip_select
if receive_length is None:
receive_length = len(data)
# If we need to receive more than we've transmitted, extend the data out.
if receive_length > len(data):
padding = receive_length - len(data)
data_to_transmit.extend([0] * padding)
# Set the polarity and phase (the "SPI mode").
self.api.set_clock_polarity_and_phase(spi_mode)
# Bring the relevant chip select low, to start the transaction.
if chip_select:
chip_select.low()
# Transmit our data in chunks of the buffer size.
while data_to_transmit:
# Extract a single data chunk from the transmit buffer.
chunk = data_to_transmit[0:self.buffer_size]
del data_to_transmit[0:self.buffer_size]
# Finally, exchange the data.
response = self.api.clock_data(len(chunk), bytes(chunk))
data_received.extend(response)
# Finally, unless the caller has requested we keep chip-select asserted,
# finish the transaction by releasing chip select.
if chip_select and deassert_chip_select:
chip_select.high()
# Once we're done, return the data received.
return bytes(data_received)
def disable_drive(self):
""" Tristates each of the pins on the given SPI bus. """
self.api.enable_drive(False)
def enable_drive(self):
""" Enables the bus to drive each of its output pins. """
self.api.enable_drive(True)
#
# Support methods to support bus pirate commands.
#
def _handle_pirate_read(self, length, ends_transaction=False):
""" Performs a bus-pirate read of the given length, and returns a list of numeric values. """
data_bytes = self.transmit(b"", receive_length=length, chip_select=False)
return list(data_bytes)
def _handle_pirate_write(self, data, ends_transaction=False):
""" Performs a bus-pirate transmit of the given length, and returns a list of numeric values. """
data_bytes = self.transmit(data, chip_select=False)
return list(data_bytes)
def _handle_pirate_start(self):
""" Starts a given communication by performing any start conditions present on the interface. """
self._chip_select.low()
def _handle_pirate_stop(self):
""" Starts a given communication by performing any start conditions present on the interface. """
self._chip_select.high()
| #
# This file is part of GreatFET
#
from ..interface import PirateCompatibleInterface
class SPIBus(PirateCompatibleInterface):
"""
Class representing a GreatFET SPI bus.
For now, supports only the second SPI bus (SPI1), as the first controller
is being used to control the onboard flash.
"""
# Short name for this type of interface.
INTERFACE_SHORT_NAME = "i2c"
class FREQ():
"""
Set of predefined frequencies used to configure the SPI bus. It
contains tuple of clock_prescale_rate & serial_clock_rate. All of
these frequencies assume that the PCLK is set to 204 MHz
"""
C204000Hz = (100, 9)
C408000Hz = (100, 4)
C680000Hz = (100, 2)
C1020000Hz = (100, 1)
C2040000Hz = (50, 1)
C4250000Hz = (24, 1)
C8500000Hz = (12, 1)
C12750000Hz = (8, 1)
C17000000Hz = (6, 1)
C20400000Hz = (2, 4)
C25500000Hz = (4, 1)
C34000000Hz = (2, 2)
C51000000Hz = (2, 1)
C102000000Hz = (2, 0)
def __init__(self, board, chip_select_gpio, name='spi bus', buffer_size=255,
freq_preset=None, serial_clock_rate=2, clock_prescale_rate=100):
"""
Initialize a new SPI bus.
FIXME: There's no reason we shouldn't just take the frequency desired
and compute it for the user. This API should change soon.
SPI freq is set using either the freq_preset parameter or the
combination of serial_clock_rate and clock_prescale_rate parameters.
When using serial_clock_rate & clock_prescale_rate parameters, the
resulting frequency will be:
PCLK / (clock_prescale_rate * [serial_clock_rate+1]).
Args:
board -- The GreatFET board whose SPI bus we want to control.
name -- The display name for the given SPI bus.
chip_select_gpio -- The GPIOPin object that will represent the bus's default chip select
buffer_size -- The size of the SPI receive buffer on the GreatFET.
freq_preset -- Set clock_prescale_rate and serial_clock_rate using
one of the frequency presets defined by SPIBus.FREQ
clock_prescale_rate -- This even value between 2 and 254, by which
PCLK is divided to yield the prescaler output clock.
serial_clock_rate -- The number of prescaler-output clocks per bit
on the bus, minus one.
"""
# Store a reference to the parent board.
self.api = board.apis.spi
self.board = board
# Store our limitations.
# TODO: grab these from the board!
self.buffer_size = buffer_size
# Create a list that will store all connected devices.
self.devices = []
# Store our chip select.
self._chip_select = chip_select_gpio
# Apply our frequency information.
if freq_preset:
clock_prescale_rate, serial_clock_rate = freq_preset
# Set up the SPI bus for communications.
self.api.init(serial_clock_rate, clock_prescale_rate)
def attach_device(self, device):
"""
Attaches a given SPI device to this bus. Typically called
by the SPI device as it is constructed.
Arguments:
device -- The device object to attach to the given bus.
"""
# TODO: Check for select pin conflicts; and handle chip select pins.
# TODO: replace the device list with a set of weak references
self.devices.append(device)
def transmit(self, data, receive_length=None, chip_select=None, deassert_chip_select=True, spi_mode=0):
"""
Sends (and typically receives) data over the SPI bus.
Args:
data -- the data to be sent to the given device.
receive_length -- the total amount of data to be read. If longer
than the data length, the transmit will automatically be extended
with zeroes.
chip_select -- the GPIOPin object that will serve as the chip select
for this transaction, None to use the bus's default, or False to not set CS.
deassert_chip_select -- if set, the chip-select line will be left low after
communicating; this allows this transaction to be continued in the future
spi_mode -- The SPI mode number [0-3] to use for the communication. Defaults to 0.
"""
data_to_transmit = bytearray(data)
data_received = bytearray()
# If we weren't provided with a chip-select, use the bus's default.
if chip_select is None:
chip_select = self._chip_select
if receive_length is None:
receive_length = len(data)
# If we need to receive more than we've transmitted, extend the data out.
if receive_length > len(data):
padding = receive_length - len(data)
data_to_transmit.extend([0] * padding)
# Set the polarity and phase (the "SPI mode").
self.api.set_clock_polarity_and_phase(spi_mode)
# Bring the relevant chip select low, to start the transaction.
if chip_select:
chip_select.low()
# Transmit our data in chunks of the buffer size.
while data_to_transmit:
# Extract a single data chunk from the transmit buffer.
chunk = data_to_transmit[0:self.buffer_size]
del data_to_transmit[0:self.buffer_size]
# Finally, exchange the data.
response = self.api.clock_data(len(chunk), bytes(chunk))
data_received.extend(response)
# Finally, unless the caller has requested we keep chip-select asserted,
# finish the transaction by releasing chip select.
if chip_select and deassert_chip_select:
chip_select.high()
# Once we're done, return the data received.
return bytes(data_received)
def disable_drive(self):
""" Tristates each of the pins on the given SPI bus. """
self.api.enable_drive(False)
def enable_drive(self):
""" Enables the bus to drive each of its output pins. """
self.api.enable_drive(True)
#
# Support methods to support bus pirate commands.
#
def _handle_pirate_read(self, length, ends_transaction=False):
""" Performs a bus-pirate read of the given length, and returns a list of numeric values. """
data_bytes = self.transmit(b"", receive_length=length, chip_select=False)
return list(data_bytes)
def _handle_pirate_write(self, data, ends_transaction=False):
""" Performs a bus-pirate transmit of the given length, and returns a list of numeric values. """
data_bytes = self.transmit(data, chip_select=False)
return list(data_bytes)
def _handle_pirate_start(self):
""" Starts a given communication by performing any start conditions present on the interface. """
self._chip_select.low()
def _handle_pirate_stop(self):
""" Starts a given communication by performing any start conditions present on the interface. """
self._chip_select.high()
| en | 0.834441 | # # This file is part of GreatFET # Class representing a GreatFET SPI bus. For now, supports only the second SPI bus (SPI1), as the first controller is being used to control the onboard flash. # Short name for this type of interface. Set of predefined frequencies used to configure the SPI bus. It contains tuple of clock_prescale_rate & serial_clock_rate. All of these frequencies assume that the PCLK is set to 204 MHz Initialize a new SPI bus. FIXME: There's no reason we shouldn't just take the frequency desired and compute it for the user. This API should change soon. SPI freq is set using either the freq_preset parameter or the combination of serial_clock_rate and clock_prescale_rate parameters. When using serial_clock_rate & clock_prescale_rate parameters, the resulting frequency will be: PCLK / (clock_prescale_rate * [serial_clock_rate+1]). Args: board -- The GreatFET board whose SPI bus we want to control. name -- The display name for the given SPI bus. chip_select_gpio -- The GPIOPin object that will represent the bus's default chip select buffer_size -- The size of the SPI receive buffer on the GreatFET. freq_preset -- Set clock_prescale_rate and serial_clock_rate using one of the frequency presets defined by SPIBus.FREQ clock_prescale_rate -- This even value between 2 and 254, by which PCLK is divided to yield the prescaler output clock. serial_clock_rate -- The number of prescaler-output clocks per bit on the bus, minus one. # Store a reference to the parent board. # Store our limitations. # TODO: grab these from the board! # Create a list that will store all connected devices. # Store our chip select. # Apply our frequency information. # Set up the SPI bus for communications. Attaches a given SPI device to this bus. Typically called by the SPI device as it is constructed. Arguments: device -- The device object to attach to the given bus. # TODO: Check for select pin conflicts; and handle chip select pins. # TODO: replace the device list with a set of weak references Sends (and typically receives) data over the SPI bus. Args: data -- the data to be sent to the given device. receive_length -- the total amount of data to be read. If longer than the data length, the transmit will automatically be extended with zeroes. chip_select -- the GPIOPin object that will serve as the chip select for this transaction, None to use the bus's default, or False to not set CS. deassert_chip_select -- if set, the chip-select line will be left low after communicating; this allows this transcation to be continued in the future spi_mode -- The SPI mode number [0-3] to use for the communication. Defaults to 0. # If we weren't provided with a chip-select, use the bus's default. # If we need to receive more than we've transmitted, extend the data out. # Set the polarity and phase (the "SPI mode"). # Bring the relevant chip select low, to start the transaction. # Transmit our data in chunks of the buffer size. # Extract a single data chunk from the transmit buffer. # Finally, exchange the data. # Finally, unless the caller has requested we keep chip-select asserted, # finish the transaction by releasing chip select. # Once we're done, return the data received. Tristates each of the pins on the given SPI bus. Enables the bus to drive each of its output pins. # # Support methods to support bus pirate commands. # Performs a bus-pirate read of the given length, and returns a list of numeric values. Performs a bus-pirate transmit of the given length, and returns a list of numeric values. 
Starts a given communication by performing any start conditions present on the interface. Starts a given communication by performing any start conditions present on the interface. | 2.999123 | 3 |
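A usage sketch for the bus above. Obtaining the board and the chip-select GPIOPin is shown with the usual GreatFET() host helper, which is an assumption about the surrounding package; the pin name is a placeholder, while the SPIBus calls follow the class definition.

from greatfet import GreatFET       # assumed host-package entry point

gf = GreatFET()
cs = gf.gpio.get_pin('J1_P37')      # placeholder chip-select pin name
bus = SPIBus(gf, cs, freq_preset=SPIBus.FREQ.C1020000Hz)  # SPIBus is the class defined above

# Clock out a command byte (e.g. 0x9F) and read back four bytes in SPI mode 0.
response = bus.transmit(b"\x9f", receive_length=4, spi_mode=0)
print(response.hex())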
tests/test_typeddict.py | bhumikadalal22/druidry | 0 | 6632382 | <reponame>bhumikadalal22/druidry<gh_stars>0
from .context import druidry
import unittest
class TestTypedDict(unittest.TestCase):
def test_invalid_type(self):
class OneTypeDict(druidry.typeddict.TypedDict):
required_fields = {'valid': {}}
with self.assertRaises(druidry.errors.DruidQueryError):
OneTypeDict('invalid')
def test_valid_type(self):
class OneTypeDict(druidry.typeddict.TypedDict):
required_fields = {'valid': {}}
result = OneTypeDict('valid')
self.assertEqual(result, {})
def test_one_required_field(self):
class RequiredFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field': bool
}
}
result = RequiredFieldsDict('dict_type', required_field=True)
self.assertEqual(result, {'required_field': True})
def test_multiple_required_fields(self):
class RequiredFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field_1': bool,
'required_field_2': int
}
}
result = RequiredFieldsDict('dict_type', required_field_1=True, required_field_2=42)
self.assertEqual(result, {'required_field_2': 42, 'required_field_1': True})
def test_required_field_no_type(self):
class RequiredFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field': None
}
}
result_1 = RequiredFieldsDict('dict_type', required_field=42)
self.assertEqual(result_1, {'required_field': 42})
result_2 = RequiredFieldsDict('dict_type', required_field=[1, 2, True])
self.assertEqual(result_2, {'required_field': [1, 2, True]})
def test_optional_field(self):
class OptionalFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field': list
}
}
optional_field = {
'dict_type': {
'optional_field': dict
}
}
result_1 = OptionalFieldsDict('dict_type', required_field=[1, 2, 3], optional_field={'a': 'z'})
self.assertEqual(result_1, {'required_field': [1, 2, 3]})
result_2 = OptionalFieldsDict('dict_type', required_field=[1, 2, 3])
self.assertEqual(result_2, {'required_field': [1, 2, 3]})
def test_multile_types_field(self):
class MultipleTypesFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field': [list, dict]
}
}
result_1 = MultipleTypesFieldsDict('dict_type', required_field=[1, 2, 3])
self.assertEqual(result_1, {'required_field': [1, 2, 3]})
result_2 = MultipleTypesFieldsDict('dict_type', required_field={'a': 'z'})
self.assertEqual(result_2, {'required_field': {'a': 'z'}})
def test_extend(self):
d = druidry.typeddict.ExtendableDict({'a': 1, 'b': 2})
self.assertEqual(d.extend(c=3), {'a': 1, 'b': 2, 'c': 3})
def test_extend_overwrite(self):
d = druidry.typeddict.ExtendableDict({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(d.extend(c=4), {'a': 1, 'b': 2, 'c': 4})
def test_extend_by(self):
d = druidry.typeddict.ExtendableDict({'a': 1, 'b': 2})
self.assertEqual(d.extend_by({'c': 3}), {'a': 1, 'b': 2, 'c': 3})
def test_extend_by_overwrite(self):
d = druidry.typeddict.ExtendableDict({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(d.extend_by({'c': 4}), {'a': 1, 'b': 2, 'c': 4})
def test_extend_subclass(self):
class MyDict(druidry.typeddict.ExtendableDict):
pass
d = MyDict({'a': 1, 'b': 2})
self.assertEqual(type(d.extend(c=3)), MyDict)
def test_extend_subclass_overwrite(self):
class MyDict(druidry.typeddict.ExtendableDict):
pass
d = MyDict({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(type(d.extend(c=4)), MyDict)
def test_wrap_instance(self):
class MyDict(druidry.typeddict.ExtendableDict):
pass
d = MyDict({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(type(MyDict.wrap(d)), MyDict)
def test_wrap_noninstance(self):
class MyDict(druidry.typeddict.ExtendableDict):
pass
d = {'a': 1, 'b': 2, 'c': 3}
self.assertEqual(type(MyDict.wrap(d)), MyDict)
| from .context import druidry
import unittest
class TestTypedDict(unittest.TestCase):
def test_invalid_type(self):
class OneTypeDict(druidry.typeddict.TypedDict):
required_fields = {'valid': {}}
with self.assertRaises(druidry.errors.DruidQueryError):
OneTypeDict('invalid')
def test_valid_type(self):
class OneTypeDict(druidry.typeddict.TypedDict):
required_fields = {'valid': {}}
result = OneTypeDict('valid')
self.assertEqual(result, {})
def test_one_required_field(self):
class RequiredFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field': bool
}
}
result = RequiredFieldsDict('dict_type', required_field=True)
self.assertEqual(result, {'required_field': True})
def test_multiple_required_fields(self):
class RequiredFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field_1': bool,
'required_field_2': int
}
}
result = RequiredFieldsDict('dict_type', required_field_1=True, required_field_2=42)
self.assertEqual(result, {'required_field_2': 42, 'required_field_1': True})
def test_required_field_no_type(self):
class RequiredFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field': None
}
}
result_1 = RequiredFieldsDict('dict_type', required_field=42)
self.assertEqual(result_1, {'required_field': 42})
result_2 = RequiredFieldsDict('dict_type', required_field=[1, 2, True])
self.assertEqual(result_2, {'required_field': [1, 2, True]})
def test_optional_field(self):
class OptionalFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field': list
}
}
optional_field = {
'dict_type': {
'optional_field': dict
}
}
result_1 = OptionalFieldsDict('dict_type', required_field=[1, 2, 3], optional_field={'a': 'z'})
self.assertEqual(result_1, {'required_field': [1, 2, 3]})
result_2 = OptionalFieldsDict('dict_type', required_field=[1, 2, 3])
self.assertEqual(result_2, {'required_field': [1, 2, 3]})
def test_multile_types_field(self):
class MultipleTypesFieldsDict(druidry.typeddict.TypedDict):
required_fields = {
'dict_type': {
'required_field': [list, dict]
}
}
result_1 = MultipleTypesFieldsDict('dict_type', required_field=[1, 2, 3])
self.assertEqual(result_1, {'required_field': [1, 2, 3]})
result_2 = MultipleTypesFieldsDict('dict_type', required_field={'a': 'z'})
self.assertEqual(result_2, {'required_field': {'a': 'z'}})
def test_extend(self):
d = druidry.typeddict.ExtendableDict({'a': 1, 'b': 2})
self.assertEqual(d.extend(c=3), {'a': 1, 'b': 2, 'c': 3})
def test_extend_overwrite(self):
d = druidry.typeddict.ExtendableDict({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(d.extend(c=4), {'a': 1, 'b': 2, 'c': 4})
def test_extend_by(self):
d = druidry.typeddict.ExtendableDict({'a': 1, 'b': 2})
self.assertEqual(d.extend_by({'c': 3}), {'a': 1, 'b': 2, 'c': 3})
def test_extend_by_overwrite(self):
d = druidry.typeddict.ExtendableDict({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(d.extend_by({'c': 4}), {'a': 1, 'b': 2, 'c': 4})
def test_extend_subclass(self):
class MyDict(druidry.typeddict.ExtendableDict):
pass
d = MyDict({'a': 1, 'b': 2})
self.assertEqual(type(d.extend(c=3)), MyDict)
def test_extend_subclass_overwrite(self):
class MyDict(druidry.typeddict.ExtendableDict):
pass
d = MyDict({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(type(d.extend(c=4)), MyDict)
def test_wrap_instance(self):
class MyDict(druidry.typeddict.ExtendableDict):
pass
d = MyDict({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(type(MyDict.wrap(d)), MyDict)
def test_wrap_noninstance(self):
class MyDict(druidry.typeddict.ExtendableDict):
pass
d = {'a': 1, 'b': 2, 'c': 3}
self.assertEqual(type(MyDict.wrap(d)), MyDict) | none | 1 | 2.902006 | 3 |
|
test_client.py | neonbjb/BootstrapNLP | 0 | 6632383 | <filename>test_client.py
from __future__ import print_function
import sys
import threading
# This is a placeholder for a Google-internal import.
import grpc
import numpy
import tensorflow as tf
import orjson
import numpy as np
from transformers import GPT2Tokenizer
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
class _ResultCounter(object):
"""Counter for the prediction results."""
def __init__(self, num_tests, concurrency):
self._num_tests = num_tests
self._concurrency = concurrency
self._error = 0
self._done = 0
self._active = 0
self._condition = threading.Condition()
self._responses = []
def inc_error(self):
with self._condition:
self._error += 1
def inc_done(self):
with self._condition:
self._done += 1
self._condition.notify()
def dec_active(self):
with self._condition:
self._active -= 1
self._condition.notify()
def get_error_rate(self):
with self._condition:
while self._done != self._num_tests:
self._condition.wait()
return self._error / float(self._num_tests)
def throttle(self):
with self._condition:
while self._active == self._concurrency:
self._condition.wait()
self._active += 1
def append_result(self, text, response):
with self._condition:
self._responses.append({'text': text, 'response': response})
def _create_rpc_callback(actual_text, actual_score, result_counter):
def _callback(result_future):
exception = result_future.exception()
if exception:
result_counter.inc_error()
print(exception)
else:
sys.stdout.write('.')
sys.stdout.flush()
response = numpy.array(result_future.result().outputs['final_linear'].float_val)
result_counter.append_result(actual_text, response)
print("%f/%f for {%s}" % (response, actual_score, actual_text))
result_counter.inc_done()
result_counter.dec_active()
return _callback
def load_data(filename):
data = orjson.loads(open(filename, "rb").read())
return np.asarray(data['input_id']), np.asarray(data['label'])
def do_inference():
i = 0
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
test_data_set, orig_stars = load_data("C:/Users/jbetk/Documents/data/ml/sentiment_analysis/outputs/gpt2/validation.json")
channel = grpc.insecure_channel("192.168.56.101:8500")
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
num_tests = 150
result_counter = _ResultCounter(num_tests, 50)
for _ in range(num_tests):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'gpt2'
encoded_text = test_data_set[i]
original_response = orig_stars[i]
i += 1
request.inputs["input_ids"].CopyFrom(
tf.make_tensor_proto(encoded_text, shape=[1, encoded_text.size]))
result_counter.throttle()
#result_future = stub.Predict.future(request, 30.0) # 5 seconds
result_future = stub.Predict(request, 30.0) # 5 seconds
result_future.add_done_callback(
_create_rpc_callback(tokenizer.decode(encoded_text), original_response, result_counter))
if __name__ == '__main__':
do_inference() | <filename>test_client.py
from __future__ import print_function
import sys
import threading
# This is a placeholder for a Google-internal import.
import grpc
import numpy
import tensorflow as tf
import orjson
import numpy as np
from transformers import GPT2Tokenizer
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
class _ResultCounter(object):
"""Counter for the prediction results."""
def __init__(self, num_tests, concurrency):
self._num_tests = num_tests
self._concurrency = concurrency
self._error = 0
self._done = 0
self._active = 0
self._condition = threading.Condition()
self._responses = []
def inc_error(self):
with self._condition:
self._error += 1
def inc_done(self):
with self._condition:
self._done += 1
self._condition.notify()
def dec_active(self):
with self._condition:
self._active -= 1
self._condition.notify()
def get_error_rate(self):
with self._condition:
while self._done != self._num_tests:
self._condition.wait()
return self._error / float(self._num_tests)
def throttle(self):
with self._condition:
while self._active == self._concurrency:
self._condition.wait()
self._active += 1
def append_result(self, text, response):
with self._condition:
self._responses.append({'text': text, 'response': response})
def _create_rpc_callback(actual_text, actual_score, result_counter):
def _callback(result_future):
exception = result_future.exception()
if exception:
result_counter.inc_error()
print(exception)
else:
sys.stdout.write('.')
sys.stdout.flush()
response = numpy.array(result_future.result().outputs['final_linear'].float_val)
result_counter.append_result(actual_text, response)
print("%f/%f for {%s}" % (response, actual_score, actual_text))
result_counter.inc_done()
result_counter.dec_active()
return _callback
def load_data(filename):
data = orjson.loads(open(filename, "rb").read())
return np.asarray(data['input_id']), np.asarray(data['label'])
def do_inference():
i = 0
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
test_data_set, orig_stars = load_data("C:/Users/jbetk/Documents/data/ml/sentiment_analysis/outputs/gpt2/validation.json")
channel = grpc.insecure_channel("192.168.56.101:8500")
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
num_tests = 150
result_counter = _ResultCounter(num_tests, 50)
for _ in range(num_tests):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'gpt2'
encoded_text = test_data_set[i]
original_response = orig_stars[i]
i += 1
request.inputs["input_ids"].CopyFrom(
tf.make_tensor_proto(encoded_text, shape=[1, encoded_text.size]))
result_counter.throttle()
#result_future = stub.Predict.future(request, 30.0) # 5 seconds
result_future = stub.Predict(request, 30.0) # 5 seconds
result_future.add_done_callback(
_create_rpc_callback(tokenizer.decode(encoded_text), original_response, result_counter))
if __name__ == '__main__':
do_inference() | en | 0.691609 | # This is a placeholder for a Google-internal import. Counter for the prediction results. #result_future = stub.Predict.future(request, 30.0) # 5 seconds # 5 seconds | 2.446189 | 2 |
torch_color_describer.py | pablonm3/cs224u-1 | 1 | 6632384 | <reponame>pablonm3/cs224u-1
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from torch_model_base import TorchModelBase
import utils
from utils import START_SYMBOL, END_SYMBOL, UNK_SYMBOL
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2020"
class ColorDataset(torch.utils.data.Dataset):
"""PyTorch dataset for contextual color describers. The primary
function of this dataset is to organize the raw data into
batches of Tensors of the appropriate shape and type. When
using this dataset with `torch.utils.data.DataLoader`, it is
crucial to supply the `collate_fn` method as the argument for
the `DataLoader.collate_fn` parameter.
Parameters
----------
color_seqs : list of lists of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples. The length of each
sequence can vary.
ex_lengths : list of int
Dimension m. Each value gives the length of the corresponding
word sequence in `word_seqs`.
"""
def __init__(self, color_seqs, word_seqs, ex_lengths):
assert len(color_seqs) == len(ex_lengths)
assert len(color_seqs) == len(word_seqs)
self.color_seqs = color_seqs
self.word_seqs = word_seqs
self.ex_lengths = ex_lengths
@staticmethod
def collate_fn(batch):
"""Function for creating batches.
        Parameters
        ----------
batch : tuple of length 3
Contains the `color_seqs`, `word_seqs`, and `ex_lengths`,
all as lists or similar Python iterables. The function
turns them into Tensors.
Returns
-------
color_seqs : torch.FloatTensor
Dimension (m, n, p).
word_seqs : torch.LongTensor
This is a padded sequence, dimension (m, k), where k is
the length of the longest sequence in the batch.
ex_lengths : torch.LongTensor
targets : torch.LongTensor
This is a padded sequence, dimension (m, k-1), where k is
the length of the longest sequence in the batch. The
targets match `word_seqs` except we drop the first symbol,
as it is always START_SYMBOL. When the loss is calculated,
we compare this sequence to `word_seqs` excluding the
final character, which is always the END_SYMBOL. The result
is that each timestep t is trained to predict the symbol
at t+1.
"""
color_seqs, word_seqs, ex_lengths = zip(*batch)
# Conversion to Tensors:
color_seqs = torch.FloatTensor(color_seqs)
word_seqs = [torch.LongTensor(seq) for seq in word_seqs]
ex_lengths = torch.LongTensor(ex_lengths)
# Targets as next-word predictions:
        targets = [x[1:] for x in word_seqs]
# Padding
word_seqs = torch.nn.utils.rnn.pad_sequence(
word_seqs, batch_first=True)
targets = torch.nn.utils.rnn.pad_sequence(
targets, batch_first=True)
return color_seqs, word_seqs, ex_lengths, targets
def __len__(self):
return len(self.color_seqs)
def __getitem__(self, idx):
return (self.color_seqs[idx], self.word_seqs[idx], self.ex_lengths[idx])
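# Illustrative sketch (an added example, not part of the original interface):
# how this dataset is typically wired into a `DataLoader`, per the class
# docstring's note about `collate_fn`. The toy shapes below are assumptions
# chosen only to keep the example self-contained.
def _demo_color_dataset():
    # Two examples, each with 3 context colors of dimension 4:
    color_seqs = [[[0.1] * 4] * 3, [[0.2] * 4] * 3]
    # Index sequences of different lengths (already int-encoded):
    word_seqs = [[0, 2, 1], [0, 2, 3, 1]]
    ex_lengths = [len(seq) for seq in word_seqs]
    dataset = ColorDataset(color_seqs, word_seqs, ex_lengths)
    # `collate_fn` must be passed explicitly:
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=2, shuffle=False, collate_fn=dataset.collate_fn)
    colors, words, lengths, targets = next(iter(loader))
    # Padded word batch is (2, 4); targets drop START_SYMBOL, giving (2, 3):
    return colors.shape, words.shape, lengths, targets.shape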
class Encoder(nn.Module):
"""Simple Encoder model based on a GRU cell.
Parameters
----------
color_dim : int
hidden_dim : int
"""
def __init__(self, color_dim, hidden_dim):
super(Encoder, self).__init__()
self.color_dim = color_dim
self.hidden_dim = hidden_dim
self.rnn = nn.GRU(
input_size=self.color_dim,
hidden_size=self.hidden_dim,
batch_first=True)
def forward(self, color_seqs):
output, hidden = self.rnn(color_seqs)
return hidden
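# Illustrative sketch (an added, assumption-labeled example): the encoder
# consumes a (batch, n_colors, color_dim) tensor and returns only the final
# GRU hidden state, which has shape (1, batch, hidden_dim).
def _demo_encoder_shapes(batch_size=2, n_colors=3, color_dim=4, hidden_dim=8):
    enc = Encoder(color_dim=color_dim, hidden_dim=hidden_dim)
    colors = torch.randn(batch_size, n_colors, color_dim)
    hidden = enc(colors)
    # hidden.shape == torch.Size([1, batch_size, hidden_dim])
    return hidden.shape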
class Decoder(nn.Module):
"""Simple Decoder model based on a GRU cell. The hidden
representations of the GRU are passed through a dense linear
layer, and those logits are used to train the language model
according to a softmax objective in `ContextualColorDescriber`.
Parameters
----------
vocab_size : int
embed_dim : int
hidden_dim : int
embedding : np.array or None
If `None`, a random embedding is created. If `np.array`, this
value becomes the embedding.
"""
def __init__(self, vocab_size, embed_dim, hidden_dim, embedding=None):
super(Decoder, self).__init__()
self.vocab_size = vocab_size
self.embedding = self._define_embedding(embedding, vocab_size, embed_dim)
self.embed_dim = self.embedding.embedding_dim
self.hidden_dim = hidden_dim
self.rnn = nn.GRU(
input_size=self.embed_dim,
hidden_size=self.hidden_dim,
batch_first=True)
self.output_layer = nn.Linear(self.hidden_dim, self.vocab_size)
def forward(self, word_seqs, seq_lengths=None, hidden=None, target_colors=None):
embs = self.get_embeddings(word_seqs, target_colors=target_colors)
if self.training:
# Packed sequence for performance:
embs = torch.nn.utils.rnn.pack_padded_sequence(
embs, batch_first=True, lengths=seq_lengths, enforce_sorted=False)
# RNN forward:
output, hidden = self.rnn(embs, hidden)
# Unpack:
output, seq_lengths = torch.nn.utils.rnn.pad_packed_sequence(
output, batch_first=True)
# Output dense layer to get logits:
output = self.output_layer(output)
# Drop the final element:
            output = output[:, :-1, :]
# Reshape for the sake of the loss function:
output = output.transpose(1, 2)
return output, hidden
else:
output, hidden = self.rnn(embs, hidden)
output = self.output_layer(output)
return output, hidden
def get_embeddings(self, word_seqs, target_colors=None):
"""Gets the input token representations. At present, these are
just taken directly from `self.embedding`, but `target_colors`
can be made available in case the user wants to subclass this
function to append these representations to each input token.
Parameters
----------
word_seqs : torch.LongTensor
This is a padded sequence, dimension (m, k), where k is
the length of the longest sequence in the batch.
target_colors : torch.FloatTensor
            Dimension (m, c), where m is the number of examples and
c is the dimensionality of the color representations.
"""
return self.embedding(word_seqs)
@staticmethod
def _define_embedding(embedding, vocab_size, embed_dim):
if embedding is None:
return nn.Embedding(vocab_size, embed_dim)
else:
embedding = torch.FloatTensor(embedding)
return nn.Embedding.from_pretrained(embedding)
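# Illustrative sketch (added example with made-up sizes): with `embedding=None`
# the decoder creates a trainable random `nn.Embedding`; with a pretrained
# array, that array is loaded and `embed_dim` is inferred from its columns.
def _demo_decoder_embedding(vocab_size=5, embed_dim=10):
    random_init = Decoder(
        vocab_size=vocab_size, embed_dim=embed_dim, hidden_dim=8)
    pretrained = np.random.uniform(size=(vocab_size, 7))
    pretrained_init = Decoder(
        vocab_size=vocab_size, embed_dim=embed_dim, hidden_dim=8,
        embedding=pretrained)
    # 10 for the random case; 7 (inferred) for the pretrained case:
    return random_init.embed_dim, pretrained_init.embed_dim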
class EncoderDecoder(nn.Module):
"""This class knits the `Encoder` and `Decoder` into a single class
that serves as the model for `ContextualColorDescriber`. This is
largely a convenience: it means that `ContextualColorDescriber`
can use a single `model` argument, and it allows us to localize
the core computations in the `forward` method of this class.
Parameters
----------
encoder : `Encoder`
decoder : `Decoder`
"""
def __init__(self, encoder, decoder):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self,
color_seqs,
word_seqs,
seq_lengths=None,
hidden=None,
targets=None):
"""This is the core method for this module. It has a lot of
arguments mainly to make it easy to create subclasses of this
        class that do interesting things without requiring modifications
to the `fit` method of `ContextualColorDescriber`.
Parameters
----------
color_seqs : torch.FloatTensor
Dimension (m, n, p), where m is the number of examples,
n is the number of colors in each context, and p is the
dimensionality of each color.
word_seqs : torch.LongTensor
Dimension (m, k), where m is the number of examples and k
is the length of all the (padded) sequences in the batch.
seq_lengths : torch.LongTensor or None
The true lengths of the sequences in `word_seqs`. If this
is None, then we are predicting new sequences, so we will
continue predicting until we hit a maximum length or we
            generate END_SYMBOL.
hidden : torch.FloatTensor or None
The hidden representation for each of the m examples in this
batch. If this is None, we are predicting new sequences
and so the hidden representation is computed for each timestep
during decoding.
targets : torch.LongTensor
Dimension (m, k-1). These are ignored entirely by the current
implementation, but they are passed in so that they could be
used, for example, to allow some non-teacher-forcing training.
Returns
-------
output : torch.FloatTensor
Dimension (m, k, c), where m is the number of examples, k
is the length of the sequences in this batch, and c is the
number of classes (the size of the vocabulary).
hidden : torch.FloatTensor
Dimension (m, h) where m is the number of examples and h is
the dimensionality of the hidden representations of the model.
targets : torch.LongTensor
Should be identical to `targets` as passed in.
"""
if hidden is None:
hidden = self.encoder(color_seqs)
output, hidden = self.decoder(
word_seqs, seq_lengths=seq_lengths, hidden=hidden)
return output, hidden, targets
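# Illustrative sketch of a full teacher-forcing forward pass (all sizes here
# are assumptions for demonstration): in training mode the logits come back
# as (m, vocab_size, k-1), ready for `nn.CrossEntropyLoss` against targets
# of shape (m, k-1).
def _demo_encoder_decoder_forward(m=2, n_colors=3, color_dim=4,
                                  vocab_size=5, embed_dim=6, hidden_dim=8, k=4):
    model = EncoderDecoder(
        Encoder(color_dim=color_dim, hidden_dim=hidden_dim),
        Decoder(vocab_size=vocab_size, embed_dim=embed_dim,
                hidden_dim=hidden_dim))
    model.train()
    color_seqs = torch.randn(m, n_colors, color_dim)
    word_seqs = torch.randint(0, vocab_size, (m, k))
    seq_lengths = torch.tensor([k, k - 1])
    output, hidden, _ = model(color_seqs, word_seqs, seq_lengths=seq_lengths)
    # output.shape == torch.Size([m, vocab_size, k - 1])
    return output.shape, hidden.shape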
class ContextualColorDescriber(TorchModelBase):
"""The primary interface to modeling contextual colors datasets.
Parameters
----------
vocab : list of str
This should be the vocabulary. It needs to be aligned with
`embedding` in the sense that the ith element of vocab
should be represented by the ith row of `embedding`.
embedding : np.array or None
Each row represents a word in `vocab`, as described above.
embed_dim : int
Dimensionality for the initial embeddings. This is ignored
if `embedding` is not None, as a specified value there
determines this value.
hidden_dim : int
Dimensionality of the hidden layer.
max_iter : int
Maximum number of training epochs.
eta : float
Learning rate.
optimizer : PyTorch optimizer
Default is `torch.optim.Adam`.
l2_strength : float
L2 regularization strength. Default 0 is no regularization.
warm_start : bool
If True, calling `fit` will resume training with previously
defined trainable parameters. If False, calling `fit` will
reinitialize all trainable parameters. Default: False.
device : 'cpu' or 'cuda'
The default is to use 'cuda' iff available
"""
def __init__(self,
vocab,
embedding=None,
embed_dim=50,
hidden_dim=50,
**kwargs):
super(ContextualColorDescriber, self).__init__(
hidden_dim=hidden_dim, **kwargs)
self.vocab = vocab
self.embedding = embedding
self.vocab_size = len(vocab)
self.word2index = dict(zip(self.vocab, range(self.vocab_size)))
self.index2word = dict(zip(range(self.vocab_size), self.vocab))
self.embed_dim = embed_dim
self.output_dim = self.vocab_size
self.start_index = self.vocab.index(START_SYMBOL)
self.end_index = self.vocab.index(END_SYMBOL)
self.unk_index = self.vocab.index(UNK_SYMBOL)
self.params += ['embed_dim', 'embedding']
# The base class has this attribute, but this model doesn't,
# so we remove it to avoid misleading people:
delattr(self, 'hidden_activation')
self.params.remove('hidden_activation')
def fit(self, color_seqs, word_seqs):
"""Standard `fit` method where `color_seqs` are the inputs and
`word_seqs` are the sequences to predict.
Parameters
----------
color_seqs : list of lists of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples. The length of each
sequence can vary.
Returns
-------
self
"""
self.color_dim = len(color_seqs[0][0])
if not self.warm_start or not hasattr(self, "model"):
self.model = self.build_graph()
self.opt = self.optimizer(
self.model.parameters(),
lr=self.eta,
weight_decay=self.l2_strength)
# Make sure that these attributes are aligned -- important
# where a supplied pretrained embedding has determined
# a `embed_dim` that might be different from the user's
# argument.
self.embed_dim = self.model.decoder.embed_dim
self.model.to(self.device)
self.model.train()
dataset = self.build_dataset(color_seqs, word_seqs)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=True,
drop_last=False,
pin_memory=True,
collate_fn=dataset.collate_fn)
loss = nn.CrossEntropyLoss()
for iteration in range(1, self.max_iter+1):
epoch_error = 0.0
for batch_colors, batch_words, batch_lens, targets in dataloader:
batch_colors = batch_colors.to(self.device)
batch_words = batch_words.to(self.device)
batch_lens = batch_lens.to(self.device)
targets = targets.to(self.device)
output, _, targets = self.model(
color_seqs=batch_colors,
word_seqs=batch_words,
seq_lengths=batch_lens,
targets=targets)
err = loss(output, targets)
epoch_error += err.item()
self.opt.zero_grad()
err.backward()
self.opt.step()
utils.progress_bar("Epoch {}; err = {}".format(iteration, epoch_error))
return self
def build_dataset(self, color_seqs, word_seqs):
word_seqs = [[self.word2index.get(w, self.unk_index) for w in seq]
for seq in word_seqs]
ex_lengths = [len(seq) for seq in word_seqs]
return ColorDataset(color_seqs, word_seqs, ex_lengths)
def build_graph(self):
encoder = Encoder(
color_dim=self.color_dim,
hidden_dim=self.hidden_dim)
decoder = Decoder(
vocab_size=self.vocab_size,
embed_dim=self.embed_dim,
embedding=self.embedding,
hidden_dim=self.hidden_dim)
return EncoderDecoder(encoder, decoder)
def predict(self, color_seqs, max_length=20):
"""Predict new sequences based on the color contexts in
`color_seqs`.
Parameters
----------
color_seqs : list of lists of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
max_length : int
Length of the longest sequences to create.
Returns
-------
list of str
"""
color_seqs = torch.FloatTensor(color_seqs)
self.model.to("cpu")
self.model.eval()
preds = []
with torch.no_grad():
# Get the hidden representations from the color contexts:
hidden = self.model.encoder(color_seqs)
# Start with START_SYMBOL for all examples:
decoder_input = [[self.start_index]] * len(color_seqs)
decoder_input = torch.LongTensor(decoder_input)
preds.append(decoder_input)
            # Now move through the remaining timesteps using the
# previous timestep to predict the next one:
for i in range(1, max_length):
output, hidden, _ = self.model(
color_seqs=color_seqs,
word_seqs=decoder_input,
seq_lengths=None,
hidden=hidden)
# Always take the highest probability token to
# be the prediction:
p = output.argmax(2)
preds.append(p)
decoder_input = p
# Convert all the predictions from indices to elements of
# `self.vocab`:
preds = torch.cat(preds, axis=1)
preds = [self._convert_predictions(p) for p in preds]
self.model.to(self.device)
return preds
def _convert_predictions(self, pred):
rep = []
for i in pred:
i = i.item()
rep.append(self.index2word[i])
if i == self.end_index:
return rep
return rep
def predict_proba(self, color_seqs, word_seqs):
"""Calculate the predicted probabilties of the sequences in
`word_seqs` given the color contexts in `color_seqs`.
Parameters
----------
color_seqs : list of lists of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples. The length of each
sequence can vary.
Returns
-------
list of lists of predicted probabilities. In other words,
for each example, at each timestep, there is a probability
distribution over the entire vocabulary.
"""
dataset = self.build_dataset(color_seqs, word_seqs)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
drop_last=False,
pin_memory=True,
collate_fn=dataset.collate_fn)
self.model.eval()
softmax = nn.Softmax(dim=2)
start_probs = np.zeros(self.vocab_size)
start_probs[self.start_index] = 1.0
all_probs = []
with torch.no_grad():
for batch_colors, batch_words, batch_lens, targets in dataloader:
batch_colors = batch_colors.to(self.device)
batch_words = batch_words.to(self.device)
batch_lens = batch_lens.to(self.device)
output, _, _ = self.model(
color_seqs=batch_colors,
word_seqs=batch_words,
seq_lengths=batch_lens)
probs = softmax(output)
probs = probs.cpu().numpy()
probs = np.insert(probs, 0, start_probs, axis=1)
all_probs += [p[: n] for p, n in zip(probs, batch_lens)]
return all_probs
def perplexities(self, color_seqs, word_seqs):
"""Compute the perplexity of each sequence in `word_seqs`
given `color_seqs`. For a sequence of conditional probabilities
p1, p2, ..., pN, the perplexity is calculated as
(p1 * p2 * ... * pN)**(-1/N)
Parameters
----------
color_seqs : list of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples, and the length of
each sequence can vary.
Returns
-------
list of float
"""
probs = self.predict_proba(color_seqs, word_seqs)
scores = []
for pred, seq in zip(probs, word_seqs):
# Get the probabilities corresponding to the path `seq`:
s = np.array([t[self.word2index.get(w, self.unk_index)]
for t, w in zip(pred, seq)])
scores.append(s)
perp = [np.prod(s)**(-1/len(s)) for s in scores]
return perp
def listener_predict_one(self, context, seq):
context = np.array(context)
n_colors = len(context)
# Get all possible context orders:
indices = list(range(n_colors))
orders = [list(x) for x in itertools.product(indices, repeat=n_colors)]
# All contexts as color sequences:
contexts = [context[x] for x in orders]
# Repeat the single utterance the needed number of times:
seqs = [seq] * len(contexts)
# All perplexities:
perps = self.perplexities(contexts, seqs)
# Ranking, using `order_indices` rather than colors and
# index sequences to avoid sorting errors from some versions
# of Python:
order_indices = range(len(orders))
ranking = sorted(zip(perps, order_indices))
# Return the minimum perplexity, the chosen color, and the
# index of the chosen color in the original context:
min_perp, order_index = ranking[0]
pred_color = contexts[order_index][-1]
pred_index = orders[order_index][-1]
return min_perp, pred_color, pred_index
def listener_accuracy(self, color_seqs, word_seqs):
"""Compute the "listener accuracy" of the model for each example.
For the ith example, this is defined as
prediction = max_{c in C_i} P(word_seq[i] | c)
where C_i is every possible permutation of the three colors in
color_seqs[i]. We take the model's prediction to be correct
if it chooses a c in which the target is in the privileged final
position in the color sequence. (There are two such c's, since
the distractors can be in two orders; we give full credit if one
of these two c's is chosen.)
Parameters
----------
color_seqs : list of lists of list of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples, and the length of
each sequence can vary.
Returns
-------
        float
"""
correct = 0
for color_seq, word_seq in zip(color_seqs, word_seqs):
target_index = len(color_seq) - 1
min_perp, pred, pred_index = self.listener_predict_one(
color_seq, word_seq)
correct += int(target_index == pred_index)
return correct / len(color_seqs)
def score(self, color_seqs, word_seqs):
"""Alias for `listener_accuracy`. This method is included to
make it easier to use sklearn cross-validators, which expect
a method called `score`.
"""
return self.listener_accuracy(color_seqs, word_seqs)
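# Illustrative sketch (added example) of the perplexity arithmetic used by
# `perplexities` above, with made-up token probabilities; no trained model
# is required to check the formula.
def _demo_perplexity_formula():
    token_probs = np.array([0.5, 0.25, 0.125])
    # (p1 * p2 * ... * pN) ** (-1 / N) == 4.0 for these values:
    return np.prod(token_probs) ** (-1 / len(token_probs))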
def create_example_dataset(group_size=100, vec_dim=2):
"""Creates simple datasets in which the inputs are three-vector
sequences and the outputs are simple character sequences, with
the range of values in the final vector in the input determining
the output sequence. For example, a single input/output pair
will look like this:
[[0.44, 0.51], [0.87, 0.89], [0.1, 0.2]], ['<s>', 'A', '</s>']
The sequences are meaningless, as are their lengths (which were
chosen only to be different from each other).
"""
import random
groups = ((0.0, 0.2), (0.4, 0.6), (0.8, 1.0))
vocab = ['<s>', '</s>', 'A', 'B', '$UNK']
seqs = [
['<s>', 'A', '</s>'],
['<s>', 'A', 'B', '</s>'],
['<s>', 'B', 'A', 'B', 'A', '</s>']]
color_seqs = []
word_seqs = []
for i, ((l, u), seq) in enumerate(zip(groups, seqs)):
dis_indices = list(range(len(groups)))
dis_indices.remove(i)
random.shuffle(dis_indices)
disl1, disu1 = groups[dis_indices[0]]
dis2 = disl2, disu2 = groups[dis_indices[1]]
for _ in range(group_size):
target = utils.randvec(vec_dim, l, u)
dis1 = utils.randvec(vec_dim, disl1, disu1)
dis2 = utils.randvec(vec_dim, disl2, disu2)
context = [dis1, dis2, target]
color_seqs.append(context)
word_seqs += [seq for _ in range(group_size)]
return color_seqs, word_seqs, vocab
def simple_example(group_size=100, vec_dim=2, initial_embedding=False):
from sklearn.model_selection import train_test_split
color_seqs, word_seqs, vocab = create_example_dataset(
group_size=group_size, vec_dim=vec_dim)
if initial_embedding:
import numpy as np
embedding = np.random.uniform(
low=-0.5, high=0.5, size=(len(vocab), 11))
else:
embedding = None
X_train, X_test, y_train, y_test = train_test_split(
color_seqs, word_seqs)
mod = ContextualColorDescriber(
vocab,
embed_dim=10,
hidden_dim=10,
max_iter=100,
embedding=embedding)
mod.fit(X_train, y_train)
preds = mod.predict(X_test)
correct = 0
for y, p in zip(y_test, preds):
if y == p:
correct += 1
print("\nExact sequence: {} of {} correct".format(correct, len(y_test)))
lis_acc = mod.listener_accuracy(X_test, y_test)
print("\nListener accuracy {}".format(lis_acc))
return lis_acc
if __name__ == '__main__':
simple_example()
| import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from torch_model_base import TorchModelBase
import utils
from utils import START_SYMBOL, END_SYMBOL, UNK_SYMBOL
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2020"
class ColorDataset(torch.utils.data.Dataset):
"""PyTorch dataset for contextual color describers. The primary
function of this dataset is to organize the raw data into
batches of Tensors of the appropriate shape and type. When
using this dataset with `torch.utils.data.DataLoader`, it is
crucial to supply the `collate_fn` method as the argument for
the `DataLoader.collate_fn` parameter.
Parameters
----------
color_seqs : list of lists of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples. The length of each
sequence can vary.
ex_lengths : list of int
Dimension m. Each value gives the length of the corresponding
word sequence in `word_seqs`.
"""
def __init__(self, color_seqs, word_seqs, ex_lengths):
assert len(color_seqs) == len(ex_lengths)
assert len(color_seqs) == len(word_seqs)
self.color_seqs = color_seqs
self.word_seqs = word_seqs
self.ex_lengths = ex_lengths
@staticmethod
def collate_fn(batch):
"""Function for creating batches.
Parameter
---------
batch : tuple of length 3
Contains the `color_seqs`, `word_seqs`, and `ex_lengths`,
all as lists or similar Python iterables. The function
turns them into Tensors.
Returns
-------
color_seqs : torch.FloatTensor
Dimension (m, n, p).
word_seqs : torch.LongTensor
This is a padded sequence, dimension (m, k), where k is
the length of the longest sequence in the batch.
ex_lengths : torch.LongTensor
targets : torch.LongTensor
This is a padded sequence, dimension (m, k-1), where k is
the length of the longest sequence in the batch. The
targets match `word_seqs` except we drop the first symbol,
as it is always START_SYMBOL. When the loss is calculated,
we compare this sequence to `word_seqs` excluding the
final character, which is always the END_SYMBOL. The result
is that each timestep t is trained to predict the symbol
at t+1.
"""
color_seqs, word_seqs, ex_lengths = zip(*batch)
# Conversion to Tensors:
color_seqs = torch.FloatTensor(color_seqs)
word_seqs = [torch.LongTensor(seq) for seq in word_seqs]
ex_lengths = torch.LongTensor(ex_lengths)
# Targets as next-word predictions:
targets = [x[1: , ] for x in word_seqs]
# Padding
word_seqs = torch.nn.utils.rnn.pad_sequence(
word_seqs, batch_first=True)
targets = torch.nn.utils.rnn.pad_sequence(
targets, batch_first=True)
return color_seqs, word_seqs, ex_lengths, targets
def __len__(self):
return len(self.color_seqs)
def __getitem__(self, idx):
return (self.color_seqs[idx], self.word_seqs[idx], self.ex_lengths[idx])
class Encoder(nn.Module):
"""Simple Encoder model based on a GRU cell.
Parameters
----------
color_dim : int
hidden_dim : int
"""
def __init__(self, color_dim, hidden_dim):
super(Encoder, self).__init__()
self.color_dim = color_dim
self.hidden_dim = hidden_dim
self.rnn = nn.GRU(
input_size=self.color_dim,
hidden_size=self.hidden_dim,
batch_first=True)
def forward(self, color_seqs):
output, hidden = self.rnn(color_seqs)
return hidden
class Decoder(nn.Module):
"""Simple Decoder model based on a GRU cell. The hidden
representations of the GRU are passed through a dense linear
layer, and those logits are used to train the language model
according to a softmax objective in `ContextualColorDescriber`.
Parameters
----------
vocab_size : int
embed_dim : int
hidden_dim : int
embedding : np.array or None
If `None`, a random embedding is created. If `np.array`, this
value becomes the embedding.
"""
def __init__(self, vocab_size, embed_dim, hidden_dim, embedding=None):
super(Decoder, self).__init__()
self.vocab_size = vocab_size
self.embedding = self._define_embedding(embedding, vocab_size, embed_dim)
self.embed_dim = self.embedding.embedding_dim
self.hidden_dim = hidden_dim
self.rnn = nn.GRU(
input_size=self.embed_dim,
hidden_size=self.hidden_dim,
batch_first=True)
self.output_layer = nn.Linear(self.hidden_dim, self.vocab_size)
def forward(self, word_seqs, seq_lengths=None, hidden=None, target_colors=None):
embs = self.get_embeddings(word_seqs, target_colors=target_colors)
if self.training:
# Packed sequence for performance:
embs = torch.nn.utils.rnn.pack_padded_sequence(
embs, batch_first=True, lengths=seq_lengths, enforce_sorted=False)
# RNN forward:
output, hidden = self.rnn(embs, hidden)
# Unpack:
output, seq_lengths = torch.nn.utils.rnn.pad_packed_sequence(
output, batch_first=True)
# Output dense layer to get logits:
output = self.output_layer(output)
# Drop the final element:
output = output[: , : -1, :]
# Reshape for the sake of the loss function:
output = output.transpose(1, 2)
return output, hidden
else:
output, hidden = self.rnn(embs, hidden)
output = self.output_layer(output)
return output, hidden
def get_embeddings(self, word_seqs, target_colors=None):
"""Gets the input token representations. At present, these are
just taken directly from `self.embedding`, but `target_colors`
can be made available in case the user wants to subclass this
function to append these representations to each input token.
Parameters
----------
word_seqs : torch.LongTensor
This is a padded sequence, dimension (m, k), where k is
the length of the longest sequence in the batch.
target_colors : torch.FloatTensor
Dimension (m, c), where m is the number of exampkes and
c is the dimensionality of the color representations.
"""
return self.embedding(word_seqs)
@staticmethod
def _define_embedding(embedding, vocab_size, embed_dim):
if embedding is None:
return nn.Embedding(vocab_size, embed_dim)
else:
embedding = torch.FloatTensor(embedding)
return nn.Embedding.from_pretrained(embedding)
class EncoderDecoder(nn.Module):
"""This class knits the `Encoder` and `Decoder` into a single class
that serves as the model for `ContextualColorDescriber`. This is
largely a convenience: it means that `ContextualColorDescriber`
can use a single `model` argument, and it allows us to localize
the core computations in the `forward` method of this class.
Parameters
----------
encoder : `Encoder`
decoder : `Decoder`
"""
def __init__(self, encoder, decoder):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self,
color_seqs,
word_seqs,
seq_lengths=None,
hidden=None,
targets=None):
"""This is the core method for this module. It has a lot of
arguments mainly to make it easy to create subclasses of this
class that do interesting things without requring modifications
to the `fit` method of `ContextualColorDescriber`.
Parameters
----------
color_seqs : torch.FloatTensor
Dimension (m, n, p), where m is the number of examples,
n is the number of colors in each context, and p is the
dimensionality of each color.
word_seqs : torch.LongTensor
Dimension (m, k), where m is the number of examples and k
is the length of all the (padded) sequences in the batch.
seq_lengths : torch.LongTensor or None
The true lengths of the sequences in `word_seqs`. If this
is None, then we are predicting new sequences, so we will
continue predicting until we hit a maximum length or we
generate STOP_SYMBOL.
hidden : torch.FloatTensor or None
The hidden representation for each of the m examples in this
batch. If this is None, we are predicting new sequences
and so the hidden representation is computed for each timestep
during decoding.
targets : torch.LongTensor
Dimension (m, k-1). These are ignored entirely by the current
implementation, but they are passed in so that they could be
used, for example, to allow some non-teacher-forcing training.
Returns
-------
output : torch.FloatTensor
Dimension (m, k, c), where m is the number of examples, k
is the length of the sequences in this batch, and c is the
number of classes (the size of the vocabulary).
hidden : torch.FloatTensor
Dimension (m, h) where m is the number of examples and h is
the dimensionality of the hidden representations of the model.
targets : torch.LongTensor
Should be identical to `targets` as passed in.
"""
if hidden is None:
hidden = self.encoder(color_seqs)
output, hidden = self.decoder(
word_seqs, seq_lengths=seq_lengths, hidden=hidden)
return output, hidden, targets
class ContextualColorDescriber(TorchModelBase):
"""The primary interface to modeling contextual colors datasets.
Parameters
----------
vocab : list of str
This should be the vocabulary. It needs to be aligned with
`embedding` in the sense that the ith element of vocab
should be represented by the ith row of `embedding`.
embedding : np.array or None
Each row represents a word in `vocab`, as described above.
embed_dim : int
Dimensionality for the initial embeddings. This is ignored
if `embedding` is not None, as a specified value there
determines this value.
hidden_dim : int
Dimensionality of the hidden layer.
max_iter : int
Maximum number of training epochs.
eta : float
Learning rate.
optimizer : PyTorch optimizer
Default is `torch.optim.Adam`.
l2_strength : float
L2 regularization strength. Default 0 is no regularization.
warm_start : bool
If True, calling `fit` will resume training with previously
defined trainable parameters. If False, calling `fit` will
reinitialize all trainable parameters. Default: False.
device : 'cpu' or 'cuda'
The default is to use 'cuda' iff available
"""
def __init__(self,
vocab,
embedding=None,
embed_dim=50,
hidden_dim=50,
**kwargs):
super(ContextualColorDescriber, self).__init__(
hidden_dim=hidden_dim, **kwargs)
self.vocab = vocab
self.embedding = embedding
self.vocab_size = len(vocab)
self.word2index = dict(zip(self.vocab, range(self.vocab_size)))
self.index2word = dict(zip(range(self.vocab_size), self.vocab))
self.embed_dim = embed_dim
self.output_dim = self.vocab_size
self.start_index = self.vocab.index(START_SYMBOL)
self.end_index = self.vocab.index(END_SYMBOL)
self.unk_index = self.vocab.index(UNK_SYMBOL)
self.params += ['embed_dim', 'embedding']
# The base class has this attribute, but this model doesn't,
# so we remove it to avoid misleading people:
delattr(self, 'hidden_activation')
self.params.remove('hidden_activation')
def fit(self, color_seqs, word_seqs):
"""Standard `fit` method where `color_seqs` are the inputs and
`word_seqs` are the sequences to predict.
Parameters
----------
color_seqs : list of lists of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples. The length of each
sequence can vary.
Returns
-------
self
"""
self.color_dim = len(color_seqs[0][0])
if not self.warm_start or not hasattr(self, "model"):
self.model = self.build_graph()
self.opt = self.optimizer(
self.model.parameters(),
lr=self.eta,
weight_decay=self.l2_strength)
# Make sure that these attributes are aligned -- important
# where a supplied pretrained embedding has determined
# a `embed_dim` that might be different from the user's
# argument.
self.embed_dim = self.model.decoder.embed_dim
self.model.to(self.device)
self.model.train()
dataset = self.build_dataset(color_seqs, word_seqs)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=True,
drop_last=False,
pin_memory=True,
collate_fn=dataset.collate_fn)
loss = nn.CrossEntropyLoss()
for iteration in range(1, self.max_iter+1):
epoch_error = 0.0
for batch_colors, batch_words, batch_lens, targets in dataloader:
batch_colors = batch_colors.to(self.device)
batch_words = batch_words.to(self.device)
batch_lens = batch_lens.to(self.device)
targets = targets.to(self.device)
output, _, targets = self.model(
color_seqs=batch_colors,
word_seqs=batch_words,
seq_lengths=batch_lens,
targets=targets)
err = loss(output, targets)
epoch_error += err.item()
self.opt.zero_grad()
err.backward()
self.opt.step()
utils.progress_bar("Epoch {}; err = {}".format(iteration, epoch_error))
return self
def build_dataset(self, color_seqs, word_seqs):
word_seqs = [[self.word2index.get(w, self.unk_index) for w in seq]
for seq in word_seqs]
ex_lengths = [len(seq) for seq in word_seqs]
return ColorDataset(color_seqs, word_seqs, ex_lengths)
def build_graph(self):
encoder = Encoder(
color_dim=self.color_dim,
hidden_dim=self.hidden_dim)
decoder = Decoder(
vocab_size=self.vocab_size,
embed_dim=self.embed_dim,
embedding=self.embedding,
hidden_dim=self.hidden_dim)
return EncoderDecoder(encoder, decoder)
def predict(self, color_seqs, max_length=20):
"""Predict new sequences based on the color contexts in
`color_seqs`.
Parameters
----------
color_seqs : list of lists of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
max_length : int
Length of the longest sequences to create.
Returns
-------
list of str
"""
color_seqs = torch.FloatTensor(color_seqs)
self.model.to("cpu")
self.model.eval()
preds = []
with torch.no_grad():
# Get the hidden representations from the color contexts:
hidden = self.model.encoder(color_seqs)
# Start with START_SYMBOL for all examples:
decoder_input = [[self.start_index]] * len(color_seqs)
decoder_input = torch.LongTensor(decoder_input)
preds.append(decoder_input)
# Now move through the remaiming timesteps using the
# previous timestep to predict the next one:
for i in range(1, max_length):
output, hidden, _ = self.model(
color_seqs=color_seqs,
word_seqs=decoder_input,
seq_lengths=None,
hidden=hidden)
# Always take the highest probability token to
# be the prediction:
p = output.argmax(2)
preds.append(p)
decoder_input = p
# Convert all the predictions from indices to elements of
# `self.vocab`:
preds = torch.cat(preds, axis=1)
preds = [self._convert_predictions(p) for p in preds]
self.model.to(self.device)
return preds
def _convert_predictions(self, pred):
rep = []
for i in pred:
i = i.item()
rep.append(self.index2word[i])
if i == self.end_index:
return rep
return rep
def predict_proba(self, color_seqs, word_seqs):
"""Calculate the predicted probabilties of the sequences in
`word_seqs` given the color contexts in `color_seqs`.
Parameters
----------
color_seqs : list of lists of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples. The length of each
sequence can vary.
Returns
-------
list of lists of predicted probabilities. In other words,
for each example, at each timestep, there is a probability
distribution over the entire vocabulary.
"""
dataset = self.build_dataset(color_seqs, word_seqs)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
drop_last=False,
pin_memory=True,
collate_fn=dataset.collate_fn)
self.model.eval()
softmax = nn.Softmax(dim=2)
start_probs = np.zeros(self.vocab_size)
start_probs[self.start_index] = 1.0
all_probs = []
with torch.no_grad():
for batch_colors, batch_words, batch_lens, targets in dataloader:
batch_colors = batch_colors.to(self.device)
batch_words = batch_words.to(self.device)
batch_lens = batch_lens.to(self.device)
output, _, _ = self.model(
color_seqs=batch_colors,
word_seqs=batch_words,
seq_lengths=batch_lens)
probs = softmax(output)
probs = probs.cpu().numpy()
probs = np.insert(probs, 0, start_probs, axis=1)
all_probs += [p[: n] for p, n in zip(probs, batch_lens)]
return all_probs
def perplexities(self, color_seqs, word_seqs):
"""Compute the perplexity of each sequence in `word_seqs`
given `color_seqs`. For a sequence of conditional probabilities
p1, p2, ..., pN, the perplexity is calculated as
(p1 * p2 * ... * pN)**(-1/N)
Parameters
----------
color_seqs : list of lists of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples, and the length of
each sequence can vary.
Returns
-------
list of float
"""
probs = self.predict_proba(color_seqs, word_seqs)
scores = []
for pred, seq in zip(probs, word_seqs):
# Get the probabilities corresponding to the path `seq`:
s = np.array([t[self.word2index.get(w, self.unk_index)]
for t, w in zip(pred, seq)])
scores.append(s)
perp = [np.prod(s)**(-1/len(s)) for s in scores]
return perp
def listener_predict_one(self, context, seq):
context = np.array(context)
n_colors = len(context)
# Get all possible context orders:
indices = list(range(n_colors))
orders = [list(x) for x in itertools.product(indices, repeat=n_colors)]
# All contexts as color sequences:
contexts = [context[x] for x in orders]
# Repeat the single utterance the needed number of times:
seqs = [seq] * len(contexts)
# All perplexities:
perps = self.perplexities(contexts, seqs)
# Ranking, using `order_indices` rather than colors and
# index sequences to avoid sorting errors from some versions
# of Python:
order_indices = range(len(orders))
ranking = sorted(zip(perps, order_indices))
# Return the minimum perplexity, the chosen color, and the
# index of the chosen color in the original context:
min_perp, order_index = ranking[0]
pred_color = contexts[order_index][-1]
pred_index = orders[order_index][-1]
return min_perp, pred_color, pred_index
def listener_accuracy(self, color_seqs, word_seqs):
"""Compute the "listener accuracy" of the model for each example.
For the ith example, this is defined as
prediction = max_{c in C_i} P(word_seq[i] | c)
where C_i is every possible permutation of the three colors in
color_seqs[i]. We take the model's prediction to be correct
if it chooses a c in which the target is in the privileged final
position in the color sequence. (There are two such c's, since
the distractors can be in two orders; we give full credit if one
of these two c's is chosen.)
Parameters
----------
color_seqs : list of lists of list of floats, or np.array
Dimension (m, n, p) where m is the number of examples, n is
the number of colors in each context, and p is the length
of the color representations.
word_seqs : list of list of int
Dimension m, the number of examples, and the length of
each sequence can vary.
Returns
-------
list of float
"""
correct = 0
for color_seq, word_seq in zip(color_seqs, word_seqs):
target_index = len(color_seq) - 1
min_perp, pred, pred_index = self.listener_predict_one(
color_seq, word_seq)
correct += int(target_index == pred_index)
return correct / len(color_seqs)
def score(self, color_seqs, word_seqs):
"""Alias for `listener_accuracy`. This method is included to
make it easier to use sklearn cross-validators, which expect
a method called `score`.
"""
return self.listener_accuracy(color_seqs, word_seqs)
def create_example_dataset(group_size=100, vec_dim=2):
"""Creates simple datasets in which the inputs are three-vector
sequences and the outputs are simple character sequences, with
the range of values in the final vector in the input determining
the output sequence. For example, a single input/output pair
will look like this:
[[0.44, 0.51], [0.87, 0.89], [0.1, 0.2]], ['<s>', 'A', '</s>']
The sequences are meaningless, as are their lengths (which were
chosen only to be different from each other).
"""
import random
groups = ((0.0, 0.2), (0.4, 0.6), (0.8, 1.0))
vocab = ['<s>', '</s>', 'A', 'B', '$UNK']
seqs = [
['<s>', 'A', '</s>'],
['<s>', 'A', 'B', '</s>'],
['<s>', 'B', 'A', 'B', 'A', '</s>']]
color_seqs = []
word_seqs = []
for i, ((l, u), seq) in enumerate(zip(groups, seqs)):
dis_indices = list(range(len(groups)))
dis_indices.remove(i)
random.shuffle(dis_indices)
disl1, disu1 = groups[dis_indices[0]]
dis2 = disl2, disu2 = groups[dis_indices[1]]
for _ in range(group_size):
target = utils.randvec(vec_dim, l, u)
dis1 = utils.randvec(vec_dim, disl1, disu1)
dis2 = utils.randvec(vec_dim, disl2, disu2)
context = [dis1, dis2, target]
color_seqs.append(context)
word_seqs += [seq for _ in range(group_size)]
return color_seqs, word_seqs, vocab
def simple_example(group_size=100, vec_dim=2, initial_embedding=False):
from sklearn.model_selection import train_test_split
color_seqs, word_seqs, vocab = create_example_dataset(
group_size=group_size, vec_dim=vec_dim)
if initial_embedding:
import numpy as np
embedding = np.random.uniform(
low=-0.5, high=0.5, size=(len(vocab), 11))
else:
embedding = None
X_train, X_test, y_train, y_test = train_test_split(
color_seqs, word_seqs)
mod = ContextualColorDescriber(
vocab,
embed_dim=10,
hidden_dim=10,
max_iter=100,
embedding=embedding)
mod.fit(X_train, y_train)
preds = mod.predict(X_test)
correct = 0
for y, p in zip(y_test, preds):
if y == p:
correct += 1
print("\nExact sequence: {} of {} correct".format(correct, len(y_test)))
lis_acc = mod.listener_accuracy(X_test, y_test)
print("\nListener accuracy {}".format(lis_acc))
return lis_acc
if __name__ == '__main__':
simple_example() | en | 0.818079 | PyTorch dataset for contextual color describers. The primary function of this dataset is to organize the raw data into batches of Tensors of the appropriate shape and type. When using this dataset with `torch.utils.data.DataLoader`, it is crucial to supply the `collate_fn` method as the argument for the `DataLoader.collate_fn` parameter. Parameters ---------- color_seqs : list of lists of lists of floats, or np.array Dimension (m, n, p) where m is the number of examples, n is the number of colors in each context, and p is the length of the color representations. word_seqs : list of list of int Dimension m, the number of examples. The length of each sequence can vary. ex_lengths : list of int Dimension m. Each value gives the length of the corresponding word sequence in `word_seqs`. Function for creating batches. Parameter --------- batch : tuple of length 3 Contains the `color_seqs`, `word_seqs`, and `ex_lengths`, all as lists or similar Python iterables. The function turns them into Tensors. Returns ------- color_seqs : torch.FloatTensor Dimension (m, n, p). word_seqs : torch.LongTensor This is a padded sequence, dimension (m, k), where k is the length of the longest sequence in the batch. ex_lengths : torch.LongTensor targets : torch.LongTensor This is a padded sequence, dimension (m, k-1), where k is the length of the longest sequence in the batch. The targets match `word_seqs` except we drop the first symbol, as it is always START_SYMBOL. When the loss is calculated, we compare this sequence to `word_seqs` excluding the final character, which is always the END_SYMBOL. The result is that each timestep t is trained to predict the symbol at t+1. # Conversion to Tensors: # Targets as next-word predictions: # Padding Simple Encoder model based on a GRU cell. Parameters ---------- color_dim : int hidden_dim : int Simple Decoder model based on a GRU cell. The hidden representations of the GRU are passed through a dense linear layer, and those logits are used to train the language model according to a softmax objective in `ContextualColorDescriber`. Parameters ---------- vocab_size : int embed_dim : int hidden_dim : int embedding : np.array or None If `None`, a random embedding is created. If `np.array`, this value becomes the embedding. # Packed sequence for performance: # RNN forward: # Unpack: # Output dense layer to get logits: # Drop the final element: # Reshape for the sake of the loss function: Gets the input token representations. At present, these are just taken directly from `self.embedding`, but `target_colors` can be made available in case the user wants to subclass this function to append these representations to each input token. Parameters ---------- word_seqs : torch.LongTensor This is a padded sequence, dimension (m, k), where k is the length of the longest sequence in the batch. target_colors : torch.FloatTensor Dimension (m, c), where m is the number of exampkes and c is the dimensionality of the color representations. This class knits the `Encoder` and `Decoder` into a single class that serves as the model for `ContextualColorDescriber`. This is largely a convenience: it means that `ContextualColorDescriber` can use a single `model` argument, and it allows us to localize the core computations in the `forward` method of this class. Parameters ---------- encoder : `Encoder` decoder : `Decoder` This is the core method for this module. 
It has a lot of arguments mainly to make it easy to create subclasses of this class that do interesting things without requring modifications to the `fit` method of `ContextualColorDescriber`. Parameters ---------- color_seqs : torch.FloatTensor Dimension (m, n, p), where m is the number of examples, n is the number of colors in each context, and p is the dimensionality of each color. word_seqs : torch.LongTensor Dimension (m, k), where m is the number of examples and k is the length of all the (padded) sequences in the batch. seq_lengths : torch.LongTensor or None The true lengths of the sequences in `word_seqs`. If this is None, then we are predicting new sequences, so we will continue predicting until we hit a maximum length or we generate STOP_SYMBOL. hidden : torch.FloatTensor or None The hidden representation for each of the m examples in this batch. If this is None, we are predicting new sequences and so the hidden representation is computed for each timestep during decoding. targets : torch.LongTensor Dimension (m, k-1). These are ignored entirely by the current implementation, but they are passed in so that they could be used, for example, to allow some non-teacher-forcing training. Returns ------- output : torch.FloatTensor Dimension (m, k, c), where m is the number of examples, k is the length of the sequences in this batch, and c is the number of classes (the size of the vocabulary). hidden : torch.FloatTensor Dimension (m, h) where m is the number of examples and h is the dimensionality of the hidden representations of the model. targets : torch.LongTensor Should be identical to `targets` as passed in. The primary interface to modeling contextual colors datasets. Parameters ---------- vocab : list of str This should be the vocabulary. It needs to be aligned with `embedding` in the sense that the ith element of vocab should be represented by the ith row of `embedding`. embedding : np.array or None Each row represents a word in `vocab`, as described above. embed_dim : int Dimensionality for the initial embeddings. This is ignored if `embedding` is not None, as a specified value there determines this value. hidden_dim : int Dimensionality of the hidden layer. max_iter : int Maximum number of training epochs. eta : float Learning rate. optimizer : PyTorch optimizer Default is `torch.optim.Adam`. l2_strength : float L2 regularization strength. Default 0 is no regularization. warm_start : bool If True, calling `fit` will resume training with previously defined trainable parameters. If False, calling `fit` will reinitialize all trainable parameters. Default: False. device : 'cpu' or 'cuda' The default is to use 'cuda' iff available # The base class has this attribute, but this model doesn't, # so we remove it to avoid misleading people: Standard `fit` method where `color_seqs` are the inputs and `word_seqs` are the sequences to predict. Parameters ---------- color_seqs : list of lists of lists of floats, or np.array Dimension (m, n, p) where m is the number of examples, n is the number of colors in each context, and p is the length of the color representations. word_seqs : list of list of int Dimension m, the number of examples. The length of each sequence can vary. Returns ------- self # Make sure that these attributes are aligned -- important # where a supplied pretrained embedding has determined # a `embed_dim` that might be different from the user's # argument. Predict new sequences based on the color contexts in `color_seqs`. 
Parameters ---------- color_seqs : list of lists of lists of floats, or np.array Dimension (m, n, p) where m is the number of examples, n is the number of colors in each context, and p is the length of the color representations. max_length : int Length of the longest sequences to create. Returns ------- list of str # Get the hidden representations from the color contexts: # Start with START_SYMBOL for all examples: # Now move through the remaiming timesteps using the # previous timestep to predict the next one: # Always take the highest probability token to # be the prediction: # Convert all the predictions from indices to elements of # `self.vocab`: Calculate the predicted probabilties of the sequences in `word_seqs` given the color contexts in `color_seqs`. Parameters ---------- color_seqs : list of lists of lists of floats, or np.array Dimension (m, n, p) where m is the number of examples, n is the number of colors in each context, and p is the length of the color representations. word_seqs : list of list of int Dimension m, the number of examples. The length of each sequence can vary. Returns ------- list of lists of predicted probabilities. In other words, for each example, at each timestep, there is a probability distribution over the entire vocabulary. Compute the perplexity of each sequence in `word_seqs` given `color_seqs`. For a sequence of conditional probabilities p1, p2, ..., pN, the perplexity is calculated as (p1 * p2 * ... * pN)**(-1/N) Parameters ---------- color_seqs : list of lists of floats, or np.array Dimension (m, n, p) where m is the number of examples, n is the number of colors in each context, and p is the length of the color representations. word_seqs : list of list of int Dimension m, the number of examples, and the length of each sequence can vary. Returns ------- list of float # Get the probabilities corresponding to the path `seq`: # Get all possible context orders: # All contexts as color sequences: # Repeat the single utterance the needed number of times: # All perplexities: # Ranking, using `order_indices` rather than colors and # index sequences to avoid sorting errors from some versions # of Python: # Return the minimum perplexity, the chosen color, and the # index of the chosen color in the original context: Compute the "listener accuracy" of the model for each example. For the ith example, this is defined as prediction = max_{c in C_i} P(word_seq[i] | c) where C_i is every possible permutation of the three colors in color_seqs[i]. We take the model's prediction to be correct if it chooses a c in which the target is in the privileged final position in the color sequence. (There are two such c's, since the distractors can be in two orders; we give full credit if one of these two c's is chosen.) Parameters ---------- color_seqs : list of lists of list of floats, or np.array Dimension (m, n, p) where m is the number of examples, n is the number of colors in each context, and p is the length of the color representations. word_seqs : list of list of int Dimension m, the number of examples, and the length of each sequence can vary. Returns ------- list of float Alias for `listener_accuracy`. This method is included to make it easier to use sklearn cross-validators, which expect a method called `score`. Creates simple datasets in which the inputs are three-vector sequences and the outputs are simple character sequences, with the range of values in the final vector in the input determining the output sequence. 
For example, a single input/output pair will look like this: [[0.44, 0.51], [0.87, 0.89], [0.1, 0.2]], ['<s>', 'A', '</s>'] The sequences are meaningless, as are their lengths (which were chosen only to be different from each other). | 2.896286 | 3 |
validator/testcases/javascript/instanceactions.py | andymckay/amo-validator | 0 | 6632385 | <filename>validator/testcases/javascript/instanceactions.py<gh_stars>0
"""
Prototype
---------
args
the raw list of arguments
traverser
the traverser
node
the current node being evaluated
"""
import types
from validator.compat import FX10_DEFINITION
from validator.constants import BUGZILLA_BUG
import actions
from jstypes import *
def createElement(args, traverser, node, wrapper):
"""Handles createElement calls"""
if not args:
return
simple_args = map(traverser._traverse_node, args)
first_as_str = actions._get_as_str(simple_args[0].get_literal_value())
if first_as_str.lower() == u"script":
_create_script_tag(traverser)
elif not simple_args[0].is_literal():
_create_variable_element(traverser)
def createElementNS(args, traverser, node, wrapper):
"""Handles createElementNS calls"""
if not args or len(args) < 2:
return
simple_args = map(traverser._traverse_node, args)
second_as_str = actions._get_as_str(simple_args[1].get_literal_value())
if "script" in second_as_str.lower():
_create_script_tag(traverser)
elif not simple_args[1].is_literal():
_create_variable_element(traverser)
def QueryInterface(args, traverser, node, wrapper):
"""Handles QueryInterface calls"""
if not args:
return
from call_definitions import xpcom_constructor
return xpcom_constructor("QueryInterface", True, True)(
wrapper=node,
arguments=args,
traverser=traverser)
def getInterface(args, traverser, node, wrapper):
"""Handles getInterface calls"""
# This really only needs to be handled for nsIInterfaceRequestor
# interfaces, but as it's fair for code to assume that that
# interface has already been queried and methods with this name
# are unlikely to behave differently, we just process it for all
# objects.
if not args:
return
from call_definitions import xpcom_constructor
return xpcom_constructor("getInterface")(
wrapper=node,
arguments=args,
traverser=traverser)
def _create_script_tag(traverser):
"""Raises a warning that the dev is creating a script tag"""
traverser.err.warning(
err_id=("testcases_javascript_instanceactions", "_call_expression",
"called_createelement"),
warning="createElement() used to create script tag",
description="The createElement() function was used to create a script "
"tag in a JavaScript file. Add-ons are not allowed to "
"create script tags or load code dynamically from the "
"web.",
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def _create_variable_element(traverser):
"""Raises a warning that the dev is creating an arbitrary element"""
traverser.err.warning(
err_id=("testcases_javascript_instanceactions", "_call_expression",
"createelement_variable"),
warning="Variable element type being created",
description=["createElement or createElementNS were used with a "
"variable rather than a raw string. Literal values should "
"be used when taking advantage of the element creation "
"functions.",
"E.g.: createElement('foo') rather than "
"createElement(el_type)"],
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def setAttribute(args, traverser, node, wrapper):
"""This ensures that setAttribute calls don't set on* attributes"""
if not args:
return
simple_args = [traverser._traverse_node(a) for a in args]
first_as_str = actions._get_as_str(simple_args[0].get_literal_value())
if first_as_str.lower().startswith("on"):
traverser.err.notice(
err_id=("testcases_javascript_instanceactions", "setAttribute",
"setting_on*"),
notice="on* attribute being set using setAttribute",
description="To prevent vulnerabilities, event handlers (like "
"'onclick' and 'onhover') should always be defined "
"using addEventListener.",
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def nsIDOMFile_deprec(args, traverser, node, wrapper):
"""A wrapper for call_definitions.nsIDOMFile_deprec."""
from call_definitions import nsIDOMFile_deprec as cd_nsIDOMFile_deprec
cd_nsIDOMFile_deprec(None, [], traverser)
def isSameNode(args, traverser, node, wrapper):
"""Raise an error when an add-on uses node.isSameNode(foo)."""
traverser.err.error(
err_id=("testcases_javascript_instanceactions", "isSameNode"),
error="isSameNode function has been removed in Gecko 10.",
description='The "isSameNode" function has been removed. You can use '
'the === operator as an alternative. See %s for more '
'information.' % BUGZILLA_BUG % 687400,
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context,
for_appversions=FX10_DEFINITION,
compatibility_type="error",
tier=5)
def replaceWholeText(args, traverser, node, wrapper):
"""Raise an error when an add-on uses node.replaceWholeText(foo)."""
traverser.err.error(
err_id=("testcases_javascript_instanceactions", "replaceWholeText"),
error="replaceWholeText function has been removed in Gecko 10.",
description='The "replaceWholeText" function has been removed. See '
'%s for more information.' % BUGZILLA_BUG % 683482,
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context,
for_appversions=FX10_DEFINITION,
compatibility_type="error",
tier=5)
def PageMod(args, traverser, node, wrapper):
"""
This is the function that is called in Jetpack to modify the contents of a
page with a "content script". This function needs to analyze he first
parameter. If it is an object and if that object contains a "contentScript"
string, that string needs to be passed to the validator.testcases.scripting
library for testing as its own JS script file.
"""
if not args:
return
pm_properties = traverser._traverse_node(args[0])
if not pm_properties.has_property("contentScript"):
return
content_script = pm_properties.get(traverser, "contentScript")
content_script = content_script.get_literal_value()
if not isinstance(content_script, (str, unicode)):
return
import validator.testcases.scripting as sub_scripting
sub_scripting.test_js_file(
traverser.err, traverser.filename, content_script,
line=traverser.line, context=traverser.context)
INSTANCE_DEFINITIONS = {"createElement": createElement,
"createElementNS": createElementNS,
"getAsBinary": nsIDOMFile_deprec,
"getAsDataURL": nsIDOMFile_deprec,
"getInterface": getInterface,
"isSameNode": isSameNode,
"PageMod": PageMod,
"QueryInterface": QueryInterface,
"replaceWholeText": replaceWholeText,
"setAttribute": setAttribute}
| <filename>validator/testcases/javascript/instanceactions.py<gh_stars>0
"""
Prototype
---------
args
the raw list of arguments
traverser
the traverser
node
the current node being evaluated
"""
import types
from validator.compat import FX10_DEFINITION
from validator.constants import BUGZILLA_BUG
import actions
from jstypes import *
def createElement(args, traverser, node, wrapper):
"""Handles createElement calls"""
if not args:
return
simple_args = map(traverser._traverse_node, args)
first_as_str = actions._get_as_str(simple_args[0].get_literal_value())
if first_as_str.lower() == u"script":
_create_script_tag(traverser)
elif not simple_args[0].is_literal():
_create_variable_element(traverser)
def createElementNS(args, traverser, node, wrapper):
"""Handles createElementNS calls"""
if not args or len(args) < 2:
return
simple_args = map(traverser._traverse_node, args)
second_as_str = actions._get_as_str(simple_args[1].get_literal_value())
if "script" in second_as_str.lower():
_create_script_tag(traverser)
elif not simple_args[1].is_literal():
_create_variable_element(traverser)
def QueryInterface(args, traverser, node, wrapper):
"""Handles QueryInterface calls"""
if not args:
return
from call_definitions import xpcom_constructor
return xpcom_constructor("QueryInterface", True, True)(
wrapper=node,
arguments=args,
traverser=traverser)
def getInterface(args, traverser, node, wrapper):
"""Handles getInterface calls"""
# This really only needs to be handled for nsIInterfaceRequestor
# interfaces, but as it's fair for code to assume that that
# interface has already been queried and methods with this name
# are unlikely to behave differently, we just process it for all
# objects.
if not args:
return
from call_definitions import xpcom_constructor
return xpcom_constructor("getInterface")(
wrapper=node,
arguments=args,
traverser=traverser)
def _create_script_tag(traverser):
"""Raises a warning that the dev is creating a script tag"""
traverser.err.warning(
err_id=("testcases_javascript_instanceactions", "_call_expression",
"called_createelement"),
warning="createElement() used to create script tag",
description="The createElement() function was used to create a script "
"tag in a JavaScript file. Add-ons are not allowed to "
"create script tags or load code dynamically from the "
"web.",
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def _create_variable_element(traverser):
"""Raises a warning that the dev is creating an arbitrary element"""
traverser.err.warning(
err_id=("testcases_javascript_instanceactions", "_call_expression",
"createelement_variable"),
warning="Variable element type being created",
description=["createElement or createElementNS were used with a "
"variable rather than a raw string. Literal values should "
"be used when taking advantage of the element creation "
"functions.",
"E.g.: createElement('foo') rather than "
"createElement(el_type)"],
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def setAttribute(args, traverser, node, wrapper):
"""This ensures that setAttribute calls don't set on* attributes"""
if not args:
return
simple_args = [traverser._traverse_node(a) for a in args]
first_as_str = actions._get_as_str(simple_args[0].get_literal_value())
if first_as_str.lower().startswith("on"):
traverser.err.notice(
err_id=("testcases_javascript_instanceactions", "setAttribute",
"setting_on*"),
notice="on* attribute being set using setAttribute",
description="To prevent vulnerabilities, event handlers (like "
"'onclick' and 'onhover') should always be defined "
"using addEventListener.",
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def nsIDOMFile_deprec(args, traverser, node, wrapper):
"""A wrapper for call_definitions.nsIDOMFile_deprec."""
from call_definitions import nsIDOMFile_deprec as cd_nsIDOMFile_deprec
cd_nsIDOMFile_deprec(None, [], traverser)
def isSameNode(args, traverser, node, wrapper):
"""Raise an error when an add-on uses node.isSameNode(foo)."""
traverser.err.error(
err_id=("testcases_javascript_instanceactions", "isSameNode"),
error="isSameNode function has been removed in Gecko 10.",
description='The "isSameNode" function has been removed. You can use '
'the === operator as an alternative. See %s for more '
'information.' % BUGZILLA_BUG % 687400,
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context,
for_appversions=FX10_DEFINITION,
compatibility_type="error",
tier=5)
def replaceWholeText(args, traverser, node, wrapper):
"""Raise an error when an add-on uses node.replaceWholeText(foo)."""
traverser.err.error(
err_id=("testcases_javascript_instanceactions", "replaceWholeText"),
error="replaceWholeText function has been removed in Gecko 10.",
description='The "replaceWholeText" function has been removed. See '
'%s for more information.' % BUGZILLA_BUG % 683482,
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context,
for_appversions=FX10_DEFINITION,
compatibility_type="error",
tier=5)
def PageMod(args, traverser, node, wrapper):
"""
This is the function that is called in Jetpack to modify the contents of a
page with a "content script". This function needs to analyze he first
parameter. If it is an object and if that object contains a "contentScript"
string, that string needs to be passed to the validator.testcases.scripting
library for testing as its own JS script file.
"""
if not args:
return
pm_properties = traverser._traverse_node(args[0])
if not pm_properties.has_property("contentScript"):
return
content_script = pm_properties.get(traverser, "contentScript")
content_script = content_script.get_literal_value()
if not isinstance(content_script, (str, unicode)):
return
import validator.testcases.scripting as sub_scripting
sub_scripting.test_js_file(
traverser.err, traverser.filename, content_script,
line=traverser.line, context=traverser.context)
INSTANCE_DEFINITIONS = {"createElement": createElement,
"createElementNS": createElementNS,
"getAsBinary": nsIDOMFile_deprec,
"getAsDataURL": nsIDOMFile_deprec,
"getInterface": getInterface,
"isSameNode": isSameNode,
"PageMod": PageMod,
"QueryInterface": QueryInterface,
"replaceWholeText": replaceWholeText,
"setAttribute": setAttribute}
| en | 0.813233 | Prototype --------- args the raw list of arguments traverser the traverser node the current node being evaluated Handles createElement calls Handles createElementNS calls Handles QueryInterface calls Handles getInterface calls # This really only needs to be handled for nsIInterfaceRequestor # intarfaces, but as it's fair for code to assume that that # interface has already been queried and methods with this name # are unlikely to behave differently, we just process it for all # objects. Raises a warning that the dev is creating a script tag Raises a warning that the dev is creating an arbitrary element This ensures that setAttribute calls don't set on* attributes A wrapper for call_definitions.nsIDOMFile_deprec. Raise an error when an add-on uses node.isSameNode(foo). Raise an error when an add-on uses node.replaceWholeText(foo). This is the function that is called in Jetpack to modify the contents of a page with a "content script". This function needs to analyze he first parameter. If it is an object and if that object contains a "contentScript" string, that string needs to be passed to the validator.testcases.scripting library for testing as its own JS script file. | 2.448383 | 2 |
plenum/test/test_connections_with_converted_key.py | andkononykhin/plenum | 148 | 6632386 | from binascii import unhexlify
from stp_core.crypto.util import ed25519SkToCurve25519, ed25519PkToCurve25519
def testNodesConnectedUsingConvertedKeys(txnPoolNodeSet):
for node in txnPoolNodeSet:
secretKey = ed25519SkToCurve25519(node.nodestack.keyhex)
publicKey = ed25519PkToCurve25519(node.nodestack.verhex)
assert unhexlify(node.nodestack.prihex) == secretKey
assert unhexlify(node.nodestack.pubhex) == publicKey
secretKey = ed25519SkToCurve25519(node.clientstack.keyhex)
publicKey = ed25519PkToCurve25519(node.clientstack.verhex)
assert unhexlify(node.clientstack.prihex) == secretKey
assert unhexlify(node.clientstack.pubhex) == publicKey
| from binascii import unhexlify
from stp_core.crypto.util import ed25519SkToCurve25519, ed25519PkToCurve25519
def testNodesConnectedUsingConvertedKeys(txnPoolNodeSet):
for node in txnPoolNodeSet:
secretKey = ed25519SkToCurve25519(node.nodestack.keyhex)
publicKey = ed25519PkToCurve25519(node.nodestack.verhex)
assert unhexlify(node.nodestack.prihex) == secretKey
assert unhexlify(node.nodestack.pubhex) == publicKey
secretKey = ed25519SkToCurve25519(node.clientstack.keyhex)
publicKey = ed25519PkToCurve25519(node.clientstack.verhex)
assert unhexlify(node.clientstack.prihex) == secretKey
assert unhexlify(node.clientstack.pubhex) == publicKey
| none | 1 | 2.363936 | 2 |
|
tests/device/test_get_set_temperature_offset.py | Sensirion/python-shdlc-svm40 | 1 | 6632387 | # -*- coding: utf-8 -*-
# (c) Copyright 2020 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
import pytest
@pytest.mark.needs_device
@pytest.mark.parametrize("t_offset", [
(-1.),
(1.),
(0.),
])
def test(device, t_offset):
"""
Test if get_compensation_temperature_offset() and
set_compensation_temperature_offset() work as expected.
"""
initial_value = device.get_compensation_temperature_offset()
device.set_compensation_temperature_offset(t_offset)
assert device.get_compensation_temperature_offset() == t_offset
# reset device and check that the value was not stored in the nv-memory
device.device_reset()
assert device.get_compensation_temperature_offset() == initial_value
| # -*- coding: utf-8 -*-
# (c) Copyright 2020 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
import pytest
@pytest.mark.needs_device
@pytest.mark.parametrize("t_offset", [
(-1.),
(1.),
(0.),
])
def test(device, t_offset):
"""
Test if get_compensation_temperature_offset() and
set_compensation_temperature_offset() work as expected.
"""
initial_value = device.get_compensation_temperature_offset()
device.set_compensation_temperature_offset(t_offset)
assert device.get_compensation_temperature_offset() == t_offset
# reset device and check that the value was not stored in the nv-memory
device.device_reset()
assert device.get_compensation_temperature_offset() == initial_value
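# Usage sketch of the three driver calls exercised by the test above. How the
# device handle is constructed depends on the SHDLC connection setup and is
# omitted here (in the test it is provided by a pytest fixture).
def _offset_roundtrip(device, t_offset=1.0):
    device.set_compensation_temperature_offset(t_offset)
    assert device.get_compensation_temperature_offset() == t_offset
    device.device_reset()  # the offset is volatile, so a reset restores the stored value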
| en | 0.840739 | # -*- coding: utf-8 -*- # (c) Copyright 2020 Sensirion AG, Switzerland Test if get_compensation_temperature_offset() and set_compensation_temperature_offset() work as expected. # reset device and check that the value was not stored in the nv-memory | 2.192295 | 2 |
src/concurrency/__init__.py | technicaltitch/django-concurrency | 0 | 6632388 | __author__ = 'sax'
default_app_config = 'concurrency.apps.ConcurrencyConfig'
VERSION = __version__ = "2.1a0"
NAME = 'django-concurrency'
| __author__ = 'sax'
default_app_config = 'concurrency.apps.ConcurrencyConfig'
VERSION = __version__ = "2.1a0"
NAME = 'django-concurrency'
| none | 1 | 1.061449 | 1 |
|
mdp_extras/envs/frozen_lake.py | aaronsnoswell/mdp-extras | 1 | 6632389 | """Utilities for working with the OpenAI Gym FrozenLake MDP"""
import gym
import numpy as np
from mdp_extras import DiscreteExplicitExtras, Indicator, Linear
HUMAN_ACTIONS = ["←", "↓", "→", "↑"]
def frozen_lake_extras(env, gamma=0.99):
"""Get extras for a gym.envs.toy_text.frozen_lake.FrozenLakeEnv
Args:
env (gym.envs.toy_text.frozen_lake.FrozenLakeEnv): Environment
gamma (float): Discount factor
Returns:
(DiscreteExplicitExtras): Extras object
(Indicator): State-action indicator feature function
(Linear): Linear reward function
"""
# How to handle <TimeLimit<______>> and other Wrappers?
# assert isinstance(env, gym.envs.toy_text.frozen_lake.FrozenLakeEnv)
xtr = DiscreteExplicitExtras.fromdiscrete(env, gamma=gamma)
# FrozenLake uses state-based indicator features
phi = Indicator(Indicator.Type.OBSERVATION, xtr)
# FrozenLake - agent gets low reward (0) for all states except the goal (final
# state), where it gets the high reward (1).
theta = np.zeros(len(phi)) + env.reward_range[0]
theta[-1] = env.reward_range[1]
reward = Linear(theta)
return xtr, phi, reward
| """Utilities for working with the OpenAI Gym FrozenLake MDP"""
import gym
import numpy as np
from mdp_extras import DiscreteExplicitExtras, Indicator, Linear
HUMAN_ACTIONS = ["←", "↓", "→", "↑"]
def frozen_lake_extras(env, gamma=0.99):
"""Get extras for a gym.envs.toy_text.frozen_lake.FrozenLakeEnv
Args:
env (gym.envs.toy_text.frozen_lake.FrozenLakeEnv): Environment
gamma (float): Discount factor
Returns:
(DiscreteExplicitExtras): Extras object
(Indicator): State-action indicator feature function
(Linear): Linear reward function
"""
# How to handle <TimeLimit<______>> and other Wrappers?
# assert isinstance(env, gym.envs.toy_text.frozen_lake.FrozenLakeEnv)
xtr = DiscreteExplicitExtras.fromdiscrete(env, gamma=gamma)
# FrozenLake uses state-based indicator features
phi = Indicator(Indicator.Type.OBSERVATION, xtr)
# FrozenLake - agent gets low reward (0) for all states except the goal (final
# state), where it gets the high reward (1).
theta = np.zeros(len(phi)) + env.reward_range[0]
theta[-1] = env.reward_range[1]
reward = Linear(theta)
return xtr, phi, reward
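# Usage sketch, assuming the classic Gym "FrozenLake-v0" environment id;
# .unwrapped strips the TimeLimit wrapper mentioned in the comment above.
def _frozen_lake_example():
    env = gym.make("FrozenLake-v0").unwrapped
    xtr, phi, reward = frozen_lake_extras(env, gamma=0.99)
    return len(phi), reward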
| en | 0.622141 | Utilities for working with the OpenAI Gym FrozenLake MDP Get extras for a gym.envs.toy_text.frozen_lake.FrozenLakeEnv Args: env (gym.envs.toy_text.frozen_lake.FrozenLakeEnv): Environment gamma (float): Discount factor Returns: (DiscreteExplicitExtras): Extras object (Indicator): State-action indicator feature function (Linear): Linear reward function # How to handle <TimeLimit<______>> and other Wrappers? # assert isinstance(env, gym.envs.toy_text.frozen_lake.FrozenLakeEnv) # FrozenLake uses state-based indicator features # FrozenLake - agent gets low reward (0) for all states except the goal (final # state), where it gets the high reward (1). | 2.904455 | 3 |
main.py | itworxs/suite | 890 | 6632390 | import os
import sys
from flask.ext.script import Manager, Server, Shell
from flask.ext.migrate import Migrate, MigrateCommand, upgrade
from app import create_app, db, cache, create_celery_app
from app.caches import SessionCache
from app.models import *
import unittest
app = create_app(os.environ.get("ENV", "prod"))
celery = create_celery_app(app)
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command("db", MigrateCommand)
manager.add_command("runserver", Server(host="0.0.0.0", port=80))
manager.add_command("runtestserver", Server(host="127.0.0.1", port=8080))
# Set flask-restful to be utf-8
reload(sys)
sys.setdefaultencoding("utf-8")
@app.teardown_appcontext
def shutdown_session(exception=None):
if app.config["SQLALCHEMY_COMMIT_ON_TEARDOWN"]:
db.session.commit()
db.session.remove()
def make_shell_context():
return dict(app=app,
db=db,
User=User,
Organization=Organization,
Location=Location,
Role=Role,
Schedule2=Schedule2,
Shift2=Shift2,
RecurringShift=RecurringShift,
RoleToUser=RoleToUser,
Preference=Preference,
cache=cache,
SessionCache=SessionCache,
Timeclock=Timeclock,
TimeOffRequest=TimeOffRequest,
ApiKey=ApiKey)
manager.add_command("shell", Shell(make_context=make_shell_context))
@manager.command
def test():
"""Run the unit tests."""
tests = unittest.TestLoader().discover('tests/unit')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def deploy():
"""Run deployment tasks."""
# migrate database to latest revision
upgrade()
if __name__ == '__main__':
manager.run()
| import os
import sys
from flask.ext.script import Manager, Server, Shell
from flask.ext.migrate import Migrate, MigrateCommand, upgrade
from app import create_app, db, cache, create_celery_app
from app.caches import SessionCache
from app.models import *
import unittest
app = create_app(os.environ.get("ENV", "prod"))
celery = create_celery_app(app)
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command("db", MigrateCommand)
manager.add_command("runserver", Server(host="0.0.0.0", port=80))
manager.add_command("runtestserver", Server(host="127.0.0.1", port=8080))
# Set flask-restful to be utf-8
reload(sys)
sys.setdefaultencoding("utf-8")
@app.teardown_appcontext
def shutdown_session(exception=None):
if app.config["SQLALCHEMY_COMMIT_ON_TEARDOWN"]:
db.session.commit()
db.session.remove()
def make_shell_context():
return dict(app=app,
db=db,
User=User,
Organization=Organization,
Location=Location,
Role=Role,
Schedule2=Schedule2,
Shift2=Shift2,
RecurringShift=RecurringShift,
RoleToUser=RoleToUser,
Preference=Preference,
cache=cache,
SessionCache=SessionCache,
Timeclock=Timeclock,
TimeOffRequest=TimeOffRequest,
ApiKey=ApiKey)
manager.add_command("shell", Shell(make_context=make_shell_context))
@manager.command
def test():
"""Run the unit tests."""
tests = unittest.TestLoader().discover('tests/unit')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def deploy():
"""Run deployment tasks."""
# migrate database to latest revision
upgrade()
if __name__ == '__main__':
manager.run()
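# Typical invocations of the manager defined above; the command names come from
# the add_command() calls and the @manager.command functions:
#
#   python main.py runserver       # serve on 0.0.0.0:80
#   python main.py runtestserver   # serve on 127.0.0.1:8080
#   python main.py shell           # shell with app, db and the models preloaded
#   python main.py test            # run the unit tests under tests/unit
#   python main.py deploy          # run deployment tasks (database upgrade)
#   python main.py db upgrade      # run Flask-Migrate commands directly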
| en | 0.769234 | # Set flask-restful to be utf-8 Run the unit tests. Run deployment tasks. # migrate database to latest revision | 2.284924 | 2 |
dothub/config.py | mariocj89/dothub | 12 | 6632391 | import getpass
import json
import time
import os
import logging
import click
import github_token
import os.path
DEFAULT_API_URL = "https://api.github.com"
APP_DIR = click.get_app_dir("dothub")
CONFIG_FILE = os.path.join(APP_DIR, "config.json")
AUTO_CONFIG = {}
LOG = logging.getLogger(__name__)
def load_config():
"""Returns a config object loaded from disk or an empty dict"""
try:
with open(CONFIG_FILE) as f:
conf = json.load(f)
except IOError:
if os.environ.get("GITHUB_USER") and os.environ.get("GITHUB_TOKEN"):
conf = AUTO_CONFIG
else:
LOG.info("Seems this is the first time you run dothub,"
" let me configure your settings...")
conf = config_wizard()
return conf
def config_wizard():
"""Runs the config wizard to configure all defaults for the application"""
conf = dict()
conf["metadata"] = dict(config_time=time.time())
if not os.path.isdir(APP_DIR):
os.makedirs(APP_DIR)
initial_config(conf)
with open(CONFIG_FILE, 'w') as f:
json.dump(conf, f, indent=4)
LOG.info("Config saved in: '%s'", CONFIG_FILE)
LOG.info("Delete this file to rerun the wizard")
return conf
def initial_config(conf):
"""Sets up the initial config for dothub
Asks the user for the general configuration for the app and fills the config object"""
user = click.prompt("What is your username? ")
conf["user"] = user
password = <PASSWORD>()
token_factory = github_token.TokenFactory(user, password, "<PASSWORD>", github_token.ALL_SCOPES)
token = token_factory(tfa_token_callback=lambda: click.prompt("Insert your TFA token: "))
conf["token"] = token
github_url = click.prompt("What is your github instance API url? ", default=DEFAULT_API_URL)
conf["github_base_url"] = github_url
| import getpass
import json
import time
import os
import logging
import click
import github_token
import os.path
DEFAULT_API_URL = "https://api.github.com"
APP_DIR = click.get_app_dir("dothub")
CONFIG_FILE = os.path.join(APP_DIR, "config.json")
AUTO_CONFIG = {}
LOG = logging.getLogger(__name__)
def load_config():
"""Returns a config object loaded from disk or an empty dict"""
try:
with open(CONFIG_FILE) as f:
conf = json.load(f)
except IOError:
if os.environ.get("GITHUB_USER") and os.environ.get("GITHUB_TOKEN"):
conf = AUTO_CONFIG
else:
LOG.info("Seems this is the first time you run dothub,"
" let me configure your settings...")
conf = config_wizard()
return conf
def config_wizard():
"""Runs the config wizard to configure all defaults for the application"""
conf = dict()
conf["metadata"] = dict(config_time=time.time())
if not os.path.isdir(APP_DIR):
os.makedirs(APP_DIR)
initial_config(conf)
with open(CONFIG_FILE, 'w') as f:
json.dump(conf, f, indent=4)
LOG.info("Config saved in: '%s'", CONFIG_FILE)
LOG.info("Delete this file to rerun the wizard")
return conf
def initial_config(conf):
"""Sets up the initial config for dothub
Asks the user for the general configuration for the app and fills the config object"""
user = click.prompt("What is your username? ")
conf["user"] = user
password = <PASSWORD>()
token_factory = github_token.TokenFactory(user, password, "<PASSWORD>", github_token.ALL_SCOPES)
token = token_factory(tfa_token_callback=lambda: click.prompt("Insert your TFA token: "))
conf["token"] = token
github_url = click.prompt("What is your github instance API url? ", default=DEFAULT_API_URL)
conf["github_base_url"] = github_url
| en | 0.630138 | Returns a config object loaded from disk or an empty dict Runs the config wizard to configure all defaults for the application Sets up the initial config for dothub Asks the user for the general configuration for the app and fills the config object | 2.735516 | 3 |
diesel/hub.py | byrgazov/diesel | 0 | 6632392 | # -*- coding: utf-8 -*-
"""An event hub that supports sockets and timers, based on Python 2.6's select & epoll support."""
import select
try:
import pyev
except:
have_libev = False
else:
have_libev = True
import errno
import fcntl
import os
import signal
import threading
from collections import deque, defaultdict
from operator import attrgetter
from time import time
from queue import Queue, Empty
TRIGGER_COMPARE = attrgetter('trigger_time')
class ExistingSignalHandler(Exception):
pass
class Timer:
"""A timer is a promise to call some function at a future date."""
ALLOWANCE = 0.03 # If we're within 30ms, the timer is due
def __init__(self, hub, interval, f, *args, **kw):
self.hub = hub
self.trigger_time = time() + interval
self.f = f
self.args = args
self.kw = kw
self.pending = True
self.inq = False
self.hub_data = None
def cancel(self):
self.pending = False
if self.inq:
self.inq = False
self.hub.remove_timer(self)
self.hub = None
def callback(self):
"""When the external entity checks this timer and determines
it's due, this function is called, which calls the original
callback."""
self.pending = False
self.inq = False
self.hub = None
return self.f(*self.args, **self.kw)
@property
def due(self):
"""Is it time to run this timer yet?
The allowance provides some give-and-take so that if a
sleep() delay comes back a little early, we still go."""
return (self.trigger_time - time()) < self.ALLOWANCE
class _PipeWrap:
def __init__(self, p):
self.p = p
def fileno(self):
return self.p
class IntWrap(_PipeWrap):
pass
class AbstractEventHub:
def __init__(self):
self.timers = []
self.new_timers = []
self.run = True
self.events = {}
self.run_now = deque()
self.fdmap = {}
self.fd_ids = defaultdict(int)
self._setup_threading()
self.reschedule = deque()
def _setup_threading(self):
self._t_recv, self._t_wakeup = os.pipe()
fcntl.fcntl(self._t_recv, fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(self._t_wakeup, fcntl.F_SETFL, os.O_NONBLOCK)
self.thread_comp_in = Queue()
def handle_thread_done():
try:
os.read(self._t_recv, 65536)
except IOError:
pass
while True:
try:
c, v = self.thread_comp_in.get(False)
except Empty:
break
else:
c(v)
self.register(_PipeWrap(self._t_recv), handle_thread_done, None, None)
def remove_timer(self, t):
try:
self.timers.remove(t)
except IndexError:
pass
def run_in_thread(self, reschedule, f, *args, **kw):
def wrap():
try:
res = f(*args, **kw)
except Exception as e:
self.thread_comp_in.put((reschedule, e))
else:
self.thread_comp_in.put((reschedule, res))
self.wake_from_other_thread()
thread = threading.Thread(target=wrap)
thread.setDaemon(True) # @todo: [bw] ???
thread.start()
def wake_from_other_thread(self):
try:
os.write(self._t_wakeup, b'\0')
except IOError:
pass
def schedule_loop_from_other_thread(self, l, v=None):
self.thread_comp_in.put((l.wake, v))
self.wake_from_other_thread()
def handle_events(self):
'''Run one pass of event handling.
'''
raise NotImplementedError
def call_later(self, interval, f, *args, **kw):
'''Schedule a timer on the hub.
'''
t = Timer(self, interval, f, *args, **kw)
self.new_timers.append(t)
return t
def schedule(self, c, reschedule=False):
if reschedule:
self.reschedule.append(c)
else:
self.run_now.append(c)
def register(self, fd, read_callback, write_callback, error_callback):
'''Register a socket fd with the hub, providing callbacks
for read (data is ready to be recv'd) and write (buffers are
ready for send()).
By default, only the read behavior will be polled and the
read callback used until enable_write is invoked.
'''
fn = fd.fileno()
self.fdmap[fd] = fn
self.fd_ids[fn] += 1
assert fn not in self.events
self.events[fn] = (read_callback, write_callback, error_callback)
self._add_fd(fd)
def add_signal_handler(self, sig, callback):
'''Run the given callback when signal sig is triggered.'''
raise NotImplementedError
def _add_fd(self, fd):
'''Add this socket to the list of sockets used in the
poll call.
'''
raise NotImplementedError
def enable_write(self, fd):
'''Enable write polling and the write callback.
'''
raise NotImplementedError
def disable_write(self, fd):
'''Disable write polling and the write callback.
'''
raise NotImplementedError
def unregister(self, fd):
'''Remove this socket from the list of sockets the
hub is polling on.
'''
if fd in self.fdmap:
fn = self.fdmap.pop(fd)
del self.events[fn]
self._remove_fd(fd)
def _remove_fd(self, fd):
'''Remove this socket from the list of sockets the
hub is polling on.
'''
raise NotImplementedError
@property
def describe(self):
raise NotImplementedError()
class EPollEventHub(AbstractEventHub):
"""A epoll-based hub."""
def __init__(self):
self.epoll = select.epoll()
self.signal_handlers = defaultdict(deque)
super().__init__()
@property
def describe(self):
return 'hand-rolled select.epoll'
def handle_events(self):
"""Run one pass of event handling.
epoll() is called, with a timeout equal to the next-scheduled
timer. When epoll returns, all fd-related events (if any) are
handled, and timers are handled as well.
"""
while self.run_now and self.run:
self.run_now.popleft()()
if self.new_timers:
for tr in self.new_timers:
if tr.pending:
tr.inq = True
self.timers.append(tr)
self.timers.sort(key=TRIGGER_COMPARE, reverse=True)
self.new_timers = []
tm = time()
timeout = (self.timers[-1].trigger_time - tm) if self.timers else 1e6
# epoll, etc, limit to 2^^31/1000 or OverflowError
timeout = min(timeout, 1e6)
if timeout < 0 or self.reschedule:
timeout = 0
# Run timers first, to try to nail their timings
while self.timers and self.timers[-1].due:
t = self.timers.pop()
if t.pending:
t.callback()
while self.run_now and self.run:
self.run_now.popleft()()
if not self.run:
return
# Handle all socket I/O
try:
for (fd, evtype) in self.epoll.poll(timeout):
fd_id = self.fd_ids[fd]
if evtype & select.EPOLLIN or evtype & select.EPOLLPRI:
self.events[fd][0]()
elif evtype & select.EPOLLERR or evtype & select.EPOLLHUP:
self.events[fd][2]()
# The fd could have been reassigned to a new socket or removed
# when running the callbacks immediately above. Only use it if
# neither of those is the case.
use_fd = fd_id == self.fd_ids[fd] and fd in self.events
if evtype & select.EPOLLOUT and use_fd:
try:
self.events[fd][1]()
except Exception:
# @xxx: [bw] if the handler above fails, this may lead to an endless loop of error output;
# we call the other handler in that case, hoping it will do something (close the socket)
self.events[fd][2]()
raise
while self.run_now and self.run:
self.run_now.popleft()()
if not self.run:
return
except IOError as e:
if e.errno == errno.EINTR:
while self.run_now and self.run:
self.run_now.popleft()()
else:
raise
self.run_now = self.reschedule
self.reschedule = deque()
def add_signal_handler(self, sig, callback):
existing = signal.getsignal(sig)
if not existing:
signal.signal(sig, self._report_signal)
elif existing != self._report_signal:
raise ExistingSignalHandler(existing)
self.signal_handlers[sig].append(callback)
def _report_signal(self, sig, frame):
for callback in self.signal_handlers[sig]:
self.run_now.append(callback)
self.signal_handlers[sig] = deque()
signal.signal(sig, signal.SIG_DFL)
def _add_fd(self, fd):
"""Add this socket to the list of sockets used in the poll call."""
self.epoll.register(fd, select.EPOLLIN | select.EPOLLPRI)
def enable_write(self, fd):
"""Enable write polling and the write callback."""
self.epoll.modify(fd, select.EPOLLIN | select.EPOLLPRI | select.EPOLLOUT)
def disable_write(self, fd):
"""Disable write polling and the write callback."""
self.epoll.modify(fd, select.EPOLLIN | select.EPOLLPRI)
def _remove_fd(self, fd):
"""Remove this socket from the list of sockets the hub is polling on."""
self.epoll.unregister(fd)
class LibEvHub(AbstractEventHub):
def __init__(self):
self._ev_loop = pyev.default_loop()
self._ev_watchers = {}
self._ev_fdmap = {}
AbstractEventHub.__init__(self)
def add_signal_handler(self, sig, callback):
existing = signal.getsignal(sig)
if existing:
raise ExistingSignalHandler(existing)
watcher = self._ev_loop.signal(sig, self._signal_fired)
self._ev_watchers[watcher] = callback
watcher.start()
@property
def describe(self):
return "pyev/libev (%s/%s) backend=%s" % (
self._pyev_version() + ({
1 : "select()",
2 : "poll()",
4 : "epoll()",
8 : "kqueue()",
16 : "/dev/poll",
32 : "event ports",
}.get(self._ev_loop.backend, "UNKNOWN"),))
def _pyev_version(self):
if hasattr(pyev, 'version'):
return pyev.version()
else:
pyev_ver = pyev.__version__
libev_ver = ".".join(str(p) for p in pyev.abi_version())
return (pyev_ver, libev_ver)
def handle_events(self):
'''Run one pass of event handling.
'''
while self.run_now and self.run:
self.run_now.popleft()()
if not self.run:
self._ev_loop.stop()
del self._ev_loop
return
if self.run_now or self.reschedule:
self._ev_loop.start(pyev.EVRUN_NOWAIT)
else:
while not self.run_now:
self._ev_loop.start(pyev.EVRUN_ONCE)
while self.run_now and self.run:
self.run_now.popleft()()
self.run_now.extend(self.reschedule)
self.reschedule = deque()
def call_later(self, interval, f, *args, **kw):
'''Schedule a timer on the hub.
'''
t = Timer(self, interval, f, *args, **kw)
t.inq = True
evt = self._ev_loop.timer(interval, 0, self._ev_timer_fired)
t.hub_data = evt
self._ev_watchers[evt] = t
evt.start()
return t
def _ev_timer_fired(self, watcher, revents):
t = self._ev_watchers.pop(watcher)
if t.hub_data:
t.hub_data = None
self.run_now.append(t.callback)
def remove_timer(self, t):
evt = t.hub_data
if evt in self._ev_watchers:
del self._ev_watchers[evt]
evt.stop()
def schedule(self, c, reschedule=False):
if reschedule:
self.reschedule.append(c)
else:
self.run_now.append(c)
def _signal_fired(self, watcher, revents):
callback = self._ev_watchers.pop(watcher)
watcher.stop()
self.run_now.append(callback)
def _ev_io_fired(self, watcher, revents):
r, w, e = self.events[watcher.fd]
if revents & pyev.EV_READ:
self.run_now.append(r)
if revents & pyev.EV_WRITE:
self.run_now.append(w)
if revents & pyev.EV_ERROR:
self.run_now.append(e)
def _add_fd(self, fd):
'''Add this socket to the list of sockets used in the
poll call.
'''
assert fd not in self._ev_fdmap
rev = self._ev_loop.io(fd, pyev.EV_READ, self._ev_io_fired)
wev = self._ev_loop.io(fd, pyev.EV_WRITE, self._ev_io_fired)
self._ev_fdmap[fd] = rev, wev
rev.start()
def enable_write(self, fd):
'''Enable write polling and the write callback.
'''
self._ev_fdmap[fd][1].start()
def disable_write(self, fd):
'''Disable write polling and the write callback.
'''
self._ev_fdmap[fd][1].stop()
def _remove_fd(self, fd):
'''Remove this socket from the list of sockets the
hub is polling on.
'''
rev, wev = self._ev_fdmap.pop(fd)
rev.stop()
wev.stop()
# Expose a usable EventHub implementation
if (os.environ.get('DIESEL_LIBEV') or
os.environ.get('DIESEL_NO_EPOLL') or
not hasattr(select, 'epoll')):
assert have_libev, "if you don't have select.epoll (not on linux?), please install pyev!"
EventHub = LibEvHub
else:
EventHub = EPollEventHub
| # -*- coding: utf-8 -*-
"""An event hub that supports sockets and timers, based on Python 2.6's select & epoll support."""
import select
try:
import pyev
except:
have_libev = False
else:
have_libev = True
import errno
import fcntl
import os
import signal
import threading
from collections import deque, defaultdict
from operator import attrgetter
from time import time
from queue import Queue, Empty
TRIGGER_COMPARE = attrgetter('trigger_time')
class ExistingSignalHandler(Exception):
pass
class Timer:
"""A timer is a promise to call some function at a future date."""
ALLOWANCE = 0.03 # If we're within 30ms, the timer is due
def __init__(self, hub, interval, f, *args, **kw):
self.hub = hub
self.trigger_time = time() + interval
self.f = f
self.args = args
self.kw = kw
self.pending = True
self.inq = False
self.hub_data = None
def cancel(self):
self.pending = False
if self.inq:
self.inq = False
self.hub.remove_timer(self)
self.hub = None
def callback(self):
"""When the external entity checks this timer and determines
it's due, this function is called, which calls the original
callback."""
self.pending = False
self.inq = False
self.hub = None
return self.f(*self.args, **self.kw)
@property
def due(self):
"""Is it time to run this timer yet?
The allowance provides some give-and-take so that if a
sleep() delay comes back a little early, we still go."""
return (self.trigger_time - time()) < self.ALLOWANCE
class _PipeWrap:
def __init__(self, p):
self.p = p
def fileno(self):
return self.p
class IntWrap(_PipeWrap):
pass
class AbstractEventHub:
def __init__(self):
self.timers = []
self.new_timers = []
self.run = True
self.events = {}
self.run_now = deque()
self.fdmap = {}
self.fd_ids = defaultdict(int)
self._setup_threading()
self.reschedule = deque()
def _setup_threading(self):
self._t_recv, self._t_wakeup = os.pipe()
fcntl.fcntl(self._t_recv, fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(self._t_wakeup, fcntl.F_SETFL, os.O_NONBLOCK)
self.thread_comp_in = Queue()
def handle_thread_done():
try:
os.read(self._t_recv, 65536)
except IOError:
pass
while True:
try:
c, v = self.thread_comp_in.get(False)
except Empty:
break
else:
c(v)
self.register(_PipeWrap(self._t_recv), handle_thread_done, None, None)
def remove_timer(self, t):
try:
self.timers.remove(t)
except IndexError:
pass
def run_in_thread(self, reschedule, f, *args, **kw):
def wrap():
try:
res = f(*args, **kw)
except Exception as e:
self.thread_comp_in.put((reschedule, e))
else:
self.thread_comp_in.put((reschedule, res))
self.wake_from_other_thread()
thread = threading.Thread(target=wrap)
thread.setDaemon(True) # @todo: [bw] ???
thread.start()
def wake_from_other_thread(self):
try:
os.write(self._t_wakeup, b'\0')
except IOError:
pass
def schedule_loop_from_other_thread(self, l, v=None):
self.thread_comp_in.put((l.wake, v))
self.wake_from_other_thread()
def handle_events(self):
'''Run one pass of event handling.
'''
raise NotImplementedError
def call_later(self, interval, f, *args, **kw):
'''Schedule a timer on the hub.
'''
t = Timer(self, interval, f, *args, **kw)
self.new_timers.append(t)
return t
def schedule(self, c, reschedule=False):
if reschedule:
self.reschedule.append(c)
else:
self.run_now.append(c)
def register(self, fd, read_callback, write_callback, error_callback):
'''Register a socket fd with the hub, providing callbacks
for read (data is ready to be recv'd) and write (buffers are
ready for send()).
By default, only the read behavior will be polled and the
read callback used until enable_write is invoked.
'''
fn = fd.fileno()
self.fdmap[fd] = fn
self.fd_ids[fn] += 1
assert fn not in self.events
self.events[fn] = (read_callback, write_callback, error_callback)
self._add_fd(fd)
def add_signal_handler(self, sig, callback):
'''Run the given callback when signal sig is triggered.'''
raise NotImplementedError
def _add_fd(self, fd):
'''Add this socket to the list of sockets used in the
poll call.
'''
raise NotImplementedError
def enable_write(self, fd):
'''Enable write polling and the write callback.
'''
raise NotImplementedError
def disable_write(self, fd):
'''Disable write polling and the write callback.
'''
raise NotImplementedError
def unregister(self, fd):
'''Remove this socket from the list of sockets the
hub is polling on.
'''
if fd in self.fdmap:
fn = self.fdmap.pop(fd)
del self.events[fn]
self._remove_fd(fd)
def _remove_fd(self, fd):
'''Remove this socket from the list of sockets the
hub is polling on.
'''
raise NotImplementedError
@property
def describe(self):
raise NotImplementedError()
class EPollEventHub(AbstractEventHub):
"""A epoll-based hub."""
def __init__(self):
self.epoll = select.epoll()
self.signal_handlers = defaultdict(deque)
super().__init__()
@property
def describe(self):
return 'hand-rolled select.epoll'
def handle_events(self):
"""Run one pass of event handling.
epoll() is called, with a timeout equal to the next-scheduled
timer. When epoll returns, all fd-related events (if any) are
handled, and timers are handled as well.
"""
while self.run_now and self.run:
self.run_now.popleft()()
if self.new_timers:
for tr in self.new_timers:
if tr.pending:
tr.inq = True
self.timers.append(tr)
self.timers.sort(key=TRIGGER_COMPARE, reverse=True)
self.new_timers = []
tm = time()
timeout = (self.timers[-1].trigger_time - tm) if self.timers else 1e6
# epoll, etc, limit to 2^^31/1000 or OverflowError
timeout = min(timeout, 1e6)
if timeout < 0 or self.reschedule:
timeout = 0
# Run timers first, to try to nail their timings
while self.timers and self.timers[-1].due:
t = self.timers.pop()
if t.pending:
t.callback()
while self.run_now and self.run:
self.run_now.popleft()()
if not self.run:
return
# Handle all socket I/O
try:
for (fd, evtype) in self.epoll.poll(timeout):
fd_id = self.fd_ids[fd]
if evtype & select.EPOLLIN or evtype & select.EPOLLPRI:
self.events[fd][0]()
elif evtype & select.EPOLLERR or evtype & select.EPOLLHUP:
self.events[fd][2]()
# The fd could have been reassigned to a new socket or removed
# when running the callbacks immediately above. Only use it if
# neither of those is the case.
use_fd = fd_id == self.fd_ids[fd] and fd in self.events
if evtype & select.EPOLLOUT and use_fd:
try:
self.events[fd][1]()
except Exception:
# @xxx: [bw] if the handler above fails, this may lead to an endless loop of error output;
# we call the other handler in that case, hoping it will do something (close the socket)
self.events[fd][2]()
raise
while self.run_now and self.run:
self.run_now.popleft()()
if not self.run:
return
except IOError as e:
if e.errno == errno.EINTR:
while self.run_now and self.run:
self.run_now.popleft()()
else:
raise
self.run_now = self.reschedule
self.reschedule = deque()
def add_signal_handler(self, sig, callback):
existing = signal.getsignal(sig)
if not existing:
signal.signal(sig, self._report_signal)
elif existing != self._report_signal:
raise ExistingSignalHandler(existing)
self.signal_handlers[sig].append(callback)
def _report_signal(self, sig, frame):
for callback in self.signal_handlers[sig]:
self.run_now.append(callback)
self.signal_handlers[sig] = deque()
signal.signal(sig, signal.SIG_DFL)
def _add_fd(self, fd):
"""Add this socket to the list of sockets used in the poll call."""
self.epoll.register(fd, select.EPOLLIN | select.EPOLLPRI)
def enable_write(self, fd):
"""Enable write polling and the write callback."""
self.epoll.modify(fd, select.EPOLLIN | select.EPOLLPRI | select.EPOLLOUT)
def disable_write(self, fd):
"""Disable write polling and the write callback."""
self.epoll.modify(fd, select.EPOLLIN | select.EPOLLPRI)
def _remove_fd(self, fd):
"""Remove this socket from the list of sockets the hub is polling on."""
self.epoll.unregister(fd)
class LibEvHub(AbstractEventHub):
def __init__(self):
self._ev_loop = pyev.default_loop()
self._ev_watchers = {}
self._ev_fdmap = {}
AbstractEventHub.__init__(self)
def add_signal_handler(self, sig, callback):
existing = signal.getsignal(sig)
if existing:
raise ExistingSignalHandler(existing)
watcher = self._ev_loop.signal(sig, self._signal_fired)
self._ev_watchers[watcher] = callback
watcher.start()
@property
def describe(self):
return "pyev/libev (%s/%s) backend=%s" % (
self._pyev_version() + ({
1 : "select()",
2 : "poll()",
4 : "epoll()",
8 : "kqueue()",
16 : "/dev/poll",
32 : "event ports",
}.get(self._ev_loop.backend, "UNKNOWN"),))
def _pyev_version(self):
if hasattr(pyev, 'version'):
return pyev.version()
else:
pyev_ver = pyev.__version__
libev_ver = ".".join(str(p) for p in pyev.abi_version())
return (pyev_ver, libev_ver)
def handle_events(self):
'''Run one pass of event handling.
'''
while self.run_now and self.run:
self.run_now.popleft()()
if not self.run:
self._ev_loop.stop()
del self._ev_loop
return
if self.run_now or self.reschedule:
self._ev_loop.start(pyev.EVRUN_NOWAIT)
else:
while not self.run_now:
self._ev_loop.start(pyev.EVRUN_ONCE)
while self.run_now and self.run:
self.run_now.popleft()()
self.run_now.extend(self.reschedule)
self.reschedule = deque()
def call_later(self, interval, f, *args, **kw):
'''Schedule a timer on the hub.
'''
t = Timer(self, interval, f, *args, **kw)
t.inq = True
evt = self._ev_loop.timer(interval, 0, self._ev_timer_fired)
t.hub_data = evt
self._ev_watchers[evt] = t
evt.start()
return t
def _ev_timer_fired(self, watcher, revents):
t = self._ev_watchers.pop(watcher)
if t.hub_data:
t.hub_data = None
self.run_now.append(t.callback)
def remove_timer(self, t):
evt = t.hub_data
if evt in self._ev_watchers:
del self._ev_watchers[evt]
evt.stop()
def schedule(self, c, reschedule=False):
if reschedule:
self.reschedule.append(c)
else:
self.run_now.append(c)
def _signal_fired(self, watcher, revents):
callback = self._ev_watchers.pop(watcher)
watcher.stop()
self.run_now.append(callback)
def _ev_io_fired(self, watcher, revents):
r, w, e = self.events[watcher.fd]
if revents & pyev.EV_READ:
self.run_now.append(r)
if revents & pyev.EV_WRITE:
self.run_now.append(w)
if revents & pyev.EV_ERROR:
self.run_now.append(e)
def _add_fd(self, fd):
'''Add this socket to the list of sockets used in the
poll call.
'''
assert fd not in self._ev_fdmap
rev = self._ev_loop.io(fd, pyev.EV_READ, self._ev_io_fired)
wev = self._ev_loop.io(fd, pyev.EV_WRITE, self._ev_io_fired)
self._ev_fdmap[fd] = rev, wev
rev.start()
def enable_write(self, fd):
'''Enable write polling and the write callback.
'''
self._ev_fdmap[fd][1].start()
def disable_write(self, fd):
'''Disable write polling and the write callback.
'''
self._ev_fdmap[fd][1].stop()
def _remove_fd(self, fd):
'''Remove this socket from the list of sockets the
hub is polling on.
'''
rev, wev = self._ev_fdmap.pop(fd)
rev.stop()
wev.stop()
# Expose a usable EventHub implementation
if (os.environ.get('DIESEL_LIBEV') or
os.environ.get('DIESEL_NO_EPOLL') or
not hasattr(select, 'epoll')):
assert have_libev, "if you don't have select.epoll (not on linux?), please install pyev!"
EventHub = LibEvHub
else:
EventHub = EPollEventHub
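# Minimal usage sketch of the hub API defined above: schedule a timer with
# call_later() and drive the loop with handle_events() until the callback
# stops it. Only names from this module are used; stopping the loop by
# clearing hub.run mirrors the checks inside handle_events().
def _demo_hub():
    hub = EventHub()

    def fire():
        print("timer fired")
        hub.run = False

    hub.call_later(0.5, fire)
    while hub.run:
        hub.handle_events()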
| en | 0.765633 | # -*- coding: utf-8 -*- An event hub that supports sockets and timers, based on Python 2.6's select & epoll support. A timer is a promise to call some function at a future date. # If we're within 30ms, the timer is due When the external entity checks this timer and determines it's due, this function is called, which calls the original callback. Is it time to run this timer yet? The allowance provides some give-and-take so that if a sleep() delay comes back a little early, we still go. # @todo: [bw] ??? Run one pass of event handling. Schedule a timer on the hub. Register a socket fd with the hub, providing callbacks for read (data is ready to be recv'd) and write (buffers are ready for send()). By default, only the read behavior will be polled and the read callback used until enable_write is invoked. Run the given callback when signal sig is triggered. Add this socket to the list of sockets used in the poll call. Enable write polling and the write callback. Disable write polling and the write callback. Remove this socket from the list of sockets the hub is polling on. Remove this socket from the list of sockets the hub is polling on. A epoll-based hub. Run one pass of event handling. epoll() is called, with a timeout equal to the next-scheduled timer. When epoll returns, all fd-related events (if any) are handled, and timers are handled as well. # epoll, etc, limit to 2^^31/1000 or OverflowError # Run timers first, to try to nail their timings # Handle all socket I/O # The fd could have been reassigned to a new socket or removed # when running the callbacks immediately above. Only use it if # neither of those is the case. # @xxx: [bw] если в обработчике выше ошибка, то это приведёт (может) к бесконечному циклу вывода ошибок # я вызываю другой обработчик в таком случае, в надежде, что он что-нибудь сделает (закроет сокет) Add this socket to the list of sockets used in the poll call. Enable write polling and the write callback. Disable write polling and the write callback. Remove this socket from the list of sockets the hub is polling on. Run one pass of event handling. Schedule a timer on the hub. Add this socket to the list of sockets used in the poll call. Enable write polling and the write callback. Disable write polling and the write callback. Remove this socket from the list of sockets the hub is polling on. # Expose a usable EventHub implementation | 2.572571 | 3 |
setup.py | RobotnikAutomation/perception | 66 | 6632393 | <reponame>RobotnikAutomation/perception
"""
Setup of Berkeley AUTOLab Perception module Python codebase.
Author: <NAME>
"""
import os
from setuptools import setup
requirements = [
"numpy",
"scipy",
"autolab_core",
"opencv-python",
"pyserial>=3.4",
"ffmpeg-python",
]
# load __version__ without importing anything
version_file = os.path.join(os.path.dirname(__file__), "perception/version.py")
with open(version_file, "r") as f:
# use eval to get a clean string of version from file
__version__ = eval(f.read().strip().split("=")[-1])
setup(
name="autolab_perception",
version=__version__,
description="Perception utilities for the Berkeley AutoLab",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license="Apache Software License",
url="https://github.com/BerkeleyAutomation/perception",
keywords="robotics grasping vision perception",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
],
packages=["perception"],
install_requires=requirements,
extras_require={
"docs": ["sphinx", "sphinxcontrib-napoleon", "sphinx_rtd_theme"],
"ros": ["primesense", "rospkg", "catkin_pkg", "empy"],
},
)
| """
Setup of Berkeley AUTOLab Perception module Python codebase.
Author: <NAME>
"""
import os
from setuptools import setup
requirements = [
"numpy",
"scipy",
"autolab_core",
"opencv-python",
"pyserial>=3.4",
"ffmpeg-python",
]
# load __version__ without importing anything
version_file = os.path.join(os.path.dirname(__file__), "perception/version.py")
with open(version_file, "r") as f:
# use eval to get a clean string of version from file
__version__ = eval(f.read().strip().split("=")[-1])
setup(
name="autolab_perception",
version=__version__,
description="Perception utilities for the Berkeley AutoLab",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license="Apache Software License",
url="https://github.com/BerkeleyAutomation/perception",
keywords="robotics grasping vision perception",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
],
packages=["perception"],
install_requires=requirements,
extras_require={
"docs": ["sphinx", "sphinxcontrib-napoleon", "sphinx_rtd_theme"],
"ros": ["primesense", "rospkg", "catkin_pkg", "empy"],
},
) | en | 0.625514 | Setup of Berkeley AUTOLab Perception module Python codebase. Author: <NAME> # load __version__ without importing anything # use eval to get a clean string of version from file | 1.429284 | 1 |
zoho_crm_api/related_module.py | ueni-ltd/zoho-crm-api | 0 | 6632394 | <filename>zoho_crm_api/related_module.py<gh_stars>0
from zoho_crm_api.module import ModuleBase
from zoho_crm_api.session import ZohoSession
class RelatedModule(ModuleBase):
def __init__(self, session: ZohoSession, module_name, related_module_name: str):
super().__init__(session, module_name)
self.related_module_name = related_module_name
def all(self, record_id, start_page=1, per_page=200):
url = f'{self.module_name}/{record_id}/{self.related_module_name}'
yield from self._get_all(url=url, start_page=start_page, per_page=per_page)
def update(self, record_id, related_record_id, data):
assert isinstance(data, dict), 'Only one related record can be updated at once'
url = f'{self.module_name}/{record_id}/{self.related_module_name}/{related_record_id}'
return self.session.put(url, json={'data': [data]})['data'][0]
| <filename>zoho_crm_api/related_module.py<gh_stars>0
from zoho_crm_api.module import ModuleBase
from zoho_crm_api.session import ZohoSession
class RelatedModule(ModuleBase):
def __init__(self, session: ZohoSession, module_name, related_module_name: str):
super().__init__(session, module_name)
self.related_module_name = related_module_name
def all(self, record_id, start_page=1, per_page=200):
url = f'{self.module_name}/{record_id}/{self.related_module_name}'
yield from self._get_all(url=url, start_page=start_page, per_page=per_page)
def update(self, record_id, related_record_id, data):
assert isinstance(data, dict), 'Only one related record can be updated at once'
url = f'{self.module_name}/{record_id}/{self.related_module_name}/{related_record_id}'
return self.session.put(url, json={'data': [data]})['data'][0]
| none | 1 | 2.059664 | 2 |
|
src_py/hat/util/aio.py | hrvojekeserica/hat-core | 0 | 6632395 | """Async utility functions
Attributes:
mlog (logging.Logger): module logger
"""
import asyncio
import collections
import concurrent.futures
import contextlib
import inspect
import itertools
import logging
import signal
import sys
mlog = logging.getLogger(__name__)
async def first(xs, fn=lambda _: True, default=None):
"""Return the first element from async iterable that satisfies
predicate `fn`, or `default` if no such element exists.
Args:
xs (AsyncIterable[Any]): async collection
fn (Callable[[Any],bool]): predicate
default (Any): default
Returns:
Any
"""
async for i in xs:
if fn(i):
return i
return default
async def uncancellable(f, raise_cancel=True):
"""Uncancellable execution of a Future.
Future is shielded and its execution cannot be interrupted.
If `raise_cancel` is `True` and the Future gets canceled,
:exc:`asyncio.CancelledError` is reraised after the Future finishes.
Warning:
If `raise_cancel` is `False`, this method suppresses
:exc:`asyncio.CancelledError` and stops its propagation. Use with
caution.
Args:
f (asyncio.Future): future
raise_cancel (bool): raise CancelledError flag
Returns:
Any: result
"""
exception = None
task = asyncio.ensure_future(f)
while not task.done():
try:
await asyncio.shield(task)
except asyncio.CancelledError as e:
if raise_cancel:
exception = e
except Exception:
pass
if exception:
raise exception
return task.result()
async def call(fn, *args, **kwargs):
"""Call a function or a coroutine.
Call a `fn` with `args` and `kwargs`. If `fn` is a coroutine, it is
awaited.
Args:
fn (Callable): function or coroutine
args: additional function arguments
kwargs: additional function keyword arguments
Returns:
function result
"""
result = fn(*args, **kwargs)
if inspect.isawaitable(result):
result = await result
return result
async def call_on_cancel(fn, *args, **kwargs):
"""Call a function or a coroutine when canceled.
When canceled, `fn` is called with `args` and `kwargs`. If `fn` is a
coroutine, it is awaited.
Args:
fn (Callable): function or coroutine
args: additional function arguments
kwargs: additional function keyword arguments
Returns:
function result
"""
with contextlib.suppress(asyncio.CancelledError):
await asyncio.Future()
return await call(fn, *args, *kwargs)
def create_executor(*args, executor_cls=concurrent.futures.ThreadPoolExecutor,
loop=None):
"""Create :meth:`asyncio.loop.run_in_executor` wrapper.
Returns a coroutine that takes a function and its arguments, executes the
function using executor created from `executor_cls` and `args`; and
returns the result.
Args:
args (Any): executor args
executor_cls (Type): executor class
loop (Optional[asyncio.AbstractEventLoop]): asyncio loop
Returns:
Coroutine[[Callable,...],Any]: executor coroutine
"""
executor = executor_cls(*args)
async def executor_wrapper(fn, *fn_args):
_loop = loop or asyncio.get_event_loop()
return await _loop.run_in_executor(executor, fn, *fn_args)
return executor_wrapper
def init_asyncio():
"""Initialize asyncio.
Sets event loop policy to :class:`uvloop.EventLoopPolicy` if possible.
On Windows, sets policy to :class:`asyncio.WindowsProactorEventLoopPolicy`.
"""
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
if sys.platform == 'win32':
asyncio.set_event_loop_policy(
asyncio.WindowsProactorEventLoopPolicy())
def run_asyncio(future):
"""Run asyncio loop until the `future` is completed and return the result.
SIGINT and SIGTERM handlers are temporarily overridden. Instead of raising
``KeyboardInterrupt`` on every signal reception, Future is canceled only
once. Additional signals are ignored.
On Windows, SIGBREAK (CTRL_BREAK_EVENT) handler is also overridden and
asyncio loop gets periodically woken up (every 0.5 seconds).
Args:
future (Awaitable): future or coroutine
Returns:
Any: result
"""
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(future, loop=loop)
canceled = False
signalnums = [signal.SIGINT, signal.SIGTERM]
if sys.platform == 'win32':
signalnums += [signal.SIGBREAK]
async def task_wrapper(task):
try:
while not task.done():
await asyncio.wait([task], timeout=0.5)
except asyncio.CancelledError:
task.cancel()
return await task
task = asyncio.ensure_future(task_wrapper(task), loop=loop)
def signal_handler(*args):
nonlocal canceled
if canceled:
return
loop.call_soon_threadsafe(task.cancel)
canceled = True
@contextlib.contextmanager
def change_signal_handlers():
handlers = {signalnum: signal.getsignal(signalnum) or signal.SIG_DFL
for signalnum in signalnums}
for signalnum in signalnums:
signal.signal(signalnum, signal_handler)
yield
for signalnum, handler in handlers.items():
signal.signal(signalnum, handler)
with change_signal_handlers():
return loop.run_until_complete(task)
class QueueClosedError(Exception):
"""Raised when trying to use a closed queue."""
class QueueEmptyError(Exception):
"""Raised if queue is empty."""
class QueueFullError(Exception):
"""Raised if queue is full."""
class Queue:
"""Asyncio queue which implements AsyncIterable and can be closed.
Interface and implementation are based on :class:`asyncio.Queue`.
If `maxsize` is less than or equal to zero, the queue size is infinite.
Args:
maxsize (int): maximum number of items in the queue
"""
def __init__(self, maxsize=0):
self._maxsize = maxsize
self._queue = collections.deque()
self._getters = collections.deque()
self._putters = collections.deque()
self._closed = asyncio.Future()
def __aiter__(self):
return self
async def __anext__(self):
try:
return await self.get()
except QueueClosedError:
raise StopAsyncIteration
def __str__(self):
return (f'<{type(self).__name__}'
f' _closed={self._closed.done()} '
f' _queue={list(self._queue)}>')
def __len__(self):
return len(self._queue)
@property
def maxsize(self):
"""int: Maximum number of items in the queue."""
return self._maxsize
@property
def closed(self):
"""asyncio.Future: Closed future."""
return asyncio.shield(self._closed)
def empty(self):
"""bool: `True` if queue is empty, `False` otherwise."""
return not self._queue
def full(self):
"""bool: `True` if queue is full, `False` otherwise."""
return (len(self._queue) >= self._maxsize if self._maxsize > 0
else False)
def qsize(self):
"""int: Number of items currently in the queue."""
return len(self._queue)
def close(self):
"""Close the queue."""
if self._closed.done():
return
self._closed.set_result(True)
self._wakeup_all(self._putters)
self._wakeup_next(self._getters)
def get_nowait(self):
"""Return an item if one is immediately available, else raise
:exc:`QueueEmptyError`.
Returns:
Any
Raises:
QueueEmptyError
"""
if self.empty():
raise QueueEmptyError()
item = self._queue.popleft()
self._wakeup_next(self._putters)
return item
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise :exc:`QueueFullError`.
Args:
item (Any): item
Raises:
QueueFullError
"""
if self._closed.done():
raise QueueClosedError()
if self.full():
raise QueueFullError()
self._queue.append(item)
self._wakeup_next(self._getters)
async def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
Returns:
Any
Raises:
QueueClosedError
"""
while self.empty():
if self._closed.done():
self._wakeup_all(self._getters)
raise QueueClosedError()
getter = asyncio.Future()
self._getters.append(getter)
try:
await getter
except BaseException:
getter.cancel()
with contextlib.suppress(ValueError):
self._getters.remove(getter)
if not getter.cancelled():
if not self.empty() or self._closed.done():
self._wakeup_next(self._getters)
raise
return self.get_nowait()
async def put(self, item):
"""Put an item into the queue.
If the queue is full, wait until a free slot is available before adding
the item.
Args:
item (Any): item
Raises:
QueueClosedError
"""
while not self._closed.done() and self.full():
putter = asyncio.Future()
self._putters.append(putter)
try:
await putter
except BaseException:
putter.cancel()
with contextlib.suppress(ValueError):
self._putters.remove(putter)
if not self.full() and not putter.cancelled():
self._wakeup_next(self._putters)
raise
return self.put_nowait(item)
async def get_until_empty(self):
"""Empty the queue and return the last item.
If queue is empty, wait until at least one item is available.
Returns:
Any
Raises:
QueueClosedError
"""
item = await self.get()
while not self.empty():
item = self.get_nowait()
return item
def get_nowait_until_empty(self):
"""Empty the queue and return the last item if at least one
item is immediately available, else raise :exc:`QueueEmptyError`.
Returns:
Any
Raises:
QueueEmptyError
"""
item = self.get_nowait()
while not self.empty():
item = self.get_nowait()
return item
def _wakeup_next(self, waiters):
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
break
def _wakeup_all(self, waiters):
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
class Group:
"""Group of asyncio Tasks.
Group enables creation and management of related asyncio Tasks. The
Group ensures uninterrupted execution of Tasks and Task completion upon
Group closing.
Group can contain subgroups, which are independent Groups managed by the
parent Group.
If a Task raises exception, other Tasks continue to execute.
If `exception_cb` handler is `None`, exceptions are logged with level
WARNING.
Args:
exception_cb (Optional[Callable[[Exception],None]]): exception handler
loop (Optional[asyncio.AbstractEventLoop]): asyncio loop
"""
def __init__(self, exception_cb=None, *, loop=None):
self._exception_cb = exception_cb
self._loop = loop or asyncio.get_event_loop()
self._closing = asyncio.Future()
self._closed = asyncio.Future()
self._canceled = False
self._tasks = set()
self._parent = None
self._children = set()
@property
def is_open(self):
"""bool: `True` if group is not closing or closed, `False` otherwise.
"""
return not self._closing.done()
@property
def closing(self):
"""asyncio.Future: Closing Future."""
return asyncio.shield(self._closing)
@property
def closed(self):
"""asyncio.Future: Closed Future."""
return asyncio.shield(self._closed)
def create_subgroup(self):
"""Create new Group as a child of this Group. Return the new Group.
When a parent Group gets closed, all of its children are closed.
Closing of a subgroup has no effect on the parent Group.
Subgroup inherits exception handler from its parent.
Returns:
Group
"""
if self._closing.done():
raise Exception('group not open')
child = Group(self._exception_cb, loop=self._loop)
child._parent = self
self._children.add(child)
return child
def wrap(self, future):
"""Wrap the Future into a Task and schedule its execution. Return the
Task object.
Resulting task is shielded and can be canceled only with
:meth:`Group.async_close`.
Args:
future (asyncio.Future): future
Returns:
asyncio.Task
"""
if self._closing.done():
raise Exception('group not open')
task = asyncio.ensure_future(future, loop=self._loop)
self._tasks.add(task)
task.add_done_callback(self._on_task_done)
return asyncio.shield(task)
def spawn(self, fn, *args, **kwargs):
"""Wrap the result of a `fn` into a Task and schedule its execution.
Return the Task object.
Function is called with provided `args` and `kwargs`.
Resulting Task is shielded and can be canceled only with
:meth:`Group.async_close`.
Args:
fn (Callable[[...],Awaitable]): function
args: function arguments
kwargs: function keyword arguments
Returns:
asyncio.Task
"""
if self._closing.done():
raise Exception('group not open')
future = fn(*args, **kwargs)
return self.wrap(future)
def close(self, cancel=True):
"""Schedule Group closing.
Closing Future is set immediately. All subgroups are closed, and all
running tasks are optionally canceled. Once closing of all subgroups
and execution of all tasks is completed, closed Future is set.
Args:
cancel (bool): cancel running tasks
"""
for child in list(self._children):
child.close(cancel)
if cancel and not self._canceled:
self._canceled = True
for task in self._tasks:
self._loop.call_soon(task.cancel)
if self._closing.done():
return
self._closing.set_result(True)
futures = list(itertools.chain(
self._tasks,
(child.closed for child in self._children)))
if futures:
waiting_future = asyncio.ensure_future(
asyncio.wait(futures), loop=self._loop)
waiting_future.add_done_callback(lambda _: self._on_closed())
else:
self._on_closed()
async def async_close(self, cancel=True):
"""Close Group and wait until closed Future is completed.
Args:
cancel (bool): cancel running tasks
"""
self.close(cancel)
await self.closed
async def __aenter__(self):
return self
async def __aexit__(self, *args):
await self.async_close()
def _on_closed(self):
if self._parent is not None:
self._parent._children.remove(self)
self._parent = None
self._closed.set_result(True)
def _on_task_done(self, task):
self._tasks.remove(task)
if task.cancelled():
return
e = task.exception()
if e:
exception_cb = self._exception_cb or self._default_exception_cb
exception_cb(e)
def _default_exception_cb(self, e):
mlog.warning('unhandled exception in async group: %s', e, exc_info=e)
| """Async utility functions
Attributes:
mlog (logging.Logger): module logger
"""
import asyncio
import collections
import concurrent.futures
import contextlib
import inspect
import itertools
import logging
import signal
import sys
mlog = logging.getLogger(__name__)
async def first(xs, fn=lambda _: True, default=None):
"""Return the first element from async iterable that satisfies
predicate `fn`, or `default` if no such element exists.
Args:
xs (AsyncIterable[Any]): async collection
fn (Callable[[Any],bool]): predicate
default (Any): default
Returns:
Any
"""
async for i in xs:
if fn(i):
return i
return default
async def uncancellable(f, raise_cancel=True):
"""Uncancellable execution of a Future.
Future is shielded and its execution cannot be interrupted.
If `raise_cancel` is `True` and the Future gets canceled,
:exc:`asyncio.CancelledError` is reraised after the Future finishes.
Warning:
If `raise_cancel` is `False`, this method suppresses
:exc:`asyncio.CancelledError` and stops its propagation. Use with
caution.
Args:
f (asyncio.Future): future
raise_cancel (bool): raise CancelledError flag
Returns:
Any: result
"""
exception = None
task = asyncio.ensure_future(f)
while not task.done():
try:
await asyncio.shield(task)
except asyncio.CancelledError as e:
if raise_cancel:
exception = e
except Exception:
pass
if exception:
raise exception
return task.result()
async def call(fn, *args, **kwargs):
"""Call a function or a coroutine.
Call a `fn` with `args` and `kwargs`. If `fn` is a coroutine, it is
awaited.
Args:
fn (Callable): function or coroutine
args: additional function arguments
kwargs: additional function keyword arguments
Returns:
function result
"""
result = fn(*args, **kwargs)
if inspect.isawaitable(result):
result = await result
return result
async def call_on_cancel(fn, *args, **kwargs):
"""Call a function or a coroutine when canceled.
When canceled, `fn` is called with `args` and `kwargs`. If `fn` is a
coroutine, it is awaited.
Args:
fn (Callable): function or coroutine
args: additional function arguments
kwargs: additional function keyword arguments
Returns:
function result
"""
with contextlib.suppress(asyncio.CancelledError):
await asyncio.Future()
return await call(fn, *args, *kwargs)
def create_executor(*args, executor_cls=concurrent.futures.ThreadPoolExecutor,
loop=None):
"""Create :meth:`asyncio.loop.run_in_executor` wrapper.
Returns a coroutine that takes a function and its arguments, executes the
function using executor created from `executor_cls` and `args`; and
returns the result.
Args:
args (Any): executor args
executor_cls (Type): executor class
loop (Optional[asyncio.AbstractEventLoop]): asyncio loop
Returns:
Coroutine[[Callable,...],Any]: executor coroutine
"""
executor = executor_cls(*args)
async def executor_wrapper(fn, *fn_args):
_loop = loop or asyncio.get_event_loop()
return await _loop.run_in_executor(executor, fn, *fn_args)
return executor_wrapper
def init_asyncio():
"""Initialize asyncio.
Sets event loop policy to :class:`uvloop.EventLoopPolicy` if possible.
On Windows, sets policy to :class:`asyncio.WindowsProactorEventLoopPolicy`.
"""
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
if sys.platform == 'win32':
asyncio.set_event_loop_policy(
asyncio.WindowsProactorEventLoopPolicy())
def run_asyncio(future):
"""Run asyncio loop until the `future` is completed and return the result.
SIGINT and SIGTERM handlers are temporarily overridden. Instead of raising
``KeyboardInterrupt`` on every signal reception, Future is canceled only
once. Additional signals are ignored.
On Windows, SIGBREAK (CTRL_BREAK_EVENT) handler is also overridden and
asyncio loop gets periodically woken up (every 0.5 seconds).
Args:
future (Awaitable): future or coroutine
Returns:
Any: result
"""
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(future, loop=loop)
canceled = False
signalnums = [signal.SIGINT, signal.SIGTERM]
if sys.platform == 'win32':
signalnums += [signal.SIGBREAK]
async def task_wrapper(task):
try:
while not task.done():
await asyncio.wait([task], timeout=0.5)
except asyncio.CancelledError:
task.cancel()
return await task
task = asyncio.ensure_future(task_wrapper(task), loop=loop)
def signal_handler(*args):
nonlocal canceled
if canceled:
return
loop.call_soon_threadsafe(task.cancel)
canceled = True
@contextlib.contextmanager
def change_signal_handlers():
handlers = {signalnum: signal.getsignal(signalnum) or signal.SIG_DFL
for signalnum in signalnums}
for signalnum in signalnums:
signal.signal(signalnum, signal_handler)
yield
for signalnum, handler in handlers.items():
signal.signal(signalnum, handler)
with change_signal_handlers():
return loop.run_until_complete(task)
class QueueClosedError(Exception):
"""Raised when trying to use a closed queue."""
class QueueEmptyError(Exception):
"""Raised if queue is empty."""
class QueueFullError(Exception):
"""Raised if queue is full."""
class Queue:
"""Asyncio queue which implements AsyncIterable and can be closed.
Interface and implementation are based on :class:`asyncio.Queue`.
If `maxsize` is less than or equal to zero, the queue size is infinite.
Args:
maxsize (int): maximum number of items in the queue
"""
def __init__(self, maxsize=0):
self._maxsize = maxsize
self._queue = collections.deque()
self._getters = collections.deque()
self._putters = collections.deque()
self._closed = asyncio.Future()
def __aiter__(self):
return self
async def __anext__(self):
try:
return await self.get()
except QueueClosedError:
raise StopAsyncIteration
def __str__(self):
return (f'<{type(self).__name__}'
f' _closed={self._closed.done()} '
f' _queue={list(self._queue)}>')
def __len__(self):
return len(self._queue)
@property
def maxsize(self):
"""int: Maximum number of items in the queue."""
return self._maxsize
@property
def closed(self):
"""asyncio.Future: Closed future."""
return asyncio.shield(self._closed)
def empty(self):
"""bool: `True` if queue is empty, `False` otherwise."""
return not self._queue
def full(self):
"""bool: `True` if queue is full, `False` otherwise."""
return (len(self._queue) >= self._maxsize if self._maxsize > 0
else False)
def qsize(self):
"""int: Number of items currently in the queue."""
return len(self._queue)
def close(self):
"""Close the queue."""
if self._closed.done():
return
self._closed.set_result(True)
self._wakeup_all(self._putters)
self._wakeup_next(self._getters)
def get_nowait(self):
"""Return an item if one is immediately available, else raise
:exc:`QueueEmptyError`.
Returns:
Any
Raises:
QueueEmptyError
"""
if self.empty():
raise QueueEmptyError()
item = self._queue.popleft()
self._wakeup_next(self._putters)
return item
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise :exc:`QueueFullError`.
Args:
item (Any): item
Raises:
QueueFullError
"""
if self._closed.done():
raise QueueClosedError()
if self.full():
raise QueueFullError()
self._queue.append(item)
self._wakeup_next(self._getters)
async def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
Returns:
Any
Raises:
QueueClosedError
"""
while self.empty():
if self._closed.done():
self._wakeup_all(self._getters)
raise QueueClosedError()
getter = asyncio.Future()
self._getters.append(getter)
try:
await getter
except BaseException:
getter.cancel()
with contextlib.suppress(ValueError):
self._getters.remove(getter)
if not getter.cancelled():
if not self.empty() or self._closed.done():
self._wakeup_next(self._getters)
raise
return self.get_nowait()
async def put(self, item):
"""Put an item into the queue.
If the queue is full, wait until a free slot is available before adding
the item.
Args:
item (Any): item
Raises:
QueueClosedError
"""
while not self._closed.done() and self.full():
putter = asyncio.Future()
self._putters.append(putter)
try:
await putter
except BaseException:
putter.cancel()
with contextlib.suppress(ValueError):
self._putters.remove(putter)
if not self.full() and not putter.cancelled():
self._wakeup_next(self._putters)
raise
return self.put_nowait(item)
async def get_until_empty(self):
"""Empty the queue and return the last item.
If queue is empty, wait until at least one item is available.
Returns:
Any
Raises:
QueueClosedError
"""
item = await self.get()
while not self.empty():
item = self.get_nowait()
return item
def get_nowait_until_empty(self):
"""Empty the queue and return the last item if at least one
item is immediately available, else raise :exc:`QueueEmptyError`.
Returns:
Any
Raises:
QueueEmptyError
"""
item = self.get_nowait()
while not self.empty():
item = self.get_nowait()
return item
def _wakeup_next(self, waiters):
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
break
def _wakeup_all(self, waiters):
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
class Group:
"""Group of asyncio Tasks.
Group enables creation and management of related asyncio Tasks. The
Group ensures uninterrupted execution of Tasks and Task completion upon
Group closing.
Group can contain subgroups, which are independent Groups managed by the
parent Group.
If a Task raises exception, other Tasks continue to execute.
If `exception_cb` handler is `None`, exceptions are logged with level
WARNING.
Args:
exception_cb (Optional[Callable[[Exception],None]]): exception handler
loop (Optional[asyncio.AbstractEventLoop]): asyncio loop
"""
def __init__(self, exception_cb=None, *, loop=None):
self._exception_cb = exception_cb
self._loop = loop or asyncio.get_event_loop()
self._closing = asyncio.Future()
self._closed = asyncio.Future()
self._canceled = False
self._tasks = set()
self._parent = None
self._children = set()
@property
def is_open(self):
"""bool: `True` if group is not closing or closed, `False` otherwise.
"""
return not self._closing.done()
@property
def closing(self):
"""asyncio.Future: Closing Future."""
return asyncio.shield(self._closing)
@property
def closed(self):
"""asyncio.Future: Closed Future."""
return asyncio.shield(self._closed)
def create_subgroup(self):
"""Create new Group as a child of this Group. Return the new Group.
When a parent Group gets closed, all of its children are closed.
Closing of a subgroup has no effect on the parent Group.
Subgroup inherits exception handler from its parent.
Returns:
Group
"""
if self._closing.done():
raise Exception('group not open')
child = Group(self._exception_cb, loop=self._loop)
child._parent = self
self._children.add(child)
return child
def wrap(self, future):
"""Wrap the Future into a Task and schedule its execution. Return the
Task object.
Resulting task is shielded and can be canceled only with
:meth:`Group.async_close`.
Args:
future (asyncio.Future): future
Returns:
asyncio.Task
"""
if self._closing.done():
raise Exception('group not open')
task = asyncio.ensure_future(future, loop=self._loop)
self._tasks.add(task)
task.add_done_callback(self._on_task_done)
return asyncio.shield(task)
def spawn(self, fn, *args, **kwargs):
"""Wrap the result of a `fn` into a Task and schedule its execution.
Return the Task object.
Function is called with provided `args` and `kwargs`.
Resulting Task is shielded and can be canceled only with
:meth:`Group.async_close`.
Args:
fn (Callable[[...],Awaitable]): function
args: function arguments
kwargs: function keyword arguments
Returns:
asyncio.Task
"""
if self._closing.done():
raise Exception('group not open')
future = fn(*args, **kwargs)
return self.wrap(future)
def close(self, cancel=True):
"""Schedule Group closing.
Closing Future is set immediately. All subgroups are closed, and all
running tasks are optionally canceled. Once closing of all subgroups
and execution of all tasks is completed, closed Future is set.
Args:
cancel (bool): cancel running tasks
"""
for child in list(self._children):
child.close(cancel)
if cancel and not self._canceled:
self._canceled = True
for task in self._tasks:
self._loop.call_soon(task.cancel)
if self._closing.done():
return
self._closing.set_result(True)
futures = list(itertools.chain(
self._tasks,
(child.closed for child in self._children)))
if futures:
waiting_future = asyncio.ensure_future(
asyncio.wait(futures), loop=self._loop)
waiting_future.add_done_callback(lambda _: self._on_closed())
else:
self._on_closed()
async def async_close(self, cancel=True):
"""Close Group and wait until closed Future is completed.
Args:
cancel (bool): cancel running tasks
"""
self.close(cancel)
await self.closed
async def __aenter__(self):
return self
async def __aexit__(self, *args):
await self.async_close()
def _on_closed(self):
if self._parent is not None:
self._parent._children.remove(self)
self._parent = None
self._closed.set_result(True)
def _on_task_done(self, task):
self._tasks.remove(task)
if task.cancelled():
return
e = task.exception()
if e:
exception_cb = self._exception_cb or self._default_exception_cb
exception_cb(e)
def _default_exception_cb(self, e):
mlog.warning('unhandled exception in async group: %s', e, exc_info=e)
| en | 0.76769 | Async utility functions Attributes: mlog (logging.Logger): module logger Return the first element from async iterable that satisfies predicate `fn`, or `default` if no such element exists. Args: xs (AsyncIterable[Any]): async collection fn (Callable[[Any],bool]): predicate default (Any): default Returns: Any Uncancellable execution of a Future. Future is shielded and its execution cannot be interrupted. If `raise_cancel` is `True` and the Future gets canceled, :exc:`asyncio.CancelledError` is reraised after the Future finishes. Warning: If `raise_cancel` is `False`, this method suppresses :exc:`asyncio.CancelledError` and stops its propagation. Use with caution. Args: f (asyncio.Future): future raise_cancel (bool): raise CancelledError flag Returns: Any: result Call a function or a coroutine. Call a `fn` with `args` and `kwargs`. If `fn` is a coroutine, it is awaited. Args: fn (Callable): function or coroutine args: additional function arguments kwargs: additional function keyword arguments Returns: function result Call a function or a coroutine when canceled. When canceled, `fn` is called with `args` and `kwargs`. If `fn` is a coroutine, it is awaited. Args: fn (Callable): function or coroutine args: additional function arguments kwargs: additional function keyword arguments Returns: function result Create :meth:`asyncio.loop.run_in_executor` wrapper. Returns a coroutine that takes a function and its arguments, executes the function using executor created from `executor_cls` and `args`; and returns the result. Args: args (Any): executor args executor_cls (Type): executor class loop (Optional[asyncio.AbstractEventLoop]): asyncio loop Returns: Coroutine[[Callable,...],Any]: executor coroutine Initialize asyncio. Sets event loop policy to :class:`uvloop.EventLoopPolicy` if possible. On Windows, sets policy to :class:`asyncio.WindowsProactorEventLoopPolicy`. Run asyncio loop until the `future` is completed and return the result. SIGINT and SIGTERM handlers are temporarily overridden. Instead of raising ``KeyboardInterrupt`` on every signal reception, Future is canceled only once. Additional signals are ignored. On Windows, SIGBREAK (CTRL_BREAK_EVENT) handler is also overridden and asyncio loop gets periodically woken up (every 0.5 seconds). Args: future (Awaitable): future or coroutine Returns: Any: result Raised when trying to use a closed queue. Raised if queue is empty. Raised if queue is full. Asyncio queue which implements AsyncIterable and can be closed. Interface and implementation are based on :class:`asyncio.Queue`. If `maxsize` is less than or equal to zero, the queue size is infinite. Args: maxsize (int): maximum number of items in the queue int: Maximum number of items in the queue. asyncio.Future: Closed future. bool: `True` if queue is empty, `False` otherwise. bool: `True` if queue is full, `False` otherwise. int: Number of items currently in the queue. Close the queue. Return an item if one is immediately available, else raise :exc:`QueueEmptyError`. Returns: Any Raises: QueueEmptyError Put an item into the queue without blocking. If no free slot is immediately available, raise :exc:`QueueFullError`. Args: item (Any): item Raises: QueueFullError Remove and return an item from the queue. If queue is empty, wait until an item is available. Returns: Any Raises: QueueClosedError Put an item into the queue. If the queue is full, wait until a free slot is available before adding the item. 
Args: item (Any): item Raises: QueueClosedError Empty the queue and return the last item. If queue is empty, wait until at least one item is available. Returns: Any Raises: QueueClosedError Empty the queue and return the last item if at least one item is immediately available, else raise :exc:`QueueEmptyError`. Returns: Any Raises: QueueEmptyError Group of asyncio Tasks. Group enables creation and management of related asyncio Tasks. The Group ensures uninterrupted execution of Tasks and Task completion upon Group closing. Group can contain subgroups, which are independent Groups managed by the parent Group. If a Task raises exception, other Tasks continue to execute. If `exception_cb` handler is `None`, exceptions are logged with level WARNING. Args: exception_cb (Optional[Callable[[Exception],None]]): exception handler loop (Optional[asyncio.AbstractEventLoop]): asyncio loop bool: `True` if group is not closing or closed, `False` otherwise. asyncio.Future: Closing Future. asyncio.Future: Closed Future. Create new Group as a child of this Group. Return the new Group. When a parent Group gets closed, all of its children are closed. Closing of a subgroup has no effect on the parent Group. Subgroup inherits exception handler from its parent. Returns: Group Wrap the Future into a Task and schedule its execution. Return the Task object. Resulting task is shielded and can be canceled only with :meth:`Group.async_close`. Args: future (asyncio.Future): future Returns: asyncio.Task Wrap the result of a `fn` into a Task and schedule its execution. Return the Task object. Function is called with provided `args` and `kwargs`. Resulting Task is shielded and can be canceled only with :meth:`Group.async_close`. Args: fn (Callable[[...],Awaitable]): function args: function arguments kwargs: function keyword arguments Returns: asyncio.Task Schedule Group closing. Closing Future is set immediately. All subgroups are closed, and all running tasks are optionally canceled. Once closing of all subgroups and execution of all tasks is completed, closed Future is set. Args: cancel (bool): cancel running tasks Close Group and wait until closed Future is completed. Args: cancel (bool): cancel running tasks | 2.899736 | 3 |
nautobot_golden_config/nornir_plays/config_intended.py | jmcgill298/nautobot-plugin-golden-config | 0 | 6632396 | """Nornir job for generating the intended config."""
# pylint: disable=relative-beyond-top-level
import os
import logging
from datetime import datetime
from nornir import InitNornir
from nornir.core.plugins.inventory import InventoryPluginRegister
from nornir.core.task import Result, Task
from nornir_nautobot.exceptions import NornirNautobotException
from nornir_nautobot.plugins.tasks.dispatcher import dispatcher
from nornir_nautobot.utils.logger import NornirLogger
from nautobot_plugin_nornir.plugins.inventory.nautobot_orm import NautobotORMInventory
from nautobot_plugin_nornir.constants import NORNIR_SETTINGS
from nautobot_plugin_nornir.utils import get_dispatcher
from nautobot_golden_config.models import GoldenConfigSetting, GoldenConfig
from nautobot_golden_config.utilities.helper import (
get_job_filter,
verify_global_settings,
check_jinja_template,
)
from nautobot_golden_config.utilities.graphql import graph_ql_query
from nautobot_golden_config.nornir_plays.processor import ProcessGoldenConfig
InventoryPluginRegister.register("nautobot-inventory", NautobotORMInventory)
LOGGER = logging.getLogger(__name__)
def run_template( # pylint: disable=too-many-arguments
task: Task, logger, global_settings, job_result, jinja_root_path, intended_root_folder
) -> Result:
"""Render Jinja Template.
Only one template is supported, so the expectation is that that template includes all other templates.
Args:
task (Task): Nornir task individual object
Returns:
result (Result): Result from Nornir task
"""
obj = task.host.data["obj"]
intended_obj = GoldenConfig.objects.filter(device=obj).first()
if not intended_obj:
intended_obj = GoldenConfig.objects.create(device=obj)
intended_obj.intended_last_attempt_date = task.host.defaults.data["now"]
intended_obj.save()
intended_path_template_obj = check_jinja_template(obj, logger, global_settings.intended_path_template)
output_file_location = os.path.join(intended_root_folder, intended_path_template_obj)
jinja_template = check_jinja_template(obj, logger, global_settings.jinja_path_template)
status, device_data = graph_ql_query(job_result.request, obj, global_settings.sot_agg_query)
if status != 200:
logger.log_failure(obj, f"The GraphQL query return a status of {str(status)} with error of {str(device_data)}")
raise NornirNautobotException()
task.host.data.update(device_data)
generated_config = task.run(
task=dispatcher,
name="GENERATE CONFIG",
method="generate_config",
obj=obj,
logger=logger,
jinja_template=jinja_template,
jinja_root_path=jinja_root_path,
output_file_location=output_file_location,
default_drivers_mapping=get_dispatcher(),
)[1].result["config"]
intended_obj.intended_last_success_date = task.host.defaults.data["now"]
intended_obj.intended_config = generated_config
intended_obj.save()
logger.log_success(obj, "Successfully generated the intended configuration.")
return Result(host=task.host, result=generated_config)
def config_intended(job_result, data, jinja_root_path, intended_root_folder):
"""Nornir play to generate configurations."""
now = datetime.now()
logger = NornirLogger(__name__, job_result, data.get("debug"))
global_settings = GoldenConfigSetting.objects.first()
verify_global_settings(logger, global_settings, ["jinja_path_template", "intended_path_template", "sot_agg_query"])
try:
with InitNornir(
runner=NORNIR_SETTINGS.get("runner"),
logging={"enabled": False},
inventory={
"plugin": "nautobot-inventory",
"options": {
"credentials_class": NORNIR_SETTINGS.get("credentials"),
"params": NORNIR_SETTINGS.get("inventory_params"),
"queryset": get_job_filter(data),
"defaults": {"now": now},
},
},
) as nornir_obj:
nr_with_processors = nornir_obj.with_processors([ProcessGoldenConfig(logger)])
# Run the Nornir Tasks
nr_with_processors.run(
task=run_template,
name="RENDER CONFIG",
logger=logger,
global_settings=global_settings,
job_result=job_result,
jinja_root_path=jinja_root_path,
intended_root_folder=intended_root_folder,
)
except Exception as err:
logger.log_failure(None, err)
raise
| """Nornir job for generating the intended config."""
# pylint: disable=relative-beyond-top-level
import os
import logging
from datetime import datetime
from nornir import InitNornir
from nornir.core.plugins.inventory import InventoryPluginRegister
from nornir.core.task import Result, Task
from nornir_nautobot.exceptions import NornirNautobotException
from nornir_nautobot.plugins.tasks.dispatcher import dispatcher
from nornir_nautobot.utils.logger import NornirLogger
from nautobot_plugin_nornir.plugins.inventory.nautobot_orm import NautobotORMInventory
from nautobot_plugin_nornir.constants import NORNIR_SETTINGS
from nautobot_plugin_nornir.utils import get_dispatcher
from nautobot_golden_config.models import GoldenConfigSetting, GoldenConfig
from nautobot_golden_config.utilities.helper import (
get_job_filter,
verify_global_settings,
check_jinja_template,
)
from nautobot_golden_config.utilities.graphql import graph_ql_query
from nautobot_golden_config.nornir_plays.processor import ProcessGoldenConfig
InventoryPluginRegister.register("nautobot-inventory", NautobotORMInventory)
LOGGER = logging.getLogger(__name__)
def run_template( # pylint: disable=too-many-arguments
task: Task, logger, global_settings, job_result, jinja_root_path, intended_root_folder
) -> Result:
"""Render Jinja Template.
Only one template is supported, so the expectation is that that template includes all other templates.
Args:
task (Task): Nornir task individual object
Returns:
result (Result): Result from Nornir task
"""
obj = task.host.data["obj"]
intended_obj = GoldenConfig.objects.filter(device=obj).first()
if not intended_obj:
intended_obj = GoldenConfig.objects.create(device=obj)
intended_obj.intended_last_attempt_date = task.host.defaults.data["now"]
intended_obj.save()
intended_path_template_obj = check_jinja_template(obj, logger, global_settings.intended_path_template)
output_file_location = os.path.join(intended_root_folder, intended_path_template_obj)
jinja_template = check_jinja_template(obj, logger, global_settings.jinja_path_template)
status, device_data = graph_ql_query(job_result.request, obj, global_settings.sot_agg_query)
if status != 200:
logger.log_failure(obj, f"The GraphQL query return a status of {str(status)} with error of {str(device_data)}")
raise NornirNautobotException()
task.host.data.update(device_data)
generated_config = task.run(
task=dispatcher,
name="GENERATE CONFIG",
method="generate_config",
obj=obj,
logger=logger,
jinja_template=jinja_template,
jinja_root_path=jinja_root_path,
output_file_location=output_file_location,
default_drivers_mapping=get_dispatcher(),
)[1].result["config"]
intended_obj.intended_last_success_date = task.host.defaults.data["now"]
intended_obj.intended_config = generated_config
intended_obj.save()
logger.log_success(obj, "Successfully generated the intended configuration.")
return Result(host=task.host, result=generated_config)
def config_intended(job_result, data, jinja_root_path, intended_root_folder):
"""Nornir play to generate configurations."""
now = datetime.now()
logger = NornirLogger(__name__, job_result, data.get("debug"))
global_settings = GoldenConfigSetting.objects.first()
verify_global_settings(logger, global_settings, ["jinja_path_template", "intended_path_template", "sot_agg_query"])
try:
with InitNornir(
runner=NORNIR_SETTINGS.get("runner"),
logging={"enabled": False},
inventory={
"plugin": "nautobot-inventory",
"options": {
"credentials_class": NORNIR_SETTINGS.get("credentials"),
"params": NORNIR_SETTINGS.get("inventory_params"),
"queryset": get_job_filter(data),
"defaults": {"now": now},
},
},
) as nornir_obj:
nr_with_processors = nornir_obj.with_processors([ProcessGoldenConfig(logger)])
# Run the Nornir Tasks
nr_with_processors.run(
task=run_template,
name="RENDER CONFIG",
logger=logger,
global_settings=global_settings,
job_result=job_result,
jinja_root_path=jinja_root_path,
intended_root_folder=intended_root_folder,
)
except Exception as err:
logger.log_failure(None, err)
raise
| en | 0.693985 | Nornir job for generating the intended config. # pylint: disable=relative-beyond-top-level # pylint: disable=too-many-arguments Render Jinja Template. Only one template is supported, so the expectation is that that template includes all other templates. Args: task (Task): Nornir task individual object Returns: result (Result): Result from Nornir task Nornir play to generate configurations. # Run the Nornir Tasks | 1.910396 | 2 |
day_05/main_test.py | 7Rocky/AoC-2021 | 1 | 6632397 | import io
import sys
import unittest
from main import main
class TestMain(unittest.TestCase):
def test_main(self):
rescued_stdout = io.StringIO()
sys.stdout = rescued_stdout
main()
want = 'Overlapping lines (1): 4421\n' + \
'Overlapping lines (2): 18674\n'
sys.stdout = sys.__stdout__
self.assertEqual(want, rescued_stdout.getvalue())
if __name__ == '__main__':
unittest.main()
| import io
import sys
import unittest
from main import main
class TestMain(unittest.TestCase):
def test_main(self):
rescued_stdout = io.StringIO()
sys.stdout = rescued_stdout
main()
want = 'Overlapping lines (1): 4421\n' + \
'Overlapping lines (2): 18674\n'
sys.stdout = sys.__stdout__
self.assertEqual(want, rescued_stdout.getvalue())
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.734925 | 3 |
|
tests/contrib/timeseries/test_gp.py | futurewarning/pyro | 0 | 6632398 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import pytest
import torch
import pyro
from pyro.contrib.timeseries import (
DependentMaternGP,
GenericLGSSM,
GenericLGSSMWithGPNoiseModel,
IndependentMaternGP,
LinearlyCoupledMaternGP,
)
from pyro.ops.tensor_utils import block_diag_embed
from tests.common import assert_equal
@pytest.mark.parametrize('model,obs_dim,nu_statedim', [('ssmgp', 3, 1.5), ('ssmgp', 2, 2.5),
('lcmgp', 3, 1.5), ('lcmgp', 2, 2.5),
('imgp', 1, 0.5), ('imgp', 2, 0.5),
('imgp', 1, 1.5), ('imgp', 3, 1.5),
('imgp', 1, 2.5), ('imgp', 3, 2.5),
('dmgp', 1, 1.5), ('dmgp', 2, 1.5),
('dmgp', 3, 1.5),
('glgssm', 1, 3), ('glgssm', 3, 1)])
@pytest.mark.parametrize('T', [11, 37])
def test_timeseries_models(model, nu_statedim, obs_dim, T):
torch.set_default_tensor_type('torch.DoubleTensor')
dt = 0.1 + torch.rand(1).item()
if model == 'lcmgp':
num_gps = 2
gp = LinearlyCoupledMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt, num_gps=num_gps,
length_scale_init=0.5 + torch.rand(num_gps),
kernel_scale_init=0.5 + torch.rand(num_gps),
obs_noise_scale_init=0.5 + torch.rand(obs_dim))
elif model == 'imgp':
gp = IndependentMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt,
length_scale_init=0.5 + torch.rand(obs_dim),
kernel_scale_init=0.5 + torch.rand(obs_dim),
obs_noise_scale_init=0.5 + torch.rand(obs_dim))
elif model == 'glgssm':
gp = GenericLGSSM(state_dim=nu_statedim, obs_dim=obs_dim,
obs_noise_scale_init=0.5 + torch.rand(obs_dim))
elif model == 'ssmgp':
state_dim = {0.5: 4, 1.5: 3, 2.5: 2}[nu_statedim]
gp = GenericLGSSMWithGPNoiseModel(nu=nu_statedim, state_dim=state_dim, obs_dim=obs_dim,
obs_noise_scale_init=0.5 + torch.rand(obs_dim))
elif model == 'dmgp':
linearly_coupled = bool(torch.rand(1).item() > 0.5)
gp = DependentMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt, linearly_coupled=linearly_coupled,
length_scale_init=0.5 + torch.rand(obs_dim))
targets = torch.randn(T, obs_dim)
gp_log_prob = gp.log_prob(targets)
if model == 'imgp':
assert gp_log_prob.shape == (obs_dim,)
else:
assert gp_log_prob.dim() == 0
# compare matern log probs to vanilla GP result via multivariate normal
if model == 'imgp':
times = dt * torch.arange(T).double()
for dim in range(obs_dim):
lengthscale = gp.kernel.length_scale[dim]
variance = gp.kernel.kernel_scale.pow(2)[dim]
obs_noise = gp.obs_noise_scale.pow(2)[dim]
kernel = {0.5: pyro.contrib.gp.kernels.Exponential,
1.5: pyro.contrib.gp.kernels.Matern32,
2.5: pyro.contrib.gp.kernels.Matern52}[nu_statedim]
kernel = kernel(input_dim=1, lengthscale=lengthscale, variance=variance)
# XXX kernel(times) loads old parameters from param store
kernel = kernel.forward(times) + obs_noise * torch.eye(T)
mvn = torch.distributions.MultivariateNormal(torch.zeros(T), kernel)
mvn_log_prob = mvn.log_prob(targets[:, dim])
assert_equal(mvn_log_prob, gp_log_prob[dim], prec=1e-4)
for S in [1, 5]:
if model in ['imgp', 'lcmgp', 'dmgp', 'lcdgp']:
dts = torch.rand(S).cumsum(dim=-1)
predictive = gp.forecast(targets, dts)
else:
predictive = gp.forecast(targets, S)
assert predictive.loc.shape == (S, obs_dim)
if model == 'imgp':
assert predictive.scale.shape == (S, obs_dim)
# assert monotonic increase of predictive noise
if S > 1:
delta = predictive.scale[1:S, :] - predictive.scale[0:S-1, :]
assert (delta > 0.0).sum() == (S - 1) * obs_dim
else:
assert predictive.covariance_matrix.shape == (S, obs_dim, obs_dim)
# assert monotonic increase of predictive noise
if S > 1:
dets = predictive.covariance_matrix.det()
delta = dets[1:S] - dets[0:S-1]
assert (delta > 0.0).sum() == (S - 1)
if model in ['imgp', 'lcmgp', 'dmgp', 'lcdgp']:
# the distant future
dts = torch.tensor([500.0])
predictive = gp.forecast(targets, dts)
# assert mean reverting behavior for GP models
assert_equal(predictive.loc, torch.zeros(1, obs_dim))
@pytest.mark.parametrize('obs_dim', [1, 3])
def test_dependent_matern_gp(obs_dim):
dt = 0.5 + torch.rand(1).item()
gp = DependentMaternGP(nu=1.5, obs_dim=obs_dim, dt=dt,
length_scale_init=0.5 + torch.rand(obs_dim))
# make sure stationary covariance matrix satisfies the relevant
# matrix riccati equation
lengthscale = gp.kernel.length_scale.unsqueeze(-1).unsqueeze(-1)
F = torch.tensor([[0.0, 1.0], [0.0, 0.0]])
mask1 = torch.tensor([[0.0, 0.0], [-3.0, 0.0]])
mask2 = torch.tensor([[0.0, 0.0], [0.0, -math.sqrt(12.0)]])
F = block_diag_embed(F + mask1 / lengthscale.pow(2.0) + mask2 / lengthscale)
stat_cov = gp._stationary_covariance()
wiener_cov = gp._get_wiener_cov()
wiener_cov *= torch.tensor([[0.0, 0.0], [0.0, 1.0]]).repeat(obs_dim, obs_dim)
expected_zero = torch.matmul(F, stat_cov) + torch.matmul(stat_cov, F.transpose(-1, -2)) + wiener_cov
assert_equal(expected_zero, torch.zeros(gp.full_state_dim, gp.full_state_dim))
| # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import pytest
import torch
import pyro
from pyro.contrib.timeseries import (
DependentMaternGP,
GenericLGSSM,
GenericLGSSMWithGPNoiseModel,
IndependentMaternGP,
LinearlyCoupledMaternGP,
)
from pyro.ops.tensor_utils import block_diag_embed
from tests.common import assert_equal
@pytest.mark.parametrize('model,obs_dim,nu_statedim', [('ssmgp', 3, 1.5), ('ssmgp', 2, 2.5),
('lcmgp', 3, 1.5), ('lcmgp', 2, 2.5),
('imgp', 1, 0.5), ('imgp', 2, 0.5),
('imgp', 1, 1.5), ('imgp', 3, 1.5),
('imgp', 1, 2.5), ('imgp', 3, 2.5),
('dmgp', 1, 1.5), ('dmgp', 2, 1.5),
('dmgp', 3, 1.5),
('glgssm', 1, 3), ('glgssm', 3, 1)])
@pytest.mark.parametrize('T', [11, 37])
def test_timeseries_models(model, nu_statedim, obs_dim, T):
torch.set_default_tensor_type('torch.DoubleTensor')
dt = 0.1 + torch.rand(1).item()
if model == 'lcmgp':
num_gps = 2
gp = LinearlyCoupledMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt, num_gps=num_gps,
length_scale_init=0.5 + torch.rand(num_gps),
kernel_scale_init=0.5 + torch.rand(num_gps),
obs_noise_scale_init=0.5 + torch.rand(obs_dim))
elif model == 'imgp':
gp = IndependentMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt,
length_scale_init=0.5 + torch.rand(obs_dim),
kernel_scale_init=0.5 + torch.rand(obs_dim),
obs_noise_scale_init=0.5 + torch.rand(obs_dim))
elif model == 'glgssm':
gp = GenericLGSSM(state_dim=nu_statedim, obs_dim=obs_dim,
obs_noise_scale_init=0.5 + torch.rand(obs_dim))
elif model == 'ssmgp':
state_dim = {0.5: 4, 1.5: 3, 2.5: 2}[nu_statedim]
gp = GenericLGSSMWithGPNoiseModel(nu=nu_statedim, state_dim=state_dim, obs_dim=obs_dim,
obs_noise_scale_init=0.5 + torch.rand(obs_dim))
elif model == 'dmgp':
linearly_coupled = bool(torch.rand(1).item() > 0.5)
gp = DependentMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt, linearly_coupled=linearly_coupled,
length_scale_init=0.5 + torch.rand(obs_dim))
targets = torch.randn(T, obs_dim)
gp_log_prob = gp.log_prob(targets)
if model == 'imgp':
assert gp_log_prob.shape == (obs_dim,)
else:
assert gp_log_prob.dim() == 0
# compare matern log probs to vanilla GP result via multivariate normal
if model == 'imgp':
times = dt * torch.arange(T).double()
for dim in range(obs_dim):
lengthscale = gp.kernel.length_scale[dim]
variance = gp.kernel.kernel_scale.pow(2)[dim]
obs_noise = gp.obs_noise_scale.pow(2)[dim]
kernel = {0.5: pyro.contrib.gp.kernels.Exponential,
1.5: pyro.contrib.gp.kernels.Matern32,
2.5: pyro.contrib.gp.kernels.Matern52}[nu_statedim]
kernel = kernel(input_dim=1, lengthscale=lengthscale, variance=variance)
# XXX kernel(times) loads old parameters from param store
kernel = kernel.forward(times) + obs_noise * torch.eye(T)
mvn = torch.distributions.MultivariateNormal(torch.zeros(T), kernel)
mvn_log_prob = mvn.log_prob(targets[:, dim])
assert_equal(mvn_log_prob, gp_log_prob[dim], prec=1e-4)
for S in [1, 5]:
if model in ['imgp', 'lcmgp', 'dmgp', 'lcdgp']:
dts = torch.rand(S).cumsum(dim=-1)
predictive = gp.forecast(targets, dts)
else:
predictive = gp.forecast(targets, S)
assert predictive.loc.shape == (S, obs_dim)
if model == 'imgp':
assert predictive.scale.shape == (S, obs_dim)
# assert monotonic increase of predictive noise
if S > 1:
delta = predictive.scale[1:S, :] - predictive.scale[0:S-1, :]
assert (delta > 0.0).sum() == (S - 1) * obs_dim
else:
assert predictive.covariance_matrix.shape == (S, obs_dim, obs_dim)
# assert monotonic increase of predictive noise
if S > 1:
dets = predictive.covariance_matrix.det()
delta = dets[1:S] - dets[0:S-1]
assert (delta > 0.0).sum() == (S - 1)
if model in ['imgp', 'lcmgp', 'dmgp', 'lcdgp']:
# the distant future
dts = torch.tensor([500.0])
predictive = gp.forecast(targets, dts)
# assert mean reverting behavior for GP models
assert_equal(predictive.loc, torch.zeros(1, obs_dim))
@pytest.mark.parametrize('obs_dim', [1, 3])
def test_dependent_matern_gp(obs_dim):
dt = 0.5 + torch.rand(1).item()
gp = DependentMaternGP(nu=1.5, obs_dim=obs_dim, dt=dt,
length_scale_init=0.5 + torch.rand(obs_dim))
# make sure stationary covariance matrix satisfies the relevant
# matrix riccati equation
lengthscale = gp.kernel.length_scale.unsqueeze(-1).unsqueeze(-1)
F = torch.tensor([[0.0, 1.0], [0.0, 0.0]])
mask1 = torch.tensor([[0.0, 0.0], [-3.0, 0.0]])
mask2 = torch.tensor([[0.0, 0.0], [0.0, -math.sqrt(12.0)]])
F = block_diag_embed(F + mask1 / lengthscale.pow(2.0) + mask2 / lengthscale)
stat_cov = gp._stationary_covariance()
wiener_cov = gp._get_wiener_cov()
wiener_cov *= torch.tensor([[0.0, 0.0], [0.0, 1.0]]).repeat(obs_dim, obs_dim)
expected_zero = torch.matmul(F, stat_cov) + torch.matmul(stat_cov, F.transpose(-1, -2)) + wiener_cov
assert_equal(expected_zero, torch.zeros(gp.full_state_dim, gp.full_state_dim))
| en | 0.6132 | # Copyright (c) 2017-2019 Uber Technologies, Inc. # SPDX-License-Identifier: Apache-2.0 # compare matern log probs to vanilla GP result via multivariate normal # XXX kernel(times) loads old parameters from param store # assert monotonic increase of predictive noise # assert monotonic increase of predictive noise # the distant future # assert mean reverting behavior for GP models # make sure stationary covariance matrix satisfies the relevant # matrix riccati equation | 1.85627 | 2 |
leetcode/easy/35-Search_insert_position.py | shubhamoli/practice | 1 | 6632399 | <reponame>shubhamoli/practice
"""
Leetcode #35
"""
from typing import List
class Solution:
def searchInsert_OPTI(self, nums: List[int], target: int) -> int:
if target <= nums[0]:
return 0
if target == nums[-1]:
return len(nums)-1
if target > nums[-1]:
return len(nums)
l = 0
r = len(nums) - 1
while l <= r:
m = (l+r) // 2
if nums[m] == target:
return m
if target < nums[m]:
r = m - 1
if target > nums[m]:
l = m + 1
return l
def searchInsert(self, nums: List[int], target: int) -> int:
if not nums:
return -1
if target <= nums[0]:
return 0
if target == nums[-1]:
return len(nums)-1
if target > nums[-1]:
return len(nums)
i = 0
while i < len(nums):
if nums[i] >= target:
return i
else:
i += 1
return i
if __name__ == "__main__":
solution = Solution()
assert solution.searchInsert([1,3,5,6], 5) == 2
assert solution.searchInsert([1,3,5,6], 2) == 1
assert solution.searchInsert([1,3,5,6], 7) == 4
assert solution.searchInsert([1,3,5,6], 0) == 0
assert solution.searchInsert_OPTI([1,3,5,6], 5) == 2
assert solution.searchInsert_OPTI([1,3,5,6], 2) == 1
assert solution.searchInsert_OPTI([1,3,5,6], 7) == 4
assert solution.searchInsert_OPTI([1,3,5,6], 0) == 0
| """
Leetcode #35
"""
from typing import List
class Solution:
def searchInsert_OPTI(self, nums: List[int], target: int) -> int:
if target <= nums[0]:
return 0
if target == nums[-1]:
return len(nums)-1
if target > nums[-1]:
return len(nums)
l = 0
r = len(nums) - 1
while l <= r:
m = (l+r) // 2
if nums[m] == target:
return m
if target < nums[m]:
r = m - 1
if target > nums[m]:
l = m + 1
return l
def searchInsert(self, nums: List[int], target: int) -> int:
if not nums:
return -1
if target <= nums[0]:
return 0
if target == nums[-1]:
return len(nums)-1
if target > nums[-1]:
return len(nums)
i = 0
while i < len(nums):
if nums[i] >= target:
return i
else:
i += 1
return i
if __name__ == "__main__":
solution = Solution()
assert solution.searchInsert([1,3,5,6], 5) == 2
assert solution.searchInsert([1,3,5,6], 2) == 1
assert solution.searchInsert([1,3,5,6], 7) == 4
assert solution.searchInsert([1,3,5,6], 0) == 0
assert solution.searchInsert_OPTI([1,3,5,6], 5) == 2
assert solution.searchInsert_OPTI([1,3,5,6], 2) == 1
assert solution.searchInsert_OPTI([1,3,5,6], 7) == 4
assert solution.searchInsert_OPTI([1,3,5,6], 0) == 0 | sv | 0.218448 | Leetcode #35 | 3.563435 | 4 |
Task/Arithmetic-geometric-mean/Python/arithmetic-geometric-mean-2.py | mullikine/RosettaCodeData | 5 | 6632400 | <filename>Task/Arithmetic-geometric-mean/Python/arithmetic-geometric-mean-2.py<gh_stars>1-10
from decimal import Decimal, getcontext
def agm(a, g, tolerance=Decimal("1e-65")):
while True:
a, g = (a + g) / 2, (a * g).sqrt()
if abs(a - g) < tolerance:
return a
getcontext().prec = 70
print(agm(Decimal(1), 1 / Decimal(2).sqrt()))
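# With these inputs the loop converges in a few iterations; the printed value is
# approximately 0.847213084793979, the AGM(1, 1/sqrt(2)) constant used by the
# Gauss-Legendre / Brent-Salamin formula for computing pi.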
| <filename>Task/Arithmetic-geometric-mean/Python/arithmetic-geometric-mean-2.py<gh_stars>1-10
from decimal import Decimal, getcontext
def agm(a, g, tolerance=Decimal("1e-65")):
while True:
a, g = (a + g) / 2, (a * g).sqrt()
if abs(a - g) < tolerance:
return a
getcontext().prec = 70
print(agm(Decimal(1), 1 / Decimal(2).sqrt()))
| none | 1 | 2.830511 | 3 |
|
podcast_dl/cli.py | kissgyorgy/simple-podcast-dl | 48 | 6632401 | <filename>podcast_dl/cli.py
#!/usr/bin/env python3
import re
import sys
import atexit
import asyncio
import functools
from pathlib import Path
from typing import List, Tuple
from operator import attrgetter
import httpx
import click
from .site_parser import parse_site, InvalidSite
from .podcasts import PODCASTS
from .podcast_dl import (
ensure_download_dir,
download_rss,
get_all_rss_items,
filter_rss_items,
make_episodes,
find_missing,
download_episodes,
)
HELP = """
Download podcast episodes to the given directory
URL or domain or short name for the PODCAST argument can be specified,
e.g. pythonbytes.fm or talkpython or https://talkpython.fm
"""
@functools.total_ordering
class EpisodeParam:
def __init__(self, original: str):
self.original = original
self._spec = original.upper()
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
"""Case insensitive equality."""
if self.__class__ is not other.__class__:
return self._spec == other.upper()
return self._spec == other._spec
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._spec < other._spec
def __str__(self):
return self.original
def __repr__(self):
return repr(self.original)
class EpisodeList(click.ParamType):
name = "episodelist"
def convert(self, value, param=None, ctx=None) -> Tuple[List[EpisodeParam], int]:
biggest_last_n = 0
episodes = set()
episode_range_re = re.compile(r"^([0-9]{1,4})-([0-9]{1,4})$")
for param in value.split(","):
spec = param.upper()
if spec.isnumeric():
episodes.add(EpisodeParam(spec.zfill(4)))
continue
if spec == "LAST":
biggest_last_n = max(biggest_last_n, 1)
continue
if spec.startswith("LAST:"):
# will be added at the end once, when we know the biggest n value
n = int(spec.split(":")[1])
biggest_last_n = max(biggest_last_n, n)
continue
m = episode_range_re.match(spec)
if m:
first, last = m.group(1, 2)
start, end = int(first), int(last) + 1
episodes |= set(EpisodeParam(f"{e:04}") for e in range(start, end))
continue
if spec:
episodes.add(EpisodeParam(param))
return sorted(episodes), biggest_last_n
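    # Rough sketch of the behaviour: EpisodeList().convert("1,3-5,last:2") returns
    # ([EpisodeParam("0001"), EpisodeParam("0003"), EpisodeParam("0004"), EpisodeParam("0005")], 2);
    # numeric specs are zero-padded to four digits and the largest LAST:n value wins.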
def list_podcasts(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo("The following podcasts are supported:")
longest_name = max(len(p.name) for p in PODCASTS)
longest_title = max(len(p.title) for p in PODCASTS)
format_str = "{:<%s}{:<%s}{}" % (longest_name + 4, longest_title + 4)
click.echo(format_str.format("Name", "Title", "Webpage"))
click.echo(format_str.format("----", "-----", "-------"))
for podcast in sorted(PODCASTS, key=attrgetter("name")):
click.echo(format_str.format(podcast.name, podcast.title, podcast.url))
ctx.exit()
@click.command(help=HELP, context_settings={"help_option_names": ["--help", "-h"]})
@click.argument("podcast_name", metavar="PODCAST", required=False)
@click.option(
"-d",
"--download-dir",
type=Path,
default=None,
envvar="DOWNLOAD_DIR",
help=(
"Where to save downloaded episodes. Can be specified by the "
"DOWNLOAD_DIR environment variable. [default: name of PODCAST]"
),
)
@click.option(
"-e",
"--episodes",
"episodes_param",
help="Episodes to download.",
type=EpisodeList(),
)
@click.option(
"-s", "--show-episodes", help="Show the list of episodes for PODCAST.", is_flag=True
)
@click.option(
"-l",
"--list-podcasts",
help="List of supported podcasts, ordered by name.",
is_flag=True,
is_eager=True,
expose_value=False,
callback=list_podcasts,
)
@click.option(
"-p",
"--progress",
"show_progressbar",
is_flag=True,
help="Show progress bar instead of detailed messages during download.",
)
@click.option(
"-t",
"--max-threads",
type=click.IntRange(0, 10),
default=10,
envvar="MAX_THREADS",
help=(
"The maximum number of simultaneous downloads. Can be specified"
" with the MAX_THREADS environment variable."
),
show_default=True,
)
@click.option(
"-v", "--verbose", is_flag=True, help="Show detailed informations during download."
)
@click.version_option(None, "-V", "--version")
@click.pass_context
def main(
ctx,
podcast_name,
download_dir,
max_threads,
episodes_param,
show_episodes,
show_progressbar,
verbose,
):
if len(sys.argv) == 1:
help_text = ctx.command.get_help(ctx)
click.echo(help_text)
return 0
# We have to handle this because it's not required,
# to be able to show help when run without arguments
if podcast_name is None:
raise click.UsageError('Missing argument "PODCAST".', ctx=ctx)
try:
podcast = parse_site(podcast_name)
except InvalidSite:
raise click.BadArgumentUsage(
f'The given podcast "{podcast_name}" is not supported or invalid.\n'
f'See the list of supported podcasts with "{ctx.info_name} --list-podcasts"',
ctx=ctx,
)
vprint = click.secho if verbose else _noprint
loop = _make_asyncio_loop()
http = _make_async_http_client(loop)
rss_root = loop.run_until_complete(download_rss(http, podcast.rss))
all_rss_items = get_all_rss_items(rss_root, podcast.rss_parser)
if episodes_param is not None:
episode_params, last_n = episodes_param
rss_items, unknown_episodes = filter_rss_items(
all_rss_items, episode_params, last_n
)
_warn_about_unknown_episodes(unknown_episodes)
else:
rss_items = all_rss_items
if show_episodes:
_list_episodes(rss_items)
return 0
if download_dir is None:
download_dir = Path(podcast.name)
ensure_download_dir(download_dir)
episodes = make_episodes(download_dir, rss_items)
missing_episodes = find_missing(episodes, vprint)
if not missing_episodes:
click.secho("Every episode is downloaded.", fg="green")
return 0
click.echo(f"Found a total of {len(missing_episodes)} missing episodes.")
progressbar = _make_progressbar(show_progressbar, len(missing_episodes))
dl_coro = download_episodes(
http, missing_episodes, max_threads, vprint, progressbar
)
try:
loop.run_until_complete(dl_coro)
except KeyboardInterrupt:
for task in asyncio.Task.all_tasks():
task.cancel()
click.secho("CTRL-C pressed, aborting...", fg="yellow", err=True)
return 1
click.secho("Done.", fg="green")
return 0
def _noprint(*args, **kwargs):
"""Do nothing with the arguments. Used for suppressing print output."""
def _list_episodes(rss_items):
click.echo("List of episodes:")
for item in rss_items:
episodenum = item.episode or " N/A"
click.echo(f"{episodenum} - {item.title}")
def _make_asyncio_loop():
loop = asyncio.get_event_loop()
atexit.register(loop.close)
return loop
def _make_async_http_client(loop):
http = httpx.AsyncClient()
atexit.register(lambda: loop.run_until_complete(http.aclose()))
return http
def _warn_about_unknown_episodes(unknown_episodes):
if unknown_episodes:
click.secho(
"WARNING: Unknown episode numbers:"
+ ", ".join(str(e) for e in unknown_episodes),
fg="yellow",
err=True,
)
def _make_progressbar(show_progressbar, length):
if show_progressbar:
return click.progressbar(length=length)
else:
return _NoProgressbar()
class _NoProgressbar:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def update(self, value):
"""Do nothing. Used when no progress bar needed."""
| <filename>podcast_dl/cli.py
#!/usr/bin/env python3
import re
import sys
import atexit
import asyncio
import functools
from pathlib import Path
from typing import List, Tuple
from operator import attrgetter
import httpx
import click
from .site_parser import parse_site, InvalidSite
from .podcasts import PODCASTS
from .podcast_dl import (
ensure_download_dir,
download_rss,
get_all_rss_items,
filter_rss_items,
make_episodes,
find_missing,
download_episodes,
)
HELP = """
Download podcast episodes to the given directory
URL or domain or short name for the PODCAST argument can be specified,
e.g. pythonbytes.fm or talkpython or https://talkpython.fm
"""
@functools.total_ordering
class EpisodeParam:
def __init__(self, original: str):
self.original = original
self._spec = original.upper()
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
"""Case insensitive equality."""
if self.__class__ is not other.__class__:
return self._spec == other.upper()
return self._spec == other._spec
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._spec < other._spec
def __str__(self):
return self.original
def __repr__(self):
return repr(self.original)
class EpisodeList(click.ParamType):
name = "episodelist"
def convert(self, value, param=None, ctx=None) -> Tuple[List[EpisodeParam], int]:
biggest_last_n = 0
episodes = set()
episode_range_re = re.compile(r"^([0-9]{1,4})-([0-9]{1,4})$")
for param in value.split(","):
spec = param.upper()
if spec.isnumeric():
episodes.add(EpisodeParam(spec.zfill(4)))
continue
if spec == "LAST":
biggest_last_n = max(biggest_last_n, 1)
continue
if spec.startswith("LAST:"):
# will be added at the end once, when we know the biggest n value
n = int(spec.split(":")[1])
biggest_last_n = max(biggest_last_n, n)
continue
m = episode_range_re.match(spec)
if m:
first, last = m.group(1, 2)
start, end = int(first), int(last) + 1
episodes |= set(EpisodeParam(f"{e:04}") for e in range(start, end))
continue
if spec:
episodes.add(EpisodeParam(param))
return sorted(episodes), biggest_last_n
def list_podcasts(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo("The following podcasts are supported:")
longest_name = max(len(p.name) for p in PODCASTS)
longest_title = max(len(p.title) for p in PODCASTS)
format_str = "{:<%s}{:<%s}{}" % (longest_name + 4, longest_title + 4)
click.echo(format_str.format("Name", "Title", "Webpage"))
click.echo(format_str.format("----", "-----", "-------"))
for podcast in sorted(PODCASTS, key=attrgetter("name")):
click.echo(format_str.format(podcast.name, podcast.title, podcast.url))
ctx.exit()
@click.command(help=HELP, context_settings={"help_option_names": ["--help", "-h"]})
@click.argument("podcast_name", metavar="PODCAST", required=False)
@click.option(
"-d",
"--download-dir",
type=Path,
default=None,
envvar="DOWNLOAD_DIR",
help=(
"Where to save downloaded episodes. Can be specified by the "
"DOWNLOAD_DIR environment variable. [default: name of PODCAST]"
),
)
@click.option(
"-e",
"--episodes",
"episodes_param",
help="Episodes to download.",
type=EpisodeList(),
)
@click.option(
"-s", "--show-episodes", help="Show the list of episodes for PODCAST.", is_flag=True
)
@click.option(
"-l",
"--list-podcasts",
help="List of supported podcasts, ordered by name.",
is_flag=True,
is_eager=True,
expose_value=False,
callback=list_podcasts,
)
@click.option(
"-p",
"--progress",
"show_progressbar",
is_flag=True,
help="Show progress bar instead of detailed messages during download.",
)
@click.option(
"-t",
"--max-threads",
type=click.IntRange(0, 10),
default=10,
envvar="MAX_THREADS",
help=(
"The maximum number of simultaneous downloads. Can be specified"
" with the MAX_THREADS environment variable."
),
show_default=True,
)
@click.option(
"-v", "--verbose", is_flag=True, help="Show detailed informations during download."
)
@click.version_option(None, "-V", "--version")
@click.pass_context
def main(
ctx,
podcast_name,
download_dir,
max_threads,
episodes_param,
show_episodes,
show_progressbar,
verbose,
):
if len(sys.argv) == 1:
help_text = ctx.command.get_help(ctx)
click.echo(help_text)
return 0
# We have to handle this because it's not required,
# to be able to show help when run without arguments
if podcast_name is None:
raise click.UsageError('Missing argument "PODCAST".', ctx=ctx)
try:
podcast = parse_site(podcast_name)
except InvalidSite:
raise click.BadArgumentUsage(
f'The given podcast "{podcast_name}" is not supported or invalid.\n'
f'See the list of supported podcasts with "{ctx.info_name} --list-podcasts"',
ctx=ctx,
)
vprint = click.secho if verbose else _noprint
loop = _make_asyncio_loop()
http = _make_async_http_client(loop)
rss_root = loop.run_until_complete(download_rss(http, podcast.rss))
all_rss_items = get_all_rss_items(rss_root, podcast.rss_parser)
if episodes_param is not None:
episode_params, last_n = episodes_param
rss_items, unknown_episodes = filter_rss_items(
all_rss_items, episode_params, last_n
)
_warn_about_unknown_episodes(unknown_episodes)
else:
rss_items = all_rss_items
if show_episodes:
_list_episodes(rss_items)
return 0
if download_dir is None:
download_dir = Path(podcast.name)
ensure_download_dir(download_dir)
episodes = make_episodes(download_dir, rss_items)
missing_episodes = find_missing(episodes, vprint)
if not missing_episodes:
click.secho("Every episode is downloaded.", fg="green")
return 0
click.echo(f"Found a total of {len(missing_episodes)} missing episodes.")
progressbar = _make_progressbar(show_progressbar, len(missing_episodes))
dl_coro = download_episodes(
http, missing_episodes, max_threads, vprint, progressbar
)
try:
loop.run_until_complete(dl_coro)
except KeyboardInterrupt:
for task in asyncio.Task.all_tasks():
task.cancel()
click.secho("CTRL-C pressed, aborting...", fg="yellow", err=True)
return 1
click.secho("Done.", fg="green")
return 0
def _noprint(*args, **kwargs):
"""Do nothing with the arguments. Used for suppressing print output."""
def _list_episodes(rss_items):
click.echo("List of episodes:")
for item in rss_items:
episodenum = item.episode or " N/A"
click.echo(f"{episodenum} - {item.title}")
def _make_asyncio_loop():
loop = asyncio.get_event_loop()
atexit.register(loop.close)
return loop
def _make_async_http_client(loop):
http = httpx.AsyncClient()
atexit.register(lambda: loop.run_until_complete(http.aclose()))
return http
def _warn_about_unknown_episodes(unknown_episodes):
if unknown_episodes:
click.secho(
"WARNING: Unknown episode numbers:"
+ ", ".join(str(e) for e in unknown_episodes),
fg="yellow",
err=True,
)
def _make_progressbar(show_progressbar, length):
if show_progressbar:
return click.progressbar(length=length)
else:
return _NoProgressbar()
class _NoProgressbar:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def update(self, value):
"""Do nothing. Used when no progress bar needed."""
| en | 0.739628 | #!/usr/bin/env python3 Download podcast episodes to the given directory URL or domain or short name for the PODCAST argument can be specified, e.g. pythonbytes.fm or talkpython or https://talkpython.fm Case insensitive equality. # will be added at the end once, when we know the biggest n value # We have to handle this because it's not required, # to be able to show help when run without arguments Do nothing with the arguments. Used for suppressing print output. Do nothing. Used when no progress bar needed. | 2.58855 | 3 |
tests/lav.py | sublee/hangulize | 145 | 6632402 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.lav import Latvian
class LatvianTestCase(HangulizeTestCase):
lang = Latvian()
def test_people(self):
self.assert_examples({
u'Alberts': u'알베르츠',
u'<NAME>': u'구나르스 아스트라',
u'<NAME>': u'헬무츠 발데리스',
u'<NAME>': u'야니스 발로디스',
u'Krišjā<NAME>': u'크리샤니스 바론스',
u'<NAME>': u'미하일스 바리슈니코우스',
u'<NAME>': u'비즈마 벨셰비차',
u'<NAME>': u'에두아르츠 베르클라우스',
u'<NAME>': u'에르네스츠 블랑크스',
u'<NAME>': u'루돌프스 블라우마니스',
u'Aleksandrs Čaks': u'알렉산드르스 착스',
u'Jānis Čakste': u'야니스 착스테',
u'Emīls Dārziņš': u'에밀스 다르진슈',
u'Eliass Eliezers Desslers': u'엘리아스 엘리에제르스 데슬레르스',
u'Sergejs Eizenšteins': u'세르게이스 에이젠슈테인스',
u'Movša Feigins': u'모우샤 페이긴스',
u'Elīna Garanča': u'엘리나 가란차',
u'Ernests Gulbis': u'에르네스츠 굴비스',
u'Uvis Helmanis': u'우비스 헬마니스',
u'Artūrs Irbe': u'아르투르스 이르베',
u'Kārlis Irbītis': u'카를리스 이르비티스',
u'Gatis Jahovičs': u'가티스 야호비치',
u'Kaspars Kambala': u'카스파르스 캄발라',
u'Aleksandrs Koblencs': u'알렉산드르스 코블렌츠',
u'Gustavs Klucis': u'구스타우스 클루치스',
u'Ābrams Izāks Kūks': u'아브람스 이작스 쿡스',
u'Aleksandrs Kovaļevskis': u'알렉산드르스 코발레우스키스',
u'Miķelis Krogzems': u'미첼리스 크로그젬스',
u'Juris Kronbergs': u'유리스 크론베르크스',
u'Atis Kronvalds': u'아티스 크론발츠',
u'Alberts Kviesis': u'알베르츠 크비에시스',
u'Aleksandrs Laime': u'알렉산드르스 라이메',
u'Nikolajs Loskis': u'니콜라이스 로스키스',
u'Jevgēnija Ļisicina': u'예우게니야 리시치나',
u'Zigfrīds Anna Meierovics': u'직프리츠 안나 메이에로비츠',
u'Evgenijs Millers': u'에우게니스 밀레르스',
u'Kārlis Mīlenbahs': u'카를리스 밀렌바흐스',
u'Stanislavs Olijars': u'스타니슬라우스 올리야르스',
u'Elvīra Ozoliņa': u'엘비라 오졸리냐',
u'Vilhelms Ostvalds': u'빌헬름스 오스트발츠',
u'Sandis Ozoliņš': u'산디스 오졸린슈',
u'Valdemārs Ozoliņš': u'발데마르스 오졸린슈',
u'Artis Pabriks': u'아르티스 파브릭스',
u'Karlis Padegs': u'카를리스 파덱스',
u'Marian Pahars': u'마리안 파하르스',
u'Vladimirs Petrovs': u'블라디미르스 페트로우스',
u'Andrejs Pumpurs': u'안드레이스 품푸르스',
u'Mārtiņš Rubenis': u'마르틴슈 루베니스',
u'Juris Rubenis': u'유리스 루베니스',
u'Elza Rozenberga': u'엘자 로젠베르가',
u'Uļjana Semjonova': u'울랴나 세묘노바',
u'Māris Štrombergs': u'마리스 슈트롬베르크스',
u'Pēteris Stučka': u'페테리스 스투치카',
u'Viktors Ščerbatihs': u'빅토르스 슈체르바티흐스',
u'Haralds Silovs': u'하랄츠 실로우스',
u'Andris Šķēle': u'안드리스 슈첼레',
u'Ernests Štālbergs': u'에르네스츠 슈탈베르크스',
u'Guntis Ulmanis': u'군티스 울마니스',
u'Kārlis Ulmanis': u'카를리스 울마니스',
u'Romāns Vainšteins': u'로만스 바인슈테인스',
u'Krišjānis Valdemārs': u'크리샤니스 발데마르스',
u'Miķelis Valters': u'미첼리스 발테르스',
u'Valdis Valters': u'발디스 발테르스',
u'Aleksandrs Vanags': u'알렉산드르스 바낙스',
u'Ojārs Vācietis': u'오야르스 바치에티스',
u'Eduards Veidenbaums': u'에두아르츠 베이덴바움스',
u'Makss Veinreihs': u'막스 베인레이흐스',
u'Visvaldis': u'비스발디스',
u'Jāzeps Vītols': u'야젭스 비톨스',
u'Māris Verpakovskis': u'마리스 베르파코우스키스',
u'Aleksandrs Voitkevičs': u'알렉산드르스 보이트케비치',
u'Kārlis Zariņš': u'카를리스 자린슈',
u'Gustavs Zemgals': u'구스타우스 젬갈스',
u'Valdis Zatlers': u'발디스 자틀레르스',
u'Imants Ziedonis': u'이만츠 지에도니스',
u'Sergejs Žoltoks': u'세르게이스 졸톡스',
})
def test_places(self):
self.assert_examples({
u'Daugava': u'다우가바',
u'Daugavpils': u'다우가우필스',
u'Grobiņa': u'그로비냐',
u'Jēkabpils': u'예캅필스',
u'Jelgava': u'옐가바',
u'Jersika': u'예르시카',
u'Jūrmala': u'유르말라',
u'Koknese': u'코크네세',
u'Kurzeme': u'쿠르제메',
u'Latgale': u'라트갈레',
u'Latvija': u'라트비야',
u'Liepāja': u'리에파야',
u'Rēzekne': u'레제크네',
u'Rīga': u'리가',
u'Valmiera': u'발미에라',
u'Ventspils': u'벤츠필스',
u'Vidzeme': u'비제메',
u'Zemgale': u'젬갈레',
}) | # -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.lav import Latvian
class LatvianTestCase(HangulizeTestCase):
lang = Latvian()
def test_people(self):
self.assert_examples({
u'Alberts': u'알베르츠',
u'<NAME>': u'구나르스 아스트라',
u'<NAME>': u'헬무츠 발데리스',
u'<NAME>': u'야니스 발로디스',
u'Krišjā<NAME>': u'크리샤니스 바론스',
u'<NAME>': u'미하일스 바리슈니코우스',
u'<NAME>': u'비즈마 벨셰비차',
u'<NAME>': u'에두아르츠 베르클라우스',
u'<NAME>': u'에르네스츠 블랑크스',
u'<NAME>': u'루돌프스 블라우마니스',
u'Aleksandrs Čaks': u'알렉산드르스 착스',
u'Jānis Čakste': u'야니스 착스테',
u'Emīls Dārziņš': u'에밀스 다르진슈',
u'Eliass Eliezers Desslers': u'엘리아스 엘리에제르스 데슬레르스',
u'Sergejs Eizenšteins': u'세르게이스 에이젠슈테인스',
u'Movša Feigins': u'모우샤 페이긴스',
u'Elīna Garanča': u'엘리나 가란차',
u'Ernests Gulbis': u'에르네스츠 굴비스',
u'Uvis Helmanis': u'우비스 헬마니스',
u'Artūrs Irbe': u'아르투르스 이르베',
u'Kārlis Irbītis': u'카를리스 이르비티스',
u'Gatis Jahovičs': u'가티스 야호비치',
u'Kaspars Kambala': u'카스파르스 캄발라',
u'Aleksandrs Koblencs': u'알렉산드르스 코블렌츠',
u'Gustavs Klucis': u'구스타우스 클루치스',
u'Ābrams Izāks Kūks': u'아브람스 이작스 쿡스',
u'Aleksandrs Kovaļevskis': u'알렉산드르스 코발레우스키스',
u'Miķelis Krogzems': u'미첼리스 크로그젬스',
u'Juris Kronbergs': u'유리스 크론베르크스',
u'Atis Kronvalds': u'아티스 크론발츠',
u'Alberts Kviesis': u'알베르츠 크비에시스',
u'Aleksandrs Laime': u'알렉산드르스 라이메',
u'Nikolajs Loskis': u'니콜라이스 로스키스',
u'Jevgēnija Ļisicina': u'예우게니야 리시치나',
u'Zigfrīds Anna Meierovics': u'직프리츠 안나 메이에로비츠',
u'Evgenijs Millers': u'에우게니스 밀레르스',
u'Kārlis Mīlenbahs': u'카를리스 밀렌바흐스',
u'Stanislavs Olijars': u'스타니슬라우스 올리야르스',
u'Elvīra Ozoliņa': u'엘비라 오졸리냐',
u'Vilhelms Ostvalds': u'빌헬름스 오스트발츠',
u'Sandis Ozoliņš': u'산디스 오졸린슈',
u'Valdemārs Ozoliņš': u'발데마르스 오졸린슈',
u'Artis Pabriks': u'아르티스 파브릭스',
u'Karlis Padegs': u'카를리스 파덱스',
u'Marian Pahars': u'마리안 파하르스',
u'Vladimirs Petrovs': u'블라디미르스 페트로우스',
u'Andrejs Pumpurs': u'안드레이스 품푸르스',
u'Mārtiņš Rubenis': u'마르틴슈 루베니스',
u'Juris Rubenis': u'유리스 루베니스',
u'Elza Rozenberga': u'엘자 로젠베르가',
u'Uļjana Semjonova': u'울랴나 세묘노바',
u'Māris Štrombergs': u'마리스 슈트롬베르크스',
u'Pēteris Stučka': u'페테리스 스투치카',
u'Viktors Ščerbatihs': u'빅토르스 슈체르바티흐스',
u'Haralds Silovs': u'하랄츠 실로우스',
u'Andris Šķēle': u'안드리스 슈첼레',
u'Ernests Štālbergs': u'에르네스츠 슈탈베르크스',
u'Guntis Ulmanis': u'군티스 울마니스',
u'Kārlis Ulmanis': u'카를리스 울마니스',
u'Romāns Vainšteins': u'로만스 바인슈테인스',
u'Krišjānis Valdemārs': u'크리샤니스 발데마르스',
u'Miķelis Valters': u'미첼리스 발테르스',
u'Valdis Valters': u'발디스 발테르스',
u'Aleksandrs Vanags': u'알렉산드르스 바낙스',
u'Ojārs Vācietis': u'오야르스 바치에티스',
u'Eduards Veidenbaums': u'에두아르츠 베이덴바움스',
u'Makss Veinreihs': u'막스 베인레이흐스',
u'Visvaldis': u'비스발디스',
u'Jāzeps Vītols': u'야젭스 비톨스',
u'Māris Verpakovskis': u'마리스 베르파코우스키스',
u'Aleksandrs Voitkevičs': u'알렉산드르스 보이트케비치',
u'Kārlis Zariņš': u'카를리스 자린슈',
u'Gustavs Zemgals': u'구스타우스 젬갈스',
u'Valdis Zatlers': u'발디스 자틀레르스',
u'Imants Ziedonis': u'이만츠 지에도니스',
u'Sergejs Žoltoks': u'세르게이스 졸톡스',
})
def test_places(self):
self.assert_examples({
u'Daugava': u'다우가바',
u'Daugavpils': u'다우가우필스',
u'Grobiņa': u'그로비냐',
u'Jēkabpils': u'예캅필스',
u'Jelgava': u'옐가바',
u'Jersika': u'예르시카',
u'Jūrmala': u'유르말라',
u'Koknese': u'코크네세',
u'Kurzeme': u'쿠르제메',
u'Latgale': u'라트갈레',
u'Latvija': u'라트비야',
u'Liepāja': u'리에파야',
u'Rēzekne': u'레제크네',
u'Rīga': u'리가',
u'Valmiera': u'발미에라',
u'Ventspils': u'벤츠필스',
u'Vidzeme': u'비제메',
u'Zemgale': u'젬갈레',
}) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.41372 | 2 |
comparer/migrations/0008_remove_rankingbrowserpluginmodel_show_categories.py | dzejkobi/institution-comparisoner | 0 | 6632403 | <gh_stars>0
# Generated by Django 3.1.12 on 2021-07-08 20:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('comparer', '0007_auto_20210708_1723'),
]
operations = [
migrations.RemoveField(
model_name='rankingbrowserpluginmodel',
name='show_categories',
),
]
| # Generated by Django 3.1.12 on 2021-07-08 20:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('comparer', '0007_auto_20210708_1723'),
]
operations = [
migrations.RemoveField(
model_name='rankingbrowserpluginmodel',
name='show_categories',
),
] | en | 0.763282 | # Generated by Django 3.1.12 on 2021-07-08 20:57 | 1.397203 | 1 |
great_expectations/util.py | scarrucciu/great_expectations | 0 | 6632404 | <gh_stars>0
import os
import pandas as pd
import json
import logging
from six import string_types
import great_expectations.dataset as dataset
from great_expectations.data_context import DataContext
logger = logging.getLogger(__name__)
def _convert_to_dataset_class(df, dataset_class, expectation_suite=None, profiler=None):
"""
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite
"""
# TODO: Refactor this method to use the new ClassConfig (module_name and class_name convention).
if expectation_suite is not None:
# Create a dataset of the new class type, and manually initialize expectations according to
# the provided expectation suite
new_df = dataset_class.from_dataset(df)
new_df._initialize_expectations(expectation_suite)
else:
# Instantiate the new Dataset with default expectations
new_df = dataset_class.from_dataset(df)
if profiler is not None:
new_df.profile(profiler)
return new_df
def read_csv(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None,
*args, **kwargs
):
# TODO: Refactor this method to use the new ClassConfig (module_name and class_name convention).
df = pd.read_csv(filename, *args, **kwargs)
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def read_json(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
accessor_func=None,
profiler=None,
*args, **kwargs
):
if accessor_func is not None:
json_obj = json.load(open(filename, 'rb'))
json_obj = accessor_func(json_obj)
df = pd.read_json(json.dumps(json_obj), *args, **kwargs)
else:
df = pd.read_json(filename, *args, **kwargs)
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def read_excel(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None,
*args, **kwargs
):
"""Read a file using Pandas read_excel and return a great_expectations dataset.
Args:
filename (string): path to file to read
dataset_class (Dataset class): class to which to convert resulting Pandas df
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset or ordered dict of great_expectations datasets,
if multiple worksheets are imported
"""
df = pd.read_excel(filename, *args, **kwargs)
if isinstance(df, dict):
for key in df:
df[key] = _convert_to_dataset_class(
df[key], dataset_class, expectation_suite, profiler)
else:
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
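# Example (sketch): with sheet_name=None pandas returns every worksheet, so
#   dfs = read_excel("workbook.xlsx", sheet_name=None)   # "workbook.xlsx" is a placeholder
# yields a dict whose values are PandasDataset objects, one per sheet.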
def read_table(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None,
*args, **kwargs
):
"""Read a file using Pandas read_table and return a great_expectations dataset.
Args:
filename (string): path to file to read
dataset_class (Dataset class): class to which to convert resulting Pandas df
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
df = pd.read_table(filename, *args, **kwargs)
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def read_parquet(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None,
*args, **kwargs
):
"""Read a file using Pandas read_parquet and return a great_expectations dataset.
Args:
filename (string): path to file to read
dataset_class (Dataset class): class to which to convert resulting Pandas df
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
df = pd.read_parquet(filename, *args, **kwargs)
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def from_pandas(pandas_df,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None
):
"""Read a Pandas data frame and return a great_expectations dataset.
Args:
pandas_df (Pandas df): Pandas data frame
dataset_class (Dataset class) = dataset.pandas_dataset.PandasDataset:
class to which to convert resulting Pandas df
expectation_suite (string) = None: path to great_expectations expectation suite file
profiler (profiler class) = None: The profiler that should
be run on the dataset to establish a baseline expectation suite.
Returns:
great_expectations dataset
"""
return _convert_to_dataset_class(
pandas_df,
dataset_class,
expectation_suite,
profiler
)
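# Example (sketch): wrapping an in-memory frame,
#   ge_df = from_pandas(pd.DataFrame({"a": [1, 2, 3]}))
#   ge_df.expect_column_values_to_be_between("a", 0, 10)
# expect_column_values_to_be_between is one of the standard PandasDataset expectations.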
def validate(data_asset, expectation_suite=None, data_asset_name=None, data_context=None, data_asset_type=None, *args, **kwargs):
"""Validate the provided data asset using the provided expectation suite"""
if expectation_suite is None and data_context is None:
raise ValueError(
"Either an expectation suite or a DataContext is required for validation.")
if expectation_suite is None:
logger.info("Using expectation suite from DataContext.")
# Allow data_context to be a string, and try loading it from path in that case
if isinstance(data_context, string_types):
data_context = DataContext(data_context)
expectation_suite = data_context.get_expectation_suite(data_asset_name)
else:
if data_asset_name in expectation_suite:
logger.info("Using expectation suite with name %s" %
expectation_suite["data_asset_name"])
else:
logger.info("Using expectation suite with no data_asset_name")
# If the object is already a Dataset type, then this is purely a convenience method
# and no conversion is needed
if isinstance(data_asset, dataset.Dataset) and data_asset_type is None:
return data_asset.validate(expectation_suite=expectation_suite, data_context=data_context, *args, **kwargs)
elif data_asset_type is None:
# Guess the GE data_asset_type based on the type of the data_asset
if isinstance(data_asset, pd.DataFrame):
data_asset_type = dataset.PandasDataset
# Add other data_asset_type conditions here as needed
# Otherwise, we will convert for the user to a subclass of the
# existing class to enable new expectations, but only for datasets
if not isinstance(data_asset, (dataset.Dataset, pd.DataFrame)):
raise ValueError(
"The validate util method only supports dataset validations, including custom subclasses. For other data asset types, use the object's own validate method.")
if not issubclass(type(data_asset), data_asset_type):
if isinstance(data_asset, (pd.DataFrame)) and issubclass(data_asset_type, dataset.PandasDataset):
pass # This is a special type of allowed coercion
else:
raise ValueError(
"The validate util method only supports validation for subtypes of the provided data_asset_type.")
data_asset_ = _convert_to_dataset_class(
data_asset, data_asset_type, expectation_suite)
return data_asset_.validate(*args, data_context=data_context, **kwargs)
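# Example (sketch): validating a plain DataFrame against a previously saved suite,
#   suite = json.load(open("my_expectation_suite.json"))   # placeholder filename
#   results = validate(pd.DataFrame({"a": [1, 2, 3]}), expectation_suite=suite)
# The suite can equally be looked up through a DataContext, as handled above.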
| import os
import pandas as pd
import json
import logging
from six import string_types
import great_expectations.dataset as dataset
from great_expectations.data_context import DataContext
logger = logging.getLogger(__name__)
def _convert_to_dataset_class(df, dataset_class, expectation_suite=None, profiler=None):
"""
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite
"""
# TODO: Refactor this method to use the new ClassConfig (module_name and class_name convention).
if expectation_suite is not None:
# Create a dataset of the new class type, and manually initialize expectations according to
# the provided expectation suite
new_df = dataset_class.from_dataset(df)
new_df._initialize_expectations(expectation_suite)
else:
# Instantiate the new Dataset with default expectations
new_df = dataset_class.from_dataset(df)
if profiler is not None:
new_df.profile(profiler)
return new_df
def read_csv(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None,
*args, **kwargs
):
# TODO: Refactor this method to use the new ClassConfig (module_name and class_name convention).
df = pd.read_csv(filename, *args, **kwargs)
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def read_json(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
accessor_func=None,
profiler=None,
*args, **kwargs
):
if accessor_func is not None:
json_obj = json.load(open(filename, 'rb'))
json_obj = accessor_func(json_obj)
df = pd.read_json(json.dumps(json_obj), *args, **kwargs)
else:
df = pd.read_json(filename, *args, **kwargs)
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def read_excel(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None,
*args, **kwargs
):
"""Read a file using Pandas read_excel and return a great_expectations dataset.
Args:
filename (string): path to file to read
dataset_class (Dataset class): class to which to convert resulting Pandas df
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset or ordered dict of great_expectations datasets,
if multiple worksheets are imported
"""
df = pd.read_excel(filename, *args, **kwargs)
if isinstance(df, dict):
for key in df:
df[key] = _convert_to_dataset_class(
df[key], dataset_class, expectation_suite, profiler)
else:
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def read_table(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None,
*args, **kwargs
):
"""Read a file using Pandas read_table and return a great_expectations dataset.
Args:
filename (string): path to file to read
dataset_class (Dataset class): class to which to convert resulting Pandas df
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
df = pd.read_table(filename, *args, **kwargs)
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def read_parquet(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None,
*args, **kwargs
):
"""Read a file using Pandas read_parquet and return a great_expectations dataset.
Args:
filename (string): path to file to read
dataset_class (Dataset class): class to which to convert resulting Pandas df
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
df = pd.read_parquet(filename, *args, **kwargs)
df = _convert_to_dataset_class(
df, dataset_class, expectation_suite, profiler)
return df
def from_pandas(pandas_df,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectation_suite=None,
profiler=None
):
"""Read a Pandas data frame and return a great_expectations dataset.
Args:
pandas_df (Pandas df): Pandas data frame
dataset_class (Dataset class) = dataset.pandas_dataset.PandasDataset:
class to which to convert resulting Pandas df
expectation_suite (string) = None: path to great_expectations expectation suite file
profiler (profiler class) = None: The profiler that should
be run on the dataset to establish a baseline expectation suite.
Returns:
great_expectations dataset
"""
return _convert_to_dataset_class(
pandas_df,
dataset_class,
expectation_suite,
profiler
)
def validate(data_asset, expectation_suite=None, data_asset_name=None, data_context=None, data_asset_type=None, *args, **kwargs):
"""Validate the provided data asset using the provided expectation suite"""
if expectation_suite is None and data_context is None:
raise ValueError(
"Either an expectation suite or a DataContext is required for validation.")
if expectation_suite is None:
logger.info("Using expectation suite from DataContext.")
# Allow data_context to be a string, and try loading it from path in that case
if isinstance(data_context, string_types):
data_context = DataContext(data_context)
expectation_suite = data_context.get_expectation_suite(data_asset_name)
else:
if data_asset_name in expectation_suite:
logger.info("Using expectation suite with name %s" %
expectation_suite["data_asset_name"])
else:
logger.info("Using expectation suite with no data_asset_name")
# If the object is already a Dataset type, then this is purely a convenience method
# and no conversion is needed
if isinstance(data_asset, dataset.Dataset) and data_asset_type is None:
return data_asset.validate(expectation_suite=expectation_suite, data_context=data_context, *args, **kwargs)
elif data_asset_type is None:
# Guess the GE data_asset_type based on the type of the data_asset
if isinstance(data_asset, pd.DataFrame):
data_asset_type = dataset.PandasDataset
# Add other data_asset_type conditions here as needed
# Otherwise, we will convert for the user to a subclass of the
# existing class to enable new expectations, but only for datasets
if not isinstance(data_asset, (dataset.Dataset, pd.DataFrame)):
raise ValueError(
"The validate util method only supports dataset validations, including custom subclasses. For other data asset types, use the object's own validate method.")
if not issubclass(type(data_asset), data_asset_type):
if isinstance(data_asset, (pd.DataFrame)) and issubclass(data_asset_type, dataset.PandasDataset):
pass # This is a special type of allowed coercion
else:
raise ValueError(
"The validate util method only supports validation for subtypes of the provided data_asset_type.")
data_asset_ = _convert_to_dataset_class(
data_asset, data_asset_type, expectation_suite)
return data_asset_.validate(*args, data_context=data_context, **kwargs) | en | 0.698052 | Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite # TODO: Refactor this method to use the new ClassConfig (module_name and class_name convention). # Create a dataset of the new class type, and manually initialize expectations according to # the provided expectation suite # Instantiate the new Dataset with default expectations # TODO: Refactor this method to use the new ClassConfig (module_name and class_name convention). Read a file using Pandas read_excel and return a great_expectations dataset. Args: filename (string): path to file to read dataset_class (Dataset class): class to which to convert resulting Pandas df expectation_suite (string): path to great_expectations expectation suite file profiler (Profiler class): profiler to use when creating the dataset (default is None) Returns: great_expectations dataset or ordered dict of great_expectations datasets, if multiple worksheets are imported Read a file using Pandas read_table and return a great_expectations dataset. Args: filename (string): path to file to read dataset_class (Dataset class): class to which to convert resulting Pandas df expectation_suite (string): path to great_expectations expectation suite file profiler (Profiler class): profiler to use when creating the dataset (default is None) Returns: great_expectations dataset Read a file using Pandas read_parquet and return a great_expectations dataset. Args: filename (string): path to file to read dataset_class (Dataset class): class to which to convert resulting Pandas df expectation_suite (string): path to great_expectations expectation suite file profiler (Profiler class): profiler to use when creating the dataset (default is None) Returns: great_expectations dataset Read a Pandas data frame and return a great_expectations dataset. Args: pandas_df (Pandas df): Pandas data frame dataset_class (Dataset class) = dataset.pandas_dataset.PandasDataset: class to which to convert resulting Pandas df expectation_suite (string) = None: path to great_expectations expectation suite file profiler (profiler class) = None: The profiler that should be run on the dataset to establish a baseline expectation suite. Returns: great_expectations dataset Validate the provided data asset using the provided expectation suite # Allow data_context to be a string, and try loading it from path in that case # If the object is already a Dataset type, then this is purely a convenience method # and no conversion is needed # Guess the GE data_asset_type based on the type of the data_asset # Add other data_asset_type conditions here as needed # Otherwise, we will convert for the user to a subclass of the # existing class to enable new expectations, but only for datasets # This is a special type of allowed coercion | 2.818139 | 3 |
sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py | marcelarosalesj/x.stx-config | 0 | 6632405 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
"""
Client side of the conductor RPC API.
"""
from sysinv.objects import base as objects_base
import sysinv.openstack.common.rpc.proxy
from sysinv.openstack.common import log
LOG = log.getLogger(__name__)
MANAGER_TOPIC = 'sysinv.conductor_manager'
class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
"""Client side of the conductor RPC API.
API version history:
1.0 - Initial version.
1.1 - Used for R5
"""
RPC_API_VERSION = '1.1'
def __init__(self, topic=None):
if topic is None:
topic = MANAGER_TOPIC
super(ConductorAPI, self).__init__(
topic=topic,
serializer=objects_base.SysinvObjectSerializer(),
default_version='1.0',
version_cap=self.RPC_API_VERSION)
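    # Typical use (sketch): callers build the proxy once and then issue RPCs with an
    # admin request context, e.g.
    #   rpcapi = ConductorAPI()
    #   ihost = rpcapi.create_ihost(context, values)
    # where `context` is the usual sysinv request context and `values` a dict of
    # initial host attributes.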
def handle_dhcp_lease(self, context, tags, mac, ip_address, cid=None):
"""Synchronously, have a conductor handle a DHCP lease update.
Handling depends on the interface:
- management interface: creates an ihost
- infrastructure interface: just updated the dnsmasq config
:param context: request context.
:param tags: specifies the interface type (mgmt or infra)
:param mac: MAC for the lease
:param ip_address: IP address for the lease
:param cid: Client ID for the lease
"""
return self.call(context,
self.make_msg('handle_dhcp_lease',
tags=tags,
mac=mac,
ip_address=ip_address,
cid=cid))
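    # Example (sketch, made-up values):
    #   rpcapi.handle_dhcp_lease(context, tags="mgmt", mac="08:00:27:aa:bb:cc",
    #                            ip_address="192.168.204.10", cid=None)
    # A lease learned on the management interface may result in a new ihost record.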
def create_ihost(self, context, values):
"""Synchronously, have a conductor create an ihost.
Create an ihost in the database and return an object.
:param context: request context.
:param values: dictionary with initial values for new ihost object
:returns: created ihost object, including all fields.
"""
return self.call(context,
self.make_msg('create_ihost',
values=values))
def update_ihost(self, context, ihost_obj):
"""Synchronously, have a conductor update the ihosts's information.
Update the ihost's information in the database and return an object.
:param context: request context.
:param ihost_obj: a changed (but not saved) ihost object.
:returns: updated ihost object, including all fields.
"""
return self.call(context,
self.make_msg('update_ihost',
ihost_obj=ihost_obj))
def configure_ihost(self, context, host,
do_worker_apply=False):
"""Synchronously, have a conductor configure an ihost.
Does the following tasks:
- Update puppet hiera configuration files for the ihost.
- Add (or update) a host entry in the dnsmasq.conf file.
- Set up PXE configuration to run installer
:param context: request context.
:param host: an ihost object.
:param do_worker_apply: apply the newly created worker manifests.
"""
return self.call(context,
self.make_msg('configure_ihost',
host=host,
do_worker_apply=do_worker_apply))
# TODO(CephPoolsDecouple): remove
def configure_osd_pools(self, context, ceph_backend=None, new_pool_size=None, new_pool_min_size=None):
"""Configure or update configuration of the OSD pools.
If none of the optionals are provided then all pools are updated based on DB configuration.
:param context: an admin context.
:param ceph_backend: Optional ceph backend object of a tier
:param new_pool_size: Optional override for replication number.
:param new_pool_min_size: Optional override for minimum replication number.
"""
return self.call(context,
self.make_msg('configure_osd_pools',
ceph_backend=ceph_backend,
new_pool_size=new_pool_size,
new_pool_min_size=new_pool_min_size))
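    # Example (sketch): rpcapi.configure_osd_pools(context, new_pool_size=3, new_pool_min_size=2)
    # asks the conductor to override pool replication; calling it with no optional
    # arguments re-applies the replication values stored in the DB, per the docstring.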
def remove_host_config(self, context, host_uuid):
"""Synchronously, have a conductor remove configuration for a host.
Does the following tasks:
- Remove the hiera config files for the host.
:param context: request context.
:param host_uuid: uuid of the host.
"""
return self.call(context,
self.make_msg('remove_host_config',
host_uuid=host_uuid))
def unconfigure_ihost(self, context, ihost_obj):
"""Synchronously, have a conductor unconfigure an ihost.
Does the following tasks:
- Remove hiera config files for the ihost.
- Remove the host entry from the dnsmasq.conf file.
- Remove the PXE configuration
:param context: request context.
:param ihost_obj: an ihost object.
"""
return self.call(context,
self.make_msg('unconfigure_ihost',
ihost_obj=ihost_obj))
def create_controller_filesystems(self, context, rootfs_device):
"""Synchronously, create the controller file systems.
Does the following tasks:
- queries OS for root disk size
- creates the controller file systems.
- queries system to get region info for img_conversion_size setup.
        :param context: request context.
:param rootfs_device: the root disk device
"""
return self.call(context,
self.make_msg('create_controller_filesystems',
rootfs_device=rootfs_device))
def get_ihost_by_macs(self, context, ihost_macs):
"""Finds ihost db entry based upon the mac list
This method returns an ihost if it matches a mac
:param context: an admin context
:param ihost_macs: list of mac addresses
:returns: ihost object, including all fields.
"""
return self.call(context,
self.make_msg('get_ihost_by_macs',
ihost_macs=ihost_macs))
def get_ihost_by_hostname(self, context, ihost_hostname):
"""Finds ihost db entry based upon the ihost hostname
This method returns an ihost if it matches the
hostname.
:param context: an admin context
:param ihost_hostname: ihost hostname
:returns: ihost object, including all fields.
"""
return self.call(context,
self.make_msg('get_ihost_by_hostname',
ihost_hostname=ihost_hostname))
def iport_update_by_ihost(self, context,
ihost_uuid, inic_dict_array):
"""Create iports for an ihost with the supplied data.
This method allows records for iports for ihost to be created.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param inic_dict_array: initial values for iport objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('iport_update_by_ihost',
ihost_uuid=ihost_uuid,
inic_dict_array=inic_dict_array))
def lldp_agent_update_by_host(self, context,
host_uuid, agent_dict_array):
"""Create lldp_agents for an ihost with the supplied data.
This method allows records for lldp_agents for a host to be created.
:param context: an admin context
        :param host_uuid: host uuid unique id
:param agent_dict_array: initial values for lldp_agent objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('lldp_agent_update_by_host',
host_uuid=host_uuid,
agent_dict_array=agent_dict_array))
def lldp_neighbour_update_by_host(self, context,
host_uuid, neighbour_dict_array):
"""Create lldp_neighbours for an ihost with the supplied data.
This method allows records for lldp_neighbours for a host to be
created.
:param context: an admin context
        :param host_uuid: host uuid unique id
:param neighbour_dict_array: initial values for lldp_neighbour objects
:returns: pass or fail
"""
return self.call(
context,
self.make_msg('lldp_neighbour_update_by_host',
host_uuid=host_uuid,
neighbour_dict_array=neighbour_dict_array))
def pci_device_update_by_host(self, context,
host_uuid, pci_device_dict_array):
"""Create pci_devices for an ihost with the supplied data.
This method allows records for pci_devices for ihost to be created.
:param context: an admin context
:param host_uuid: ihost uuid unique id
:param pci_device_dict_array: initial values for device objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('pci_device_update_by_host',
host_uuid=host_uuid,
pci_device_dict_array=pci_device_dict_array))
def inumas_update_by_ihost(self, context,
ihost_uuid, inuma_dict_array):
"""Create inumas for an ihost with the supplied data.
This method allows records for inumas for ihost to be created.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param inuma_dict_array: initial values for inuma objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('inumas_update_by_ihost',
ihost_uuid=ihost_uuid,
inuma_dict_array=inuma_dict_array))
def icpus_update_by_ihost(self, context,
ihost_uuid, icpu_dict_array,
force_grub_update,
):
"""Create cpus for an ihost with the supplied data.
This method allows records for cpus for ihost to be created.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param icpu_dict_array: initial values for cpu objects
:param force_grub_update: bool value to force grub update
:returns: pass or fail
"""
return self.call(context,
self.make_msg('icpus_update_by_ihost',
ihost_uuid=ihost_uuid,
icpu_dict_array=icpu_dict_array,
force_grub_update=force_grub_update))
def imemory_update_by_ihost(self, context,
ihost_uuid, imemory_dict_array,
force_update=False):
"""Create or update memory for an ihost with the supplied data.
This method allows records for memory for ihost to be created,
or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param imemory_dict_array: initial values for memory objects
:param force_update: force a memory update
:returns: pass or fail
"""
return self.call(context,
self.make_msg('imemory_update_by_ihost',
ihost_uuid=ihost_uuid,
imemory_dict_array=imemory_dict_array,
force_update=force_update))
def idisk_update_by_ihost(self, context,
ihost_uuid, idisk_dict_array):
"""Create or update disk for an ihost with the supplied data.
This method allows records for disk for ihost to be created,
or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param idisk_dict_array: initial values for disk objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('idisk_update_by_ihost',
ihost_uuid=ihost_uuid,
idisk_dict_array=idisk_dict_array))
def ilvg_update_by_ihost(self, context,
ihost_uuid, ilvg_dict_array):
"""Create or update local volume group for an ihost with the supplied
data.
This method allows records for a local volume group for ihost to be
created, or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ilvg_dict_array: initial values for local volume group objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('ilvg_update_by_ihost',
ihost_uuid=ihost_uuid,
ilvg_dict_array=ilvg_dict_array))
def ipv_update_by_ihost(self, context,
ihost_uuid, ipv_dict_array):
"""Create or update physical volume for an ihost with the supplied
data.
This method allows records for a physical volume for ihost to be
created, or updated.
R5 - Moved to version 1.1 as partition schema is no longer applicable
to R4
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ipv_dict_array: initial values for physical volume objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('ipv_update_by_ihost',
ihost_uuid=ihost_uuid,
ipv_dict_array=ipv_dict_array),
version='1.1')
def ipartition_update_by_ihost(self, context,
ihost_uuid, ipart_dict_array):
"""Create or update partitions for an ihost with the supplied data.
This method allows records for a host's partition to be created or
updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ipart_dict_array: initial values for partition objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('ipartition_update_by_ihost',
ihost_uuid=ihost_uuid,
ipart_dict_array=ipart_dict_array))
def update_partition_config(self, context, partition):
"""Asynchronously, have a conductor configure the physical volume
partitions.
:param context: request context.
:param partition: dict with partition details.
"""
LOG.debug("ConductorApi.update_partition_config: sending"
" partition to conductor")
return self.cast(context, self.make_msg('update_partition_config',
partition=partition))
def iplatform_update_by_ihost(self, context,
ihost_uuid, imsg_dict):
"""Create or update memory for an ihost with the supplied data.
This method allows records for memory for ihost to be created,
or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param imsg_dict: inventory message dict
:returns: pass or fail
"""
return self.call(context,
self.make_msg('iplatform_update_by_ihost',
ihost_uuid=ihost_uuid,
imsg_dict=imsg_dict))
def upgrade_ihost(self, context, host, load):
"""Synchronously, have a conductor upgrade a host.
Does the following tasks:
- Update the pxelinux.cfg file.
:param context: request context.
:param host: an ihost object.
:param load: a load object.
"""
return self.call(context,
self.make_msg('upgrade_ihost_pxe_config', host=host, load=load))
def configure_isystemname(self, context, systemname):
"""Synchronously, have a conductor configure the system name.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each update their /etc/platform/motd.system
:param context: request context.
:param systemname: the systemname
"""
LOG.debug("ConductorApi.configure_isystemname: sending"
" systemname to conductor")
return self.call(context,
self.make_msg('configure_isystemname',
systemname=systemname))
def configure_system_https(self, context):
"""Synchronously, have a conductor configure the system https/http
configuration.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the https/http selected manifests
:param context: request context.
"""
LOG.debug("ConductorApi.configure_system_https/http: sending"
" configure_system_https to conductor")
return self.call(context, self.make_msg('configure_system_https'))
def configure_system_timezone(self, context):
"""Synchronously, have a conductor configure the system timezone.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the timezone manifest
:param context: request context.
"""
LOG.debug("ConductorApi.configure_system_timezone: sending"
" system_timezone to conductor")
return self.call(context, self.make_msg('configure_system_timezone'))
def update_route_config(self, context):
"""Synchronously, have a conductor configure static route.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the route manifest
:param context: request context.
"""
LOG.debug("ConductorApi.update_route_config: sending"
" update_route_config to conductor")
return self.call(context, self.make_msg('update_route_config'))
def update_sriov_config(self, context, host_uuid):
"""Synchronously, have a conductor configure sriov config.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the network manifest
:param context: request context.
:param host_uuid: the host unique uuid
"""
LOG.debug("ConductorApi.update_sriov_config: sending "
"update_sriov_config to conductor")
return self.call(context, self.make_msg('update_sriov_config',
host_uuid=host_uuid))
def update_distributed_cloud_role(self, context):
"""Synchronously, have a conductor configure the distributed cloud
role of the system.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the config manifest
:param context: request context.
"""
LOG.debug("ConductorApi.update_distributed_cloud_role: sending"
" distributed_cloud_role to conductor")
return self.call(context, self.make_msg('update_distributed_cloud_role'))
def subfunctions_update_by_ihost(self, context, ihost_uuid, subfunctions):
"""Create or update local volume group for an ihost with the supplied
data.
This method allows records for a local volume group for ihost to be
created, or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param subfunctions: subfunctions of the host
:returns: pass or fail
"""
return self.call(context,
self.make_msg('subfunctions_update_by_ihost',
ihost_uuid=ihost_uuid,
subfunctions=subfunctions))
def configure_osd_istor(self, context, istor_obj):
"""Synchronously, have a conductor configure an OSD istor.
Does the following tasks:
- Allocates an OSD.
- Creates or resizes the OSD pools as necessary.
:param context: request context.
:param istor_obj: an istor object.
:returns: istor object, with updated osdid
"""
return self.call(context,
self.make_msg('configure_osd_istor',
istor_obj=istor_obj))
def unconfigure_osd_istor(self, context, istor_obj):
"""Synchronously, have a conductor unconfigure an istor.
Does the following tasks:
- Removes the OSD from the crush map.
- Deletes the OSD's auth key.
- Deletes the OSD.
:param context: request context.
:param istor_obj: an istor object.
"""
return self.call(context,
self.make_msg('unconfigure_osd_istor',
istor_obj=istor_obj))
def restore_ceph_config(self, context, after_storage_enabled=False):
"""Restore Ceph configuration during Backup and Restore process.
:param context: request context.
:returns: True if the restore is successful or if no restore is needed
"""
return self.call(context,
self.make_msg('restore_ceph_config',
after_storage_enabled=after_storage_enabled))
def get_ceph_pool_replication(self, context, ceph_backend=None):
"""Get ceph storage backend pool replication parameters
:param context: request context.
:param ceph_backend: ceph backend object type for a tier
:returns: tuple with (replication, min_replication)
"""
return self.call(context,
self.make_msg('get_ceph_pool_replication',
ceph_backend=ceph_backend))
def delete_osd_pool(self, context, pool_name):
"""delete an OSD pool
:param context: request context.
:param pool_name: the name of the OSD pool
"""
return self.call(context,
self.make_msg('delete_osd_pool',
pool_name=pool_name))
def list_osd_pools(self, context):
"""list OSD pools
:param context: request context.
"""
return self.call(context,
self.make_msg('list_osd_pools'))
def get_osd_pool_quota(self, context, pool_name):
"""Get the quota for an OSD pool
:param context: request context.
:param pool_name: the name of the OSD pool
:returns: dictionary with {"max_objects": num, "max_bytes": num}
"""
return self.call(context,
self.make_msg('get_osd_pool_quota',
pool_name=pool_name))
def set_osd_pool_quota(self, context, pool, max_bytes=0, max_objects=0):
"""Set the quota for an OSD pool
:param context: request context.
:param pool: the name of the OSD pool
"""
return self.call(context,
self.make_msg('set_osd_pool_quota',
pool=pool, max_bytes=max_bytes,
max_objects=max_objects))
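# Illustrative usage sketch (not part of the original module): reading a
# pool's quota and then raising its byte limit. The pool name and the 10 GiB
# increment are assumptions for the example only.
#
#   rpcapi = ConductorAPI()
#   quota = rpcapi.get_osd_pool_quota(context, pool_name='cinder-volumes')
#   # quota is a dict of the form {"max_objects": num, "max_bytes": num}
#   rpcapi.set_osd_pool_quota(context, 'cinder-volumes',
#                             max_bytes=quota['max_bytes'] + 10 * 1024 ** 3)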
def get_ceph_primary_tier_size(self, context):
"""Get the size of the primary storage tier in the ceph cluster.
:param context: request context.
:returns: integer size in GB.
"""
return self.call(context,
self.make_msg('get_ceph_primary_tier_size'))
def get_ceph_tier_size(self, context, tier_name):
"""Get the size of a storage tier in the ceph cluster.
:param context: request context.
:param tier_name: name of the storage tier of interest.
:returns: integer size in GB.
"""
return self.call(context,
self.make_msg('get_ceph_tier_size',
tier_name=tier_name))
def get_ceph_cluster_df_stats(self, context):
"""Get the usage information for the ceph cluster.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_ceph_cluster_df_stats'))
def get_ceph_pools_df_stats(self, context):
"""Get the usage information for the ceph pools.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_ceph_pools_df_stats'))
def get_cinder_lvm_usage(self, context):
"""Get the usage information for the LVM pools.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_cinder_lvm_usage'))
def get_cinder_volume_type_names(self, context):
"""Get the names of all currently defined cinder volume types.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_cinder_volume_type_names'))
def kill_ceph_storage_monitor(self, context):
"""Stop the ceph storage monitor.
pmon will not restart it. This should only be used in an
upgrade/rollback
:param context: request context.
"""
return self.call(context,
self.make_msg('kill_ceph_storage_monitor'))
def update_dns_config(self, context):
"""Synchronously, have the conductor update the DNS configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_dns_config'))
def update_ntp_config(self, context, service_change=False):
"""Synchronously, have the conductor update the NTP configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_ntp_config',
service_change=service_change))
def update_ptp_config(self, context):
"""Synchronously, have the conductor update the PTP configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_ptp_config'))
def update_system_mode_config(self, context):
"""Synchronously, have the conductor update the system mode
configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_system_mode_config'))
def update_security_feature_config(self, context):
"""Synchronously, have the conductor update the security_feature
configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_security_feature_config'))
def update_oam_config(self, context):
"""Synchronously, have the conductor update the OAM configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_oam_config'))
def update_user_config(self, context):
"""Synchronously, have the conductor update the user configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_user_config'))
def update_storage_config(self, context, update_storage=False,
reinstall_required=False, reboot_required=True,
filesystem_list=None):
"""Synchronously, have the conductor update the storage configuration.
:param context: request context.
"""
return self.call(
context, self.make_msg(
'update_storage_config',
update_storage=update_storage,
reinstall_required=reinstall_required,
reboot_required=reboot_required,
filesystem_list=filesystem_list
)
)
def update_lvm_config(self, context):
"""Synchronously, have the conductor update the LVM configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_lvm_config'))
def update_ceph_base_config(self, context, personalities):
"""Synchronously, have the conductor update the configuration
for monitors and ceph.conf.
:param context: request context.
:param personalities: list of host personalities.
"""
return self.call(
context, self.make_msg(
'update_ceph_base_config',
personalities=personalities
)
)
def update_ceph_osd_config(self, context, host, stor_uuid, runtime_manifests):
"""Synchronously, have the conductor update the configuration
for an OSD.
:param context: request context.
:param host: a host to update OSDs on.
:param stor_uuid: uuid of a storage device
:param runtime_manifests: True if puppet manifests are to be applied at
runtime.
"""
return self.call(
context, self.make_msg(
'update_ceph_osd_config',
host=host,
stor_uuid=stor_uuid,
runtime_manifests=runtime_manifests
)
)
def update_drbd_config(self, context):
"""Synchronously, have the conductor update the drbd configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_drbd_config'))
def update_remotelogging_config(self, context, timeout=None):
"""Synchronously, have the conductor update the remotelogging
configuration.
:param context: request context.
:param timeout: RPC call timeout (optional)
"""
return self.call(context,
self.make_msg('update_remotelogging_config'), timeout=timeout)
def get_magnum_cluster_count(self, context):
"""Synchronously, have the conductor get magnum cluster count
configuration.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_magnum_cluster_count'))
def update_infra_config(self, context):
"""Synchronously, have the conductor update the infrastructure network
configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_infra_config'))
def update_lvm_cinder_config(self, context):
"""Synchronously, have the conductor update Cinder LVM on a controller.
:param context: request context.
"""
return self.call(context,
self.make_msg('update_lvm_cinder_config'))
def update_install_uuid(self, context, host_uuid, install_uuid):
"""Synchronously, have an agent update install_uuid on
a host.
:param context: request context.
:param host_uuid: host uuid to update the install_uuid
:param install_uuid: install_uuid
"""
return self.call(context,
self.make_msg('update_install_uuid',
host_uuid=host_uuid,
install_uuid=install_uuid))
def update_ceph_config(self, context, sb_uuid, services):
"""Synchronously, have the conductor update Ceph on a controller
:param context: request context
:param sb_uuid: uuid of the storage backend to apply the ceph config
:param services: list of services using Ceph.
"""
return self.call(context,
self.make_msg('update_ceph_config',
sb_uuid=sb_uuid,
services=services))
def update_ceph_external_config(self, context, sb_uuid, services):
"""Synchronously, have the conductor update External Ceph on a controller
:param context: request context
:param sb_uuid: uuid of the storage backend to apply the external ceph config
:param services: list of services using Ceph.
"""
return self.call(context,
self.make_msg('update_ceph_external_config',
sb_uuid=sb_uuid,
services=services))
def config_update_nova_local_backed_hosts(self, context, instance_backing):
"""Synchronously, have the conductor set the hosts with worker
functionality and with a certain nova-local instance backing to
config out-of-date.
:param context: request context
:param instance_backing: the host's instance backing
"""
return self.call(context,
self.make_msg('config_update_nova_local_backed_hosts',
instance_backing=instance_backing))
def update_external_cinder_config(self, context):
"""Synchronously, have the conductor update Cinder Exernal(shared)
on a controller.
:param context: request context.
"""
return self.call(context,
self.make_msg('update_external_cinder_config'))
def update_ceph_services(self, context, sb_uuid):
"""Synchronously, have the conductor update Ceph tier services
:param context: request context
:param sb_uuid: uuid of the storage backend to apply the service update.
"""
return self.call(context,
self.make_msg('update_ceph_services', sb_uuid=sb_uuid))
def get_k8s_namespaces(self, context):
"""Synchronously, get Kubernetes namespaces
:returns: list of namespaces
"""
return self.call(context,
self.make_msg('get_k8s_namespaces'))
def report_config_status(self, context, iconfig,
status, error=None):
""" Callback from Sysinv Agent on manifest apply success or failure
Finalize configuration after manifest apply successfully or perform
cleanup, log errors and raise alarms in case of failures.
:param context: request context
:param iconfig: configuration context
:param status: operation status
:param error: serialized exception as a dict of type:
error = {
'class': str(ex.__class__.__name__),
'module': str(ex.__class__.__module__),
'message': six.text_type(ex),
'tb': traceback.format_exception(*ex),
'args': ex.args,
'kwargs': ex.kwargs
}
The iconfig context is expected to contain a valid REPORT_TOPIC key,
so that we can correctly identify the set of manifests executed.
"""
return self.call(context,
self.make_msg('report_config_status',
iconfig=iconfig,
status=status,
error=error))
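# Illustrative usage sketch (not part of the original module): an agent-side
# caller reporting a manifest apply failure might build the error dict in the
# shape documented above. The apply_manifest() helper, the status string and
# the iconfig value are assumed placeholders; traceback.format_exc() is used
# here as a simplification of the documented 'tb' field.
#
#   try:
#       apply_manifest(iconfig)
#   except Exception as ex:
#       error = {
#           'class': str(ex.__class__.__name__),
#           'module': str(ex.__class__.__module__),
#           'message': six.text_type(ex),
#           'tb': traceback.format_exc(),
#           'args': ex.args,
#           'kwargs': getattr(ex, 'kwargs', {}),
#       }
#       ConductorAPI().report_config_status(context, iconfig,
#                                           status='failed', error=error)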
def update_cpu_config(self, context, host_uuid):
"""Synchronously, have the conductor update the cpu
configuration.
:param context: request context.
:param host_uuid: host unique uuid
"""
return self.call(context, self.make_msg('update_cpu_config',
host_uuid=host_uuid))
def iconfig_update_by_ihost(self, context,
ihost_uuid, imsg_dict):
"""Create or update iconfig for an ihost with the supplied data.
This method allows records for iconfig for ihost to be updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param imsg_dict: inventory message dict
:returns: pass or fail
"""
return self.call(context,
self.make_msg('iconfig_update_by_ihost',
ihost_uuid=ihost_uuid,
imsg_dict=imsg_dict))
def iinterface_get_providernets(self,
context,
pn_names=None):
"""Call neutron to get PN MTUs based on PN names
This method does not update any records in the db
:param context: an admin context
:param pn_names: a list of providernet names
:returns: pass or fail
"""
pn_dict = self.call(context,
self.make_msg('iinterface_get_providernets',
pn_names=pn_names))
return pn_dict
def mgmt_ip_set_by_ihost(self,
context,
ihost_uuid,
mgmt_ip):
"""Call sysinv to update host mgmt_ip (removes previous entry if
necessary)
:param context: an admin context
:param ihost_uuid: ihost uuid
:param mgmt_ip: mgmt_ip
:returns: Address
"""
return self.call(context,
self.make_msg('mgmt_ip_set_by_ihost',
ihost_uuid=ihost_uuid,
mgmt_ip=mgmt_ip))
def infra_ip_set_by_ihost(self,
context,
ihost_uuid,
infra_ip):
"""Call sysinv to update host infra_ip (removes previous entry if
necessary)
:param context: an admin context
:param ihost_uuid: ihost uuid
:param infra_ip: infra_ip
:returns: Address
"""
return self.call(context,
self.make_msg('infra_ip_set_by_ihost',
ihost_uuid=ihost_uuid,
infra_ip=infra_ip))
def neutron_extension_list(self, context):
"""
Send a request to neutron to query the supported extension list.
"""
return self.call(context, self.make_msg('neutron_extension_list'))
def neutron_bind_interface(self, context, host_uuid, interface_uuid,
network_type, providernets, mtu,
vlans=None, test=False):
"""
Send a request to neutron to bind an interface to a set of provider
networks, and inform neutron of some key attributes of the interface
for semantic checking purposes.
"""
return self.call(context,
self.make_msg('neutron_bind_interface',
host_uuid=host_uuid,
interface_uuid=interface_uuid,
network_type=network_type,
providernets=providernets,
mtu=mtu,
vlans=vlans,
test=test))
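# Illustrative usage sketch (not part of the original module): binding a data
# interface to two provider networks could look like the call below. The
# UUIDs, network type, provider network list and MTU are example values only,
# not values taken from this module.
#
#   ConductorAPI().neutron_bind_interface(
#       context,
#       host_uuid='11111111-2222-3333-4444-555555555555',
#       interface_uuid='66666666-7777-8888-9999-000000000000',
#       network_type='data',
#       providernets='physnet0,physnet1',
#       mtu=1500)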
def neutron_unbind_interface(self, context, host_uuid, interface_uuid):
"""
Send a request to neutron to unbind an interface from a set of
provider networks.
"""
return self.call(context,
self.make_msg('neutron_unbind_interface',
host_uuid=host_uuid,
interface_uuid=interface_uuid))
def vim_host_add(self, context, api_token, ihost_uuid,
hostname, subfunctions, administrative,
operational, availability,
subfunction_oper, subfunction_avail, timeout):
"""
Asynchronously, notify VIM of host add
"""
return self.cast(context,
self.make_msg('vim_host_add',
api_token=api_token,
ihost_uuid=ihost_uuid,
hostname=hostname,
personality=subfunctions,
administrative=administrative,
operational=operational,
availability=availability,
subfunction_oper=subfunction_oper,
subfunction_avail=subfunction_avail,
timeout=timeout))
def mtc_host_add(self, context, mtc_address, mtc_port, ihost_mtc_dict):
"""
Asynchronously, notify mtce of host add
"""
return self.cast(context,
self.make_msg('mtc_host_add',
mtc_address=mtc_address,
mtc_port=mtc_port,
ihost_mtc_dict=ihost_mtc_dict))
def notify_subfunctions_config(self, context,
ihost_uuid, ihost_notify_dict):
"""
Synchronously, notify sysinv of host subfunctions config status
"""
return self.call(context,
self.make_msg('notify_subfunctions_config',
ihost_uuid=ihost_uuid,
ihost_notify_dict=ihost_notify_dict))
def ilvg_get_nova_ilvg_by_ihost(self,
context,
ihost_uuid):
"""
Gets the nova ilvg by ihost.
Returns the nova ilvg if it has been added to the host, otherwise an
empty list.
"""
ilvgs = self.call(context,
self.make_msg('ilvg_get_nova_ilvg_by_ihost',
ihost_uuid=ihost_uuid))
return ilvgs
def get_platform_interfaces(self, context, ihost_id):
"""Synchronously, have a agent collect platform interfaces for this
ihost.
Gets the mgmt, infra interface names and numa node
:param context: request context.
:param ihost_id: id of this host
:returns: a list of interfaces and their associated numa nodes.
"""
return self.call(context,
self.make_msg('platform_interfaces',
ihost_id=ihost_id))
def ibm_deprovision_by_ihost(self, context, ihost_uuid, ibm_msg_dict):
"""Update ihost upon notification of board management controller
deprovisioning.
This method also allows a dictionary of values to be passed in to
afford additional controls, if and as needed.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ibm_msg_dict: values for additional controls or changes
:returns: pass or fail
"""
return self.call(context,
self.make_msg('ibm_deprovision_by_ihost',
ihost_uuid=ihost_uuid,
ibm_msg_dict=ibm_msg_dict))
def configure_ttys_dcd(self, context, uuid, ttys_dcd):
"""Synchronously, have a conductor configure the dcd.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- the agent on the host with the matching uuid updates dcd
:param context: request context.
:param uuid: the host uuid
:param ttys_dcd: the flag to enable/disable dcd
"""
LOG.debug("ConductorApi.configure_ttys_dcd: sending (%s %s) to "
"conductor" % (uuid, ttys_dcd))
return self.call(context,
self.make_msg('configure_ttys_dcd',
uuid=uuid, ttys_dcd=ttys_dcd))
def get_host_ttys_dcd(self, context, ihost_id):
"""Synchronously, have a agent collect carrier detect state for this
ihost.
:param context: request context.
:param ihost_id: id of this host
:returns: ttys_dcd.
"""
return self.call(context,
self.make_msg('get_host_ttys_dcd',
ihost_id=ihost_id))
def start_import_load(self, context, path_to_iso, path_to_sig):
"""Synchronously, mount the ISO and validate the load for import
:param context: request context.
:param path_to_iso: the file path of the iso on this host
:param path_to_sig: the file path of the iso's detached signature on
this host
:returns: the newly create load object.
"""
return self.call(context,
self.make_msg('start_import_load',
path_to_iso=path_to_iso,
path_to_sig=path_to_sig))
def import_load(self, context, path_to_iso, new_load):
"""Asynchronously, import a load and add it to the database
:param context: request context.
:param path_to_iso: the file path of the iso on this host
:param new_load: the load object
:returns: none.
"""
return self.cast(context,
self.make_msg('import_load',
path_to_iso=path_to_iso,
new_load=new_load))
def delete_load(self, context, load_id):
"""Asynchronously, cleanup a load from both controllers
:param context: request context.
:param load_id: id of load to be deleted
:returns: none.
"""
return self.cast(context,
self.make_msg('delete_load',
load_id=load_id))
def finalize_delete_load(self, context):
"""Asynchronously, delete the load from the database
:param context: request context.
:returns: none.
"""
return self.cast(context,
self.make_msg('finalize_delete_load'))
def load_update_by_host(self, context, ihost_id, version):
"""Update the host_upgrade table with the running SW_VERSION
:param context: request context.
:param ihost_id: the host id
:param version: the SW_VERSION from the host
:returns: none.
"""
return self.call(context,
self.make_msg('load_update_by_host',
ihost_id=ihost_id, sw_version=version))
def update_service_config(self, context, service=None, do_apply=False):
"""Synchronously, have the conductor update the service parameter.
:param context: request context.
:param do_apply: apply the newly created manifests.
"""
return self.call(context, self.make_msg('update_service_config',
service=service,
do_apply=do_apply))
def start_upgrade(self, context, upgrade):
"""Asynchronously, have the conductor start the upgrade
:param context: request context.
:param upgrade: the upgrade object.
"""
return self.cast(context, self.make_msg('start_upgrade',
upgrade=upgrade))
def activate_upgrade(self, context, upgrade):
"""Asynchronously, have the conductor perform the upgrade activation.
:param context: request context.
:param upgrade: the upgrade object.
"""
return self.cast(context, self.make_msg('activate_upgrade',
upgrade=upgrade))
def complete_upgrade(self, context, upgrade, state):
"""Asynchronously, have the conductor complete the upgrade.
:param context: request context.
:param upgrade: the upgrade object.
:param state: the state of the upgrade before completing
"""
return self.cast(context, self.make_msg('complete_upgrade',
upgrade=upgrade, state=state))
def abort_upgrade(self, context, upgrade):
"""Synchronously, have the conductor abort the upgrade.
:param context: request context.
:param upgrade: the upgrade object.
"""
return self.call(context, self.make_msg('abort_upgrade',
upgrade=upgrade))
def complete_simplex_backup(self, context, success):
"""Asynchronously, complete the simplex upgrade start process
:param context: request context.
:param success: If the create_simplex_backup call completed
"""
return self.cast(context, self.make_msg('complete_simplex_backup',
success=success))
def get_system_health(self, context, force=False, upgrade=False):
"""
Performs a system health check.
:param context: request context.
:param force: set to true to ignore minor and warning alarms
:param upgrade: set to true to perform an upgrade health check
"""
return self.call(context,
self.make_msg('get_system_health',
force=force, upgrade=upgrade))
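# Illustrative usage sketch (not part of the original module): a pre-upgrade
# health query might pass upgrade=True to include the upgrade-specific checks;
# the exact structure of the returned report is not shown in this module, so
# it is left opaque here.
#
#   health_report = ConductorAPI().get_system_health(
#       context, force=False, upgrade=True)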
def reserve_ip_for_first_storage_node(self, context):
"""
Reserve ip address for the first storage node for Ceph monitor
when installing Ceph as a second backend
:param context: request context.
"""
self.call(context,
self.make_msg('reserve_ip_for_first_storage_node'))
def reserve_ip_for_cinder(self, context):
"""
Reserve ip address for Cinder's services
:param context: request context.
"""
self.call(context,
self.make_msg('reserve_ip_for_cinder'))
def update_sdn_controller_config(self, context):
"""Synchronously, have the conductor update the SDN controller config.
:param context: request context.
"""
return self.call(context,
self.make_msg('update_sdn_controller_config'))
def update_sdn_enabled(self, context):
"""Synchronously, have the conductor update the SDN enabled flag
:param context: request context.
"""
return self.call(context,
self.make_msg('update_sdn_enabled'))
def update_vswitch_type(self, context):
"""Synchronously, have the conductor update the system vswitch type
:param context: request context.
"""
return self.call(context,
self.make_msg('update_vswitch_type'))
def create_barbican_secret(self, context, name, payload):
"""Calls Barbican API to create a secret
:param context: request context.
:param name: secret name
:param payload: secret payload
"""
return self.call(context,
self.make_msg('create_barbican_secret',
name=name,
payload=payload))
def delete_barbican_secret(self, context, name):
"""Calls Barbican API to delete a secret
:param context: request context.
:param name: secret name
"""
return self.call(context,
self.make_msg('delete_barbican_secret',
name=name))
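# Illustrative usage sketch (not part of the original module): creating and
# later removing a Barbican secret through the conductor. The secret name and
# payload are placeholders.
#
#   rpcapi = ConductorAPI()
#   rpcapi.create_barbican_secret(context, name='example-bm-password',
#                                 payload='example-payload')
#   # ... later, once the secret is no longer needed:
#   rpcapi.delete_barbican_secret(context, name='example-bm-password')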
def update_snmp_config(self, context):
"""Synchronously, have a conductor configure the SNMP configuration.
Does the following tasks:
- Update puppet hiera configuration file and apply run time manifest
:param context: request context.
"""
return self.call(context,
self.make_msg('update_snmp_config'))
def ceph_manager_config_complete(self, context, applied_config):
self.call(context,
self.make_msg('ceph_service_config_complete',
applied_config=applied_config))
def get_controllerfs_lv_sizes(self, context):
return self.call(context,
self.make_msg('get_controllerfs_lv_sizes'))
def get_cinder_gib_pv_sizes(self, context):
return self.call(context,
self.make_msg('get_cinder_gib_pv_sizes'))
def get_cinder_partition_size(self, context):
return self.call(context,
self.make_msg('get_cinder_partition_size'))
def validate_emc_removal(self, context):
"""
Check that it is safe to remove the EMC SAN
"""
return self.call(context, self.make_msg('validate_emc_removal'))
def validate_hpe3par_removal(self, context, backend):
"""
Check that it is safe to remove the HPE 3PAR storage array
"""
return self.call(context,
self.make_msg('validate_hpe3par_removal',
backend=backend))
def validate_hpelefthand_removal(self, context):
"""
Check that it is safe to remove the HPE Lefthand storage array
"""
return self.call(context, self.make_msg('validate_hpelefthand_removal'))
def region_has_ceph_backend(self, context):
"""
Send a request to primary region to see if ceph backend is configured
"""
return self.call(context, self.make_msg('region_has_ceph_backend'))
def get_system_tpmconfig(self, context):
"""
Retrieve the system tpmconfig object
"""
return self.call(context, self.make_msg('get_system_tpmconfig'))
def get_tpmdevice_by_host(self, context, host_id):
"""
Retrieve the tpmdevice object for this host
"""
return self.call(context,
self.make_msg('get_tpmdevice_by_host',
host_id=host_id))
def update_tpm_config(self, context, tpm_context):
"""Synchronously, have the conductor update the TPM config.
:param context: request context.
:param tpm_context: TPM object context
"""
return self.call(context,
self.make_msg('update_tpm_config',
tpm_context=tpm_context))
def update_tpm_config_manifests(self, context, delete_tpm_file=None):
"""Synchronously, have the conductor update the TPM config manifests.
:param context: request context.
:param delete_tpm_file: tpm file to delete, optional
"""
return self.call(context,
self.make_msg('update_tpm_config_manifests',
delete_tpm_file=delete_tpm_file))
def tpm_config_update_by_host(self, context,
host_uuid, response_dict):
"""Get TPM configuration status from Agent host.
This method allows for alarms to be raised for hosts if TPM
is not configured properly.
:param context: an admin context
:param host_uuid: host unique id
:param response_dict: configuration status
:returns: pass or fail
"""
return self.call(
context,
self.make_msg('tpm_config_update_by_host',
host_uuid=host_uuid,
response_dict=response_dict))
def tpm_device_update_by_host(self, context,
host_uuid, tpmdevice_dict):
"""Synchronously , have the conductor create or update
a tpmdevice per host.
:param context: request context.
:param host_uuid: uuid or id of the host
:param tpmdevice_dict: a dictionary of tpm device attributes
:returns: tpmdevice object
"""
return self.call(
context,
self.make_msg('tpm_device_update_by_host',
host_uuid=host_uuid,
tpmdevice_dict=tpmdevice_dict))
def cinder_prepare_db_for_volume_restore(self, context):
"""
Send a request to cinder to remove all volume snapshots and set all
volumes to error state in preparation for restoring all volumes.
This is needed for cinder disk replacement.
"""
return self.call(context,
self.make_msg('cinder_prepare_db_for_volume_restore'))
def cinder_has_external_backend(self, context):
"""
Check if cinder has loosely coupled external backends.
These are the possible backends: emc_vnx, hpe3par, hpelefthand
:param context: request context.
"""
return self.call(context,
self.make_msg('cinder_has_external_backend'))
def get_ceph_object_pool_name(self, context):
"""
Get Rados Gateway object data pool name
:param context: request context.
"""
return self.call(context,
self.make_msg('get_ceph_object_pool_name'))
def get_software_upgrade_status(self, context):
"""
Software upgrade status is needed by ceph-manager to take ceph-specific
upgrade actions.
This rpcapi function is added to signal that conductor's
get_software_upgrade_status function is used by an RPC client.
ceph-manager, however, doesn't call rpcapi.get_software_upgrade_status;
instead it uses oslo_messaging to construct a call on conductor's topic
for this function. The reason is that sysinv uses an old version of the
openstack common and messaging libraries, incompatible with the one used
by ceph-manager.
"""
return self.call(context,
self.make_msg('get_software_upgrade_status'))
def update_firewall_config(self, context, ip_version, contents):
"""Synchronously, have the conductor update the firewall config
and manifest.
:param context: request context.
:param ip_version: IP version.
:param contents: file content of custom firewall rules.
"""
return self.call(context,
self.make_msg('update_firewall_config',
ip_version=ip_version,
contents=contents))
def distribute_ceph_external_config(self, context, ceph_conf_filename):
"""Synchronously, have the conductor update the Ceph configuration
file for external cluster.
:param context: request context.
:param ceph_conf_filename: Ceph conf file
"""
return self.call(context,
self.make_msg('distribute_ceph_external_config',
ceph_conf_filename=ceph_conf_filename))
def store_ceph_external_config(self, context, contents, ceph_conf_filename):
"""Synchronously, have the conductor to write the ceph config file content
to /opt/platform/config
:param context: request context.
:param contents: file content of the Ceph conf file
:param ceph_conf_filename: Ceph conf file
"""
return self.call(context,
self.make_msg('store_ceph_external_config',
contents=contents,
ceph_conf_filename=ceph_conf_filename))
def update_partition_information(self, context, partition_data):
"""Synchronously, have the conductor update partition information.
:param context: request context.
:param partition_data: dict containing the host UUID, the partition UUID
and the partition information to update
"""
return self.call(context,
self.make_msg('update_partition_information',
partition_data=partition_data))
def install_license_file(self, context, contents):
"""Sychronously, have the conductor install the license file.
:param context: request context.
:param contents: content of license file.
"""
return self.call(context,
self.make_msg('install_license_file',
contents=contents))
def config_certificate(self, context, pem_contents, config_dict):
"""Synchronously, have the conductor configure the certificate.
:param context: request context.
:param pem_contents: contents of certificate in pem format.
:param config_dict: dictionary of certificate config attributes.
"""
return self.call(context,
self.make_msg('config_certificate',
pem_contents=pem_contents,
config_dict=config_dict,
))
def get_helm_chart_namespaces(self, context, chart_name):
"""Get supported chart namespaces.
This method retrieves the namespace supported by a given chart.
:param context: request context.
:param chart_name: name of the chart
:returns: list of supported namespaces for which overrides may be
provided.
"""
return self.call(context,
self.make_msg('get_helm_chart_namespaces',
chart_name=chart_name))
def get_helm_chart_overrides(self, context, chart_name, cnamespace=None):
"""Get the overrides for a supported chart.
:param context: request context.
:param chart_name: name of a supported chart
:param cnamespace: (optional) namespace
:returns: dict of overrides.
"""
return self.call(context,
self.make_msg('get_helm_chart_overrides',
chart_name=chart_name,
cnamespace=cnamespace))
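# Illustrative usage sketch (not part of the original module): fetching the
# namespaces a chart supports and then its overrides for one of them. The
# chart name and namespace below are assumed example values.
#
#   rpcapi = ConductorAPI()
#   namespaces = rpcapi.get_helm_chart_namespaces(context, 'nova')
#   overrides = rpcapi.get_helm_chart_overrides(context, 'nova',
#                                               cnamespace='openstack')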
def get_helm_application_namespaces(self, context, app_name):
"""Get supported application namespaces.
:param app_name: name of the bundle of charts required to support an
application
:returns: dict of charts and the supported namespaces for which
overrides may be provided.
"""
return self.call(context,
self.make_msg('get_helm_application_namespaces',
app_name=app_name))
def get_helm_application_overrides(self, context, app_name, cnamespace=None):
"""Get the overrides for a supported set of charts.
:param context: request context.
:param app_name: name of a supported application (set of charts)
:param cnamespace: (optional) namespace
:returns: dict of overrides.
"""
return self.call(context,
self.make_msg('get_helm_application_overrides',
app_name=app_name,
cnamespace=cnamespace))
def merge_overrides(self, context, file_overrides=[], set_overrides=[]):
"""Merge the file and set overrides into a single chart overrides.
:param context: request context.
:param file_overrides: (optional) list of overrides from files
:param set_overrides: (optional) list of parameter overrides
:returns: merged overrides string
"""
return self.call(context,
self.make_msg('merge_overrides',
file_overrides=file_overrides,
set_overrides=set_overrides))
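# Illustrative usage sketch (not part of the original module): merging the
# contents of a user-supplied values file with --set style parameters into a
# single overrides string. Both inputs are assumed example values.
#
#   merged = ConductorAPI().merge_overrides(
#       context,
#       file_overrides=['conductor:\n  workers: 2\n'],
#       set_overrides=['conductor.workers=3'])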
def update_kubernetes_label(self, context, host_uuid, label_dict):
"""Synchronously, have the conductor update kubernetes label.
:param context: request context.
:param host_uuid: uuid or id of the host
:param label_dict: a dictionary of kubernetes labels
"""
return self.call(context,
self.make_msg('update_kubernetes_label',
host_uuid=host_uuid,
label_dict=label_dict))
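# Illustrative usage sketch (not part of the original module): labelling a
# host for kubernetes scheduling; the host uuid and label key/value are
# placeholders only.
#
#   ConductorAPI().update_kubernetes_label(
#       context,
#       host_uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
#       label_dict={'openstack-compute-node': 'enabled'})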
def update_host_memory(self, context, host_uuid):
"""Asynchronously, have a conductor update the host memory
:param context: request context.
:param host_uuid: uuid or id of the host.
"""
LOG.info("ConductorApi.update_host_memory: sending"
" host memory update request to conductor")
return self.cast(context, self.make_msg('update_host_memory',
host_uuid=host_uuid))
def update_fernet_keys(self, context, keys):
"""Synchronously, have the conductor update fernet keys.
:param context: request context.
:param keys: a list of fernet keys
"""
return self.call(context, self.make_msg('update_fernet_keys',
keys=keys))
def get_fernet_keys(self, context, key_id=None):
"""Synchronously, have the conductor to retrieve fernet keys.
:param context: request context.
:param key_id: (optional)
:returns: a list of fernet keys.
"""
return self.call(context, self.make_msg('get_fernet_keys',
key_id=key_id))
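# Illustrative usage sketch (not part of the original module): pushing a new
# set of fernet keys and reading them back. The structure of each key entry
# and the key material are assumptions for illustration only.
#
#   rpcapi = ConductorAPI()
#   rpcapi.update_fernet_keys(context, keys=[{'id': 0, 'key': 'example=='}])
#   keys = rpcapi.get_fernet_keys(context)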
def perform_app_upload(self, context, rpc_app, tarfile):
"""Handle application upload request
:param context: request context.
:param rpc_app: data object provided in the rpc request
:param tarfile: location of application tarfile to be extracted
"""
return self.cast(context,
self.make_msg('perform_app_upload',
rpc_app=rpc_app,
tarfile=tarfile))
def perform_app_apply(self, context, rpc_app, app_not_already_applied):
"""Handle application apply request
:param context: request context.
:param rpc_app: data object provided in the rpc request
:param app_not_already_applied: app not already successfully applied
"""
return self.cast(context,
self.make_msg(
'perform_app_apply',
rpc_app=rpc_app,
app_not_already_applied=app_not_already_applied))
def perform_app_remove(self, context, rpc_app):
"""Handle application remove request
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self.cast(context,
self.make_msg('perform_app_remove',
rpc_app=rpc_app))
def perform_app_delete(self, context, rpc_app):
"""Handle application delete request
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self.call(context,
self.make_msg('perform_app_delete',
rpc_app=rpc_app))
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
"""
Client side of the conductor RPC API.
"""
from sysinv.objects import base as objects_base
import sysinv.openstack.common.rpc.proxy
from sysinv.openstack.common import log
LOG = log.getLogger(__name__)
MANAGER_TOPIC = 'sysinv.conductor_manager'
class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
"""Client side of the conductor RPC API.
API version history:
1.0 - Initial version.
1.1 - Used for R5
"""
RPC_API_VERSION = '1.1'
def __init__(self, topic=None):
if topic is None:
topic = MANAGER_TOPIC
super(ConductorAPI, self).__init__(
topic=topic,
serializer=objects_base.SysinvObjectSerializer(),
default_version='1.0',
version_cap=self.RPC_API_VERSION)
def handle_dhcp_lease(self, context, tags, mac, ip_address, cid=None):
"""Synchronously, have a conductor handle a DHCP lease update.
Handling depends on the interface:
- management interface: creates an ihost
- infrastructure interface: just updates the dnsmasq config
:param context: request context.
:param tags: specifies the interface type (mgmt or infra)
:param mac: MAC for the lease
:param ip_address: IP address for the lease
:param cid: Client ID for the lease
"""
return self.call(context,
self.make_msg('handle_dhcp_lease',
tags=tags,
mac=mac,
ip_address=ip_address,
cid=cid))
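# Illustrative usage sketch (not part of the original module): a DHCP hook on
# the active controller could forward a new management lease like this. The
# tag, MAC address and IP address are placeholders.
#
#   ConductorAPI().handle_dhcp_lease(
#       context, tags='mgmt', mac='08:00:27:ab:cd:ef',
#       ip_address='192.168.204.10', cid=None)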
def create_ihost(self, context, values):
"""Synchronously, have a conductor create an ihost.
Create an ihost in the database and return an object.
:param context: request context.
:param values: dictionary with initial values for new ihost object
:returns: created ihost object, including all fields.
"""
return self.call(context,
self.make_msg('create_ihost',
values=values))
def update_ihost(self, context, ihost_obj):
"""Synchronously, have a conductor update the ihosts's information.
Update the ihost's information in the database and return an object.
:param context: request context.
:param ihost_obj: a changed (but not saved) ihost object.
:returns: updated ihost object, including all fields.
"""
return self.call(context,
self.make_msg('update_ihost',
ihost_obj=ihost_obj))
def configure_ihost(self, context, host,
do_worker_apply=False):
"""Synchronously, have a conductor configure an ihost.
Does the following tasks:
- Update puppet hiera configuration files for the ihost.
- Add (or update) a host entry in the dnsmasq.conf file.
- Set up PXE configuration to run installer
:param context: request context.
:param host: an ihost object.
:param do_worker_apply: apply the newly created worker manifests.
"""
return self.call(context,
self.make_msg('configure_ihost',
host=host,
do_worker_apply=do_worker_apply))
# TODO(CephPoolsDecouple): remove
def configure_osd_pools(self, context, ceph_backend=None, new_pool_size=None, new_pool_min_size=None):
"""Configure or update configuration of the OSD pools.
If none of the optionals are provided then all pools are updated based on DB configuration.
:param context: an admin context.
:param ceph_backend: Optional ceph backend object of a tier
:param new_pool_size: Optional override for replication number.
:param new_pool_min_size: Optional override for minimum replication number.
"""
return self.call(context,
self.make_msg('configure_osd_pools',
ceph_backend=ceph_backend,
new_pool_size=new_pool_size,
new_pool_min_size=new_pool_min_size))
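# Illustrative usage sketch (not part of the original module): forcing a
# replication update on all pools of a given tier's backend. The ceph_backend
# object is assumed to have been fetched elsewhere, and the override numbers
# are example values only.
#
#   ConductorAPI().configure_osd_pools(
#       context, ceph_backend=ceph_backend, new_pool_size=2,
#       new_pool_min_size=1)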
def remove_host_config(self, context, host_uuid):
"""Synchronously, have a conductor remove configuration for a host.
Does the following tasks:
- Remove the hiera config files for the host.
:param context: request context.
:param host_uuid: uuid of the host.
"""
return self.call(context,
self.make_msg('remove_host_config',
host_uuid=host_uuid))
def unconfigure_ihost(self, context, ihost_obj):
"""Synchronously, have a conductor unconfigure an ihost.
Does the following tasks:
- Remove hiera config files for the ihost.
- Remove the host entry from the dnsmasq.conf file.
- Remove the PXE configuration
:param context: request context.
:param ihost_obj: an ihost object.
"""
return self.call(context,
self.make_msg('unconfigure_ihost',
ihost_obj=ihost_obj))
def create_controller_filesystems(self, context, rootfs_device):
"""Synchronously, create the controller file systems.
Does the following tasks:
- queries OS for root disk size
- creates the controller file systems.
- queries system to get region info for img_conversion_size setup.
:param context: request context.
:param rootfs_device: the root disk device
"""
return self.call(context,
self.make_msg('create_controller_filesystems',
rootfs_device=rootfs_device))
def get_ihost_by_macs(self, context, ihost_macs):
"""Finds ihost db entry based upon the mac list
This method returns an ihost if it matches a mac
:param context: an admin context
:param ihost_macs: list of mac addresses
:returns: ihost object, including all fields.
"""
return self.call(context,
self.make_msg('get_ihost_by_macs',
ihost_macs=ihost_macs))
def get_ihost_by_hostname(self, context, ihost_hostname):
"""Finds ihost db entry based upon the ihost hostname
This method returns an ihost if it matches the
hostname.
:param context: an admin context
:param ihost_hostname: ihost hostname
:returns: ihost object, including all fields.
"""
return self.call(context,
self.make_msg('get_ihost_by_hostname',
ihost_hostname=ihost_hostname))
def iport_update_by_ihost(self, context,
ihost_uuid, inic_dict_array):
"""Create iports for an ihost with the supplied data.
This method allows records for iports for ihost to be created.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param inic_dict_array: initial values for iport objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('iport_update_by_ihost',
ihost_uuid=ihost_uuid,
inic_dict_array=inic_dict_array))
def lldp_agent_update_by_host(self, context,
host_uuid, agent_dict_array):
"""Create lldp_agents for an ihost with the supplied data.
This method allows records for lldp_agents for a host to be created.
:param context: an admin context
:param host_uuid: host uuid unique id
:param agent_dict_array: initial values for lldp_agent objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('lldp_agent_update_by_host',
host_uuid=host_uuid,
agent_dict_array=agent_dict_array))
def lldp_neighbour_update_by_host(self, context,
host_uuid, neighbour_dict_array):
"""Create lldp_neighbours for an ihost with the supplied data.
This method allows records for lldp_neighbours for a host to be
created.
:param context: an admin context
:param host_uuid: host uuid unique id
:param neighbour_dict_array: initial values for lldp_neighbour objects
:returns: pass or fail
"""
return self.call(
context,
self.make_msg('lldp_neighbour_update_by_host',
host_uuid=host_uuid,
neighbour_dict_array=neighbour_dict_array))
def pci_device_update_by_host(self, context,
host_uuid, pci_device_dict_array):
"""Create pci_devices for an ihost with the supplied data.
This method allows records for pci_devices for ihost to be created.
:param context: an admin context
:param host_uuid: ihost uuid unique id
:param pci_device_dict_array: initial values for device objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('pci_device_update_by_host',
host_uuid=host_uuid,
pci_device_dict_array=pci_device_dict_array))
def inumas_update_by_ihost(self, context,
ihost_uuid, inuma_dict_array):
"""Create inumas for an ihost with the supplied data.
This method allows records for inumas for ihost to be created.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param inuma_dict_array: initial values for inuma objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('inumas_update_by_ihost',
ihost_uuid=ihost_uuid,
inuma_dict_array=inuma_dict_array))
def icpus_update_by_ihost(self, context,
ihost_uuid, icpu_dict_array,
force_grub_update,
):
"""Create cpus for an ihost with the supplied data.
This method allows records for cpus for ihost to be created.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param icpu_dict_array: initial values for cpu objects
:param force_grub_update: bool value to force grub update
:returns: pass or fail
"""
return self.call(context,
self.make_msg('icpus_update_by_ihost',
ihost_uuid=ihost_uuid,
icpu_dict_array=icpu_dict_array,
force_grub_update=force_grub_update))
def imemory_update_by_ihost(self, context,
ihost_uuid, imemory_dict_array,
force_update=False):
"""Create or update memory for an ihost with the supplied data.
This method allows records for memory for ihost to be created,
or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param imemory_dict_array: initial values for memory objects
:param force_update: force a memory update
:returns: pass or fail
"""
return self.call(context,
self.make_msg('imemory_update_by_ihost',
ihost_uuid=ihost_uuid,
imemory_dict_array=imemory_dict_array,
force_update=force_update))
def idisk_update_by_ihost(self, context,
ihost_uuid, idisk_dict_array):
"""Create or update disk for an ihost with the supplied data.
This method allows records for disk for ihost to be created,
or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param idisk_dict_array: initial values for disk objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('idisk_update_by_ihost',
ihost_uuid=ihost_uuid,
idisk_dict_array=idisk_dict_array))
def ilvg_update_by_ihost(self, context,
ihost_uuid, ilvg_dict_array):
"""Create or update local volume group for an ihost with the supplied
data.
This method allows records for a local volume group for ihost to be
created, or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ilvg_dict_array: initial values for local volume group objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('ilvg_update_by_ihost',
ihost_uuid=ihost_uuid,
ilvg_dict_array=ilvg_dict_array))
def ipv_update_by_ihost(self, context,
ihost_uuid, ipv_dict_array):
"""Create or update physical volume for an ihost with the supplied
data.
This method allows records for a physical volume for ihost to be
created, or updated.
R5 - Moved to version 1.1 as partition schema is no longer applicable
to R4
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ipv_dict_array: initial values for physical volume objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('ipv_update_by_ihost',
ihost_uuid=ihost_uuid,
ipv_dict_array=ipv_dict_array),
version='1.1')
def ipartition_update_by_ihost(self, context,
ihost_uuid, ipart_dict_array):
"""Create or update partitions for an ihost with the supplied data.
This method allows records for a host's partition to be created or
updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ipart_dict_array: initial values for partition objects
:returns: pass or fail
"""
return self.call(context,
self.make_msg('ipartition_update_by_ihost',
ihost_uuid=ihost_uuid,
ipart_dict_array=ipart_dict_array))
def update_partition_config(self, context, partition):
"""Asynchronously, have a conductor configure the physical volume
partitions.
:param context: request context.
:param partition: dict with partition details.
"""
LOG.debug("ConductorApi.update_partition_config: sending"
" partition to conductor")
return self.cast(context, self.make_msg('update_partition_config',
partition=partition))
def iplatform_update_by_ihost(self, context,
ihost_uuid, imsg_dict):
"""Create or update memory for an ihost with the supplied data.
This method allows records for memory for ihost to be created,
or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param imsg_dict: inventory message dict
:returns: pass or fail
"""
return self.call(context,
self.make_msg('iplatform_update_by_ihost',
ihost_uuid=ihost_uuid,
imsg_dict=imsg_dict))
def upgrade_ihost(self, context, host, load):
"""Synchronously, have a conductor upgrade a host.
Does the following tasks:
- Update the pxelinux.cfg file.
:param context: request context.
:param host: an ihost object.
:param load: a load object.
"""
return self.call(context,
self.make_msg('upgrade_ihost_pxe_config', host=host, load=load))
def configure_isystemname(self, context, systemname):
"""Synchronously, have a conductor configure the system name.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each update their /etc/platform/motd.system
:param context: request context.
:param systemname: the systemname
"""
LOG.debug("ConductorApi.configure_isystemname: sending"
" systemname to conductor")
return self.call(context,
self.make_msg('configure_isystemname',
systemname=systemname))
def configure_system_https(self, context):
"""Synchronously, have a conductor configure the system https/http
configuration.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the https/http selected manifests
:param context: request context.
"""
LOG.debug("ConductorApi.configure_system_https/http: sending"
" configure_system_https to conductor")
return self.call(context, self.make_msg('configure_system_https'))
def configure_system_timezone(self, context):
"""Synchronously, have a conductor configure the system timezone.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the timezone manifest
:param context: request context.
"""
LOG.debug("ConductorApi.configure_system_timezone: sending"
" system_timezone to conductor")
return self.call(context, self.make_msg('configure_system_timezone'))
def update_route_config(self, context):
"""Synchronously, have a conductor configure static route.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the route manifest
:param context: request context.
"""
LOG.debug("ConductorApi.update_route_config: sending"
" update_route_config to conductor")
return self.call(context, self.make_msg('update_route_config'))
def update_sriov_config(self, context, host_uuid):
"""Synchronously, have a conductor configure sriov config.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the network manifest
:param context: request context.
:param host_uuid: the host unique uuid
"""
LOG.debug("ConductorApi.update_sriov_config: sending "
"update_sriov_config to conductor")
return self.call(context, self.make_msg('update_sriov_config',
host_uuid=host_uuid))
def update_distributed_cloud_role(self, context):
"""Synchronously, have a conductor configure the distributed cloud
role of the system.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- who each apply the config manifest
:param context: request context.
"""
LOG.debug("ConductorApi.update_distributed_cloud_role: sending"
" distributed_cloud_role to conductor")
return self.call(context, self.make_msg('update_distributed_cloud_role'))
def subfunctions_update_by_ihost(self, context, ihost_uuid, subfunctions):
"""Create or update local volume group for an ihost with the supplied
data.
This method allows records for a local volume group for ihost to be
created, or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param subfunctions: subfunctions of the host
:returns: pass or fail
"""
return self.call(context,
self.make_msg('subfunctions_update_by_ihost',
ihost_uuid=ihost_uuid,
subfunctions=subfunctions))
def configure_osd_istor(self, context, istor_obj):
"""Synchronously, have a conductor configure an OSD istor.
Does the following tasks:
- Allocates an OSD.
- Creates or resizes the OSD pools as necessary.
:param context: request context.
:param istor_obj: an istor object.
:returns: istor object, with updated osdid
"""
return self.call(context,
self.make_msg('configure_osd_istor',
istor_obj=istor_obj))
def unconfigure_osd_istor(self, context, istor_obj):
"""Synchronously, have a conductor unconfigure an istor.
Does the following tasks:
- Removes the OSD from the crush map.
- Deletes the OSD's auth key.
- Deletes the OSD.
:param context: request context.
:param istor_obj: an istor object.
"""
return self.call(context,
self.make_msg('unconfigure_osd_istor',
istor_obj=istor_obj))
def restore_ceph_config(self, context, after_storage_enabled=False):
"""Restore Ceph configuration during Backup and Restore process.
:param context: request context.
:returns: True if the restore is successful or if no restore is needed
"""
return self.call(context,
self.make_msg('restore_ceph_config',
after_storage_enabled=after_storage_enabled))
def get_ceph_pool_replication(self, context, ceph_backend=None):
"""Get ceph storage backend pool replication parameters
:param context: request context.
:param ceph_backend: ceph backend object type for a tier
:returns: tuple with (replication, min_replication)
"""
return self.call(context,
self.make_msg('get_ceph_pool_replication',
ceph_backend=ceph_backend))
def delete_osd_pool(self, context, pool_name):
"""delete an OSD pool
:param context: request context.
:param pool_name: the name of the OSD pool
"""
return self.call(context,
self.make_msg('delete_osd_pool',
pool_name=pool_name))
def list_osd_pools(self, context):
"""list OSD pools
:param context: request context.
"""
return self.call(context,
self.make_msg('list_osd_pools'))
def get_osd_pool_quota(self, context, pool_name):
"""Get the quota for an OSD pool
:param context: request context.
:param pool_name: the name of the OSD pool
:returns: dictionary with {"max_objects": num, "max_bytes": num}
"""
return self.call(context,
self.make_msg('get_osd_pool_quota',
pool_name=pool_name))
def set_osd_pool_quota(self, context, pool, max_bytes=0, max_objects=0):
"""Set the quota for an OSD pool
:param context: request context.
:param pool: the name of the OSD pool
"""
return self.call(context,
self.make_msg('set_osd_pool_quota',
pool=pool, max_bytes=max_bytes,
max_objects=max_objects))
def get_ceph_primary_tier_size(self, context):
"""Get the size of the primary storage tier in the ceph cluster.
:param context: request context.
:returns: integer size in GB.
"""
return self.call(context,
self.make_msg('get_ceph_primary_tier_size'))
def get_ceph_tier_size(self, context, tier_name):
"""Get the size of a storage tier in the ceph cluster.
:param context: request context.
:param tier_name: name of the storage tier of interest.
:returns: integer size in GB.
"""
return self.call(context,
self.make_msg('get_ceph_tier_size',
tier_name=tier_name))
def get_ceph_cluster_df_stats(self, context):
"""Get the usage information for the ceph cluster.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_ceph_cluster_df_stats'))
def get_ceph_pools_df_stats(self, context):
"""Get the usage information for the ceph pools.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_ceph_pools_df_stats'))
def get_cinder_lvm_usage(self, context):
"""Get the usage information for the LVM pools.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_cinder_lvm_usage'))
def get_cinder_volume_type_names(self, context):
"""Get the names of all currently defined cinder volume types.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_cinder_volume_type_names'))
def kill_ceph_storage_monitor(self, context):
"""Stop the ceph storage monitor.
pmon will not restart it. This should only be used in an
upgrade/rollback
:param context: request context.
"""
return self.call(context,
self.make_msg('kill_ceph_storage_monitor'))
def update_dns_config(self, context):
"""Synchronously, have the conductor update the DNS configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_dns_config'))
def update_ntp_config(self, context, service_change=False):
"""Synchronously, have the conductor update the NTP configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_ntp_config',
service_change=service_change))
def update_ptp_config(self, context):
"""Synchronously, have the conductor update the PTP configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_ptp_config'))
def update_system_mode_config(self, context):
"""Synchronously, have the conductor update the system mode
configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_system_mode_config'))
def update_security_feature_config(self, context):
"""Synchronously, have the conductor update the security_feature
configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_security_feature_config'))
def update_oam_config(self, context):
"""Synchronously, have the conductor update the OAM configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_oam_config'))
def update_user_config(self, context):
"""Synchronously, have the conductor update the user configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_user_config'))
def update_storage_config(self, context, update_storage=False,
reinstall_required=False, reboot_required=True,
filesystem_list=None):
"""Synchronously, have the conductor update the storage configuration.
:param context: request context.
"""
return self.call(
context, self.make_msg(
'update_storage_config',
update_storage=update_storage,
reinstall_required=reinstall_required,
reboot_required=reboot_required,
filesystem_list=filesystem_list
)
)
def update_lvm_config(self, context):
"""Synchronously, have the conductor update the LVM configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_lvm_config'))
def update_ceph_base_config(self, context, personalities):
"""Synchronously, have the conductor update the configuration
for monitors and ceph.conf.
:param context: request context.
:param personalities: list of host personalities.
"""
return self.call(
context, self.make_msg(
'update_ceph_base_config',
personalities=personalities
)
)
def update_ceph_osd_config(self, context, host, stor_uuid, runtime_manifests):
"""Synchronously, have the conductor update the configuration
for an OSD.
:param context: request context.
:param host: a host to update OSDs on.
:param stor_uuid: uuid of a storage device
:param runtime_manifests: True if puppet manifests are to be applied at
runtime.
"""
return self.call(
context, self.make_msg(
'update_ceph_osd_config',
host=host,
stor_uuid=stor_uuid,
runtime_manifests=runtime_manifests
)
)
def update_drbd_config(self, context):
"""Synchronously, have the conductor update the drbd configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_drbd_config'))
def update_remotelogging_config(self, context, timeout=None):
"""Synchronously, have the conductor update the remotelogging
configuration.
:param context: request context.
:param ihost_uuid: ihost uuid unique id
"""
return self.call(context,
self.make_msg('update_remotelogging_config'), timeout=timeout)
def get_magnum_cluster_count(self, context):
"""Synchronously, have the conductor get magnum cluster count
configuration.
:param context: request context.
"""
return self.call(context,
self.make_msg('get_magnum_cluster_count'))
def update_infra_config(self, context):
"""Synchronously, have the conductor update the infrastructure network
configuration.
:param context: request context.
"""
return self.call(context, self.make_msg('update_infra_config'))
def update_lvm_cinder_config(self, context):
"""Synchronously, have the conductor update Cinder LVM on a controller.
:param context: request context.
"""
return self.call(context,
self.make_msg('update_lvm_cinder_config'))
def update_install_uuid(self, context, host_uuid, install_uuid):
"""Synchronously, have an agent update install_uuid on
a host.
:param context: request context.
:param host_uuid: host uuid to update the install_uuid
:param install_uuid: install_uuid
"""
return self.call(context,
self.make_msg('update_install_uuid',
host_uuid=host_uuid,
install_uuid=install_uuid))
def update_ceph_config(self, context, sb_uuid, services):
"""Synchronously, have the conductor update Ceph on a controller
:param context: request context
:param sb_uuid: uuid of the storage backed to apply the ceph config
:param services: list of services using Ceph.
"""
return self.call(context,
self.make_msg('update_ceph_config',
sb_uuid=sb_uuid,
services=services))
def update_ceph_external_config(self, context, sb_uuid, services):
"""Synchronously, have the conductor update External Ceph on a controller
:param context: request context
:param sb_uuid: uuid of the storage backed to apply the external ceph config
:param services: list of services using Ceph.
"""
return self.call(context,
self.make_msg('update_ceph_external_config',
sb_uuid=sb_uuid,
services=services))
def config_update_nova_local_backed_hosts(self, context, instance_backing):
"""Synchronously, have the conductor set the hosts with worker
functionality and with a certain nova-local instance backing to
config out-of-date.
:param context: request context
:param instance_backing: the host's instance backing
"""
return self.call(context,
self.make_msg('config_update_nova_local_backed_hosts',
instance_backing=instance_backing))
def update_external_cinder_config(self, context):
"""Synchronously, have the conductor update Cinder Exernal(shared)
on a controller.
:param context: request context.
"""
return self.call(context,
self.make_msg('update_external_cinder_config'))
def update_ceph_services(self, context, sb_uuid):
"""Synchronously, have the conductor update Ceph tier services
:param context: request context
:param sb_uuid: uuid of the storage backed to apply the service update.
"""
return self.call(context,
self.make_msg('update_ceph_services', sb_uuid=sb_uuid))
def get_k8s_namespaces(self, context):
"""Synchronously, get Kubernetes namespaces
:returns: list of namespaces
"""
return self.call(context,
self.make_msg('get_k8s_namespaces'))
def report_config_status(self, context, iconfig,
status, error=None):
""" Callback from Sysinv Agent on manifest apply success or failure
Finalize the configuration after a successful manifest apply, or perform
cleanup, log errors and raise alarms in case of failure.
:param context: request context
:param iconfig: configuration context
:param status: operation status
:param error: serialized exception as a dict of type:
error = {
'class': str(ex.__class__.__name__),
'module': str(ex.__class__.__module__),
'message': six.text_type(ex),
'tb': traceback.format_exception(*ex),
'args': ex.args,
'kwargs': ex.kwargs
}
The iconfig context is expected to contain a valid REPORT_TOPIC key,
so that we can correctly identify the set of manifests executed.
"""
return self.call(context,
self.make_msg('report_config_status',
iconfig=iconfig,
status=status,
error=error))
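# Illustrative sketch (not from the original source) of how an agent-side
# caller might build the serialized `error` dict described in the docstring
# before reporting a failed manifest apply; apply_manifests and FAILURE are
# placeholder names:
#
#   try:
#       apply_manifests(iconfig)
#   except Exception as ex:
#       error = {
#           'class': str(ex.__class__.__name__),
#           'module': str(ex.__class__.__module__),
#           'message': six.text_type(ex),
#           'tb': traceback.format_exception(*sys.exc_info()),
#           'args': ex.args,
#           'kwargs': getattr(ex, 'kwargs', {}),
#       }
#       rpcapi.report_config_status(context, iconfig, FAILURE, error=error)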
def update_cpu_config(self, context, host_uuid):
"""Synchronously, have the conductor update the cpu
configuration.
:param context: request context.
:param host_uuid: host unique uuid
"""
return self.call(context, self.make_msg('update_cpu_config',
host_uuid=host_uuid))
def iconfig_update_by_ihost(self, context,
ihost_uuid, imsg_dict):
"""Create or update iconfig for an ihost with the supplied data.
This method allows records for iconfig for ihost to be updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param imsg_dict: inventory message dict
:returns: pass or fail
"""
return self.call(context,
self.make_msg('iconfig_update_by_ihost',
ihost_uuid=ihost_uuid,
imsg_dict=imsg_dict))
def iinterface_get_providernets(self,
context,
pn_names=None):
"""Call neutron to get PN MTUs based on PN names
This method does not update any records in the db
:param context: an admin context
:param pn_names: a list of providernet names
:returns: a dict of providernet information
"""
pn_dict = self.call(context,
self.make_msg('iinterface_get_providernets',
pn_names=pn_names))
return pn_dict
def mgmt_ip_set_by_ihost(self,
context,
ihost_uuid,
mgmt_ip):
"""Call sysinv to update host mgmt_ip (removes previous entry if
necessary)
:param context: an admin context
:param ihost_uuid: ihost uuid
:param mgmt_ip: mgmt_ip
:returns: Address
"""
return self.call(context,
self.make_msg('mgmt_ip_set_by_ihost',
ihost_uuid=ihost_uuid,
mgmt_ip=mgmt_ip))
def infra_ip_set_by_ihost(self,
context,
ihost_uuid,
infra_ip):
"""Call sysinv to update host infra_ip (removes previous entry if
necessary)
:param context: an admin context
:param ihost_uuid: ihost uuid
:param infra_ip: infra_ip
:returns: Address
"""
return self.call(context,
self.make_msg('infra_ip_set_by_ihost',
ihost_uuid=ihost_uuid,
infra_ip=infra_ip))
def neutron_extension_list(self, context):
"""
Send a request to neutron to query the supported extension list.
"""
return self.call(context, self.make_msg('neutron_extension_list'))
def neutron_bind_interface(self, context, host_uuid, interface_uuid,
network_type, providernets, mtu,
vlans=None, test=False):
"""
Send a request to neutron to bind an interface to a set of provider
networks, and inform neutron of some key attributes of the interface
for semantic checking purposes.
"""
return self.call(context,
self.make_msg('neutron_bind_interface',
host_uuid=host_uuid,
interface_uuid=interface_uuid,
network_type=network_type,
providernets=providernets,
mtu=mtu,
vlans=vlans,
test=test))
def neutron_unbind_interface(self, context, host_uuid, interface_uuid):
"""
Send a request to neutron to unbind an interface from a set of
provider networks.
"""
return self.call(context,
self.make_msg('neutron_unbind_interface',
host_uuid=host_uuid,
interface_uuid=interface_uuid))
def vim_host_add(self, context, api_token, ihost_uuid,
hostname, subfunctions, administrative,
operational, availability,
subfunction_oper, subfunction_avail, timeout):
"""
Asynchronously, notify VIM of host add
"""
return self.cast(context,
self.make_msg('vim_host_add',
api_token=api_token,
ihost_uuid=ihost_uuid,
hostname=hostname,
personality=subfunctions,
administrative=administrative,
operational=operational,
availability=availability,
subfunction_oper=subfunction_oper,
subfunction_avail=subfunction_avail,
timeout=timeout))
def mtc_host_add(self, context, mtc_address, mtc_port, ihost_mtc_dict):
"""
Asynchronously, notify mtce of host add
"""
return self.cast(context,
self.make_msg('mtc_host_add',
mtc_address=mtc_address,
mtc_port=mtc_port,
ihost_mtc_dict=ihost_mtc_dict))
def notify_subfunctions_config(self, context,
ihost_uuid, ihost_notify_dict):
"""
Synchronously, notify sysinv of host subfunctions config status
"""
return self.call(context,
self.make_msg('notify_subfunctions_config',
ihost_uuid=ihost_uuid,
ihost_notify_dict=ihost_notify_dict))
def ilvg_get_nova_ilvg_by_ihost(self,
context,
ihost_uuid):
"""
Gets the nova ilvg by ihost.
Returns the nova ilvg if it has been added to the host, otherwise
returns an empty list.
"""
ilvgs = self.call(context,
self.make_msg('ilvg_get_nova_ilvg_by_ihost',
ihost_uuid=ihost_uuid))
return ilvgs
def get_platform_interfaces(self, context, ihost_id):
"""Synchronously, have a agent collect platform interfaces for this
ihost.
Gets the mgmt, infra interface names and numa node
:param context: request context.
:param ihost_id: id of this host
:returns: a list of interfaces and their associated numa nodes.
"""
return self.call(context,
self.make_msg('platform_interfaces',
ihost_id=ihost_id))
def ibm_deprovision_by_ihost(self, context, ihost_uuid, ibm_msg_dict):
"""Update ihost upon notification of board management controller
deprovisioning.
This method also allows a dictionary of values to be passed in to
afford additional controls, if and as needed.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ibm_msg_dict: values for additional controls or changes
:returns: pass or fail
"""
return self.call(context,
self.make_msg('ibm_deprovision_by_ihost',
ihost_uuid=ihost_uuid,
ibm_msg_dict=ibm_msg_dict))
def configure_ttys_dcd(self, context, uuid, ttys_dcd):
"""Synchronously, have a conductor configure the dcd.
Does the following tasks:
- sends a message to conductor
- who sends a message to all inventory agents
- the agent on the host matching the uuid updates the dcd setting
:param context: request context.
:param uuid: the host uuid
:param ttys_dcd: the flag to enable/disable dcd
"""
LOG.debug("ConductorApi.configure_ttys_dcd: sending (%s %s) to "
"conductor" % (uuid, ttys_dcd))
return self.call(context,
self.make_msg('configure_ttys_dcd',
uuid=uuid, ttys_dcd=ttys_dcd))
def get_host_ttys_dcd(self, context, ihost_id):
"""Synchronously, have a agent collect carrier detect state for this
ihost.
:param context: request context.
:param ihost_id: id of this host
:returns: ttys_dcd.
"""
return self.call(context,
self.make_msg('get_host_ttys_dcd',
ihost_id=ihost_id))
def start_import_load(self, context, path_to_iso, path_to_sig):
"""Synchronously, mount the ISO and validate the load for import
:param context: request context.
:param path_to_iso: the file path of the iso on this host
:param path_to_sig: the file path of the iso's detached signature on
this host
:returns: the newly created load object.
"""
return self.call(context,
self.make_msg('start_import_load',
path_to_iso=path_to_iso,
path_to_sig=path_to_sig))
def import_load(self, context, path_to_iso, new_load):
"""Asynchronously, import a load and add it to the database
:param context: request context.
:param path_to_iso: the file path of the iso on this host
:param new_load: the load object
:returns: none.
"""
return self.cast(context,
self.make_msg('import_load',
path_to_iso=path_to_iso,
new_load=new_load))
def delete_load(self, context, load_id):
"""Asynchronously, cleanup a load from both controllers
:param context: request context.
:param load_id: id of load to be deleted
:returns: none.
"""
return self.cast(context,
self.make_msg('delete_load',
load_id=load_id))
def finalize_delete_load(self, context):
"""Asynchronously, delete the load from the database
:param context: request context.
:returns: none.
"""
return self.cast(context,
self.make_msg('finalize_delete_load'))
def load_update_by_host(self, context, ihost_id, version):
"""Update the host_upgrade table with the running SW_VERSION
:param context: request context.
:param ihost_id: the host id
:param version: the SW_VERSION from the host
:returns: none.
"""
return self.call(context,
self.make_msg('load_update_by_host',
ihost_id=ihost_id, sw_version=version))
def update_service_config(self, context, service=None, do_apply=False):
"""Synchronously, have the conductor update the service parameter.
:param context: request context.
:param do_apply: apply the newly created manifests.
"""
return self.call(context, self.make_msg('update_service_config',
service=service,
do_apply=do_apply))
def start_upgrade(self, context, upgrade):
"""Asynchronously, have the conductor start the upgrade
:param context: request context.
:param upgrade: the upgrade object.
"""
return self.cast(context, self.make_msg('start_upgrade',
upgrade=upgrade))
def activate_upgrade(self, context, upgrade):
"""Asynchronously, have the conductor perform the upgrade activation.
:param context: request context.
:param upgrade: the upgrade object.
"""
return self.cast(context, self.make_msg('activate_upgrade',
upgrade=upgrade))
def complete_upgrade(self, context, upgrade, state):
"""Asynchronously, have the conductor complete the upgrade.
:param context: request context.
:param upgrade: the upgrade object.
:param state: the state of the upgrade before completing
"""
return self.cast(context, self.make_msg('complete_upgrade',
upgrade=upgrade, state=state))
def abort_upgrade(self, context, upgrade):
"""Synchronously, have the conductor abort the upgrade.
:param context: request context.
:param upgrade: the upgrade object.
"""
return self.call(context, self.make_msg('abort_upgrade',
upgrade=upgrade))
def complete_simplex_backup(self, context, success):
"""Asynchronously, complete the simplex upgrade start process
:param context: request context.
:param success: If the create_simplex_backup call completed
"""
return self.cast(context, self.make_msg('complete_simplex_backup',
success=success))
def get_system_health(self, context, force=False, upgrade=False):
"""
Performs a system health check.
:param context: request context.
:param force: set to true to ignore minor and warning alarms
:param upgrade: set to true to perform an upgrade health check
"""
return self.call(context,
self.make_msg('get_system_health',
force=force, upgrade=upgrade))
def reserve_ip_for_first_storage_node(self, context):
"""
Reserve ip address for the first storage node for Ceph monitor
when installing Ceph as a second backend
:param context: request context.
"""
self.call(context,
self.make_msg('reserve_ip_for_first_storage_node'))
def reserve_ip_for_cinder(self, context):
"""
Reserve ip address for Cinder's services
:param context: request context.
"""
self.call(context,
self.make_msg('reserve_ip_for_cinder'))
def update_sdn_controller_config(self, context):
"""Synchronously, have the conductor update the SDN controller config.
:param context: request context.
"""
return self.call(context,
self.make_msg('update_sdn_controller_config'))
def update_sdn_enabled(self, context):
"""Synchronously, have the conductor update the SDN enabled flag
:param context: request context.
"""
return self.call(context,
self.make_msg('update_sdn_enabled'))
def update_vswitch_type(self, context):
"""Synchronously, have the conductor update the system vswitch type
:param context: request context.
"""
return self.call(context,
self.make_msg('update_vswitch_type'))
def create_barbican_secret(self, context, name, payload):
"""Calls Barbican API to create a secret
:param context: request context.
:param name: secret name
:param payload: secret payload
"""
return self.call(context,
self.make_msg('create_barbican_secret',
name=name,
payload=payload))
def delete_barbican_secret(self, context, name):
"""Calls Barbican API to delete a secret
:param context: request context.
:param name: secret name
"""
return self.call(context,
self.make_msg('delete_barbican_secret',
name=name))
def update_snmp_config(self, context):
"""Synchronously, have a conductor configure the SNMP configuration.
Does the following tasks:
- Update puppet hiera configuration file and apply run time manifest
:param context: request context.
"""
return self.call(context,
self.make_msg('update_snmp_config'))
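# Thin synchronous helpers: ceph_manager_config_complete acknowledges the ceph
# service configuration that was applied, while the get_* calls below fetch
# controller filesystem LV, cinder PV and cinder partition sizing information
# from the conductor.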
def ceph_manager_config_complete(self, context, applied_config):
self.call(context,
self.make_msg('ceph_service_config_complete',
applied_config=applied_config))
def get_controllerfs_lv_sizes(self, context):
return self.call(context,
self.make_msg('get_controllerfs_lv_sizes'))
def get_cinder_gib_pv_sizes(self, context):
return self.call(context,
self.make_msg('get_cinder_gib_pv_sizes'))
def get_cinder_partition_size(self, context):
return self.call(context,
self.make_msg('get_cinder_partition_size'))
def validate_emc_removal(self, context):
"""
Check that it is safe to remove the EMC SAN
"""
return self.call(context, self.make_msg('validate_emc_removal'))
def validate_hpe3par_removal(self, context, backend):
"""
Check that it is safe to remove the HPE 3PAR storage array
"""
return self.call(context,
self.make_msg('validate_hpe3par_removal',
backend=backend))
def validate_hpelefthand_removal(self, context):
"""
Check that it is safe to remove the HPE Lefthand storage array
"""
return self.call(context, self.make_msg('validate_hpelefthand_removal'))
def region_has_ceph_backend(self, context):
"""
Send a request to primary region to see if ceph backend is configured
"""
return self.call(context, self.make_msg('region_has_ceph_backend'))
def get_system_tpmconfig(self, context):
"""
Retrieve the system tpmconfig object
"""
return self.call(context, self.make_msg('get_system_tpmconfig'))
def get_tpmdevice_by_host(self, context, host_id):
"""
Retrieve the tpmdevice object for this host
"""
return self.call(context,
self.make_msg('get_tpmdevice_by_host',
host_id=host_id))
def update_tpm_config(self, context, tpm_context):
"""Synchronously, have the conductor update the TPM config.
:param context: request context.
:param tpm_context: TPM object context
"""
return self.call(context,
self.make_msg('update_tpm_config',
tpm_context=tpm_context))
def update_tpm_config_manifests(self, context, delete_tpm_file=None):
"""Synchronously, have the conductor update the TPM config manifests.
:param context: request context.
:param delete_tpm_file: tpm file to delete, optional
"""
return self.call(context,
self.make_msg('update_tpm_config_manifests',
delete_tpm_file=delete_tpm_file))
def tpm_config_update_by_host(self, context,
host_uuid, response_dict):
"""Get TPM configuration status from Agent host.
This method allows for alarms to be raised for hosts if TPM
is not configured properly.
:param context: an admin context
:param host_uuid: host unique id
:param response_dict: configuration status
:returns: pass or fail
"""
return self.call(
context,
self.make_msg('tpm_config_update_by_host',
host_uuid=host_uuid,
response_dict=response_dict))
def tpm_device_update_by_host(self, context,
host_uuid, tpmdevice_dict):
"""Synchronously , have the conductor create or update
a tpmdevice per host.
:param context: request context.
:param host_uuid: uuid or id of the host
:param tpmdevice_dict: a dictionary of tpm device attributes
:returns: tpmdevice object
"""
return self.call(
context,
self.make_msg('tpm_device_update_by_host',
host_uuid=host_uuid,
tpmdevice_dict=tpmdevice_dict))
def cinder_prepare_db_for_volume_restore(self, context):
"""
Send a request to cinder to remove all volume snapshots and set all
volumes to error state in preparation for restoring all volumes.
This is needed for cinder disk replacement.
"""
return self.call(context,
self.make_msg('cinder_prepare_db_for_volume_restore'))
def cinder_has_external_backend(self, context):
"""
Check if cinder has loosely coupled external backends.
These are the possible backends: emc_vnx, hpe3par, hpelefthand
:param context: request context.
"""
return self.call(context,
self.make_msg('cinder_has_external_backend'))
def get_ceph_object_pool_name(self, context):
"""
Get Rados Gateway object data pool name
:param context: request context.
"""
return self.call(context,
self.make_msg('get_ceph_object_pool_name'))
def get_software_upgrade_status(self, context):
"""
Software upgrade status is needed by ceph-manager to take ceph-specific
upgrade actions.
This rpcapi function is added to signal that the conductor's
get_software_upgrade_status function is used by an RPC client.
ceph-manager, however, doesn't call rpcapi.get_software_upgrade_status;
instead it uses oslo_messaging to construct a call on the conductor's topic
for this function. The reason is that sysinv uses an old version of the
openstack common and messaging libraries, incompatible with the ones used
by ceph-manager.
"""
return self.call(context,
self.make_msg('get_software_upgrade_status'))
def update_firewall_config(self, context, ip_version, contents):
"""Synchronously, have the conductor update the firewall config
and manifest.
:param context: request context.
:param ip_version: IP version.
:param contents: file content of custom firewall rules.
"""
return self.call(context,
self.make_msg('update_firewall_config',
ip_version=ip_version,
contents=contents))
def distribute_ceph_external_config(self, context, ceph_conf_filename):
"""Synchronously, have the conductor update the Ceph configuration
file for external cluster.
:param context: request context.
:param ceph_conf_filename: Ceph conf file
"""
return self.call(context,
self.make_msg('distribute_ceph_external_config',
ceph_conf_filename=ceph_conf_filename))
def store_ceph_external_config(self, context, contents, ceph_conf_filename):
"""Synchronously, have the conductor to write the ceph config file content
to /opt/platform/config
:param context: request context.
:param contents: file content of the Ceph conf file
:param ceph_conf_filename: Ceph conf file
"""
return self.call(context,
self.make_msg('store_ceph_external_config',
contents=contents,
ceph_conf_filename=ceph_conf_filename))
def update_partition_information(self, context, partition_data):
"""Synchronously, have the conductor update partition information.
:param context: request context.
:param partition_data: dict containing the partition information to
update (host UUID, partition UUID and the attributes to change)
"""
return self.call(context,
self.make_msg('update_partition_information',
partition_data=partition_data))
def install_license_file(self, context, contents):
"""Sychronously, have the conductor install the license file.
:param context: request context.
:param contents: content of license file.
"""
return self.call(context,
self.make_msg('install_license_file',
contents=contents))
def config_certificate(self, context, pem_contents, config_dict):
"""Synchronously, have the conductor configure the certificate.
:param context: request context.
:param pem_contents: contents of certificate in pem format.
:param config_dict: dictionary of certificate config attributes.
"""
return self.call(context,
self.make_msg('config_certificate',
pem_contents=pem_contents,
config_dict=config_dict,
))
def get_helm_chart_namespaces(self, context, chart_name):
"""Get supported chart namespaces.
This method retrieves the namespace supported by a given chart.
:param context: request context.
:param chart_name: name of the chart
:returns: list of supported namespaces for which overrides may be
provided.
"""
return self.call(context,
self.make_msg('get_helm_chart_namespaces',
chart_name=chart_name))
def get_helm_chart_overrides(self, context, chart_name, cnamespace=None):
"""Get the overrides for a supported chart.
:param context: request context.
:param chart_name: name of a supported chart
:param cnamespace: (optional) namespace
:returns: dict of overrides.
"""
return self.call(context,
self.make_msg('get_helm_chart_overrides',
chart_name=chart_name,
cnamespace=cnamespace))
def get_helm_application_namespaces(self, context, app_name):
"""Get supported application namespaces.
:param app_name: name of the bundle of charts required to support an
application
:returns: dict of charts and the supported namespaces for which
overrides may be provided.
"""
return self.call(context,
self.make_msg('get_helm_application_namespaces',
app_name=app_name))
def get_helm_application_overrides(self, context, app_name, cnamespace=None):
"""Get the overrides for a supported set of charts.
:param context: request context.
:param app_name: name of a supported application (set of charts)
:param cnamespace: (optional) namespace
:returns: dict of overrides.
"""
return self.call(context,
self.make_msg('get_helm_application_overrides',
app_name=app_name,
cnamespace=cnamespace))
def merge_overrides(self, context, file_overrides=[], set_overrides=[]):
"""Merge the file and set overrides into a single chart overrides.
:param context: request context.
:param file_overrides: (optional) list of overrides from files
:param set_overrides: (optional) list of parameter overrides
:returns: merged overrides string
"""
return self.call(context,
self.make_msg('merge_overrides',
file_overrides=file_overrides,
set_overrides=set_overrides))
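# Illustrative call (values are hypothetical, not taken from the original
# source); file_overrides carry whole override documents read from files while
# set_overrides carry individual key=value parameters:
#
#   merged = rpcapi.merge_overrides(
#       context,
#       file_overrides=["replicaCount: 2\n"],
#       set_overrides=["image.tag=latest"])
#
# The result is presumably a single overrides string with the set values
# layered on top of the file values.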
def update_kubernetes_label(self, context, host_uuid, label_dict):
"""Synchronously, have the conductor update kubernetes label.
:param context: request context.
:param host_uuid: uuid or id of the host
:param label_dict: a dictionary of kubernetes labels
"""
return self.call(context,
self.make_msg('update_kubernetes_label',
host_uuid=host_uuid,
label_dict=label_dict))
def update_host_memory(self, context, host_uuid):
"""Asynchronously, have a conductor update the host memory
:param context: request context.
:param host_uuid: uuid or id of the host.
"""
LOG.info("ConductorApi.update_host_memory: sending"
" host memory update request to conductor")
return self.cast(context, self.make_msg('update_host_memory',
host_uuid=host_uuid))
def update_fernet_keys(self, context, keys):
"""Synchronously, have the conductor update fernet keys.
:param context: request context.
:param keys: a list of fernet keys
"""
return self.call(context, self.make_msg('update_fernet_keys',
keys=keys))
def get_fernet_keys(self, context, key_id=None):
"""Synchronously, have the conductor to retrieve fernet keys.
:param context: request context.
:param key_id: (optional)
:returns: a list of fernet keys.
"""
return self.call(context, self.make_msg('get_fernet_keys',
key_id=key_id))
def perform_app_upload(self, context, rpc_app, tarfile):
"""Handle application upload request
:param context: request context.
:param rpc_app: data object provided in the rpc request
:param tarfile: location of the application tarfile to be extracted
"""
return self.cast(context,
self.make_msg('perform_app_upload',
rpc_app=rpc_app,
tarfile=tarfile))
def perform_app_apply(self, context, rpc_app, app_not_already_applied):
"""Handle application apply request
:param context: request context.
:param rpc_app: data object provided in the rpc request
:param app_not_already_applied: app not already successfully applied
"""
return self.cast(context,
self.make_msg(
'perform_app_apply',
rpc_app=rpc_app,
app_not_already_applied=app_not_already_applied))
def perform_app_remove(self, context, rpc_app):
"""Handle application remove request
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self.cast(context,
self.make_msg('perform_app_remove',
rpc_app=rpc_app))
def perform_app_delete(self, context, rpc_app):
"""Handle application delete request
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self.call(context,
self.make_msg('perform_app_delete',
rpc_app=rpc_app))
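# Rough usage sketch (assumed names and setup, not part of the original
# module): callers obtain an admin request context and an instance of this
# RPC client class, then invoke the wrappers, e.g.
#
#   rpcapi = ConductorAPI()              # class name assumed
#   ctxt = context.get_admin_context()   # oslo-style admin context, assumed
#   rpcapi.update_dns_config(ctxt)       # synchronous call
#   rpcapi.import_load(ctxt, path_to_iso, new_load)   # asynchronous cast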
Arrays/628_maximum_product_of_three_numbers.py | nikitaKunevich/coding-training | 0 | 6632406 |
'''
Source: Leetcode
628. Maximum Product of Three Numbers
'''
from typing import List
# O(n) time | O(n) space
class SolutionWSortedArray:
def maximumProduct(self, nums: List[int]) -> int:
s = sorted(nums)
return max(s[0] * s[1] * s[-1], s[-3] * s[-2] * s[-1])
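# Single-pass alternative below: keep the two smallest and the three largest
# values seen so far; the answer is either (two most negative) * (largest) or
# the product of the three largest values.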
# O(N) time | O(1) space
class Solution:
def maximumProduct(self, array: List[int]) -> int:
firstMin, secondMin, firstMax, secondMax, thirdMax = float('inf'), float('inf'), float('-inf'), float('-inf'), float('-inf')
for i in range(len(array)):
if array[i] > firstMax:
thirdMax = secondMax
secondMax = firstMax
firstMax = array[i]
elif array[i] > secondMax and array[i] <=firstMax:
thirdMax = secondMax
secondMax = array[i]
elif array[i] > thirdMax and array[i] <= secondMax:
thirdMax = array[i]
if array[i] < firstMin:
secondMin = firstMin
firstMin = array[i]
elif array[i] < secondMin and array[i] >= firstMin:
secondMin = array[i]
return max(firstMin * secondMin * firstMax, firstMax * secondMax * thirdMax)
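# Quick self-check, illustrative only (not part of the original solution file):
if __name__ == "__main__":
    samples = [
        ([1, 2, 3], 6),
        ([1, 2, 3, 4], 24),
        ([-100, -98, -1, 2, 3, 4], 39200),
    ]
    for nums, expected in samples:
        assert SolutionWSortedArray().maximumProduct(list(nums)) == expected
        assert Solution().maximumProduct(list(nums)) == expected
    print("both solutions agree on the sample inputs")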
sahara-10.0.0/sahara/plugins/base.py | scottwedge/OpenStack-Stein | 161 | 6632407 | <gh_stars>100-1000
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_config import cfg
from oslo_log import log as logging
import six
from stevedore import enabled
from sahara import conductor as cond
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.plugins import labels
from sahara.utils import resources
conductor = cond.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def required(fun):
return abc.abstractmethod(fun)
def required_with_default(fun):
return fun
def optional(fun):
fun.__not_implemented__ = True
return fun
@six.add_metaclass(abc.ABCMeta)
class PluginInterface(resources.BaseResource):
__resource_name__ = 'plugin'
name = 'plugin_interface'
@required
def get_title(self):
"""Plugin title
For example:
"Vanilla Provisioning"
"""
pass
@required_with_default
def get_description(self):
"""Optional description of the plugin
This information is targeted to be displayed in UI.
"""
pass
def to_dict(self):
return {
'name': self.name,
'title': self.get_title(),
'description': self.get_description(),
}
class PluginManager(object):
def __init__(self):
self.plugins = {}
self.default_label_schema = {}
self._load_cluster_plugins()
self.label_handler = labels.LabelHandler(self.plugins)
def _load_cluster_plugins(self):
config_plugins = CONF.plugins
extension_manager = enabled.EnabledExtensionManager(
check_func=lambda ext: ext.name in config_plugins,
namespace='sahara.cluster.plugins',
invoke_on_load=True
)
for ext in extension_manager.extensions:
if ext.name in self.plugins:
raise ex.ConfigurationError(
_("Plugin with name '%s' already exists.") % ext.name)
ext.obj.name = ext.name
self.plugins[ext.name] = ext.obj
LOG.info("Plugin {plugin_name} loaded {entry_point}".format(
plugin_name=ext.name,
entry_point=ext.entry_point_target))
if len(self.plugins) < len(config_plugins):
self.loaded_plugins = set(six.iterkeys(self.plugins))
requested_plugins = set(config_plugins)
LOG.warning("Plugins couldn't be loaded: %s",
", ".join(requested_plugins - self.loaded_plugins))
def get_plugins(self, serialized=False):
if serialized:
return [self.serialize_plugin(name)
for name in PLUGINS.plugins]
return [self.get_plugin(name) for name in PLUGINS.plugins]
def get_plugin(self, plugin_name):
return self.plugins.get(plugin_name)
def is_plugin_implements(self, plugin_name, fun_name):
plugin = self.get_plugin(plugin_name)
fun = getattr(plugin, fun_name)
if not (fun and callable(fun)):
return False
return not hasattr(fun, '__not_implemented__')
def serialize_plugin(self, plugin_name, version=None):
plugin = self.get_plugin(plugin_name)
if plugin:
res = plugin.as_resource()
res._info.update(self.label_handler.get_label_full_details(
plugin_name))
if version:
if version in plugin.get_versions():
res._info.update(plugin.get_version_details(version))
else:
return None
return res
def update_plugin(self, plugin_name, values):
self.label_handler.update_plugin(plugin_name, values)
return self.serialize_plugin(plugin_name)
def validate_plugin_update(self, plugin_name, values):
return self.label_handler.validate_plugin_update(plugin_name, values)
def get_plugin_update_validation_jsonschema(self):
return self.label_handler.get_plugin_update_validation_jsonschema()
def validate_plugin_labels(self, plugin, version):
self.label_handler.validate_plugin_labels(plugin, version)
PLUGINS = None
def setup_plugins():
global PLUGINS
PLUGINS = PluginManager()
| # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_config import cfg
from oslo_log import log as logging
import six
from stevedore import enabled
from sahara import conductor as cond
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.plugins import labels
from sahara.utils import resources
conductor = cond.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def required(fun):
return abc.abstractmethod(fun)
def required_with_default(fun):
return fun
def optional(fun):
fun.__not_implemented__ = True
return fun
@six.add_metaclass(abc.ABCMeta)
class PluginInterface(resources.BaseResource):
__resource_name__ = 'plugin'
name = 'plugin_interface'
@required
def get_title(self):
"""Plugin title
For example:
"Vanilla Provisioning"
"""
pass
@required_with_default
def get_description(self):
"""Optional description of the plugin
This information is targeted to be displayed in UI.
"""
pass
def to_dict(self):
return {
'name': self.name,
'title': self.get_title(),
'description': self.get_description(),
}
class PluginManager(object):
def __init__(self):
self.plugins = {}
self.default_label_schema = {}
self._load_cluster_plugins()
self.label_handler = labels.LabelHandler(self.plugins)
def _load_cluster_plugins(self):
config_plugins = CONF.plugins
extension_manager = enabled.EnabledExtensionManager(
check_func=lambda ext: ext.name in config_plugins,
namespace='sahara.cluster.plugins',
invoke_on_load=True
)
for ext in extension_manager.extensions:
if ext.name in self.plugins:
raise ex.ConfigurationError(
_("Plugin with name '%s' already exists.") % ext.name)
ext.obj.name = ext.name
self.plugins[ext.name] = ext.obj
LOG.info("Plugin {plugin_name} loaded {entry_point}".format(
plugin_name=ext.name,
entry_point=ext.entry_point_target))
if len(self.plugins) < len(config_plugins):
self.loaded_plugins = set(six.iterkeys(self.plugins))
requested_plugins = set(config_plugins)
LOG.warning("Plugins couldn't be loaded: %s",
", ".join(requested_plugins - self.loaded_plugins))
def get_plugins(self, serialized=False):
if serialized:
return [self.serialize_plugin(name)
for name in PLUGINS.plugins]
return [self.get_plugin(name) for name in PLUGINS.plugins]
def get_plugin(self, plugin_name):
return self.plugins.get(plugin_name)
def is_plugin_implements(self, plugin_name, fun_name):
plugin = self.get_plugin(plugin_name)
fun = getattr(plugin, fun_name)
if not (fun and callable(fun)):
return False
return not hasattr(fun, '__not_implemented__')
def serialize_plugin(self, plugin_name, version=None):
plugin = self.get_plugin(plugin_name)
if plugin:
res = plugin.as_resource()
res._info.update(self.label_handler.get_label_full_details(
plugin_name))
if version:
if version in plugin.get_versions():
res._info.update(plugin.get_version_details(version))
else:
return None
return res
def update_plugin(self, plugin_name, values):
self.label_handler.update_plugin(plugin_name, values)
return self.serialize_plugin(plugin_name)
def validate_plugin_update(self, plugin_name, values):
return self.label_handler.validate_plugin_update(plugin_name, values)
def get_plugin_update_validation_jsonschema(self):
return self.label_handler.get_plugin_update_validation_jsonschema()
def validate_plugin_labels(self, plugin, version):
self.label_handler.validate_plugin_labels(plugin, version)
PLUGINS = None
def setup_plugins():
global PLUGINS
PLUGINS = PluginManager() | en | 0.82579 | # Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Plugin title For example: "Vanilla Provisioning" Optional description of the plugin This information is targeted to be displayed in UI. | 1.71192 | 2 |
migrations/versions/b25f088d40c2_.py | DerekBev/tasking-manager | 2 | 6632408 | """empty message
Revision ID: b25f088d40c2
Revises: <PASSWORD>
Create Date: 2019-06-11 12:31:41.697842
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b25f088d40c2'
down_revision = '<PASSWORD>9cebaa79c'
branch_labels = None
depends_on = None
def upgrade():
# Remove all task ids in messages for tasks that don't exists anynmore
query = 'UPDATE messages SET task_id = NULL WHERE task_id NOT IN (SELECT id FROM tasks WHERE project_id = messages.project_id);'
op.execute(query)
op.create_foreign_key('messages_tasks', 'messages', 'tasks', ['task_id', 'project_id'], ['id', 'project_id'])
def downgrade():
op.drop_constraint('messages_tasks', 'messages', type_='foreignkey')
| """empty message
Revision ID: b25f088d40c2
Revises: <PASSWORD>
Create Date: 2019-06-11 12:31:41.697842
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b25f088d40c2'
down_revision = '<PASSWORD>9cebaa79c'
branch_labels = None
depends_on = None
def upgrade():
# Remove all task ids in messages for tasks that don't exists anynmore
query = 'UPDATE messages SET task_id = NULL WHERE task_id NOT IN (SELECT id FROM tasks WHERE project_id = messages.project_id);'
op.execute(query)
op.create_foreign_key('messages_tasks', 'messages', 'tasks', ['task_id', 'project_id'], ['id', 'project_id'])
def downgrade():
op.drop_constraint('messages_tasks', 'messages', type_='foreignkey')
| en | 0.627553 | empty message Revision ID: b25f088d40c2 Revises: <PASSWORD> Create Date: 2019-06-11 12:31:41.697842 # revision identifiers, used by Alembic. # Remove all task ids in messages for tasks that don't exists anynmore | 1.517149 | 2 |
Day9/main.py | Tribruin/AdventOfCode2021 | 0 | 6632409 | #!/Users/rblount/.pyenv/versions/AdOfCode/bin/python
import sys
import os
from AOC import AOC
import numpy as np
from scipy.ndimage import label
testing = True
def parse_input(data: AOC) -> np.array:
num_array = np.genfromtxt(data.read_lines(), dtype=int, delimiter=1)
num_array = np.pad(num_array, 1, mode="constant", constant_values=9)
return num_array
def get_neighbors(array: np.array, y: int, x: int) -> list():
adjecent = [
array[y - 1][x],
array[y + 1][x],
array[y][x - 1],
array[y][x + 1],
]
return adjecent
def part1(floor_array: np.array):
y_size, x_size = np.shape(floor_array)
low_points = np.full((y_size, x_size), False, dtype=bool)
for (y, x), val in np.ndenumerate(floor_array):
if (0 < y < y_size - 1) and (0 < x < x_size - 1):
# Skip the values that are along the edge.
adjecent = sorted(get_neighbors(floor_array, y, x))
# check if lowest
# Mark the map True or False
low_points[(y, x)] = (val < adjecent[0])
# overlay the low_points array to the floor_array to get only the low points
low_point_heights = floor_array[low_points]
print(np.sum(low_points) + np.sum(low_point_heights))
def part2(floor_array: np.array):
# THIS IS NOT MY CODE. I cheated!
# Used code from https://gitlab.com/AsbjornOlling/aoc2021/-/blob/master/09/solve.py
# Did not know about label or bincount
basins, _ = label(floor_array != 9)
basin_areas = np.bincount(basins[basins != 0])
top_three = np.sort(basin_areas)[-3:]
print(top_three[0] * top_three[1] * top_three[2])
def main():
# Get the path name and strip to the last 1 or 2 characters
codePath = os.path.dirname(sys.argv[0])
codeDate = int(codePath.split("/")[-1][3:])
codeYear = int(codePath.split("/")[-2])
print(f"Running Advent of Code for Year: {codeYear} - Day {codeDate}")
data = AOC(codeDate, codeYear, test=testing)
floor_array = parse_input(data)
part1(floor_array)
part2(floor_array)
if __name__ == "__main__":
main()
| #!/Users/rblount/.pyenv/versions/AdOfCode/bin/python
import sys
import os
from AOC import AOC
import numpy as np
from scipy.ndimage import label
testing = True
def parse_input(data: AOC) -> np.array:
num_array = np.genfromtxt(data.read_lines(), dtype=int, delimiter=1)
num_array = np.pad(num_array, 1, mode="constant", constant_values=9)
return num_array
def get_neighbors(array: np.array, y: int, x: int) -> list():
adjecent = [
array[y - 1][x],
array[y + 1][x],
array[y][x - 1],
array[y][x + 1],
]
return adjecent
def part1(floor_array: np.array):
y_size, x_size = np.shape(floor_array)
low_points = np.full((y_size, x_size), False, dtype=bool)
for (y, x), val in np.ndenumerate(floor_array):
if (0 < y < y_size - 1) and (0 < x < x_size - 1):
# Skip the values that are along the edge.
adjecent = sorted(get_neighbors(floor_array, y, x))
# check if lowest
# Mark the map True or False
low_points[(y, x)] = (val < adjecent[0])
# overlay the low_points array to the floor_array to get only the low points
low_point_heights = floor_array[low_points]
print(np.sum(low_points) + np.sum(low_point_heights))
def part2(floor_array: np.array):
# THIS IS NOT MY CODE. I cheated!
# Used code from https://gitlab.com/AsbjornOlling/aoc2021/-/blob/master/09/solve.py
# Did not know about label or bincount
basins, _ = label(floor_array != 9)
basin_areas = np.bincount(basins[basins != 0])
top_three = np.sort(basin_areas)[-3:]
print(top_three[0] * top_three[1] * top_three[2])
def main():
# Get the path name and strip to the last 1 or 2 characters
codePath = os.path.dirname(sys.argv[0])
codeDate = int(codePath.split("/")[-1][3:])
codeYear = int(codePath.split("/")[-2])
print(f"Running Advent of Code for Year: {codeYear} - Day {codeDate}")
data = AOC(codeDate, codeYear, test=testing)
floor_array = parse_input(data)
part1(floor_array)
part2(floor_array)
if __name__ == "__main__":
main()
| en | 0.694946 | #!/Users/rblount/.pyenv/versions/AdOfCode/bin/python # Skip the values that are along the edge. # check if lowest # Mark the map True or False # overlay the low_points array to the floor_array to get only the low points # THIS IS NOT MY CODE. I cheated! # Used code from https://gitlab.com/AsbjornOlling/aoc2021/-/blob/master/09/solve.py # Did not know about label or bincount # Get the path name and strip to the last 1 or 2 characters | 2.991008 | 3 |
zerver/management/commands/purge_queue.py | SophieHau/zulip | 0 | 6632410 | <reponame>SophieHau/zulip<gh_stars>0
from argparse import ArgumentParser
from typing import Any
from django.core.management import CommandError
from django.core.management.base import BaseCommand
from zerver.lib.queue import SimpleQueueClient
from zerver.worker.queue_processors import get_active_worker_queues
class Command(BaseCommand):
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(dest="queue_name", type=str, nargs='?',
help="queue to purge", default=None)
parser.add_argument('--all', dest="all", action="store_true",
default=False, help="purge all queues")
help = "Discards all messages from the given queue"
def handle(self, *args: Any, **options: str) -> None:
def purge_queue(queue_name: str) -> None:
queue = SimpleQueueClient()
queue.ensure_queue(queue_name, lambda: None)
queue.channel.queue_purge(queue_name)
if options['all']:
for queue_name in get_active_worker_queues():
purge_queue(queue_name)
print("All queues purged")
elif not options['queue_name']:
raise CommandError("Missing queue_name argument!")
else:
queue_name = options['queue_name']
if not (queue_name in get_active_worker_queues() or
queue_name.startswith("notify_tornado") or
queue_name.startswith("tornado_return")):
raise CommandError("Unknown queue %s" % (queue_name,))
print("Purging queue %s" % (queue_name,))
purge_queue(queue_name)
print("Done")
| from argparse import ArgumentParser
from typing import Any
from django.core.management import CommandError
from django.core.management.base import BaseCommand
from zerver.lib.queue import SimpleQueueClient
from zerver.worker.queue_processors import get_active_worker_queues
class Command(BaseCommand):
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(dest="queue_name", type=str, nargs='?',
help="queue to purge", default=None)
parser.add_argument('--all', dest="all", action="store_true",
default=False, help="purge all queues")
help = "Discards all messages from the given queue"
def handle(self, *args: Any, **options: str) -> None:
def purge_queue(queue_name: str) -> None:
queue = SimpleQueueClient()
queue.ensure_queue(queue_name, lambda: None)
queue.channel.queue_purge(queue_name)
if options['all']:
for queue_name in get_active_worker_queues():
purge_queue(queue_name)
print("All queues purged")
elif not options['queue_name']:
raise CommandError("Missing queue_name argument!")
else:
queue_name = options['queue_name']
if not (queue_name in get_active_worker_queues() or
queue_name.startswith("notify_tornado") or
queue_name.startswith("tornado_return")):
raise CommandError("Unknown queue %s" % (queue_name,))
print("Purging queue %s" % (queue_name,))
purge_queue(queue_name)
print("Done") | none | 1 | 2.230536 | 2 |
|
pnc_cli/buildrecords.py | vibe13/pnc-cli | 2 | 6632411 | from argh import arg
import pnc_cli.common as common
import pnc_cli.cli_types as types
import pnc_cli.utils as utils
from pnc_cli.pnc_api import pnc_api
@arg("-p", "--page-size", help="Limit the amount of BuildRecords returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_build_records(page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords
"""
data = list_build_records_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_build_records_raw(page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(pnc_api.builds, 'get_all', page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-i", "--id", help="BuildConfiguration ID to retrieve BuildRecords of.", type=types.existing_bc_id)
@arg("-n", "--name", help="BuildConfiguration name to retrieve BuildRecords of.", type=types.existing_bc_name)
@arg("-p", "--page-size", help="Limit the amount of BuildRecords returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords for a given BuildConfiguration
"""
data = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_records_for_build_configuration_raw(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
config_id = common.set_id(pnc_api.build_configs, id, name)
response = utils.checked_api_call(pnc_api.builds, 'get_all_for_build_configuration', configuration_id=config_id,
page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-i", "--id", help="Project ID to retrieve BuildRecords of.")
@arg("-n", "--name", help="Project name to retrieve BuildRecords of.")
@arg("-p", "--page-size", help="Limit the amount of BuildRecords returned")
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_records_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords for a given Project
"""
data = list_records_for_project_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_records_for_project_raw(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
project_id = common.set_id(pnc_api.projects, id, name)
response = utils.checked_api_call(pnc_api.builds, 'get_all_for_project_0', project_id=project_id, page_size=page_size, page_index=page_index,
sort=sort, q=q)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve.", type=types.existing_build_record)
def get_build_record(id):
"""
Get a specific BuildRecord by ID
"""
data = get_build_record_raw(id)
if data:
return utils.format_json(data)
def get_build_record_raw(id):
response = utils.checked_api_call(pnc_api.builds, 'get_specific', id=id)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve artifacts from.", type=types.existing_build_record)
@arg("-p", "--page-size", help="Limit the amount of Artifacts returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_built_artifacts(id, page_size=200, page_index=0, sort="", q=""):
"""
List Artifacts associated with a BuildRecord
"""
data = list_built_artifacts_raw(id, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_built_artifacts_raw(id, page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(pnc_api.builds, 'get_built_artifacts', id=id,
page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve dependency Artifacts from.", type=types.existing_build_record)
@arg("-p", "--page-size", help="Limit the amount of Artifacts returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_dependency_artifacts(id, page_size=200, page_index=0, sort="", q=""):
"""
List dependency artifacts associated with a BuildRecord
"""
data = list_dependency_artifacts_raw(id, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_dependency_artifacts_raw(id, page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(pnc_api.builds, 'get_dependency_artifacts', id=id, page_size=page_size, page_index=page_index, sort=sort,
q=q)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve audited BuildConfiguration from.", type=types.existing_build_record)
def get_audited_configuration_for_record(id):
"""
Get the BuildConfigurationAudited for a given BuildRecord
"""
data = get_audited_configuration_for_record_raw(id)
if data:
return utils.format_json(data)
def get_audited_configuration_for_record_raw(id):
response = utils.checked_api_call(pnc_api.builds, 'get_build_configuration_audited', id=id)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve the log from.", type=types.existing_build_record)
def get_log_for_record(id):
"""
Get the log for a given BuildRecord
"""
data = get_log_for_record_raw(id)
if data:
return data
def get_log_for_record_raw(id):
response = utils.checked_api_call(pnc_api.builds, 'get_logs', id=id)
if response:
return response
@arg("id", help="BuildRecord ID to add an Attribute to.", type=types.existing_build_record)
@arg("key", help="Key for the Attribute.")
@arg("value", help="Value for the Attribute.")
def put_attribute(id, key, value):
utils.checked_api_call(pnc_api.builds, 'put_attribute', id=id, key=key, value=value)
@arg("id", help="BuildRecord ID to remove an Attribute from.", type=types.existing_build_record)
@arg("key", help="Key of the Attribute to remove.")
def remove_attribute(id, key):
utils.checked_api_call(pnc_api.builds, 'remove_attribute', id=id, key=key)
@arg("key", help="Key of the Attribute to query BuildRecords for.")
@arg("value", help="Value of the Attribute to query BuildRecords for.")
@arg("-p", "--page-size", help="Limit the amount of BuildRecords returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def query_by_attribute(key, value, page_size=200, page_index=0, sort="", q=""):
data = query_by_attribute_raw(key, value, page_size, page_index, sort, q)
if data:
return utils.format_json(data)
def query_by_attribute_raw(key, value, page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(pnc_api.builds, "query_by_attribute", key=key, value=value, page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response
@arg("id", help="BuildRecord ID to list Attributes of.", type=types.existing_build_record)
def list_attributes(id):
data = list_attributes_raw(id)
if data:
return utils.format_json_list(data)
def list_attributes_raw(id,):
response = utils.checked_api_call(pnc_api.builds, 'get_attributes', id=id)
if response:
return response.content
| from argh import arg
import pnc_cli.common as common
import pnc_cli.cli_types as types
import pnc_cli.utils as utils
from pnc_cli.pnc_api import pnc_api
@arg("-p", "--page-size", help="Limit the amount of BuildRecords returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_build_records(page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords
"""
data = list_build_records_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_build_records_raw(page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(pnc_api.builds, 'get_all', page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-i", "--id", help="BuildConfiguration ID to retrieve BuildRecords of.", type=types.existing_bc_id)
@arg("-n", "--name", help="BuildConfiguration name to retrieve BuildRecords of.", type=types.existing_bc_name)
@arg("-p", "--page-size", help="Limit the amount of BuildRecords returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords for a given BuildConfiguration
"""
data = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_records_for_build_configuration_raw(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
config_id = common.set_id(pnc_api.build_configs, id, name)
response = utils.checked_api_call(pnc_api.builds, 'get_all_for_build_configuration', configuration_id=config_id,
page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-i", "--id", help="Project ID to retrieve BuildRecords of.")
@arg("-n", "--name", help="Project name to retrieve BuildRecords of.")
@arg("-p", "--page-size", help="Limit the amount of BuildRecords returned")
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_records_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords for a given Project
"""
data = list_records_for_project_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_records_for_project_raw(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
project_id = common.set_id(pnc_api.projects, id, name)
response = utils.checked_api_call(pnc_api.builds, 'get_all_for_project_0', project_id=project_id, page_size=page_size, page_index=page_index,
sort=sort, q=q)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve.", type=types.existing_build_record)
def get_build_record(id):
"""
Get a specific BuildRecord by ID
"""
data = get_build_record_raw(id)
if data:
return utils.format_json(data)
def get_build_record_raw(id):
response = utils.checked_api_call(pnc_api.builds, 'get_specific', id=id)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve artifacts from.", type=types.existing_build_record)
@arg("-p", "--page-size", help="Limit the amount of Artifacts returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_built_artifacts(id, page_size=200, page_index=0, sort="", q=""):
"""
List Artifacts associated with a BuildRecord
"""
data = list_built_artifacts_raw(id, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_built_artifacts_raw(id, page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(pnc_api.builds, 'get_built_artifacts', id=id,
page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve dependency Artifacts from.", type=types.existing_build_record)
@arg("-p", "--page-size", help="Limit the amount of Artifacts returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_dependency_artifacts(id, page_size=200, page_index=0, sort="", q=""):
"""
List dependency artifacts associated with a BuildRecord
"""
data = list_dependency_artifacts_raw(id, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def list_dependency_artifacts_raw(id, page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(pnc_api.builds, 'get_dependency_artifacts', id=id, page_size=page_size, page_index=page_index, sort=sort,
q=q)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve audited BuildConfiguration from.", type=types.existing_build_record)
def get_audited_configuration_for_record(id):
"""
Get the BuildConfigurationAudited for a given BuildRecord
"""
data = get_audited_configuration_for_record_raw(id)
if data:
return utils.format_json(data)
def get_audited_configuration_for_record_raw(id):
response = utils.checked_api_call(pnc_api.builds, 'get_build_configuration_audited', id=id)
if response:
return response.content
@arg("id", help="BuildRecord ID to retrieve the log from.", type=types.existing_build_record)
def get_log_for_record(id):
"""
Get the log for a given BuildRecord
"""
data = get_log_for_record_raw(id)
if data:
return data
def get_log_for_record_raw(id):
response = utils.checked_api_call(pnc_api.builds, 'get_logs', id=id)
if response:
return response
@arg("id", help="BuildRecord ID to add an Attribute to.", type=types.existing_build_record)
@arg("key", help="Key for the Attribute.")
@arg("value", help="Value for the Attribute.")
def put_attribute(id, key, value):
utils.checked_api_call(pnc_api.builds, 'put_attribute', id=id, key=key, value=value)
@arg("id", help="BuildRecord ID to remove an Attribute from.", type=types.existing_build_record)
@arg("key", help="Key of the Attribute to remove.")
def remove_attribute(id, key):
utils.checked_api_call(pnc_api.builds, 'remove_attribute', id=id, key=key)
@arg("key", help="Key of the Attribute to query BuildRecords for.")
@arg("value", help="Value of the Attribute to query BuildRecords for.")
@arg("-p", "--page-size", help="Limit the amount of BuildRecords returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def query_by_attribute(key, value, page_size=200, page_index=0, sort="", q=""):
data = query_by_attribute_raw(key, value, page_size, page_index, sort, q)
if data:
return utils.format_json(data)
def query_by_attribute_raw(key, value, page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(pnc_api.builds, "query_by_attribute", key=key, value=value, page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response
@arg("id", help="BuildRecord ID to list Attributes of.", type=types.existing_build_record)
def list_attributes(id):
data = list_attributes_raw(id)
if data:
return utils.format_json_list(data)
def list_attributes_raw(id,):
response = utils.checked_api_call(pnc_api.builds, 'get_attributes', id=id)
if response:
return response.content
| en | 0.805653 | List all BuildRecords List all BuildRecords for a given BuildConfiguration List all BuildRecords for a given Project Get a specific BuildRecord by ID List Artifacts associated with a BuildRecord List dependency artifacts associated with a BuildRecord Get the BuildConfigurationAudited for a given BuildRecord Get the log for a given BuildRecord | 2.324858 | 2 |
src/python_keycloak_client/aio/mixins.py | roger-schaer/python-keycloak-client | 0 | 6632412 | from python_keycloak_client.aio.abc import AsyncInit
from python_keycloak_client.aio.well_known import KeycloakWellKnown
from python_keycloak_client.mixins import WellKnownMixin as SyncWellKnownMixin
__all__ = (
'WellKnownMixin',
)
class WellKnownMixin(AsyncInit, SyncWellKnownMixin):
def get_path_well_known(self):
raise NotImplementedError()
@property
def well_known(self):
if self._well_known is None:
raise RuntimeError
return self._well_known
async def __async_init__(self) -> 'WellKnownMixin':
async with self._realm._lock:
if self._well_known is None:
p = self.get_path_well_known().format(self._realm.realm_name)
self._well_known = await KeycloakWellKnown(
realm=self._realm,
path=self._realm.client.get_full_url(p)
)
return self
async def close(self):
await self._well_known.close()
| from python_keycloak_client.aio.abc import AsyncInit
from python_keycloak_client.aio.well_known import KeycloakWellKnown
from python_keycloak_client.mixins import WellKnownMixin as SyncWellKnownMixin
__all__ = (
'WellKnownMixin',
)
class WellKnownMixin(AsyncInit, SyncWellKnownMixin):
def get_path_well_known(self):
raise NotImplementedError()
@property
def well_known(self):
if self._well_known is None:
raise RuntimeError
return self._well_known
async def __async_init__(self) -> 'WellKnownMixin':
async with self._realm._lock:
if self._well_known is None:
p = self.get_path_well_known().format(self._realm.realm_name)
self._well_known = await KeycloakWellKnown(
realm=self._realm,
path=self._realm.client.get_full_url(p)
)
return self
async def close(self):
await self._well_known.close()
| none | 1 | 2.181944 | 2 |
|
functions/checks.py | Brettanda/friday-bot | 5 | 6632413 | <reponame>Brettanda/friday-bot
import discord
from typing import TYPE_CHECKING, Union
from discord.ext import commands
# from interactions import Context as SlashContext
from . import exceptions, config
from .custom_contexts import MyContext
if TYPE_CHECKING:
from discord.ext.commands.core import _CheckDecorator
from index import Friday as Bot
async def min_tiers(bot: "Bot", msg: discord.Message) -> tuple:
guild = bot.get_guild(config.support_server_id)
member = await bot.get_or_fetch_member(guild, msg.author.id)
voted, t1_user, t1_guild = await user_voted(bot, member), await user_is_min_tier(bot, member, config.PremiumTiers.tier_1), await guild_is_min_tier(bot, guild, config.PremiumTiers.tier_1)
if t1_user or t1_guild:
return (voted, t1_user, t1_guild, t1_user, t1_guild, t1_user, t1_guild, t1_user, t1_guild)
t2_user, t2_guild = await user_is_min_tier(bot, member, config.PremiumTiers.tier_2), await guild_is_min_tier(bot, guild, config.PremiumTiers.tier_2)
if t2_user or t2_guild:
return (voted, t1_user, t1_guild, t2_user, t2_guild, t2_user, t2_guild, t2_user, t2_guild)
t3_user, t3_guild = await user_is_min_tier(bot, member, config.PremiumTiers.tier_3), await guild_is_min_tier(bot, guild, config.PremiumTiers.tier_3)
if t3_user or t3_guild:
return (voted, t1_user, t1_guild, t2_user, t2_guild, t3_user, t3_guild, t3_user, t3_guild)
t4_user, t4_guild = await user_is_min_tier(bot, member, config.PremiumTiers.tier_4), await guild_is_min_tier(bot, guild, config.PremiumTiers.tier_4)
if t4_user or t4_guild:
return (voted, t1_user, t1_guild, t2_user, t2_guild, t3_user, t3_guild, t4_user, t4_guild)
return (voted, False, False, False, False, False, False, False, False)
# def guild_is_tier(tier: str) -> "_CheckDecorator":
def user_is_tier(tier: str) -> "_CheckDecorator":
async def predicate(ctx: "MyContext") -> bool:
return True
return commands.check(predicate)
def is_min_tier(tier: int = config.PremiumTiers.tier_1) -> "_CheckDecorator":
async def predicate(ctx: "MyContext") -> bool:
if ctx.author.id == ctx.bot.owner_id:
return True
guild = ctx.bot.get_guild(config.support_server_id)
member = await ctx.bot.get_or_fetch_member(guild, ctx.author.id)
if member is None:
raise exceptions.NotInSupportServer()
if await user_is_min_tier(ctx.bot, member, tier) or await guild_is_min_tier(ctx.bot, ctx.guild, tier):
return True
else:
raise exceptions.RequiredTier()
return commands.check(predicate)
async def guild_is_min_tier(bot: "Bot", guild: discord.Guild, tier: int = config.PremiumTiers.tier_1) -> bool:
""" Checks if a guild has at least patreon 'tier' """
if guild is None:
return commands.NoPrivateMessage()
guild_tier = await bot.db.query("""SELECT tier FROM patrons WHERE guild_id=$1 LIMIT 1""", str(guild.id))
if guild_tier is None:
return False
return guild_tier >= tier
async def user_is_min_tier(bot: "Bot", user: Union[discord.User, discord.Member], tier: int = config.PremiumTiers.tier_1) -> bool:
""" Checks if a user has at least patreon 'tier' """
if not isinstance(user, discord.Member) or (hasattr(user, "guild") and user.guild.id != config.support_server_id or not hasattr(user, "guild")):
guild = bot.get_guild(config.support_server_id)
user = await bot.get_or_fetch_member(guild, user.id)
if user is None:
raise exceptions.NotInSupportServer()
# if not hasattr(user, "guild"):
# return False
roles = [role.id for role in user.roles]
if config.PremiumTiers().get_role(tier) in roles:
return True
for i in range(tier, len(config.PremiumTiers.roles) - 1):
role = bot.get_guild(config.support_server_id).get_role(config.PremiumTiers().get_role(i))
if role.id in roles:
return True
return False
def is_supporter() -> "_CheckDecorator":
"""" Checks if the user has the 'is supporting' role that ALL patrons get"""
async def predicate(ctx: "MyContext") -> bool:
guild = ctx.bot.get_guild(config.support_server_id)
member = await ctx.bot.get_or_fetch_member(guild, ctx.author.id)
if member is None:
return False
if await user_is_supporter(ctx.bot, member):
return True
else:
raise exceptions.NotSupporter()
return commands.check(predicate)
async def user_is_supporter(bot: "Bot", user: discord.User) -> bool:
if user is None:
raise exceptions.NotInSupportServer()
roles = [role.id for role in user.roles]
if config.patreon_supporting_role not in roles:
raise exceptions.NotSupporter()
return True
def is_supporter_or_voted() -> "_CheckDecorator":
async def predicate(ctx: "MyContext") -> bool:
support_guild = ctx.bot.get_guild(config.support_server_id)
member = await ctx.bot.get_or_fetch_member(support_guild, ctx.author.id)
if member is None:
return False
if await user_is_supporter(ctx.bot, member):
return True
elif await user_voted(ctx.bot, member):
return True
else:
raise exceptions.NotSupporter()
return commands.check(predicate)
async def user_voted(bot: "Bot", user: discord.User) -> bool:
user_id = await bot.db.query("SELECT id FROM votes WHERE id=$1", str(user.id))
if isinstance(user_id, list) and len(user_id) > 0:
user_id = user_id[0]
elif isinstance(user_id, list) and len(user_id) == 0:
user_id = None
return True if user_id is not None else False
def is_admin() -> "_CheckDecorator":
"""Do you have permission to change the setting of the bot"""
return commands.check_any(
commands.is_owner(),
commands.has_guild_permissions(manage_guild=True),
commands.has_guild_permissions(administrator=True))
def slash(user: bool = False, private: bool = True) -> "_CheckDecorator":
# async def predicate(ctx: SlashContext) -> bool:
# if user is True and ctx.guild_id and ctx.guild is None and ctx.channel is None:
# raise exceptions.OnlySlashCommands()
# if not private and not ctx.guild and not ctx.guild_id and ctx.channel_id:
# raise commands.NoPrivateMessage()
# return True
# return commands.check(predicate)
return False
| import discord
from typing import TYPE_CHECKING, Union
from discord.ext import commands
# from interactions import Context as SlashContext
from . import exceptions, config
from .custom_contexts import MyContext
if TYPE_CHECKING:
from discord.ext.commands.core import _CheckDecorator
from index import Friday as Bot
async def min_tiers(bot: "Bot", msg: discord.Message) -> tuple:
guild = bot.get_guild(config.support_server_id)
member = await bot.get_or_fetch_member(guild, msg.author.id)
voted, t1_user, t1_guild = await user_voted(bot, member), await user_is_min_tier(bot, member, config.PremiumTiers.tier_1), await guild_is_min_tier(bot, guild, config.PremiumTiers.tier_1)
if t1_user or t1_guild:
return (voted, t1_user, t1_guild, t1_user, t1_guild, t1_user, t1_guild, t1_user, t1_guild)
t2_user, t2_guild = await user_is_min_tier(bot, member, config.PremiumTiers.tier_2), await guild_is_min_tier(bot, guild, config.PremiumTiers.tier_2)
if t2_user or t2_guild:
return (voted, t1_user, t1_guild, t2_user, t2_guild, t2_user, t2_guild, t2_user, t2_guild)
t3_user, t3_guild = await user_is_min_tier(bot, member, config.PremiumTiers.tier_3), await guild_is_min_tier(bot, guild, config.PremiumTiers.tier_3)
if t3_user or t3_guild:
return (voted, t1_user, t1_guild, t2_user, t2_guild, t3_user, t3_guild, t3_user, t3_guild)
t4_user, t4_guild = await user_is_min_tier(bot, member, config.PremiumTiers.tier_4), await guild_is_min_tier(bot, guild, config.PremiumTiers.tier_4)
if t4_user or t4_guild:
return (voted, t1_user, t1_guild, t2_user, t2_guild, t3_user, t3_guild, t4_user, t4_guild)
return (voted, False, False, False, False, False, False, False, False)
# def guild_is_tier(tier: str) -> "_CheckDecorator":
def user_is_tier(tier: str) -> "_CheckDecorator":
async def predicate(ctx: "MyContext") -> bool:
return True
return commands.check(predicate)
def is_min_tier(tier: int = config.PremiumTiers.tier_1) -> "_CheckDecorator":
async def predicate(ctx: "MyContext") -> bool:
if ctx.author.id == ctx.bot.owner_id:
return True
guild = ctx.bot.get_guild(config.support_server_id)
member = await ctx.bot.get_or_fetch_member(guild, ctx.author.id)
if member is None:
raise exceptions.NotInSupportServer()
if await user_is_min_tier(ctx.bot, member, tier) or await guild_is_min_tier(ctx.bot, ctx.guild, tier):
return True
else:
raise exceptions.RequiredTier()
return commands.check(predicate)
async def guild_is_min_tier(bot: "Bot", guild: discord.Guild, tier: int = config.PremiumTiers.tier_1) -> bool:
""" Checks if a guild has at least patreon 'tier' """
if guild is None:
return commands.NoPrivateMessage()
guild_tier = await bot.db.query("""SELECT tier FROM patrons WHERE guild_id=$1 LIMIT 1""", str(guild.id))
if guild_tier is None:
return False
return guild_tier >= tier
async def user_is_min_tier(bot: "Bot", user: Union[discord.User, discord.Member], tier: int = config.PremiumTiers.tier_1) -> bool:
""" Checks if a user has at least patreon 'tier' """
if not isinstance(user, discord.Member) or (hasattr(user, "guild") and user.guild.id != config.support_server_id or not hasattr(user, "guild")):
guild = bot.get_guild(config.support_server_id)
user = await bot.get_or_fetch_member(guild, user.id)
if user is None:
raise exceptions.NotInSupportServer()
# if not hasattr(user, "guild"):
# return False
roles = [role.id for role in user.roles]
if config.PremiumTiers().get_role(tier) in roles:
return True
for i in range(tier, len(config.PremiumTiers.roles) - 1):
role = bot.get_guild(config.support_server_id).get_role(config.PremiumTiers().get_role(i))
if role.id in roles:
return True
return False
def is_supporter() -> "_CheckDecorator":
"""" Checks if the user has the 'is supporting' role that ALL patrons get"""
async def predicate(ctx: "MyContext") -> bool:
guild = ctx.bot.get_guild(config.support_server_id)
member = await ctx.bot.get_or_fetch_member(guild, ctx.author.id)
if member is None:
return False
if await user_is_supporter(ctx.bot, member):
return True
else:
raise exceptions.NotSupporter()
return commands.check(predicate)
async def user_is_supporter(bot: "Bot", user: discord.User) -> bool:
if user is None:
raise exceptions.NotInSupportServer()
roles = [role.id for role in user.roles]
if config.patreon_supporting_role not in roles:
raise exceptions.NotSupporter()
return True
def is_supporter_or_voted() -> "_CheckDecorator":
async def predicate(ctx: "MyContext") -> bool:
support_guild = ctx.bot.get_guild(config.support_server_id)
member = await ctx.bot.get_or_fetch_member(support_guild, ctx.author.id)
if member is None:
return False
if await user_is_supporter(ctx.bot, member):
return True
elif await user_voted(ctx.bot, member):
return True
else:
raise exceptions.NotSupporter()
return commands.check(predicate)
async def user_voted(bot: "Bot", user: discord.User) -> bool:
user_id = await bot.db.query("SELECT id FROM votes WHERE id=$1", str(user.id))
if isinstance(user_id, list) and len(user_id) > 0:
user_id = user_id[0]
elif isinstance(user_id, list) and len(user_id) == 0:
user_id = None
return True if user_id is not None else False
def is_admin() -> "_CheckDecorator":
"""Do you have permission to change the setting of the bot"""
return commands.check_any(
commands.is_owner(),
commands.has_guild_permissions(manage_guild=True),
commands.has_guild_permissions(administrator=True))
def slash(user: bool = False, private: bool = True) -> "_CheckDecorator":
# async def predicate(ctx: SlashContext) -> bool:
# if user is True and ctx.guild_id and ctx.guild is None and ctx.channel is None:
# raise exceptions.OnlySlashCommands()
# if not private and not ctx.guild and not ctx.guild_id and ctx.channel_id:
# raise commands.NoPrivateMessage()
# return True
# return commands.check(predicate)
return False | en | 0.614297 | # from interactions import Context as SlashContext # def guild_is_tier(tier: str) -> "_CheckDecorator": Checks if a guild has at least patreon 'tier' SELECT tier FROM patrons WHERE guild_id=$1 LIMIT 1 Checks if a user has at least patreon 'tier' # if not hasattr(user, "guild"): # return False " Checks if the user has the 'is supporting' role that ALL patrons get Do you have permission to change the setting of the bot # async def predicate(ctx: SlashContext) -> bool: # if user is True and ctx.guild_id and ctx.guild is None and ctx.channel is None: # raise exceptions.OnlySlashCommands() # if not private and not ctx.guild and not ctx.guild_id and ctx.channel_id: # raise commands.NoPrivateMessage() # return True # return commands.check(predicate) | 2.213725 | 2 |
main.py | ctron/mitemp-gateway | 0 | 6632414 | #!/usr/bin/env python3
import sys
from datetime import datetime
import bluetooth._bluetooth as bluez
import time
import os
import json
import math
import requests
from urllib.parse import urljoin, urlencode, quote, quote_plus
from bluetooth_utils import (toggle_device, enable_le_scan,
parse_le_advertising_events,
disable_le_scan, raw_packet_to_str)
print("Starting up...")
data_schema = os.getenv('DATA_SCHEMA', "urn:drogue:iot:temperature") # vorto:ctron.mitemp.status:1.0.0
geolocation = os.getenv('GEOLOCATION')
if geolocation is not None:
geolocation = json.loads(geolocation)
app_id = os.getenv('APP_ID')
device_id = quote(os.environ['DEVICE_ID'])
device_password = <PASSWORD>('DEVICE_PASSWORD')
# Use 0 for hci0
dev_id = os.getenv('HCI_NUM', 0)
endpoint = os.getenv('ENDPOINT', "https://http.sandbox.drogue.cloud")
print(endpoint)
denc = quote_plus(device_id)
auth = (f"{denc}@{app_id}", device_password)
path = f"/v1/status"
url = urljoin(endpoint, path)
print(url)
toggle_device(dev_id, True)
try:
sock = bluez.hci_open_dev(dev_id)
except:
print("Cannot open bluetooth device %i" % dev_id)
raise
# Set filter to "True" to see only one packet per device
enable_le_scan(sock, filter_duplicates=False)
try:
def le_advertise_packet_handler(mac, adv_type, data, rssi):
data_str = raw_packet_to_str(data)
# Check for ATC preamble
if data_str[6:10] == '1a18':
temp = int(data_str[22:26], 16) / 10
hum = int(data_str[26:28], 16)
batt = int(data_str[28:30], 16)
print("%s - Device: %s Temp: %s°C Humidity: %s%% Batt: %s%%" % \
(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), mac, temp, hum, batt), flush=True)
status = {
"temp": temp,
"hum": hum,
"batt": batt,
"geoloc": geolocation,
}
params = {
"data_schema": data_schema,
"as": mac,
}
# noinspection PyBroadException
try:
res = requests.post(url,
json=status,
auth=auth,
headers={"Content-Type": "application/json"},
params=params
)
print("Result: %s" % res, flush=True)
except:
pass
# Called on new LE packet
parse_le_advertising_events(sock,
handler=le_advertise_packet_handler,
debug=False)
# Scan until Ctrl-C
except KeyboardInterrupt:
disable_le_scan(sock)
| #!/usr/bin/env python3
import sys
from datetime import datetime
import bluetooth._bluetooth as bluez
import time
import os
import json
import math
import requests
from urllib.parse import urljoin, urlencode, quote, quote_plus
from bluetooth_utils import (toggle_device, enable_le_scan,
parse_le_advertising_events,
disable_le_scan, raw_packet_to_str)
print("Starting up...")
data_schema = os.getenv('DATA_SCHEMA', "urn:drogue:iot:temperature") # vorto:ctron.mitemp.status:1.0.0
geolocation = os.getenv('GEOLOCATION')
if geolocation is not None:
geolocation = json.loads(geolocation)
app_id = os.getenv('APP_ID')
device_id = quote(os.environ['DEVICE_ID'])
device_password = <PASSWORD>('DEVICE_PASSWORD')
# Use 0 for hci0
dev_id = os.getenv('HCI_NUM', 0)
endpoint = os.getenv('ENDPOINT', "https://http.sandbox.drogue.cloud")
print(endpoint)
denc = quote_plus(device_id)
auth = (f"{denc}@{app_id}", device_password)
path = f"/v1/status"
url = urljoin(endpoint, path)
print(url)
toggle_device(dev_id, True)
try:
sock = bluez.hci_open_dev(dev_id)
except:
print("Cannot open bluetooth device %i" % dev_id)
raise
# Set filter to "True" to see only one packet per device
enable_le_scan(sock, filter_duplicates=False)
try:
def le_advertise_packet_handler(mac, adv_type, data, rssi):
data_str = raw_packet_to_str(data)
# Check for ATC preamble
if data_str[6:10] == '1a18':
temp = int(data_str[22:26], 16) / 10
hum = int(data_str[26:28], 16)
batt = int(data_str[28:30], 16)
print("%s - Device: %s Temp: %s°C Humidity: %s%% Batt: %s%%" % \
(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), mac, temp, hum, batt), flush=True)
status = {
"temp": temp,
"hum": hum,
"batt": batt,
"geoloc": geolocation,
}
params = {
"data_schema": data_schema,
"as": mac,
}
# noinspection PyBroadException
try:
res = requests.post(url,
json=status,
auth=auth,
headers={"Content-Type": "application/json"},
params=params
)
print("Result: %s" % res, flush=True)
except:
pass
# Called on new LE packet
parse_le_advertising_events(sock,
handler=le_advertise_packet_handler,
debug=False)
# Scan until Ctrl-C
except KeyboardInterrupt:
disable_le_scan(sock)
| en | 0.539681 | #!/usr/bin/env python3 # vorto:ctron.mitemp.status:1.0.0 # Use 0 for hci0 # Set filter to "True" to see only one packet per device # Check for ATC preamble # noinspection PyBroadException # Called on new LE packet # Scan until Ctrl-C | 2.476396 | 2 |
EMNIST.py | zmewshaw/deep_learning | 1 | 6632415 | <gh_stars>1-10
import idx2numpy
import numpy as np
import matplotlib as plt
train_images = "C:/Users/zmews/GitHub/Datasets/mnist/train-images.idx3-ubyte"
train_labels = "C:/Users/zmews/GitHub/Datasets/mnist/train-labels.idx1-ubyte"
test_images = "C:/Users/zmews/GitHub/Datasets/mnist/t10k-images.idx3-ubyte"
test_labels = "C:/Users/zmews/GitHub/Datasets/mnist/t10k-labels.idx1-ubyte"
X_train_orig = idx2numpy.convert_from_file(train_images)
Y_train = idx2numpy.convert_from_file(train_labels)
X_test_orig = idx2numpy.convert_from_file(test_images)
Y_test = idx2numpy.convert_from_file(test_labels)
# alter (flatten) training and test set if necessary (global) - x.reshape(x.shape[0], -1).T - gives a vector
# standardize dataset if necessary
def initializeParametersDeep(layerDims):
return parameters
def lModelForward(X, parameters):
return AL, caches
def computeCost(AL, Y):
return cost
def lModelBackward(AL, Y, caches):
return grads
def updateParameters(parameters, grads, learningRate):
return parameters
# implement with a function: xLayers(X, Y, layerDims, learningRate = x, numIterations = x, printCost = False) | import idx2numpy
import numpy as np
import matplotlib as plt
train_images = "C:/Users/zmews/GitHub/Datasets/mnist/train-images.idx3-ubyte"
train_labels = "C:/Users/zmews/GitHub/Datasets/mnist/train-labels.idx1-ubyte"
test_images = "C:/Users/zmews/GitHub/Datasets/mnist/t10k-images.idx3-ubyte"
test_labels = "C:/Users/zmews/GitHub/Datasets/mnist/t10k-labels.idx1-ubyte"
X_train_orig = idx2numpy.convert_from_file(train_images)
Y_train = idx2numpy.convert_from_file(train_labels)
X_test_orig = idx2numpy.convert_from_file(test_images)
Y_test = idx2numpy.convert_from_file(test_labels)
# alter (flatten) training and test set if necessary (global) - x.reshape(x.shape[0], -1).T - gives a vector
# standardize dataset if necessary
def initializeParametersDeep(layerDims):
return parameters
def lModelForward(X, parameters):
return AL, caches
def computeCost(AL, Y):
return cost
def lModelBackward(AL, Y, caches):
return grads
def updateParameters(parameters, grads, learningRate):
return parameters
# implement with a function: xLayers(X, Y, layerDims, learningRate = x, numIterations = x, printCost = False) | en | 0.59599 | # alter (flatten) training and test set if necessary (global) - x.reshape(x.shape[0], -1).T - gives a vector # standardize dataset if necessary # implement with a function: xLayers(X, Y, layerDims, learningRate = x, numIterations = x, printCost = False) | 2.769572 | 3 |
gramhopper/representable.py | OrBin/Bot-Engine | 0 | 6632416 | <reponame>OrBin/Bot-Engine
class Representable:
"""
A "representable" interface for triggers and responses.
The representation is the name of the trigger/response if given,
or "inline <Type of trigger/response>" otherwise.
"""
def __init__(self):
self.__name = None
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
def __str__(self):
if self.__name:
return self.__name
return f'inline {self.__class__.__name__}'
| class Representable:
"""
A "representable" interface for triggers and responses.
The representation is the name of the trigger/response if given,
or "inline <Type of trigger/response>" otherwise.
"""
def __init__(self):
self.__name = None
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
def __str__(self):
if self.__name:
return self.__name
return f'inline {self.__class__.__name__}' | en | 0.746156 | A "representable" interface for triggers and responses. The representation is the name of the trigger/response if given, or "inline <Type of trigger/response>" otherwise. | 2.616706 | 3 |
Linux/helpers.py | PreCySec/Maldoc-Parser | 0 | 6632417 | <gh_stars>0
import re
import os
import sys
import hexdump
from printy import *
from beautifultable import BeautifulTable
class Helpers:
"""
The Helpers() class has helper methods and regular expressions that are used by all other classes
"""
summary_table = BeautifulTable(maxwidth=200)
summary_table.headers = (["Indication", "Description"])
summary_table.columns.width = 100
# Magic byte regular expressions:
#################################
RTF = b'\x7b\x5c\x72\x74'
OLE = b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1'
OOXML = b'\x50\x4b\x03\x04'
PDF = b'%PDF-'
# OLE related regular expressions:
##################################
MICROSOFT_EXCEL = b'\x4d\x69\x63\x72\x6f\x73\x6f\x66\x74\x20\x45\x78\x63\x65\x6c'
MICROSOFT_OFFICE_WORD = b'\x4d\x69\x63\x72\x6f\x73\x6f\x66\x74\x20\x4f\x66\x66\x69\x63\x65\x20\x57\x6f\x72\x64'
OLE_FILE_MAGIC = b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1'
EQ_EDIT_CLSID_RE = rb'\x02\xce\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x46'
# EQ_EDIT_CLSID_RE = rb'\x02\xce\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00F'
EQUATION_EDITOR_RE = rb'\x4d\x69\x63\x72\x6f\x73\x6f\x66\x74\x20\x45\x71\x75\x61\x74\x69\x6f\x6e\x20\x33\x2e\x30'
equation_regex = r'[[e|E][q|Q][u|U][a|A][t|T][i|I][o|O][n|N]]{8}'
equation_byte_regex = rb'[\x65\x45\x71\x51\x75\x55\x61\x41\x74\x54\x69\x49\x6F\x4F\x6E\x4E]{8}'
OLE_DDE_RE = rb'\x13\s*\x44\x44\x45\x41\x55\x54\x4f[^\x14]+'
# OLE Excel files related regular expressions:
##############################################
BOF_RECORD_RE = rb'\t\x08[\x08|\x10]\x00\x00[\x05|\x06][\x00-\xff]{6}'
BOF_RECORDS_RE = rb'\t\x08[\x08|\x10]\x00\x00[\x05|\x06][\x00-\xff]{6}'
EOF_BOF = rb"\x0a\x00\x00\x00\x09\x08"
BOUNDHSEET_RECORD = rb'\x85\x00[\x01-\x88]\x00[\x00-\xff]{4}[\x00-\x02][\x00|\x01]'
SHEET_NAME_1 = rb'\x85\x00[\x00-\xff]\x00[\x00-\xff]{5}[\x00|\x01]([\x00-\xff]{1,16})\x85\x00'
SHEET_NAME_2 = rb'\x85\x00[\x00-\xff]\x00[\x00-\xff]{5}[\x00|\x01]([\x00-\xff]{1,12})'
# RTF related regular expressions:
##################################
rtf_clean_regex = rb"\x0d|\x0a|\x09|\x20"
rtf_ole_blob_regex = rb"(\x64\x30\x63\x66\x31\x31\x65[\x00-\x66]+)\}"
rtf_binary_blob_regex = rb"[A-Z]\}([\x00-\x66]+)\{\\|\x62\x69\x6e([\x00-\x66]+)\}|\}([\x00-\x66]+)\}|\x6d\x61\x74\x68([\x00-\x66]+)\}|\}([\x00-\x5b]+)|[\x61-\x7a]{3,20}([\x00-\x5b\x61-\x7a]+)\{|\\[\x00-\x5b\x61-\x7a]{3,5}([\x00-\x5b\x61-\x7a]+)"
pe_header_regex = r"4d5a[a-z0-9]{100,500}546869732070726f6772616d"
pe_magic_str = r"4d5a"
# PDF related regular expressions:
##################################
obj_regex = rb"\d{1,2} \d obj[\x00-\xff]+?endobj"
obj_header = rb"\d{1,2} \d obj[\x00-\xff]<<[\x00-\xff]+?>>"
export_data_regex = rb'this\.exportDataObject\((.*?)\)'
filespec_regex = rb'/Type /Filespec|/Type/Filespec'
file_regex = rb'/F \(.*?\)|/F\(.*?\)'
unc_regex = rb'F\(\\\\\\\\\d{1,3}\.d{1,3}\.d{1,3}\.d{1,3}\\\\.*?\) 0 R'
uri_regex = rb'URI \(.* ?\)|URI\(.* ?\)'
emb_file_regex = rb'/Type /EmbeddedFile|/Type/EmbeddedFile'
file_ref_regex = rb'F (\d{1,2}) 0 R'
objstm_regex = rb'/Type /ObjStm'
js_ref_pattern = rb'JS (\d{1,2}) 0 R'
auto_action_pattern = rb'/AA'
open_action_regex = rb'/OpenAction'
o_regex = rb'/O (\d{1,2}) 0 R'
open_a_ref_regex = rb'/OpenAction 9 0 R'
launch_regex = rb'/Launch'
stream_regex = rb'stream([\x00-\xff]+?)endstream'
goto_regex = rb'/GoTo|/GoToR|/GoToE'
# goto_remote_regex = rb'/GoToR'
# goto_emb_regex = rb'/GoToE'
submitform_regex = rb'/SubmitForm'
# Generic regular expressions:
##############################
unicode_regex = rb'[\x20-\x7e]\x00[\x20-\x7e]\x00'
ascii_regex = rb'[\x20-\x7e]{10,1000}'
base64_regex = r'(?:[A-Za-z\d+/]{4})|(?:[A-Za-z\d+/]{3}=|[A-Za-z\d+/]{2}==)'
# OLE object identifier CLSIDs (the CLSID is at raw offset 0x450 in the OLE file):
##################################################################################
CLSIDS = {
rb'\x00\x02\x08\x10\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel.Sheet.5',
rb'\x00\x02\x08\x11\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel.Chart.5',
rb'\x00\x02\x08\x20\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Microsoft Excel 97-2003 Worksheet (Excel.Sheet.8)',
rb'\x00\x02\x08\x21\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel.Chart.8',
rb'\x00\x02\x08\x30\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel.Sheet.12',
rb'\x00\x02\x08\x32\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel sheet with macro enabled (Excel.SheetMacroEnabled.12)',
rb'\x00\x02\x08\x33\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel binary sheet with macro enabled (Excel.SheetBinaryMacroEnabled.12)',
rb'\x00\x02\x09\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Word 6.0-7.0 Document (Word.Document.6)',
rb'\x00\x02\x09\x06\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Word 97-2003 Document (Word.Document.8)',
rb'\x00\x02\x09\x07\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Word Picture (Word.Picture.8)',
rb'\x00\x02\x0C\x01\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x02\x14\x01\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Windows LNK Shortcut file', #
rb'\x00\x02\x17\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Equation 2.0 (Known Related to CVE-2017-11882 or CVE-2018-0802)',
rb'\x00\x02\x26\x01\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x02\x26\x02\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x02\x26\x03\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x02\xCE\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Equation 3.0 (Known Related to CVE-2017-11882 or CVE-2018-0802)',
rb'\x00\x02\xCE\x02\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Equation 3.0 (Known Related to CVE-2017-11882 or CVE-2018-0802)',
rb'\x00\x02\xCE\x03\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'MathType Equation Object',
rb'\x00\x03\x00\x0B\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Equation (Known Related to CVE-2017-11882 or CVE-2018-0802)',
rb'\x00\x03\x00\x0C\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x03\x00\x0D\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x03\x00\x0E\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x04\x8E\xB4\x3E\x20\x59\x42\x2F\x95\xE0\x55\x7D\xA9\x60\x38\xAF': 'Microsoft Powerpoint.Slide.12',
rb'\x06\x29\x0B\xD3\x48\xAA\x11\xD2\x84\x32\x00\x60\x08\xC3\xFB\xFC': 'Script Moniker, aka Moniker to a Windows Script Component (may trigger CVE-2017-0199)',
rb'\x18\xA0\x6B\x6B\x2F\x3F\x4E\x2B\xA6\x11\x52\xBE\x63\x1B\x2D\x22': 'Word.DocumentMacroEnabled.12 (DOCM)',
rb'\x30\x50\xF4\xD8\x98\xB5\x11\xCF\xBB\x82\x00\xAA\x00\xBD\xCE\x0B': 'HTML Application (may trigger CVE-2017-0199)',
rb'\x44\xF9\xA0\x3B\xA3\xEC\x4F\x3B\x93\x64\x08\xE0\x00\x7F\x21\xDF': 'Control.TaskSymbol (Known Related to CVE-2015-1642 & CVE-2015-2424)',
rb'\x46\xE3\x13\x70\x3F\x7A\x11\xCE\xBE\xD6\x00\xAA\x00\x61\x10\x80': 'Forms.MultiPage',
rb'\x4C\x59\x92\x41\x69\x26\x10\x1B\x99\x92\x00\x00\x0B\x65\xC6\xF9': 'Forms.Image (may trigger CVE-2015-2424)',
rb'\x64\x81\x8D\x10\x4F\x9B\x11\xCF\x86\xEA\x00\xAA\x00\xB9\x29\xE8': 'Microsoft Powerpoint.Show.8',
rb'\x64\x81\x8D\x11\x4F\x9B\x11\xCF\x86\xEA\x00\xAA\x00\xB9\x29\xE8': 'Microsoft Powerpoint.Slide.8',
rb'\x6E\x18\x20\x20\xF4\x60\x11\xCE\x9B\xCD\x00\xAA\x00\x60\x8E\x01': 'ActiveX Control: Forms.Frame',
rb'\x20\x20\x18\x6E\x60\xF4\xCE\x11\x9B\xCD\x00\xAA\x00\x60\x8E\x01': 'ActiveX Control: Microsoft Forms 2.0 Frame',
rb'\x88\xD9\x69\xEB\xF1\x92\x11\xD4\xA6\x5F\x00\x40\x96\x32\x51\xE5': 'Msxml2.ServerXMLHTTP.5.0',
rb'\x88\xD9\x69\xEA\xF1\x92\x11\xD4\xA6\x5F\x00\x40\x96\x32\x51\xE5': 'Msxml2.XMLHTTP.5.0',
rb'\x88\xD9\x69\xE7\xF1\x92\x11\xD4\xA6\x5F\x00\x40\x96\x32\x51\xE5': 'Msxml2.XMLSchemaCache.5.0',
rb'\x88\xD9\x69\xE8\xF1\x92\x11\xD4\xA6\x5F\x00\x40\x96\x32\x51\xE5': 'Msxml2.XSLTemplate.5.0',
rb'\x97\x8C\x9E\x23\xD4\xB0\x11\xCE\xBF\x2D\x00\xAA\x00\x3F\x40\xD0': 'Microsoft Forms 2.0 Label (Forms.Label.1)',
rb'\xB8\x01\xCA\x65\xA1\xFC\x11\xD0\x85\xAD\x44\x45\x53\x54\x00\x00': 'Adobe Acrobat Document - PDF file',
rb'\xC0\x8A\xFD\x90\xF2\xA1\x11\xD1\x84\x55\x00\xA0\xC9\x1F\x38\x80': 'ShellBrowserWindow',
rb'\xC6\x2A\x69\xF0\x16\xDC\x11\xCE\x9E\x98\x00\xAA\x00\x57\x4A\x4F': 'Forms.Form',
rb'\xCF\x4F\x55\xF4\x8F\x87\x4D\x47\x80\xBB\x58\x08\x16\x4B\xB3\xF8': 'Microsoft Powerpoint.Show.12',
rb'\xD7\x05\x32\x40\xCE\x69\x11\xCD\xA7\x77\x00\xDD\x01\x14\x3C\x57': 'Microsoft Forms 2.0 CommandButton',
rb'\xF2\x0D\xA7\x20\xC0\x2F\x11\xCE\x92\x7B\x08\x00\x09\x5A\xE3\x40': 'OLE Package Object (may contain and run any file)',
rb'\xF4\x14\xC2\x60\x6A\xC0\x11\xCF\xB6\xD1\x00\xAA\x00\xBB\xBB\x58': 'jscript.dll - JScript Language (ProgID: ECMAScript, JavaScript, JScript, LiveScript)',
rb'\xF4\x75\x4C\x9B\x64\xF5\x4B\x40\x8A\xF4\x67\x97\x32\xAC\x06\x07': 'Microsoft Word Document (Word.Document.12)'}
def determine_mimetype(self, data):
"""
Determine the file type by the magic bytes/signatures.
Returns a string indicating the file type ("ole"/"ooxml"/"rtf"/"pdf")
"""
# https://www.garykessler.net/library/file_sigs.html
try:
if self.OLE == data[:len(self.OLE)]:
printy("[y][+] Mime type: Object Linking and Embedding (OLE) Compound File (CF)@")
return "ole"
elif self.OOXML == data[:len(self.OOXML)]:
printy("[y][+] Mime type: Microsoft Office Open XML Format (OOXML) Document@")
return "ooxml"
elif self.RTF == data[:len(self.RTF)]:
printy("[y][+] Mime type: RTF (Rich text format) word processing file - \"{\\rtf\"@")
return "rtf"
elif self.PDF == data[:len(self.PDF)]:
printy("[y][+] Mime type: PDF document - \"%PDF-1.x\"@")
return "pdf"
except TypeError:
return 0
def scan_for_obj_type(self, stream_name, data):
"""
Scans an OLE object to identify its type using the CLSIDS dictionary
"""
printy("[y][+] Attempting to determine the object type@")
for clsid in self.CLSIDS:
if re.findall(clsid, data):
try:
print_string = raw_format("[o>]Object type:@ %s" % self.CLSIDS[clsid])
printy(print_string)
except Exception:
print("[+] Object type: %s" % self.CLSIDS[clsid])
print_string = "Object type: %s" % self.CLSIDS[clsid]
self.add_summary_if_no_duplicates(print_string, stream_name)
else:
self.add_summary_if_no_duplicates(print_string, stream_name)
def deduplicate_table(self, string, summary_string):
"""
Removes duplicates from the final summary table
"""
no_duplicates = True
temp1 = list(self.summary_table.rows)
for row in temp1:
for c in row:
try:
if string in c:
no_duplicates = False
except TypeError:
continue
return no_duplicates
def add_summary_if_no_duplicates(self, summary, desc):
"""
Adds a new row to the summary table if it does not exist in it already.
It is the only function that adds rows to the final summary table.
"""
no_duplicates = self.deduplicate_table(desc, summary)
if no_duplicates:
self.summary_table.rows.append([summary, desc])
def search_indicators_in_string(self, filename, string):
"""
Scans a string against multiple known keywords to extract more indications in the analysis report and
summary table.
"""
no_duplicates = True
if "URLMON" in string or "urlmon" in string or "loadToFile" in string:
if '\x01' in filename:
clean_filename = filename.strip('\x01')
summary_string = raw_format("[y>]Indication of file download in extracted strings from:@ %s"
% clean_filename.strip('\x01'))
else:
summary_string = raw_format("[y>]Indication of file download in extracted strings from:@ %s"
% filename)
self.add_summary_if_no_duplicates(summary_string, string)
if ("http:" in string or "https:" in string) and "crl" not in string and "thawte" not in string and \
"verisign" not in string and "symantec" not in string and \
"ocsp" not in string and "openxml" not in string and \
"theme" not in string and "schema" not in string and \
"microsoft" not in string:
if '\x01' in filename:
clean_filename = filename.strip('\x01')
summary_string = raw_format("[o>]URL found in extracted strings, from:@ %s" % clean_filename)
else:
summary_string = raw_format("[o>]URL found in extracted strings, from:@ %s" % filename.strip('\x01'))
self.add_summary_if_no_duplicates(summary_string, string)
if "CreateFile" in string or "CreateDirectory" in string:
if '\x01' in filename:
clean_filename = filename.strip('\x01')
summary_string = raw_format("[y>]Indication of file creation in extracted strings, from:@ %s"
% "".join(clean_filename.strip('\x01')))
else:
summary_string = raw_format("[y>]Indication of file creation in extracted strings, from:@ %s"
% "".join(filename.strip('\x01')))
self.add_summary_if_no_duplicates(summary_string, string)
if "ShellExecute" in string or "Shell32.Shell" in string or "cmd /c" in string or "powershell" in string:
if '\x01' in filename:
clean_filename = filename.strip('\x01')
summary_string = raw_format("[o>]Indication of shell command execution in file:@ %s"
% "".join(clean_filename.strip('\x01')))
else:
summary_string = raw_format("[o>]Indication of shell command execution in file:@ %s"
% "".join(filename.strip('\x01')))
self.add_summary_if_no_duplicates(summary_string, string)
if (".exe" in string or
".EXE" in string or
".exe" in string or
".sct" in string or
".ocx" in string or
".php" in string or
"ProgramData" in string or
"Desktop" in string or
"Downloads" in string or
"C:\\Users" in string or
".com" in string or
".ocx" in string or
".hta" in string or
".tmp" in string or
".dat" in string or
".txt" in string or
re.findall(r"[a-z]+\.[a-z]", string)) and \
"theme" not in string and \
"_rels" not in string and \
"openxml" not in string and \
"theme" not in string and \
"schema" not in string and \
"crl" not in string and \
"thawte" not in string and \
"verisign" not in string and \
"symantec" not in string and \
"ocsp" not in string and \
"openxml" not in string and \
"theme" not in string and \
"schema" not in string and \
"java" not in string and \
"Java" not in string and \
"jvm" not in string and \
"mscoree.dll" not in string and \
"kernel32.dll" not in string and \
"gdiplus32.dll" not in string and \
"gdiplus.dll" not in string and \
"advapi32.dll" not in string and \
"native" not in string and \
"microsoft" not in string:
if "Ole10Native" in filename:
summary_string = raw_format("[o>]Suspicious file path or possible domain found in:@ %s" % filename.strip('\x01'))
else:
summary_string = raw_format("[o>]Suspicious file path or possible domain found in:@ %s"
% "".join(filename.strip('\x01')))
if no_duplicates and len(string) < 100:
self.add_summary_if_no_duplicates(summary_string, string)
if "This program" in string or "DOS mode" in string:
if "Ole10Native" in filename:
summary_string = raw_format("[r>]Possible PE (Portable Executable) payload in stream:@ %s" % filename.strip('\x01'))
self.add_summary_if_no_duplicates(summary_string, string)
else:
summary_string = raw_format("[r>]Possible PE (Portable Executable) payload in stream:@ %s" % "".join(filename.strip('\x01')))
self.add_summary_if_no_duplicates(summary_string, string)
eq = re.findall(self.equation_regex, string)
if eq:
if "Ole10Native" in filename:
summary_string = raw_format("[r>]Possible Equation Editor exploit:@ " % filename.strip('\x01'))
self.add_summary_if_no_duplicates(summary_string, string)
else:
summary_string = raw_format("[r>]Possible Equation Editor exploit:@ %s" % "".join(filename.strip('\x01')))
self.add_summary_if_no_duplicates(summary_string, string)
def find_susp_functions_vba(self, filename, decompressed):
"""
Scans decompressed VBA projects for known function keywords to provide nore insight on the code behavior.
"""
if "Auto_Open" in decompressed or "Document_Open" in decompressed:
summary_string = raw_format("[r>]VBA macro auto execution:@ Auto_Open()/Document_Open() found in: %s" % "\\".join(filename))
summary_desc = "%s: Auto_Open()/Document_Open() - will execute VBA code when doc is opened" % "\\".join(filename)
self.add_summary_if_no_duplicates(summary_string, summary_desc)
if "Auto_Close" in decompressed:
summary_string = raw_format("[r>]VBA macro:@ Auto_Close() in:@ %s" % str("\\".join(filename)))
summary_desc = "%s: Auto_Close() - will execute VBA code when doc is closed" \
% "\\".join(filename)
self.add_summary_if_no_duplicates(summary_string, summary_desc)
if "Shell(" in decompressed or "WScript.Shell" in decompressed:
summary_string = raw_format("[r>]VBA macro: the code invokes the shell (Shell()\Wscript.Shell) in:@ %s"
% str("\\".join(filename)))
summary_desc = "%s: Shell() - Macro code will invoke the shell to execute code" \
% "\\".join(filename)
self.add_summary_if_no_duplicates(summary_string, summary_desc)
if "http" in decompressed:
summary_string = raw_format("[r>]VBA macro: URL found in:@ %s" % str("\\".join(filename)))
self.add_summary_if_no_duplicates(summary_string, re.findall(r'http[s]{0,1}\:\/\/.*\..*\/.*\"',
decompressed)[0])
def find_susp_functions_xlm(self, filename, decompressed):
"""
Scans XLM macros in sheets for known function keywords to provide nore insight on the code behavior.
"""
if "HALT()" in decompressed or "RETURN(" in decompressed or "EXEC()" in decompressed or \
"WRITE(" in decompressed or "FOR(" in decompressed or "FOR(" in decompressed or \
"FORMULA(" in decompressed:
summary_string = raw_format("[r>]Excel 4.0 (XLM) macro\n XLM macro functions detected in: %s"
% "\\".join(filename.strip('\x01')))
summary_desc = decompressed[:150]
self.add_summary_if_no_duplicates(summary_string, summary_desc)
| import re
import os
import sys
import hexdump
from printy import *
from beautifultable import BeautifulTable
class Helpers:
"""
The Helpers() class has helper methods and regular expressions that are used by all other classes
"""
summary_table = BeautifulTable(maxwidth=200)
summary_table.headers = (["Indication", "Description"])
summary_table.columns.width = 100
# Magic byte regular expressions:
#################################
RTF = b'\x7b\x5c\x72\x74'
OLE = b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1'
OOXML = b'\x50\x4b\x03\x04'
PDF = b'%PDF-'
# OLE related regular expressions:
##################################
MICROSOFT_EXCEL = b'\x4d\x69\x63\x72\x6f\x73\x6f\x66\x74\x20\x45\x78\x63\x65\x6c'
MICROSOFT_OFFICE_WORD = b'\x4d\x69\x63\x72\x6f\x73\x6f\x66\x74\x20\x4f\x66\x66\x69\x63\x65\x20\x57\x6f\x72\x64'
OLE_FILE_MAGIC = b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1'
EQ_EDIT_CLSID_RE = rb'\x02\xce\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x46'
# EQ_EDIT_CLSID_RE = rb'\x02\xce\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00F'
EQUATION_EDITOR_RE = rb'\x4d\x69\x63\x72\x6f\x73\x6f\x66\x74\x20\x45\x71\x75\x61\x74\x69\x6f\x6e\x20\x33\x2e\x30'
equation_regex = r'[[e|E][q|Q][u|U][a|A][t|T][i|I][o|O][n|N]]{8}'
equation_byte_regex = rb'[\x65\x45\x71\x51\x75\x55\x61\x41\x74\x54\x69\x49\x6F\x4F\x6E\x4E]{8}'
OLE_DDE_RE = rb'\x13\s*\x44\x44\x45\x41\x55\x54\x4f[^\x14]+'
# OLE Excel files related regular expressions:
##############################################
BOF_RECORD_RE = rb'\t\x08[\x08|\x10]\x00\x00[\x05|\x06][\x00-\xff]{6}'
BOF_RECORDS_RE = rb'\t\x08[\x08|\x10]\x00\x00[\x05|\x06][\x00-\xff]{6}'
EOF_BOF = rb"\x0a\x00\x00\x00\x09\x08"
BOUNDHSEET_RECORD = rb'\x85\x00[\x01-\x88]\x00[\x00-\xff]{4}[\x00-\x02][\x00|\x01]'
SHEET_NAME_1 = rb'\x85\x00[\x00-\xff]\x00[\x00-\xff]{5}[\x00|\x01]([\x00-\xff]{1,16})\x85\x00'
SHEET_NAME_2 = rb'\x85\x00[\x00-\xff]\x00[\x00-\xff]{5}[\x00|\x01]([\x00-\xff]{1,12})'
# RTF related regular expressions:
##################################
rtf_clean_regex = rb"\x0d|\x0a|\x09|\x20"
rtf_ole_blob_regex = rb"(\x64\x30\x63\x66\x31\x31\x65[\x00-\x66]+)\}"
rtf_binary_blob_regex = rb"[A-Z]\}([\x00-\x66]+)\{\\|\x62\x69\x6e([\x00-\x66]+)\}|\}([\x00-\x66]+)\}|\x6d\x61\x74\x68([\x00-\x66]+)\}|\}([\x00-\x5b]+)|[\x61-\x7a]{3,20}([\x00-\x5b\x61-\x7a]+)\{|\\[\x00-\x5b\x61-\x7a]{3,5}([\x00-\x5b\x61-\x7a]+)"
pe_header_regex = r"4d5a[a-z0-9]{100,500}546869732070726f6772616d"
pe_magic_str = r"4d5a"
# PDF related regular expressions:
##################################
obj_regex = rb"\d{1,2} \d obj[\x00-\xff]+?endobj"
obj_header = rb"\d{1,2} \d obj[\x00-\xff]<<[\x00-\xff]+?>>"
export_data_regex = rb'this\.exportDataObject\((.*?)\)'
filespec_regex = rb'/Type /Filespec|/Type/Filespec'
file_regex = rb'/F \(.*?\)|/F\(.*?\)'
unc_regex = rb'F\(\\\\\\\\\d{1,3}\.d{1,3}\.d{1,3}\.d{1,3}\\\\.*?\) 0 R'
uri_regex = rb'URI \(.* ?\)|URI\(.* ?\)'
emb_file_regex = rb'/Type /EmbeddedFile|/Type/EmbeddedFile'
file_ref_regex = rb'F (\d{1,2}) 0 R'
objstm_regex = rb'/Type /ObjStm'
js_ref_pattern = rb'JS (\d{1,2}) 0 R'
auto_action_pattern = rb'/AA'
open_action_regex = rb'/OpenAction'
o_regex = rb'/O (\d{1,2}) 0 R'
open_a_ref_regex = rb'/OpenAction 9 0 R'
launch_regex = rb'/Launch'
stream_regex = rb'stream([\x00-\xff]+?)endstream'
goto_regex = rb'/GoTo|/GoToR|/GoToE'
# goto_remote_regex = rb'/GoToR'
# goto_emb_regex = rb'/GoToE'
submitform_regex = rb'/SubmitForm'
# Generic regular expressions:
##############################
unicode_regex = rb'[\x20-\x7e]\x00[\x20-\x7e]\x00'
ascii_regex = rb'[\x20-\x7e]{10,1000}'
base64_regex = r'(?:[A-Za-z\d+/]{4})|(?:[A-Za-z\d+/]{3}=|[A-Za-z\d+/]{2}==)'
# OLE object identifier CLSIDs (the CLSID is at raw offset 0x450 in the OLE file):
##################################################################################
CLSIDS = {
rb'\x00\x02\x08\x10\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel.Sheet.5',
rb'\x00\x02\x08\x11\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel.Chart.5',
rb'\x00\x02\x08\x20\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Microsoft Excel 97-2003 Worksheet (Excel.Sheet.8)',
rb'\x00\x02\x08\x21\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel.Chart.8',
rb'\x00\x02\x08\x30\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel.Sheet.12',
rb'\x00\x02\x08\x32\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel sheet with macro enabled (Excel.SheetMacroEnabled.12)',
rb'\x00\x02\x08\x33\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Excel binary sheet with macro enabled (Excel.SheetBinaryMacroEnabled.12)',
rb'\x00\x02\x09\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Word 6.0-7.0 Document (Word.Document.6)',
rb'\x00\x02\x09\x06\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Word 97-2003 Document (Word.Document.8)',
rb'\x00\x02\x09\x07\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Word Picture (Word.Picture.8)',
rb'\x00\x02\x0C\x01\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x02\x14\x01\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Windows LNK Shortcut file', #
rb'\x00\x02\x17\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Equation 2.0 (Known Related to CVE-2017-11882 or CVE-2018-0802)',
rb'\x00\x02\x26\x01\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x02\x26\x02\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x02\x26\x03\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x02\xCE\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Equation 3.0 (Known Related to CVE-2017-11882 or CVE-2018-0802)',
rb'\x00\x02\xCE\x02\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Equation 3.0 (Known Related to CVE-2017-11882 or CVE-2018-0802)',
rb'\x00\x02\xCE\x03\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'MathType Equation Object',
rb'\x00\x03\x00\x0B\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'Microsoft Equation (Known Related to CVE-2017-11882 or CVE-2018-0802)',
rb'\x00\x03\x00\x0C\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x03\x00\x0D\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x00\x03\x00\x0E\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46': 'OLE Package Object (may contain and run any file)',
rb'\x04\x8E\xB4\x3E\x20\x59\x42\x2F\x95\xE0\x55\x7D\xA9\x60\x38\xAF': 'Microsoft Powerpoint.Slide.12',
rb'\x06\x29\x0B\xD3\x48\xAA\x11\xD2\x84\x32\x00\x60\x08\xC3\xFB\xFC': 'Script Moniker, aka Moniker to a Windows Script Component (may trigger CVE-2017-0199)',
rb'\x18\xA0\x6B\x6B\x2F\x3F\x4E\x2B\xA6\x11\x52\xBE\x63\x1B\x2D\x22': 'Word.DocumentMacroEnabled.12 (DOCM)',
rb'\x30\x50\xF4\xD8\x98\xB5\x11\xCF\xBB\x82\x00\xAA\x00\xBD\xCE\x0B': 'HTML Application (may trigger CVE-2017-0199)',
rb'\x44\xF9\xA0\x3B\xA3\xEC\x4F\x3B\x93\x64\x08\xE0\x00\x7F\x21\xDF': 'Control.TaskSymbol (Known Related to CVE-2015-1642 & CVE-2015-2424)',
rb'\x46\xE3\x13\x70\x3F\x7A\x11\xCE\xBE\xD6\x00\xAA\x00\x61\x10\x80': 'Forms.MultiPage',
rb'\x4C\x59\x92\x41\x69\x26\x10\x1B\x99\x92\x00\x00\x0B\x65\xC6\xF9': 'Forms.Image (may trigger CVE-2015-2424)',
rb'\x64\x81\x8D\x10\x4F\x9B\x11\xCF\x86\xEA\x00\xAA\x00\xB9\x29\xE8': 'Microsoft Powerpoint.Show.8',
rb'\x64\x81\x8D\x11\x4F\x9B\x11\xCF\x86\xEA\x00\xAA\x00\xB9\x29\xE8': 'Microsoft Powerpoint.Slide.8',
rb'\x6E\x18\x20\x20\xF4\x60\x11\xCE\x9B\xCD\x00\xAA\x00\x60\x8E\x01': 'ActiveX Control: Forms.Frame',
rb'\x20\x20\x18\x6E\x60\xF4\xCE\x11\x9B\xCD\x00\xAA\x00\x60\x8E\x01': 'ActiveX Control: Microsoft Forms 2.0 Frame',
rb'\x88\xD9\x69\xEB\xF1\x92\x11\xD4\xA6\x5F\x00\x40\x96\x32\x51\xE5': 'Msxml2.ServerXMLHTTP.5.0',
rb'\x88\xD9\x69\xEA\xF1\x92\x11\xD4\xA6\x5F\x00\x40\x96\x32\x51\xE5': 'Msxml2.XMLHTTP.5.0',
rb'\x88\xD9\x69\xE7\xF1\x92\x11\xD4\xA6\x5F\x00\x40\x96\x32\x51\xE5': 'Msxml2.XMLSchemaCache.5.0',
rb'\x88\xD9\x69\xE8\xF1\x92\x11\xD4\xA6\x5F\x00\x40\x96\x32\x51\xE5': 'Msxml2.XSLTemplate.5.0',
rb'\x97\x8C\x9E\x23\xD4\xB0\x11\xCE\xBF\x2D\x00\xAA\x00\x3F\x40\xD0': 'Microsoft Forms 2.0 Label (Forms.Label.1)',
rb'\xB8\x01\xCA\x65\xA1\xFC\x11\xD0\x85\xAD\x44\x45\x53\x54\x00\x00': 'Adobe Acrobat Document - PDF file',
rb'\xC0\x8A\xFD\x90\xF2\xA1\x11\xD1\x84\x55\x00\xA0\xC9\x1F\x38\x80': 'ShellBrowserWindow',
rb'\xC6\x2A\x69\xF0\x16\xDC\x11\xCE\x9E\x98\x00\xAA\x00\x57\x4A\x4F': 'Forms.Form',
rb'\xCF\x4F\x55\xF4\x8F\x87\x4D\x47\x80\xBB\x58\x08\x16\x4B\xB3\xF8': 'Microsoft Powerpoint.Show.12',
rb'\xD7\x05\x32\x40\xCE\x69\x11\xCD\xA7\x77\x00\xDD\x01\x14\x3C\x57': 'Microsoft Forms 2.0 CommandButton',
rb'\xF2\x0D\xA7\x20\xC0\x2F\x11\xCE\x92\x7B\x08\x00\x09\x5A\xE3\x40': 'OLE Package Object (may contain and run any file)',
rb'\xF4\x14\xC2\x60\x6A\xC0\x11\xCF\xB6\xD1\x00\xAA\x00\xBB\xBB\x58': 'jscript.dll - JScript Language (ProgID: ECMAScript, JavaScript, JScript, LiveScript)',
rb'\xF4\x75\x4C\x9B\x64\xF5\x4B\x40\x8A\xF4\x67\x97\x32\xAC\x06\x07': 'Microsoft Word Document (Word.Document.12)'}
def determine_mimetype(self, data):
"""
Determine the file type by the magic bytes/signatures.
Returns a string indicating the file type ("ole"/"ooxml"/"rtf"/"pdf")
"""
# https://www.garykessler.net/library/file_sigs.html
try:
if self.OLE == data[:len(self.OLE)]:
printy("[y][+] Mime type: Object Linking and Embedding (OLE) Compound File (CF)@")
return "ole"
elif self.OOXML == data[:len(self.OOXML)]:
printy("[y][+] Mime type: Microsoft Office Open XML Format (OOXML) Document@")
return "ooxml"
elif self.RTF == data[:len(self.RTF)]:
printy("[y][+] Mime type: RTF (Rich text format) word processing file - \"{\\rtf\"@")
return "rtf"
elif self.PDF == data[:len(self.PDF)]:
printy("[y][+] Mime type: PDF document - \"%PDF-1.x\"@")
return "pdf"
except TypeError:
return 0
def scan_for_obj_type(self, stream_name, data):
"""
Scans an OLE object to identify its type using the CLSIDS dictionary
"""
printy("[y][+] Attempting to determine the object type@")
for clsid in self.CLSIDS:
if re.findall(clsid, data):
try:
print_string = raw_format("[o>]Object type:@ %s" % self.CLSIDS[clsid])
printy(print_string)
except Exception:
print("[+] Object type: %s" % self.CLSIDS[clsid])
print_string = "Object type: %s" % self.CLSIDS[clsid]
self.add_summary_if_no_duplicates(print_string, stream_name)
else:
self.add_summary_if_no_duplicates(print_string, stream_name)
def deduplicate_table(self, string, summary_string):
"""
Removes duplicates from the final summary table
"""
no_duplicates = True
temp1 = list(self.summary_table.rows)
for row in temp1:
for c in row:
try:
if string in c:
no_duplicates = False
except TypeError:
continue
return no_duplicates
def add_summary_if_no_duplicates(self, summary, desc):
"""
Adds a new row to the summary table if it does not exist in it already.
It is the only function that adds rows to the final summary table.
"""
no_duplicates = self.deduplicate_table(desc, summary)
if no_duplicates:
self.summary_table.rows.append([summary, desc])
def search_indicators_in_string(self, filename, string):
"""
Scans a string against multiple known keywords to extract more indications in the analysis report and
summary table.
"""
no_duplicates = True
if "URLMON" in string or "urlmon" in string or "loadToFile" in string:
if '\x01' in filename:
clean_filename = filename.strip('\x01')
summary_string = raw_format("[y>]Indication of file download in extracted strings from:@ %s"
% clean_filename.strip('\x01'))
else:
summary_string = raw_format("[y>]Indication of file download in extracted strings from:@ %s"
% filename)
self.add_summary_if_no_duplicates(summary_string, string)
if ("http:" in string or "https:" in string) and "crl" not in string and "thawte" not in string and \
"verisign" not in string and "symantec" not in string and \
"ocsp" not in string and "openxml" not in string and \
"theme" not in string and "schema" not in string and \
"microsoft" not in string:
if '\x01' in filename:
clean_filename = filename.strip('\x01')
summary_string = raw_format("[o>]URL found in extracted strings, from:@ %s" % clean_filename)
else:
summary_string = raw_format("[o>]URL found in extracted strings, from:@ %s" % filename.strip('\x01'))
self.add_summary_if_no_duplicates(summary_string, string)
if "CreateFile" in string or "CreateDirectory" in string:
if '\x01' in filename:
clean_filename = filename.strip('\x01')
summary_string = raw_format("[y>]Indication of file creation in extracted strings, from:@ %s"
% "".join(clean_filename.strip('\x01')))
else:
summary_string = raw_format("[y>]Indication of file creation in extracted strings, from:@ %s"
% "".join(filename.strip('\x01')))
self.add_summary_if_no_duplicates(summary_string, string)
if "ShellExecute" in string or "Shell32.Shell" in string or "cmd /c" in string or "powershell" in string:
if '\x01' in filename:
clean_filename = filename.strip('\x01')
summary_string = raw_format("[o>]Indication of shell command execution in file:@ %s"
% "".join(clean_filename.strip('\x01')))
else:
summary_string = raw_format("[o>]Indication of shell command execution in file:@ %s"
% "".join(filename.strip('\x01')))
self.add_summary_if_no_duplicates(summary_string, string)
if (".exe" in string or
".EXE" in string or
".exe" in string or
".sct" in string or
".ocx" in string or
".php" in string or
"ProgramData" in string or
"Desktop" in string or
"Downloads" in string or
"C:\\Users" in string or
".com" in string or
".ocx" in string or
".hta" in string or
".tmp" in string or
".dat" in string or
".txt" in string or
re.findall(r"[a-z]+\.[a-z]", string)) and \
"theme" not in string and \
"_rels" not in string and \
"openxml" not in string and \
"theme" not in string and \
"schema" not in string and \
"crl" not in string and \
"thawte" not in string and \
"verisign" not in string and \
"symantec" not in string and \
"ocsp" not in string and \
"openxml" not in string and \
"theme" not in string and \
"schema" not in string and \
"java" not in string and \
"Java" not in string and \
"jvm" not in string and \
"mscoree.dll" not in string and \
"kernel32.dll" not in string and \
"gdiplus32.dll" not in string and \
"gdiplus.dll" not in string and \
"advapi32.dll" not in string and \
"native" not in string and \
"microsoft" not in string:
if "Ole10Native" in filename:
summary_string = raw_format("[o>]Suspicious file path or possible domain found in:@ %s" % filename.strip('\x01'))
else:
summary_string = raw_format("[o>]Suspicious file path or possible domain found in:@ %s"
% "".join(filename.strip('\x01')))
if no_duplicates and len(string) < 100:
self.add_summary_if_no_duplicates(summary_string, string)
if "This program" in string or "DOS mode" in string:
if "Ole10Native" in filename:
summary_string = raw_format("[r>]Possible PE (Portable Executable) payload in stream:@ %s" % filename.strip('\x01'))
self.add_summary_if_no_duplicates(summary_string, string)
else:
summary_string = raw_format("[r>]Possible PE (Portable Executable) payload in stream:@ %s" % "".join(filename.strip('\x01')))
self.add_summary_if_no_duplicates(summary_string, string)
eq = re.findall(self.equation_regex, string)
if eq:
if "Ole10Native" in filename:
summary_string = raw_format("[r>]Possible Equation Editor exploit:@ " % filename.strip('\x01'))
self.add_summary_if_no_duplicates(summary_string, string)
else:
summary_string = raw_format("[r>]Possible Equation Editor exploit:@ %s" % "".join(filename.strip('\x01')))
self.add_summary_if_no_duplicates(summary_string, string)
def find_susp_functions_vba(self, filename, decompressed):
"""
Scans decompressed VBA projects for known function keywords to provide nore insight on the code behavior.
"""
if "Auto_Open" in decompressed or "Document_Open" in decompressed:
summary_string = raw_format("[r>]VBA macro auto execution:@ Auto_Open()/Document_Open() found in: %s" % "\\".join(filename))
summary_desc = "%s: Auto_Open()/Document_Open() - will execute VBA code when doc is opened" % "\\".join(filename)
self.add_summary_if_no_duplicates(summary_string, summary_desc)
if "Auto_Close" in decompressed:
summary_string = raw_format("[r>]VBA macro:@ Auto_Close() in:@ %s" % str("\\".join(filename)))
summary_desc = "%s: Auto_Close() - will execute VBA code when doc is closed" \
% "\\".join(filename)
self.add_summary_if_no_duplicates(summary_string, summary_desc)
if "Shell(" in decompressed or "WScript.Shell" in decompressed:
summary_string = raw_format("[r>]VBA macro: the code invokes the shell (Shell()\Wscript.Shell) in:@ %s"
% str("\\".join(filename)))
summary_desc = "%s: Shell() - Macro code will invoke the shell to execute code" \
% "\\".join(filename)
self.add_summary_if_no_duplicates(summary_string, summary_desc)
if "http" in decompressed:
summary_string = raw_format("[r>]VBA macro: URL found in:@ %s" % str("\\".join(filename)))
self.add_summary_if_no_duplicates(summary_string, re.findall(r'http[s]{0,1}\:\/\/.*\..*\/.*\"',
decompressed)[0])
def find_susp_functions_xlm(self, filename, decompressed):
"""
Scans XLM macros in sheets for known function keywords to provide nore insight on the code behavior.
"""
if "HALT()" in decompressed or "RETURN(" in decompressed or "EXEC()" in decompressed or \
"WRITE(" in decompressed or "FOR(" in decompressed or "FOR(" in decompressed or \
"FORMULA(" in decompressed:
summary_string = raw_format("[r>]Excel 4.0 (XLM) macro\n XLM macro functions detected in: %s"
% "\\".join(filename.strip('\x01')))
summary_desc = decompressed[:150]
self.add_summary_if_no_duplicates(summary_string, summary_desc) | en | 0.43743 | The Helpers() class has helper methods and regular expressions that are used by all other classes # Magic byte regular expressions: ################################# # OLE related regular expressions: ################################## # EQ_EDIT_CLSID_RE = rb'\x02\xce\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00F' # OLE Excel files related regular expressions: ############################################## # RTF related regular expressions: ################################## # PDF related regular expressions: ################################## # goto_remote_regex = rb'/GoToR' # goto_emb_regex = rb'/GoToE' # Generic regular expressions: ############################## # OLE object identifier CLSIDs (the CLSID is at raw offset 0x450 in the OLE file): ################################################################################## # Determine the file type by the magic bytes/signatures. Returns a string indicating the file type ("ole"/"ooxml"/"rtf"/"pdf") # https://www.garykessler.net/library/file_sigs.html Scans an OLE object to identify its type using the CLSIDS dictionary Removes duplicates from the final summary table Adds a new row to the summary table if it does not exist in it already. It is the only function that adds rows to the final summary table. Scans a string against multiple known keywords to extract more indications in the analysis report and summary table. Scans decompressed VBA projects for known function keywords to provide nore insight on the code behavior. Scans XLM macros in sheets for known function keywords to provide nore insight on the code behavior. | 2.724998 | 3 |
manchester_analysis.py | y0081106/comparative_analysis | 0 | 6632418 | import time
import calendar
import codecs
import datetime
import json
import sys
import gzip
import string
import glob
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
global_tweet_counter = 0
time_format = "%a %b %d %H:%M:%S +0000 %Y"
reader = codecs.getreader("utf-8")
local_tweet_list = []
tweets_data_path = 'manchester_tweets.json'
tweets_file = open(tweets_data_path, "r")
frequency_map = {}
for line in tweets_file:
# Try to read tweet JSON into object
tweet_obj = None
try:
tweet_obj = json.loads(line)
except Exception as e:
continue
# Deleted status messages and protected status must be skipped
if ( "delete" in tweet_obj.keys() or "status_withheld" in tweet_obj.keys() ):
continue
# Try to extract the time of the tweet
try:
current_time = datetime.datetime.strptime(tweet_obj['created_at'], time_format)
except:
print (line)
raise
current_time = current_time.replace(second=0)
# Increment tweet count
global_tweet_counter += 1
# If our frequency map already has this time, use it, otherwise add
if ( current_time in frequency_map.keys() ):
time_map = frequency_map[current_time]
time_map["count"] += 1
time_map["list"].append(tweet_obj)
else:
frequency_map[current_time] = {"count":1, "list":[tweet_obj]}
# Fill in any gaps
times = sorted(frequency_map.keys())
first_time = times[0]
last_time = times[-1]
this_time = first_time
time_interval_step = datetime.timedelta(0, 60) # Time step in seconds
while ( this_time <= last_time ):
if ( this_time not in frequency_map.keys() ):
frequency_map[this_time] = {"count":0, "list":[]}
this_time = this_time + time_interval_step
print ("Processed Tweet Count:", global_tweet_counter)
fig, ax = plt.subplots()
fig.set_size_inches(18.5,10.5)
plt.title("Tweet Frequency")
# Sort the times into an array for future use
sorted_times = sorted(frequency_map.keys())
# What time span do these tweets cover?
print ("Time Frame:", sorted_times[0], sorted_times[-1])
# Get a count of tweets per minute
post_freq_list = [frequency_map[x]["count"] for x in sorted_times]
# We'll have ticks every thirty minutes (much more clutters the graph)
smaller_xticks = range(0, len(sorted_times), 30)
plt.xticks(smaller_xticks, [sorted_times[x] for x in smaller_xticks], rotation=90)
# Plot the post frequency
ax.plot(range(len(frequency_map)), [x if x > 0 else 0 for x in post_freq_list], color="blue", label="Posts")
ax.grid(b=True, which=u'major')
ax.legend()
plt.show()
# Create maps for holding counts and tweets for each user
global_user_counter = {}
global_user_map = {}
# Iterate through the time stamps
for t in sorted_times:
time_obj = frequency_map[t]
# For each tweet, pull the screen name and add it to the list
for tweet in time_obj["list"]:
user = tweet["user"]["screen_name"]
if ( user not in global_user_counter ):
global_user_counter[user] = 1
global_user_map[user] = [tweet]
else:
global_user_counter[user] += 1
global_user_map[user].append(tweet)
print ("Unique Users:", len(global_user_counter.keys()))
sorted_users = sorted(global_user_counter, key=global_user_counter.get, reverse=True)
print ("Top Ten Most Prolific Users:")
for u in sorted_users[:10]:
print (u, global_user_counter[u], "\n\t", "Random Tweet:", global_user_map[u][0]["text"], "\n----------")
# A map for hashtag counts
hashtag_counter = {}
# For each minute, pull the list of hashtags and add to the counter
for t in sorted_times:
time_obj = frequency_map[t]
for tweet in time_obj["list"]:
hashtag_list = tweet["entities"]["hashtags"]
for hashtag in hashtag_list:
# We lowercase the hashtag to avoid duplicates (e.g., #MikeBrown vs. #mikebrown)
hashtag_str = hashtag["text"].lower()
if ( hashtag_str not in hashtag_counter ):
hashtag_counter[hashtag_str] = 1
else:
hashtag_counter[hashtag_str] += 1
print ("Unique Hashtags:", len(hashtag_counter.keys()))
sorted_hashtags = sorted(hashtag_counter, key=hashtag_counter.get, reverse=True)
print ("Top Twenty Hashtags:")
for ht in sorted_hashtags[:20]:
print ("\t", "#" + ht, hashtag_counter[ht])
# A map for counting each language
language_counter = {}
for t in sorted_times:
time_obj = frequency_map[t]
for tweet in time_obj["list"]:
lang = tweet["lang"]
if ( lang not in language_counter ):
language_counter[lang] = 1
else:
language_counter[lang] += 1
languages = sorted(language_counter.keys(), key=language_counter.get, reverse=True)
for l in languages:
print (l, language_counter[l])
plt.figure(figsize=(16,8))
# the histogram of the data
plt.bar(
np.arange(len(languages)),
[language_counter[x] for x in languages],
log=True)
plt.xticks(np.arange(len(languages)) + 0.5, languages)
plt.xlabel('Languages')
plt.ylabel('Counts (Log)')
plt.title("Language Frequency")
plt.grid(True)
plt.show()
# A frequency map for timestamps to geo-coded tweets
geo_frequency_map = {}
geo_count = 0
# Save only those tweets with tweet['coordinate']['coordinate'] entity
for t in sorted_times:
geos = list(filter(lambda tweet: tweet["coordinates"] != None and "coordinates" in tweet["coordinates"], frequency_map[t]["list"]))
geo_count += len(geos)
# Add to the timestamp map
geo_frequency_map[t] = {"count": len(geos), "list": geos}
print ("Number of Geo Tweets:", geo_count)
import matplotlib
from mpl_toolkits.basemap import Basemap
# Create a list of all geo-coded tweets
temp_geo_list = [geo_frequency_map[t]["list"] for t in sorted_times]
geo_tweets = reduce(lambda x, y: x + y, temp_geo_list)
# For each geo-coded tweet, extract its GPS coordinates
geo_coord = [x["coordinates"]["coordinates"] for x in geo_tweets]
# Now we build a map of the world using Basemap
land_color = 'lightgray'
water_color = 'lightblue'
fig, ax = plt.subplots(figsize=(18,18))
world_map = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80,
llcrnrlon=-180, urcrnrlon=180, resolution='l')
world_map.fillcontinents(color=land_color, lake_color=water_color, zorder=1)
world_map.drawcoastlines()
world_map.drawparallels(np.arange(-90.,120.,30.))
world_map.drawmeridians(np.arange(0.,420.,60.))
world_map.drawmapboundary(fill_color=water_color, zorder=0)
ax.set_title('World Tweets')
# Convert points from GPS coordinates to (x,y) coordinates
conv_points = [world_map(p[0], p[1]) for p in geo_coord]
x = [p[0] for p in conv_points]
y = [p[1] for p in conv_points]
world_map.scatter(x, y, s=100, marker='x', color="red", zorder=2)
plt.show()
| import time
import calendar
import codecs
import datetime
import json
import sys
import gzip
import string
import glob
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
global_tweet_counter = 0
time_format = "%a %b %d %H:%M:%S +0000 %Y"
reader = codecs.getreader("utf-8")
local_tweet_list = []
tweets_data_path = 'manchester_tweets.json'
tweets_file = open(tweets_data_path, "r")
frequency_map = {}
for line in tweets_file:
# Try to read tweet JSON into object
tweet_obj = None
try:
tweet_obj = json.loads(line)
except Exception as e:
continue
# Deleted status messages and protected status must be skipped
if ( "delete" in tweet_obj.keys() or "status_withheld" in tweet_obj.keys() ):
continue
# Try to extract the time of the tweet
try:
current_time = datetime.datetime.strptime(tweet_obj['created_at'], time_format)
except:
print (line)
raise
current_time = current_time.replace(second=0)
# Increment tweet count
global_tweet_counter += 1
# If our frequency map already has this time, use it, otherwise add
if ( current_time in frequency_map.keys() ):
time_map = frequency_map[current_time]
time_map["count"] += 1
time_map["list"].append(tweet_obj)
else:
frequency_map[current_time] = {"count":1, "list":[tweet_obj]}
# Fill in any gaps
times = sorted(frequency_map.keys())
first_time = times[0]
last_time = times[-1]
this_time = first_time
time_interval_step = datetime.timedelta(0, 60) # Time step in seconds
while ( this_time <= last_time ):
if ( this_time not in frequency_map.keys() ):
frequency_map[this_time] = {"count":0, "list":[]}
this_time = this_time + time_interval_step
print ("Processed Tweet Count:", global_tweet_counter)
fig, ax = plt.subplots()
fig.set_size_inches(18.5,10.5)
plt.title("Tweet Frequency")
# Sort the times into an array for future use
sorted_times = sorted(frequency_map.keys())
# What time span do these tweets cover?
print ("Time Frame:", sorted_times[0], sorted_times[-1])
# Get a count of tweets per minute
post_freq_list = [frequency_map[x]["count"] for x in sorted_times]
# We'll have ticks every thirty minutes (much more clutters the graph)
smaller_xticks = range(0, len(sorted_times), 30)
plt.xticks(smaller_xticks, [sorted_times[x] for x in smaller_xticks], rotation=90)
# Plot the post frequency
ax.plot(range(len(frequency_map)), [x if x > 0 else 0 for x in post_freq_list], color="blue", label="Posts")
ax.grid(b=True, which=u'major')
ax.legend()
plt.show()
# Create maps for holding counts and tweets for each user
global_user_counter = {}
global_user_map = {}
# Iterate through the time stamps
for t in sorted_times:
time_obj = frequency_map[t]
# For each tweet, pull the screen name and add it to the list
for tweet in time_obj["list"]:
user = tweet["user"]["screen_name"]
if ( user not in global_user_counter ):
global_user_counter[user] = 1
global_user_map[user] = [tweet]
else:
global_user_counter[user] += 1
global_user_map[user].append(tweet)
print ("Unique Users:", len(global_user_counter.keys()))
sorted_users = sorted(global_user_counter, key=global_user_counter.get, reverse=True)
print ("Top Ten Most Prolific Users:")
for u in sorted_users[:10]:
print (u, global_user_counter[u], "\n\t", "Random Tweet:", global_user_map[u][0]["text"], "\n----------")
# A map for hashtag counts
hashtag_counter = {}
# For each minute, pull the list of hashtags and add to the counter
for t in sorted_times:
time_obj = frequency_map[t]
for tweet in time_obj["list"]:
hashtag_list = tweet["entities"]["hashtags"]
for hashtag in hashtag_list:
# We lowercase the hashtag to avoid duplicates (e.g., #MikeBrown vs. #mikebrown)
hashtag_str = hashtag["text"].lower()
if ( hashtag_str not in hashtag_counter ):
hashtag_counter[hashtag_str] = 1
else:
hashtag_counter[hashtag_str] += 1
print ("Unique Hashtags:", len(hashtag_counter.keys()))
sorted_hashtags = sorted(hashtag_counter, key=hashtag_counter.get, reverse=True)
print ("Top Twenty Hashtags:")
for ht in sorted_hashtags[:20]:
print ("\t", "#" + ht, hashtag_counter[ht])
# A map for counting each language
language_counter = {}
for t in sorted_times:
time_obj = frequency_map[t]
for tweet in time_obj["list"]:
lang = tweet["lang"]
if ( lang not in language_counter ):
language_counter[lang] = 1
else:
language_counter[lang] += 1
languages = sorted(language_counter.keys(), key=language_counter.get, reverse=True)
for l in languages:
print (l, language_counter[l])
plt.figure(figsize=(16,8))
# the histogram of the data
plt.bar(
np.arange(len(languages)),
[language_counter[x] for x in languages],
log=True)
plt.xticks(np.arange(len(languages)) + 0.5, languages)
plt.xlabel('Languages')
plt.ylabel('Counts (Log)')
plt.title("Language Frequency")
plt.grid(True)
plt.show()
# A frequency map for timestamps to geo-coded tweets
geo_frequency_map = {}
geo_count = 0
# Save only those tweets with tweet['coordinate']['coordinate'] entity
for t in sorted_times:
geos = list(filter(lambda tweet: tweet["coordinates"] != None and "coordinates" in tweet["coordinates"], frequency_map[t]["list"]))
geo_count += len(geos)
# Add to the timestamp map
geo_frequency_map[t] = {"count": len(geos), "list": geos}
print ("Number of Geo Tweets:", geo_count)
import matplotlib
from mpl_toolkits.basemap import Basemap
# Create a list of all geo-coded tweets
temp_geo_list = [geo_frequency_map[t]["list"] for t in sorted_times]
geo_tweets = reduce(lambda x, y: x + y, temp_geo_list)
# For each geo-coded tweet, extract its GPS coordinates
geo_coord = [x["coordinates"]["coordinates"] for x in geo_tweets]
# Now we build a map of the world using Basemap
land_color = 'lightgray'
water_color = 'lightblue'
fig, ax = plt.subplots(figsize=(18,18))
world_map = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80,
llcrnrlon=-180, urcrnrlon=180, resolution='l')
world_map.fillcontinents(color=land_color, lake_color=water_color, zorder=1)
world_map.drawcoastlines()
world_map.drawparallels(np.arange(-90.,120.,30.))
world_map.drawmeridians(np.arange(0.,420.,60.))
world_map.drawmapboundary(fill_color=water_color, zorder=0)
ax.set_title('World Tweets')
# Convert points from GPS coordinates to (x,y) coordinates
conv_points = [world_map(p[0], p[1]) for p in geo_coord]
x = [p[0] for p in conv_points]
y = [p[1] for p in conv_points]
world_map.scatter(x, y, s=100, marker='x', color="red", zorder=2)
plt.show()
| en | 0.76105 | # Try to read tweet JSON into object # Deleted status messages and protected status must be skipped # Try to extract the time of the tweet # Increment tweet count # If our frequency map already has this time, use it, otherwise add # Fill in any gaps # Time step in seconds # Sort the times into an array for future use # What time span do these tweets cover? # Get a count of tweets per minute # We'll have ticks every thirty minutes (much more clutters the graph) # Plot the post frequency # Create maps for holding counts and tweets for each user # Iterate through the time stamps # For each tweet, pull the screen name and add it to the list # A map for hashtag counts # For each minute, pull the list of hashtags and add to the counter # We lowercase the hashtag to avoid duplicates (e.g., #MikeBrown vs. #mikebrown) # A map for counting each language # the histogram of the data # A frequency map for timestamps to geo-coded tweets # Save only those tweets with tweet['coordinate']['coordinate'] entity # Add to the timestamp map # Create a list of all geo-coded tweets # For each geo-coded tweet, extract its GPS coordinates # Now we build a map of the world using Basemap # Convert points from GPS coordinates to (x,y) coordinates | 2.860904 | 3 |
test/test_notify_base.py | linkmauve/apprise | 4,764 | 6632419 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import six
import pytest
from datetime import datetime
from datetime import timedelta
from apprise.plugins.NotifyBase import NotifyBase
from apprise import NotifyType
from apprise import NotifyImageSize
from timeit import default_timer
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
def test_notify_base():
"""
API: NotifyBase() object
"""
# invalid types throw exceptions
with pytest.raises(TypeError):
NotifyBase(**{'format': 'invalid'})
# invalid types throw exceptions
with pytest.raises(TypeError):
NotifyBase(**{'overflow': 'invalid'})
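    # (Valid values for these options come from the NotifyFormat and
    # OverflowMode constants - e.g. 'text'/'html'/'markdown' and
    # 'upstream'/'truncate'/'split' respectively; anything else is rejected
    # as demonstrated above.)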
# Bad port information
nb = NotifyBase(port='invalid')
assert nb.port is None
nb = NotifyBase(port=10)
assert nb.port == 10
try:
nb.url()
assert False
except NotImplementedError:
        # Each sub-module that inherits this as a parent is required to
        # over-ride this function, so a direct call to it intentionally
        # throws a NotImplementedError
assert True
try:
nb.send('test message')
assert False
except NotImplementedError:
# Each sub-module is that inherits this as a parent is required to
# over-ride this function. So direct calls to this throws a not
# implemented error intentionally
assert True
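    # For reference, a concrete plugin is expected to supply both of the
    # methods exercised above; a minimal, purely hypothetical sketch:
    #
    #   class NotifyExample(NotifyBase):
    #       # allow at most one request per second
    #       request_rate_per_sec = 1.0
    #
    #       def url(self, *args, **kwargs):
    #           return 'example://hostname'
    #
    #       def send(self, body, title='', notify_type=NotifyType.INFO,
    #                **kwargs):
    #           self.throttle()  # honour the plugin's rate limit
    #           # ... perform the upstream request here ...
    #           return True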
# Throttle overrides..
nb = NotifyBase()
nb.request_rate_per_sec = 0.0
start_time = default_timer()
nb.throttle()
elapsed = default_timer() - start_time
    # Should be a very fast response time since we set the rate to zero, but
    # we'll allow anything under 0.5 seconds to be fair, as some testing
    # systems may be slower than others
assert elapsed < 0.5
# Concurrent calls should achieve the same response
start_time = default_timer()
nb.throttle()
elapsed = default_timer() - start_time
assert elapsed < 0.5
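    # request_rate_per_sec is the minimum spacing (in seconds) enforced
    # between successive I/O operations of a plugin; a value of zero, as
    # above, effectively disables throttling altogether.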
nb = NotifyBase()
nb.request_rate_per_sec = 1.0
# Set our time to now
start_time = default_timer()
nb.throttle()
elapsed = default_timer() - start_time
    # A first call to throttle() (without telling it when it previously ran)
    # does not block for any length of time; it merely sets us up so that
    # subsequent calls will block
assert elapsed < 0.5
# Concurrent calls could take up to the rate_per_sec though...
start_time = default_timer()
nb.throttle(last_io=datetime.now())
elapsed = default_timer() - start_time
assert elapsed > 0.5 and elapsed < 1.5
nb = NotifyBase()
nb.request_rate_per_sec = 1.0
# Set our time to now
start_time = default_timer()
nb.throttle(last_io=datetime.now())
elapsed = default_timer() - start_time
    # Because we told it that we had already performed an action just now,
    # the throttle holds off until the full rate interval has passed
assert elapsed > 0.5 and elapsed < 1.5
# Concurrent calls could take up to the rate_per_sec though...
start_time = default_timer()
nb.throttle(last_io=datetime.now())
elapsed = default_timer() - start_time
assert elapsed > 0.5 and elapsed < 1.5
nb = NotifyBase()
start_time = default_timer()
nb.request_rate_per_sec = 1.0
# Force a time in the past
nb.throttle(last_io=(datetime.now() - timedelta(seconds=20)))
elapsed = default_timer() - start_time
    # Should be a very fast response time since the last I/O happened long
    # enough ago that no delay is required; we'll allow anything under 0.5
    # seconds to be fair, as some testing systems may be slower than others
assert elapsed < 0.5
# Force a throttle time
start_time = default_timer()
nb.throttle(wait=0.5)
elapsed = default_timer() - start_time
assert elapsed > 0.5 and elapsed < 1.5
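    # Note the two distinct knobs exercised above: wait= forces an explicit
    # pause regardless of request_rate_per_sec, while last_io= anchors the
    # rate calculation to the timestamp of a previous I/O operation.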
# our NotifyBase wasn't initialized with an ImageSize so this will fail
assert nb.image_url(notify_type=NotifyType.INFO) is None
assert nb.image_path(notify_type=NotifyType.INFO) is None
assert nb.image_raw(notify_type=NotifyType.INFO) is None
# Color handling
assert nb.color(notify_type='invalid') is None
assert isinstance(
nb.color(notify_type=NotifyType.INFO, color_type=None),
six.string_types)
assert isinstance(
nb.color(notify_type=NotifyType.INFO, color_type=int), int)
assert isinstance(
nb.color(notify_type=NotifyType.INFO, color_type=tuple), tuple)
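    # As a rough illustration (the actual values depend on the asset in
    # use), the same notification type is exposed in three forms:
    #   color_type=None  -> an HTML colour string such as '#3AA3E3'
    #   color_type=int   -> the same colour as an integer, e.g. 0x3AA3E3
    #   color_type=tuple -> an (r, g, b) tuple, e.g. (0x3A, 0xA3, 0xE3)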
# Create an object
nb = NotifyBase()
# Force an image size since the default doesn't have one
nb.image_size = NotifyImageSize.XY_256
# We'll get an object this time around
assert nb.image_url(notify_type=NotifyType.INFO) is not None
assert nb.image_path(notify_type=NotifyType.INFO) is not None
assert nb.image_raw(notify_type=NotifyType.INFO) is not None
# But we will not get a response with an invalid notification type
assert nb.image_url(notify_type='invalid') is None
assert nb.image_path(notify_type='invalid') is None
assert nb.image_raw(notify_type='invalid') is None
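    # (The image URL/path/raw content above is resolved through the
    # AppriseAsset associated with the object, which maps notify_type and
    # image_size onto the packaged theme images; an unknown notify_type has
    # no such mapping, hence the None results.)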
# Static function testing
    assert NotifyBase.escape_html("<content>'\t \n</content>") == \
        '&lt;content&gt;&apos;&emsp;&nbsp;\n&lt;/content&gt;'
    assert NotifyBase.escape_html(
        "<content>'\t \n</content>", convert_new_lines=True) == \
        '&lt;content&gt;&apos;&emsp;&nbsp;&lt;br/&gt;&lt;/content&gt;'
# Test invalid data
assert NotifyBase.split_path(None) == []
assert NotifyBase.split_path(object()) == []
assert NotifyBase.split_path(42) == []
assert NotifyBase.split_path(
'/path/?name=Dr%20Disrespect', unquote=False) == \
['path', '?name=Dr%20Disrespect']
assert NotifyBase.split_path(
'/path/?name=Dr%20Disrespect', unquote=True) == \
['path', '?name=Dr Disrespect']
    # A slash found inside the path, if escaped properly, will not be broken
    # apart by split_path, while additional concatenated slashes are ignored
# FYI: %2F = /
assert NotifyBase.split_path(
'/%2F///%2F%2F////%2F%2F%2F////', unquote=True) == \
['/', '//', '///']
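    # Illustrative: an escaped element such as 'a%2Fb' therefore survives as
    # the single entry 'a/b' once unquoted, whereas a literal 'a/b' would
    # have been split into two separate entries.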
# Test invalid data
assert NotifyBase.parse_list(None) == []
assert NotifyBase.parse_list(object()) == []
assert NotifyBase.parse_list(42) == []
result = NotifyBase.parse_list(
',path,?name=Dr%20Disrespect', unquote=False)
assert isinstance(result, list) is True
assert len(result) == 2
assert 'path' in result
assert '?name=Dr%20Disrespect' in result
result = NotifyBase.parse_list(',path,?name=Dr%20Disrespect', unquote=True)
assert isinstance(result, list) is True
assert len(result) == 2
assert 'path' in result
    assert '?name=Dr Disrespect' in result
    # a slash found inside an entry, if escaped properly, will not be broken
    # by parse_list while additional concatenated slashes are ignored
    # FYI: %2F = /
    # In this list there are actually 4 entries, however parse_list
    # eliminates duplicates in addition to unquoting content by default
result = NotifyBase.parse_list(
',%2F,%2F%2F, , , ,%2F%2F%2F, %2F', unquote=True)
assert isinstance(result, list) is True
assert len(result) == 3
assert '/' in result
assert '//' in result
assert '///' in result
# Phone number parsing
assert NotifyBase.parse_phone_no(None) == []
assert NotifyBase.parse_phone_no(object()) == []
assert NotifyBase.parse_phone_no(42) == []
result = NotifyBase.parse_phone_no(
'+1-800-123-1234,(800) 123-4567', unquote=False)
assert isinstance(result, list) is True
assert len(result) == 2
assert '+1-800-123-1234' in result
assert '(800) 123-4567' in result
# %2B == +
result = NotifyBase.parse_phone_no(
'%2B1-800-123-1234,%2B1%20800%20123%204567', unquote=True)
assert isinstance(result, list) is True
assert len(result) == 2
assert '+1-800-123-1234' in result
assert '+1 800 123 4567' in result
# Give nothing, get nothing
assert NotifyBase.escape_html("") == ""
assert NotifyBase.escape_html(None) == ""
assert NotifyBase.escape_html(object()) == ""
# Test quote
assert NotifyBase.unquote('%20') == ' '
assert NotifyBase.quote(' ') == '%20'
assert NotifyBase.unquote(None) == ''
assert NotifyBase.quote(None) == ''
def test_notify_base_urls():
"""
API: NotifyBase() URLs
"""
    # Test verify switch which is used as part of the SSL Verification
# by default all SSL sites are verified unless this flag is set to
# something like 'No', 'False', 'Disabled', etc. Boolean values are
# pretty forgiving.
results = NotifyBase.parse_url('https://localhost:8080/?verify=No')
assert 'verify' in results
assert results['verify'] is False
results = NotifyBase.parse_url('https://localhost:8080/?verify=Yes')
assert 'verify' in results
assert results['verify'] is True
# The default is to verify
results = NotifyBase.parse_url('https://localhost:8080')
assert 'verify' in results
assert results['verify'] is True
# Password Handling
    # password is taken from the URL credentials by default
results = NotifyBase.parse_url('https://user:pass@localhost')
assert 'password' in results
    assert results['password'] == "pass"
# pass keyword over-rides default password
results = NotifyBase.parse_url(
'https://user:pass@localhost?pass=<PASSWORD>')
assert 'password' in results
assert results['password'] == "<PASSWORD>"
# password keyword can also optionally be used
results = NotifyBase.parse_url(
'https://user:pass@localhost?password=<PASSWORD>')
assert 'password' in results
assert results['password'] == "<PASSWORD>"
# pass= override password=
# password keyword can also optionally be used
results = NotifyBase.parse_url(
'https://user:pass@localhost?pass=pw1&password=pw2')
assert 'password' in results
    assert results['password'] == "pw1"
# Options
results = NotifyBase.parse_url('https://localhost?format=invalid')
assert 'format' not in results
results = NotifyBase.parse_url('https://localhost?format=text')
assert 'format' in results
assert results['format'] == 'text'
results = NotifyBase.parse_url('https://localhost?format=markdown')
assert 'format' in results
assert results['format'] == 'markdown'
results = NotifyBase.parse_url('https://localhost?format=html')
assert 'format' in results
assert results['format'] == 'html'
results = NotifyBase.parse_url('https://localhost?overflow=invalid')
assert 'overflow' not in results
results = NotifyBase.parse_url('https://localhost?overflow=upstream')
assert 'overflow' in results
assert results['overflow'] == 'upstream'
results = NotifyBase.parse_url('https://localhost?overflow=split')
assert 'overflow' in results
assert results['overflow'] == 'split'
results = NotifyBase.parse_url('https://localhost?overflow=truncate')
assert 'overflow' in results
assert results['overflow'] == 'truncate'
# User Handling
    # the user is taken from the URL credentials by default
results = NotifyBase.parse_url('https://user:pass@localhost')
assert 'user' in results
assert results['user'] == "user"
    # user keyword over-rides the default user
results = NotifyBase.parse_url(
'https://user:pass@localhost?user=newuser')
assert 'user' in results
assert results['user'] == "newuser"
# Test invalid urls
assert NotifyBase.parse_url('https://:@/') is None
assert NotifyBase.parse_url('http://:@') is None
assert NotifyBase.parse_url('http://@') is None
assert NotifyBase.parse_url('http:///') is None
assert NotifyBase.parse_url('http://:test/') is None
assert NotifyBase.parse_url('http://pass:test/') is None
OOP/Person_ines.py | bozhikovstanislav/Python-Fundamentals | 0 | 6632420 | <reponame>bozhikovstanislav/Python-Fundamentals
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
if len(name) < 3:
raise Exception("Name's length should not be less than 3 symbols!")
self.__name = name
@property
def age(self):
return self.__age
@age.setter
def age(self, age):
if age < 0:
raise Exception('Age must be positive!')
self.__age = age
def __str__(self):
return f'Name: {self.name}, Age: {self.age}'
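# Child below narrows Person's validation: the overridden age property caps
# the value at 15, and because property lookup resolves on the instance's
# class, Person.__init__'s "self.age = age" already goes through Child's
# stricter setter.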
class Child(Person):
def __init__(self, name, age):
Person.__init__(self, name, age)
self.age = age
@property
def age(self):
return self.__age
@age.setter
def age(self, age):
if age > 15:
raise Exception("Child's age must be less than 15!")
if age < 0:
raise Exception('Age must be positive!')
self.__age = age
try:
person = Child(input(), int(input()))
print(person)
except Exception as exe:
print(exe)
|
doc8/main.py | MarkusPiotrowski/doc8 | 0 | 6632421 | # Copyright (C) 2014 <NAME> <iv at altlinux dot org>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Check documentation for simple style requirements.
What is checked:
- invalid rst format - D000
- lines should not be longer than 79 characters - D001
- RST exception: line with no whitespace except in the beginning
- RST exception: lines with http or https urls
- RST exception: literal blocks
- RST exception: rst target directives
- no trailing whitespace - D002
- no tabulation for indentation - D003
- no carriage returns (use unix newlines) - D004
- no newline at end of file - D005
"""
import argparse
import collections
import configparser
import logging
import os
import sys
from stevedore import extension
from doc8 import checks
from doc8 import parser as file_parser
from doc8 import utils
from doc8 import version
FILE_PATTERNS = [".rst", ".txt"]
MAX_LINE_LENGTH = 79
CONFIG_FILENAMES = ["doc8.ini", "tox.ini", "pep8.ini", "setup.cfg"]
def split_set_type(text, delimiter=","):
return set([i.strip() for i in text.split(delimiter) if i.strip()])
def merge_sets(sets):
m = set()
for s in sets:
m.update(s)
return m
def parse_ignore_path_errors(entries):
ignore_path_errors = collections.defaultdict(set)
for path in entries:
path, ignored_errors = path.split(";", 1)
path = path.strip()
ignored_errors = split_set_type(ignored_errors, delimiter=";")
ignore_path_errors[path].update(ignored_errors)
return dict(ignore_path_errors)
def extract_config(args):
parser = configparser.RawConfigParser()
read_files = []
if args["config"]:
for fn in args["config"]:
with open(fn, "r") as fh:
parser.readfp(fh, filename=fn)
read_files.append(fn)
else:
read_files.extend(parser.read(CONFIG_FILENAMES))
if not read_files:
return {}
cfg = {}
try:
cfg["max_line_length"] = parser.getint("doc8", "max-line-length")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
cfg["ignore"] = split_set_type(parser.get("doc8", "ignore"))
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
cfg["ignore_path"] = split_set_type(parser.get("doc8", "ignore-path"))
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
ignore_path_errors = parser.get("doc8", "ignore-path-errors")
ignore_path_errors = split_set_type(ignore_path_errors)
ignore_path_errors = parse_ignore_path_errors(ignore_path_errors)
cfg["ignore_path_errors"] = ignore_path_errors
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
cfg["allow_long_titles"] = parser.getboolean("doc8", "allow-long-titles")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
cfg["sphinx"] = parser.getboolean("doc8", "sphinx")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
cfg["verbose"] = parser.getboolean("doc8", "verbose")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
cfg["file_encoding"] = parser.get("doc8", "file-encoding")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
cfg["default_extension"] = parser.get("doc8", "default-extension")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
extensions = parser.get("doc8", "extensions")
extensions = extensions.split(",")
extensions = [s.strip() for s in extensions if s.strip()]
if extensions:
cfg["extension"] = extensions
except (configparser.NoSectionError, configparser.NoOptionError):
pass
return cfg
def fetch_checks(cfg):
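    # Combine the built-in checks with any third-party checks registered
    # under the 'doc8.extension.check' entry point (loaded via stevedore).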
base = [
checks.CheckValidity(cfg),
checks.CheckTrailingWhitespace(cfg),
checks.CheckIndentationNoTab(cfg),
checks.CheckCarriageReturn(cfg),
checks.CheckMaxLineLength(cfg),
checks.CheckNewlineEndOfFile(cfg),
]
mgr = extension.ExtensionManager(
namespace="doc8.extension.check", invoke_on_load=True, invoke_args=(cfg.copy(),)
)
addons = []
for e in mgr:
addons.append(e.obj)
return base + addons
def setup_logging(verbose):
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
logging.basicConfig(
level=level, format="%(levelname)s: %(message)s", stream=sys.stdout
)
def scan(cfg):
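    # Walk the requested paths, count files skipped via ignore-path, and
    # parse every remaining file so it is ready for validation.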
if not cfg.get("quiet"):
print("Scanning...")
files = collections.deque()
ignored_paths = cfg.get("ignore_path", [])
files_ignored = 0
file_iter = utils.find_files(
cfg.get("paths", []), cfg.get("extension", []), ignored_paths
)
default_extension = cfg.get("default_extension")
file_encoding = cfg.get("file_encoding")
for filename, ignoreable in file_iter:
if ignoreable:
files_ignored += 1
if cfg.get("verbose"):
print(" Ignoring '%s'" % (filename))
else:
f = file_parser.parse(
filename, default_extension=default_extension, encoding=file_encoding
)
files.append(f)
if cfg.get("verbose"):
print(" Selecting '%s'" % (filename))
return (files, files_ignored)
def validate(cfg, files, result=None):
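    # Run every configured check against every parsed file, honouring both
    # globally ignored codes and per-path ignores (ignore-path-errors).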
if not cfg.get("quiet"):
print("Validating...")
error_counts = {}
ignoreables = frozenset(cfg.get("ignore", []))
ignore_targeted = cfg.get("ignore_path_errors", {})
while files:
f = files.popleft()
if cfg.get("verbose"):
print("Validating %s" % f)
targeted_ignoreables = set(ignore_targeted.get(f.filename, set()))
targeted_ignoreables.update(ignoreables)
for c in fetch_checks(cfg):
check_name = ".".join([c.__class__.__module__, c.__class__.__name__])
error_counts.setdefault(check_name, 0)
try:
extension_matcher = c.EXT_MATCHER
except AttributeError:
pass
else:
if not extension_matcher.match(f.extension):
if cfg.get("verbose"):
print(
" Skipping check '%s' since it does not"
" understand parsing a file with extension '%s'"
% (check_name, f.extension)
)
continue
try:
reports = set(c.REPORTS)
except AttributeError:
pass
else:
reports = reports - targeted_ignoreables
if not reports:
if cfg.get("verbose"):
print(
" Skipping check '%s', determined to only"
" check ignoreable codes" % check_name
)
continue
if cfg.get("verbose"):
print(" Running check '%s'" % check_name)
if isinstance(c, checks.ContentCheck):
for line_num, code, message in c.report_iter(f):
if code in targeted_ignoreables:
continue
if not isinstance(line_num, (float, int)):
line_num = "?"
if cfg.get("verbose"):
print(
" - %s:%s: %s %s" % (f.filename, line_num, code, message)
)
elif not result.capture:
print("%s:%s: %s %s" % (f.filename, line_num, code, message))
result.error(check_name, f.filename, line_num, code, message)
error_counts[check_name] += 1
elif isinstance(c, checks.LineCheck):
for line_num, line in enumerate(f.lines_iter(), 1):
for code, message in c.report_iter(line):
if code in targeted_ignoreables:
continue
if cfg.get("verbose"):
print(
" - %s:%s: %s %s"
% (f.filename, line_num, code, message)
)
elif not result.capture:
print(
"%s:%s: %s %s" % (f.filename, line_num, code, message)
)
result.error(check_name, f.filename, line_num, code, message)
error_counts[check_name] += 1
else:
raise TypeError("Unknown check type: %s, %s" % (type(c), c))
return error_counts
def get_defaults():
return {
"paths": [os.getcwd()],
"config": [],
"allow_long_titles": False,
"ignore": [],
"sphinx": True,
"ignore_path": [],
"ignore_path_errors": [],
"default_extension": "",
"file_encoding": "",
"max_line_length": MAX_LINE_LENGTH,
"extension": list(FILE_PATTERNS),
"quiet": False,
"verbose": False,
"version": False,
}
class Result(object):
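    # Accumulates scan/validation results; when capture is True the per-error
    # lines are stored and only emitted later through report().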
def __init__(self):
self.files_selected = 0
self.files_ignored = 0
self.error_counts = {}
self.errors = []
self.capture = False
@property
def total_errors(self):
return len(self.errors)
def error(self, check_name, filename, line_num, code, message):
self.errors.append((check_name, filename, line_num, code, message))
def finish(self, files_selected, files_ignored, error_counts):
self.files_selected = files_selected
self.files_ignored = files_ignored
self.error_counts = error_counts
def report(self):
lines = []
if self.capture:
for error in self.errors:
lines.append("%s:%s: %s %s" % error[1:])
lines.extend(
[
"=" * 8,
"Total files scanned = %s" % (self.files_selected),
"Total files ignored = %s" % (self.files_ignored),
"Total accumulated errors = %s" % (self.total_errors),
]
)
if self.error_counts:
lines.append("Detailed error counts:")
for check_name in sorted(self.error_counts.keys()):
check_errors = self.error_counts[check_name]
lines.append(" - %s = %s" % (check_name, check_errors))
return "\n".join(lines)
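# A minimal programmatic sketch of the entry point defined below (assuming
# this module is importable as doc8.main):
#
#     from doc8.main import doc8
#     result = doc8()            # get_defaults(): scan the current directory
#     print(result.report())     # summary plus any captured error lines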
def doc8(args=None, **kwargs):
result = Result()
if args is None:
args = get_defaults()
# Force reporting to suppress all output
kwargs["quiet"] = True
kwargs["verbose"] = False
result.capture = True
args["ignore"] = merge_sets(args["ignore"])
cfg = extract_config(args)
args["ignore"].update(cfg.pop("ignore", set()))
if "sphinx" in cfg:
args["sphinx"] = cfg.pop("sphinx")
args["extension"].extend(cfg.pop("extension", []))
args["ignore_path"].extend(cfg.pop("ignore_path", []))
cfg.setdefault("ignore_path_errors", {})
tmp_ignores = parse_ignore_path_errors(args.pop("ignore_path_errors", []))
for path, ignores in tmp_ignores.items():
if path in cfg["ignore_path_errors"]:
cfg["ignore_path_errors"][path].update(ignores)
else:
cfg["ignore_path_errors"][path] = set(ignores)
args.update(cfg)
# Override args with any kwargs
args.update(kwargs.items())
setup_logging(args.get("verbose"))
files, files_ignored = scan(args)
files_selected = len(files)
error_counts = validate(args, files, result=result)
result.finish(files_selected, files_ignored, error_counts)
return result
def main():
defaults = get_defaults()
parser = argparse.ArgumentParser(
prog="doc8",
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"paths",
metavar="path",
type=str,
nargs="*",
help=("path to scan for doc files (default: current directory)."),
default=defaults["paths"],
)
parser.add_argument(
"--config",
metavar="path",
action="append",
help="user config file location"
" (default: %s)." % ", ".join(CONFIG_FILENAMES),
default=defaults["config"],
)
parser.add_argument(
"--allow-long-titles",
action="store_true",
help="allow long section titles (default: false).",
default=defaults["allow_long_titles"],
)
parser.add_argument(
"--ignore",
action="append",
metavar="code",
help="ignore the given error code(s).",
type=split_set_type,
default=defaults["ignore"],
)
parser.add_argument(
"--no-sphinx",
action="store_false",
help="do not ignore sphinx specific false positives.",
default=defaults["sphinx"],
dest="sphinx",
)
parser.add_argument(
"--ignore-path",
action="append",
default=defaults["ignore_path"],
help="ignore the given directory or file (globs are supported).",
metavar="path",
)
parser.add_argument(
"--ignore-path-errors",
action="append",
default=defaults["ignore_path_errors"],
help="ignore the given specific errors in the provided file.",
metavar="path",
)
parser.add_argument(
"--default-extension",
action="store",
help="default file extension to use when a file is"
" found without a file extension.",
default=defaults["default_extension"],
dest="default_extension",
metavar="extension",
)
parser.add_argument(
"--file-encoding",
action="store",
help="set input files text encoding",
default=defaults["file_encoding"],
dest="file_encoding",
metavar="encoding",
)
parser.add_argument(
"--max-line-length",
action="store",
metavar="int",
type=int,
help="maximum allowed line"
" length (default: %s)." % defaults["max_line_length"],
default=defaults["max_line_length"],
)
parser.add_argument(
"-e",
"--extension",
action="append",
metavar="extension",
help="check file extensions of the given type"
" (default: %s)." % ", ".join(defaults["extension"]),
default=defaults["extension"],
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="only print violations",
default=defaults["quiet"],
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
help="run in verbose mode.",
default=defaults["verbose"],
)
parser.add_argument(
"--version",
dest="version",
action="store_true",
help="show the version and exit.",
default=defaults["version"],
)
args = vars(parser.parse_args())
if args.get("version"):
print(version.version_string)
return 0
result = doc8(args)
if not args.get("quiet"):
print(result.report())
if result.total_errors:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
src/managers.py | MrLIVB/BMSTU_CG_CP | 0 | 6632422 | <filename>src/managers.py
from math import floor
from scene import BaseScene
from builddirector import BuildDirector
from musclemodel import Morph
from visibleobject import *
from visualizer import *
from musclemodel import ComplexMuscleModel
class BaseSceneManager(object):
def __init__(self, scene: BaseScene):
self.scene = scene
def execute(self):
pass
class LoadManager(BaseSceneManager):
def __init__(self, scene: BaseScene, build_director: BuildDirector):
super().__init__(scene)
self.director = build_director
def execute(self):
        if self.scene is None:
            print("No scene")
            return
self.scene.objects.clear()
self.scene.add_object(self.director.create())
class DrawManager(BaseSceneManager):
def __init__(self, scene: BaseScene, draw_factory: BaseDrawerFactory):
super().__init__(scene)
self.drawerFactory = draw_factory
def execute(self):
visualizer = Visualizer()
visualizer.setDrawer(self.drawerFactory)
visualizer.setCamera(self.scene.get_camera())
visualizer.clear()
zbuf = ZBuffer(visualizer)
lights = self.scene.get_lights()
cam = self.scene.get_camera()
for model in self.scene.get_models():
if type(model) is ComplexMuscleModel:
for part in model:
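                    # Keep only polygons whose normal has a positive scalar
                    # product with the camera direction (simple back-face
                    # culling); the rest are skipped below.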
process_polygons = [[polygon, True if polygon.get_normal().scalar_multiplication(cam.direction) > 0 else False] for polygon in part.polygons]
for polygon in process_polygons:
if polygon[1]:
zbuf.process_polygon(polygon[0], lights)
for light in lights:
zbuf.safe_process_point(light.x, light.y, light.z, 900, 900, Qt.blue)
class DrawAnimationManager(DrawManager):
def __init__(self, scene: BaseScene, draw_factory: BaseDrawerFactory, frame):
super().__init__(scene, draw_factory)
self.frame = frame
def execute(self):
visualizer = Visualizer()
visualizer.setDrawer(self.drawerFactory)
visualizer.setCamera(self.scene.get_camera())
visualizer.clear()
zbuf = ZBuffer(visualizer)
lights = self.scene.get_lights()
cam = self.scene.get_camera()
morph = self.scene.get_model(1)
morph: Morph
process_polygons = [[polygon, True if polygon.get_normal().scalar_multiplication(cam.direction) > 0 else False] for polygon in morph.get_frame(self.frame)]
for polygon in process_polygons:
if polygon[1]:
zbuf.process_polygon(polygon[0], lights)
class ZBuffer(object):
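    # Minimal software z-buffer: triangles are rasterized scanline by
    # scanline and, for every pixel, only the fragment with the largest
    # stored z value is drawn.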
def __init__(self, visualizer: Visualizer, width=900, height=900):
self.width = width
self.height = height
self.visualizer = visualizer
self._buf = [[-6000 for _ in range(width)] for __ in range(height)]
def process_polygon(self, polygon: Polygon, light):
color = polygon.get_color(light)
points = polygon.get_points()
x = [floor(points[i].x) for i in range(3)]
y = [floor(points[i].y) for i in range(3)]
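        # For each scanline between the triangle's lowest and highest rows,
        # find where it crosses two edges, interpolate z along those edges,
        # then fill the horizontal span with linearly interpolated depths.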
        ymax = min(max(y), self.height - 1)  # clamp to the last valid buffer row
ymin = max(min(y), 0)
x1 = x2 = 0
z1 = z2 = 0
for y_current in range(ymin, ymax+1):
first_cycle = 1
for n in range(3):
n1 = 0 if n == 2 else n + 1
if y_current >= max(y[n], y[n1]) or y_current < min(y[n], y[n1]):
                    # The scanline does not cross this edge; skip it
continue
m = float(y[n] - y_current) / (y[n]-y[n1])
if first_cycle == 0:
x2 = x[n] + floor(m * (x[n1] - x[n]))
z2 = points[n].z + m * (points[n1].z - points[n].z)
else:
x1 = x[n] + floor(m * (x[n1] - x[n]))
z1 = points[n].z + m * (points[n1].z - points[n].z)
first_cycle = 0
if x2 < x1:
x2, x1 = x1, x2
z2, z1 = z1, z2
x_max = min(x2, self.width)
x_min = max(x1, 0)
for x_current in range(x_min, x_max):
m = float(x1 - x_current) / (x1 - x2)
z_current = z1 + m * (z2 - z1)
self.process_point(x_current, y_current, int(z_current), color)
def process_point(self, x: int, y: int, z: int, color):
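        # Depth test: draw the pixel only if this fragment has a larger z
        # than whatever is already stored in the buffer for (x, y).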
if z > self._buf[x][y]:
self._buf[x][y] = z
self.visualizer.drawPoint(x, y, color)
def safe_process_point(self, x: int, y: int, z: int, width: int, height: int, color):
if x < 0 or x >= width or y < 0 or y >= height:
return
elif z > self._buf[x][y]:
self._buf[x][y] = z
self.visualizer.drawWidePoint(x, y, color)
metal_python/models/__init__.py | metal-stack/metal-python | 7 | 6632423 | # coding: utf-8
# flake8: noqa
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from metal_python.models.datastore_ip_search_query import DatastoreIPSearchQuery
from metal_python.models.datastore_machine_search_query import DatastoreMachineSearchQuery
from metal_python.models.datastore_network_search_query import DatastoreNetworkSearchQuery
from metal_python.models.httperrors_http_error_response import HttperrorsHTTPErrorResponse
from metal_python.models.rest_status import RestStatus
from metal_python.models.rest_version import RestVersion
from metal_python.models.v1_bgp_filter import V1BGPFilter
from metal_python.models.v1_board_revisions import V1BoardRevisions
from metal_python.models.v1_boot_info import V1BootInfo
from metal_python.models.v1_chassis_identify_led_state import V1ChassisIdentifyLEDState
from metal_python.models.v1_common import V1Common
from metal_python.models.v1_describable import V1Describable
from metal_python.models.v1_disk import V1Disk
from metal_python.models.v1_disk_partition import V1DiskPartition
from metal_python.models.v1_empty_body import V1EmptyBody
from metal_python.models.v1_filesystem import V1Filesystem
from metal_python.models.v1_filesystem_layout_base import V1FilesystemLayoutBase
from metal_python.models.v1_filesystem_layout_constraints import V1FilesystemLayoutConstraints
from metal_python.models.v1_filesystem_layout_create_request import V1FilesystemLayoutCreateRequest
from metal_python.models.v1_filesystem_layout_match_request import V1FilesystemLayoutMatchRequest
from metal_python.models.v1_filesystem_layout_response import V1FilesystemLayoutResponse
from metal_python.models.v1_filesystem_layout_try_request import V1FilesystemLayoutTryRequest
from metal_python.models.v1_filesystem_layout_update_request import V1FilesystemLayoutUpdateRequest
from metal_python.models.v1_firewall_create_request import V1FirewallCreateRequest
from metal_python.models.v1_firewall_find_request import V1FirewallFindRequest
from metal_python.models.v1_firewall_response import V1FirewallResponse
from metal_python.models.v1_firmwares_response import V1FirmwaresResponse
from metal_python.models.v1_iam_config import V1IAMConfig
from metal_python.models.v1_idm_config import V1IDMConfig
from metal_python.models.v1_ip_allocate_request import V1IPAllocateRequest
from metal_python.models.v1_ip_base import V1IPBase
from metal_python.models.v1_ip_find_request import V1IPFindRequest
from metal_python.models.v1_ip_identifiable import V1IPIdentifiable
from metal_python.models.v1_ip_response import V1IPResponse
from metal_python.models.v1_ip_update_request import V1IPUpdateRequest
from metal_python.models.v1_identifiable import V1Identifiable
from metal_python.models.v1_image_base import V1ImageBase
from metal_python.models.v1_image_create_request import V1ImageCreateRequest
from metal_python.models.v1_image_response import V1ImageResponse
from metal_python.models.v1_image_update_request import V1ImageUpdateRequest
from metal_python.models.v1_issuer_config import V1IssuerConfig
from metal_python.models.v1_logical_volume import V1LogicalVolume
from metal_python.models.v1_machine_abort_reinstall_request import V1MachineAbortReinstallRequest
from metal_python.models.v1_machine_allocate_request import V1MachineAllocateRequest
from metal_python.models.v1_machine_allocation import V1MachineAllocation
from metal_python.models.v1_machine_allocation_network import V1MachineAllocationNetwork
from metal_python.models.v1_machine_bios import V1MachineBIOS
from metal_python.models.v1_machine_base import V1MachineBase
from metal_python.models.v1_machine_block_device import V1MachineBlockDevice
from metal_python.models.v1_machine_console_password_request import V1MachineConsolePasswordRequest
from metal_python.models.v1_machine_console_password_response import V1MachineConsolePasswordResponse
from metal_python.models.v1_machine_finalize_allocation_request import V1MachineFinalizeAllocationRequest
from metal_python.models.v1_machine_find_request import V1MachineFindRequest
from metal_python.models.v1_machine_fru import V1MachineFru
from metal_python.models.v1_machine_hardware import V1MachineHardware
from metal_python.models.v1_machine_hardware_base import V1MachineHardwareBase
from metal_python.models.v1_machine_hardware_extended import V1MachineHardwareExtended
from metal_python.models.v1_machine_ipmi import V1MachineIPMI
from metal_python.models.v1_machine_ipmi_response import V1MachineIPMIResponse
from metal_python.models.v1_machine_ipmi_report import V1MachineIpmiReport
from metal_python.models.v1_machine_ipmi_report_response import V1MachineIpmiReportResponse
from metal_python.models.v1_machine_ipmi_reports import V1MachineIpmiReports
from metal_python.models.v1_machine_network import V1MachineNetwork
from metal_python.models.v1_machine_nic import V1MachineNic
from metal_python.models.v1_machine_nic_extended import V1MachineNicExtended
from metal_python.models.v1_machine_provisioning_event import V1MachineProvisioningEvent
from metal_python.models.v1_machine_recent_provisioning_events import V1MachineRecentProvisioningEvents
from metal_python.models.v1_machine_register_request import V1MachineRegisterRequest
from metal_python.models.v1_machine_reinstall_request import V1MachineReinstallRequest
from metal_python.models.v1_machine_response import V1MachineResponse
from metal_python.models.v1_machine_state import V1MachineState
from metal_python.models.v1_machine_update_firmware_request import V1MachineUpdateFirmwareRequest
from metal_python.models.v1_meta import V1Meta
from metal_python.models.v1_network_allocate_request import V1NetworkAllocateRequest
from metal_python.models.v1_network_base import V1NetworkBase
from metal_python.models.v1_network_create_request import V1NetworkCreateRequest
from metal_python.models.v1_network_find_request import V1NetworkFindRequest
from metal_python.models.v1_network_immutable import V1NetworkImmutable
from metal_python.models.v1_network_response import V1NetworkResponse
from metal_python.models.v1_network_update_request import V1NetworkUpdateRequest
from metal_python.models.v1_network_usage import V1NetworkUsage
from metal_python.models.v1_partition_base import V1PartitionBase
from metal_python.models.v1_partition_boot_configuration import V1PartitionBootConfiguration
from metal_python.models.v1_partition_capacity import V1PartitionCapacity
from metal_python.models.v1_partition_capacity_request import V1PartitionCapacityRequest
from metal_python.models.v1_partition_create_request import V1PartitionCreateRequest
from metal_python.models.v1_partition_response import V1PartitionResponse
from metal_python.models.v1_partition_update_request import V1PartitionUpdateRequest
from metal_python.models.v1_project import V1Project
from metal_python.models.v1_project_create_request import V1ProjectCreateRequest
from metal_python.models.v1_project_find_request import V1ProjectFindRequest
from metal_python.models.v1_project_response import V1ProjectResponse
from metal_python.models.v1_project_update_request import V1ProjectUpdateRequest
from metal_python.models.v1_quota import V1Quota
from metal_python.models.v1_quota_set import V1QuotaSet
from metal_python.models.v1_raid import V1Raid
from metal_python.models.v1_server_capacity import V1ServerCapacity
from metal_python.models.v1_size_constraint import V1SizeConstraint
from metal_python.models.v1_size_constraint_matching_log import V1SizeConstraintMatchingLog
from metal_python.models.v1_size_create_request import V1SizeCreateRequest
from metal_python.models.v1_size_matching_log import V1SizeMatchingLog
from metal_python.models.v1_size_response import V1SizeResponse
from metal_python.models.v1_size_update_request import V1SizeUpdateRequest
from metal_python.models.v1_switch_base import V1SwitchBase
from metal_python.models.v1_switch_connection import V1SwitchConnection
from metal_python.models.v1_switch_nic import V1SwitchNic
from metal_python.models.v1_switch_notify_request import V1SwitchNotifyRequest
from metal_python.models.v1_switch_register_request import V1SwitchRegisterRequest
from metal_python.models.v1_switch_response import V1SwitchResponse
from metal_python.models.v1_switch_sync import V1SwitchSync
from metal_python.models.v1_switch_update_request import V1SwitchUpdateRequest
from metal_python.models.v1_tenant import V1Tenant
from metal_python.models.v1_tenant_response import V1TenantResponse
from metal_python.models.v1_timestamps import V1Timestamps
from metal_python.models.v1_user import V1User
from metal_python.models.v1_vendor_revisions import V1VendorRevisions
from metal_python.models.v1_volume_group import V1VolumeGroup
| en | 0.768597 | # coding: utf-8 # flake8: noqa metal-api API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501 OpenAPI spec version: v0.15.7 Generated by: https://github.com/swagger-api/swagger-codegen.git # import models into model package | 1.473646 | 1 |
pynotes/helpers/file_exist.py | afonsopacifer/pynotes | 8 | 6632424 | <reponame>afonsopacifer/pynotes<filename>pynotes/helpers/file_exist.py
def file_exist(file_name):
    try:
        # use a context manager so the file handle is always closed
        with open(file_name, "r"):
            return True
    except IOError:
        return False | none | 1 | | 2.679046 | 3
|
controllers/PySakuraTurning_PID/PySakuraTurning_PID.py | V-ArkS/Sakura | 0 | 6632425 | <gh_stars>0
from controller import Robot
from controller import Camera
from controller import Compass
from controller import PositionSensor
import numpy as np
#import cv2, time
class Vehicle:
stage = 0
MAX_SPEED = 2
time_tmp = 0
robot = Robot()
motors = []
camera = Camera('camera')
compass = Compass('compass')
ds = robot.getDistanceSensor('distance sensor')
#tmp
picked = False
def __init__(self):
#get motors, camera and initialise them.
motorNames = ['left motor', 'right motor', 'tower rotational motor']
for i in range(3):
self.motors.append(self.robot.getMotor(motorNames[i]))
self.motors[i].setPosition(float('inf'))
if i <= 1:
self.motors[i].setVelocity(0)
else:
self.motors[i].setVelocity(0.2)
self.motors[i].setPosition(0)
self.camera.enable(int(self.robot.getBasicTimeStep()))
self.compass.enable(int(self.robot.getBasicTimeStep()))
self.ds.enable(int(self.robot.getBasicTimeStep()))
def getStage(self):
return self.stage
def setStage(self, stage_num):
self.stage = stage_num
def getCompass(self):
return np.angle(complex(self.compass.getValues()[0], self.compass.getValues()[2]))
def getDistanceValue(self):
return self.ds.getValue()
def towerSeeLeft(self):
self.motors[2].setPosition(np.pi / 2)
def towerSeeRight(self):
self.motors[2].setPosition(-np.pi / 2)
def towerRestore(self):
self.motors[2].setPosition(0)
def releaseFood(self):
#release food
pass
#Speed setting functions
def setSpeed(self, left, right):
#set speed for four tracks
self.motors[0].setVelocity(left)
self.motors[1].setVelocity(right)
def turnRound(self, diff):
#set speed for turning left or right
global error_integral
global previous_diff#set variable as global will accelerate the speed
        error_integral += diff * ts
        error_derivative = (previous_diff - diff) / ts
        Vc = 0.5 * diff + 0.00 * error_derivative + 0.05 * error_integral
#set as 0.35/0.001/0.02 for mass=40kg
#set as 0.5/0/0.05 respectively for mass=400kg
if Vc > 1:
Vc = 1
if abs(diff) < 0.001:
self.setSpeed(0, 0)
return False
else:
self.setSpeed(-Vc, Vc)
previous_diff = diff
return True
def linePatrol(self):
#get camera image, process it and set speed based on line patrolling algorithm
#return False if there is no line
pass
def boxPatrol(self):
#get camera image and find orange box, then adjust the speed to go to the box
#return False if there is no box
pass
def bridgePatrol(self):
#get camera image and find bridge, then adjust the speed to go to the bridge
#return False if there is no bridge
pass
def archPatrol(self):
#get camera image and find arch, then adjust the speed to go to the arch
#return False if there is no arch
pass
def colourPatrol(self):
#for task 5
pass
if __name__ == "__main__":
TIME_STEP = 64
ts = 1/TIME_STEP
error_integral = 0
error_derivative = 0
previous_diff = 0
vehicle = Vehicle()
vehicle.towerRestore()
while vehicle.robot.step(TIME_STEP) != -1:
target = np.pi / 2
if not (vehicle.turnRound(target - vehicle.getCompass())):
vehicle.setSpeed(0.1, 0.1)
vehicle.setSpeed(0.1, 0.1) | en | 0.842994 | #import cv2, time #tmp #get motors, camera and initialise them. #release food #Speed setting functions #set speed for four tracks #set speed for turning left or right #set variable as global will accelerate the speed #set as 0.35/0.001/0.02 for mass=40kg #set as 0.5/0/0.05 respectively for mass=400kg #get camera image, process it and set speed based on line patrolling algorithm #return False if there is no line #get camera image and find orange box, then adjust the speed to go to the box #return False if there is no box #get camera image and find bridge, then adjust the speed to go to the bridge #return False if there is no bridge #get camera image and find arch, then adjust the speed to go to the arch #return False if there is no arch #for task 5 | 3.057513 | 3 |
01-PythonAlgorithms/random/ques_1.py | spendyala/deeplearning-docker | 0 | 6632426 | '''You have a list of timestamps. Each timestamp is a time when there was a glitch in the network.
If 3 or more glitches happen within one second (duration), you need to print the timestamp of the first glitch in that window.
input_lst = [2.1, 2.5, 2.9, 3.0, 3.6, 3.9, 5.2, 5.9, 6.1, 8.2, 10.2, 11.3, 11.8, 11.9]
The output list should be:
[2.1, 5.2, 11.3]
Glitch windows: [2.1, 2.5, 2.9, 3.0], [5.2, 5.9, 6.1], [11.3, 11.8, 11.9]
You can't consider [3.6, 3.9] since the number of glitches < 3
A particular timestamp will fall into one window only. So since 3.0 already fell into the first window, it can't fall into the second window.
Try to solve this today we can discuss tomorrow.'''
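# Editor's note: the sketch below is an illustrative addition (not part of the original file);
# `find_glitch_windows` is a hypothetical helper showing one way to solve the problem described
# in the docstring above. The original attempts follow unchanged.
def find_glitch_windows(timestamps, window=1.0, min_count=3):
    starts = []
    i = 0
    while i < len(timestamps):
        j = i
        # grow the window while glitches stay within `window` seconds of the first one
        while j < len(timestamps) and timestamps[j] - timestamps[i] < window:
            j += 1
        if j - i >= min_count:
            starts.append(timestamps[i])  # report the first glitch of the window
            i = j                         # timestamps in a window are not reused
        else:
            i += 1
    return starts
# find_glitch_windows([2.1, 2.5, 2.9, 3.0, 3.6, 3.9, 5.2, 5.9, 6.1, 8.2, 10.2, 11.3, 11.8, 11.9])
# -> [2.1, 5.2, 11.3]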
input_lst = [2.1, 2.5, 2.9, 3.0, 3.6, 3.9, 5.2, 5.9, 6.1, 8.2, 10.2, 11.3, 11.8, 11.9]
previous_sec = 0
output_list = []
#while input_lst:
# try:
# peak_first = input_lst[0]
# peak_third = input_lst[2]
# if (int(peak_first) == int(peak_third) or
# peak_third < int(peak_first)+1.2):
# current_sec = int(peak_first)
# if previous_sec+1.2 > peak_third:
# previous_sec = peak_third
# input_lst.pop(0)
# continue
# if previous_sec != current_sec:
# output_list.append(peak_first)
# previous_sec = current_sec
# input_lst.pop(0)
# except Exception:
# break
#print(output_list)
input_lst = [2.1, 2.5, 2.9, 3.0, 3.6, 3.9, 5.2, 5.9, 6.1, 8.2, 10.2, 11.3, 11.8, 11.9]
previous = 0
output_lst = []
current_window = []
glitches_window = []
while input_lst:
try:
first_peek = input_lst[0]
third_peek = input_lst[2]
if third_peek <= first_peek+1:
if first_peek <= previous+1:
print(first_peek)
input_lst.pop(0)
continue
previous = first_peek
output_lst.append(previous)
#print('Starting {}, previous {}'.format(first_peek, previous))
except IndexError:
break
input_lst.pop(0)
print(output_lst)
| en | 0.791844 | You have a list of timestamps. Each timestamp is a time when there was a glitch in the network. If 3 or more glitches happen within one second (duration), you need to print the timestamp of the first glitch in that window. input_lst = [2.1, 2.5, 2.9, 3.0, 3.6, 3.9, 5.2, 5.9, 6.1, 8.2, 10.2, 11.3, 11.8, 11.9] The output list should be: [2.1, 5.2, 11.3] Glitch windows: [2.1, 2.5, 2.9, 3.0], [5.2, 5.9, 6.1], [11.3, 11.8, 11.9] You can't consider [3.6, 3.9] since the number of glitches < 3 A particular timestamp will fall into one window only. So since 3.0 already fell into the first window, it can't fall into the second window. Try to solve this today we can discuss tomorrow. #while input_lst: # try: # peak_first = input_lst[0] # peak_third = input_lst[2] # if (int(peak_first) == int(peak_third) or # peak_third < int(peak_first)+1.2): # current_sec = int(peak_first) # if previous_sec+1.2 > peak_third: # previous_sec = peak_third # input_lst.pop(0) # continue # if previous_sec != current_sec: # output_list.append(peak_first) # previous_sec = current_sec # input_lst.pop(0) # except Exception: # break #print(output_list) #print('Starting {}, previous {}'.format(first_peek, previous)) | 3.834007 | 4 |
build/geometry/tf_conversions/catkin_generated/pkg.develspace.context.pc.py | EurobotMDX/eurobot_2020_odroid_cam | 4 | 6632427 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ros/lidar_ws/src/geometry/tf_conversions/include;/usr/include/eigen3;/opt/ros/kinetic/share/orocos_kdl/../../include".split(';') if "/home/ros/lidar_ws/src/geometry/tf_conversions/include;/usr/include/eigen3;/opt/ros/kinetic/share/orocos_kdl/../../include" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;kdl_conversions;tf".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ltf_conversions;/opt/ros/kinetic/lib/liborocos-kdl.so.1.3.2".split(';') if "-ltf_conversions;/opt/ros/kinetic/lib/liborocos-kdl.so.1.3.2" != "" else []
PROJECT_NAME = "tf_conversions"
PROJECT_SPACE_DIR = "/home/ros/lidar_ws/devel"
PROJECT_VERSION = "1.11.9"
| en | 0.409737 | # generated from catkin/cmake/template/pkg.context.pc.in | 1.254698 | 1 |
CIFAR10/fed.py | sxontheway/BalanceFL | 0 | 6632428 | import copy
import time
from collections import OrderedDict
import torch
from data.dataloader import local_client_dataset, test_dataset
from models.utils import *
from utils.train_helper import validate_one_model
from utils.sampling import *
import numpy as np
from multiprocessing import Process
def return_state_dict(network):
"""
save model to state_dict
"""
feat_model = {k: v.cpu() for k, v in network["feat_model"].state_dict().items()}
classifier = {k: v.cpu() for k, v in network["classifier"].state_dict().items()}
return {"feat_model": feat_model, "classifier": classifier}
def load_state_dict(network, state_dict):
"""
restore model from state_dict
"""
network["feat_model"].load_state_dict(state_dict["feat_model"])
network["classifier"].load_state_dict(state_dict["classifier"])
# for name, param in state_dict["feat_model"].items():
# print(name, "\t", param.size())
return network
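# Editor's note: illustrative addition, not part of the original file. Models cross the
# multiprocessing boundary as plain CPU state_dicts; a self-contained round trip with
# throwaway modules looks like this.
def _state_dict_roundtrip_demo():
    import torch.nn as nn
    net_a = {"feat_model": nn.Linear(4, 8), "classifier": nn.Linear(8, 2)}
    net_b = {"feat_model": nn.Linear(4, 8), "classifier": nn.Linear(8, 2)}
    snapshot = return_state_dict(net_a)       # picklable dict of CPU tensors
    net_b = load_state_dict(net_b, snapshot)  # net_b now carries net_a's weights
    return net_b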
def check_status(status_list, selected_idx, target_status):
"""
0. original status (1st FL round)
1. server finished sending: server_network --> mp_list
2. client received, and returned the model: mp_list --> networks[i] --> local_update --> mp_list
3. server received: mp_list --> networks[i]
--> 1. aggregation finished. networks[i] --> aggregate --> server_network --> mp_list, the status change to 1
---
Return True: when all clients meet conditions, else False
"""
tmp = np.array(status_list)
if (tmp[selected_idx] == target_status).all() == True:
return True
else:
return False
def set_status(status_list, selected_idx, target_status):
"""
see function: check_status
"""
if type(selected_idx) is int:
selected_idx = [selected_idx]
for i in selected_idx:
status_list[i] = target_status
# print(f"set_status {target_status}")
def difference_models_norm_2(model_1, model_2):
"""
Return the norm 2 difference between the two model parameters. Used in FedProx.
"""
tensor_1_backbone = list(model_1["feat_model"].parameters())
tensor_1_classifier = list(model_1["classifier"].parameters())
tensor_2_backbone = list(model_2["feat_model"].parameters())
tensor_2_classifier = list(model_2["classifier"].parameters())
diff_list = [torch.sum((tensor_1_backbone[i] - tensor_2_backbone[i])**2) for i in range(len(tensor_1_backbone))]
diff_list.extend([torch.sum((tensor_1_classifier[i] - tensor_2_classifier[i])**2) for i in range(len(tensor_1_classifier))])
norm = sum(diff_list)
return norm
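# Editor's note: illustrative addition, not part of the original file. It shows where the
# squared-norm helper above enters the FedProx local objective F_i(w) + (mu/2) * ||w - w_t||^2.
def _fedprox_objective_demo(task_loss, local_network, global_network, mu=0.05):
    return task_loss + (mu / 2) * difference_models_norm_2(local_network, global_network)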
class Fed_server(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self, init_network, criterion, config, per_client_data,
per_client_label, idx_per_client_train,
test_data, test_label, state_list=None, state_dict_list=None, idx=None
):
super(Fed_server, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = [], [], [], []
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights/self.client_weights.sum()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.server_network = copy.deepcopy(init_network)
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Server (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
""" Server does not need
# list of optimizer_dict. One optimizer for one network
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = \
local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, shuffle=True,
num_workers=num_workers, pin_memory=False)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers, pin_memory=False)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
"""
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)
self.test_dataset = test_dataset(test_data, test_label, config)
def local_train(self, selected_idx):
"""
server-side code
"""
# self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(self.server_network) # model transfer
set_status(self.status_list, selected_idx, 1)
if self.local_ep > 10: # is local training
print("Waiting")
# wait until all clients returning the model
while check_status(self.status_list, selected_idx, 2) is False:
time.sleep(0.1)
# mp_list --> self.networks (copys of client models on the server). Prepare for aggregation.
for i in selected_idx:
load_state_dict(self.networks[i], self.state_dict_list[i]) # model transfer
print("===> Local training finished")
def aggregation(self, selected_idx, mode):
"""
server-side code: aggregation
"""
if mode in ["fedavg", "fedavgm", "fedbn", "fedprox"]:
self.aggregate_layers(selected_idx, mode, backbone_only=False)
elif mode == "fedavg_fs":
opt = self.config["fl_opt"]
backbone_only, imprint, spread_out = opt["backbone_only"], opt["imprint"], opt["spread_out"]
self.aggregate_layers(selected_idx, "fedavg", backbone_only=backbone_only)
if imprint:
self.imprint(selected_idx)
if spread_out:
self.spread_out()
# model: self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(self.server_network) # model transfer
set_status(self.status_list, selected_idx, 0) # back to original
print("===> Aggregation finished")
def aggregate_layers(self, selected_idx, mode, backbone_only):
"""
backbone_only: choose to only aggregate backbone
"""
weights_sum = self.client_weights[selected_idx].sum()
with torch.no_grad():
if mode in ["fedavg", "fedprox"]:
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if 'num_batches_tracked' in key:
# num_batches_tracked is a non trainable LongTensor
# and num_batches_tracked are the same for
# all clients for the given datasets
layer.data.copy_(self.networks[0][net_name].state_dict()[key])
else:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx]/weights_sum
temp += weight * self.networks[idx][net_name].state_dict()[key]
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedbn": # https://openreview.net/pdf?id=6YEQUn0QICG
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if 'bn' not in key:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx]/weights_sum
temp += weight * self.networks[idx][net_name].state_dict()[key]
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedavgm":
raise NotImplementedError
def evaluate_global(self, train_dataset=None, test_dataset=None):
"""
Accuracy of the global model and all classes
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.server_network, train_dataset, self.device, per_cls_acc=True)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.server_network, test_dataset, self.device, per_cls_acc=True)
print("===> Evaluation finished\n")
return train_loss_per_cls, train_acc_per_cls, test_loss_per_cls, test_acc_per_cls
def evaluate_global_all(self, train_dataset=None, test_dataset=None):
"""
Accuracy of models of all nodes and all classes
Return: all_results
shape: (4, num_client, num_cls), 4 for (train_loss, train_acc, test_loss, test_acc)
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
all_results = [None for i in range(self.num_clients)]
for idx in range(self.num_clients):
            # evaluate on training set: per-class loss/acc
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.networks[idx], train_dataset, self.device, per_cls_acc=True)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.networks[idx], test_dataset, self.device, per_cls_acc=True)
all_results[idx] = train_loss_per_cls, train_acc_per_cls, test_loss_per_cls, test_acc_per_cls
print(f"===> Evaluation finished{idx}\n")
all_results = np.array(all_results).transpose(1,0,2)
return all_results
class Fed_client(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self, init_network, criterion, config, per_client_data,
per_client_label, idx_per_client_train,
test_data, test_label, state_list=None, state_dict_list=None, idx=None
):
super(Fed_client, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = [], [], [], []
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.device = config["device_client"][idx]
self.server_network = copy.deepcopy(init_network)
self.balanced_loader = config["fl_opt"]["balanced_loader"]
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
if config["fl_opt"]["aggregation"] == "fedprox":
self.fedprox = True
else:
self.fedprox = False
self.mu = 0.05
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights/self.client_weights.sum()
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Client {idx} (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
# list of network and optimizer_dict. One optimizer for one network.
if client_i != self.client_idx:
self.networks.append(None)
self.optimizers.append(None)
self.optimizers_stage2.append(None)
else:
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = \
local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, shuffle=True,
num_workers=num_workers, pin_memory=False)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers, pin_memory=False)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
""" clients do not need
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)
self.test_dataset = test_dataset(test_data, test_label, config)
"""
def run(self):
"""
client-side code
"""
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
self.networks[self.client_idx]["feat_model"].to(self.device)
self.networks[self.client_idx]["classifier"].to(self.device)
while(1):
while check_status(self.status_list, self.client_idx, 1) is False:
time.sleep(0.1)
# model: mp_list --> server_network
load_state_dict(self.server_network, self.state_dict_list[self.client_idx]) # model transfer
self.train_lt(self.client_idx) # local model updating
# self.networks[i] --> mp_list
self.state_dict_list[self.client_idx] = return_state_dict(self.networks[self.client_idx]) # model transfer
set_status(self.status_list, self.client_idx, 2)
def train_lt(self, idx):
"""
client-side code
---
        Args:
        - idx: the index in all clients (e.g., 50) or selected clients (e.g., 10).
            If self.prefetch is true: the index in selected clients;
            if self.prefetch is false: the index in all clients.
"""
idx_in_all = idx
# server broadcast the model to clients
"""
# optimizer will not work if use this, because optimizer needs the params from the model
# self.networks[idx_in_all] = copy.deepcopy(self.server_network)
"""
for net_name, net in self.server_network.items(): # feat_model, classifier
state_dict = self.networks[idx_in_all][net_name].state_dict()
for key, layer in net.state_dict().items():
state_dict[key].data.copy_(layer.data)
for net in self.networks[idx_in_all].values():
net.train()
for net in self.server_network.values():
net.train()
teacher = self.server_network
# torch.cuda.empty_cache()
"""
(Per-cls) Covariance Calculation
"""
if self.feat_aug:
# probability for augmentation for every class
max_num = max(self.local_num_per_cls[idx])
prob = torch.tensor([1.0-i/max_num for i in self.local_num_per_cls[idx]])
# obtain features and labels under eval mode
feat_list, label_list = [], []
# self.networks[idx_in_all]['feat_model'].eval()
for (imgs, labels, indexs) in self.train_loaders[idx]:
with torch.no_grad():
imgs = imgs.to(self.device)
feat_list.append(teacher['feat_model'](imgs).cpu())
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
# self.networks[idx_in_all]['feat_model'].train()
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor([unique_labels.index(i) for i in label_list]) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feats, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feats)
# calculate the variance
sampled_data, sample_label = [], []
per_cls_cov = []
for feats in feats_per_cls:
if len(feats) > 1:
per_cls_cov.append(np.cov(torch.stack(feats, 1).numpy()))
else:
per_cls_cov.append(np.zeros((feats[0].shape[0], feats[0].shape[0])))
per_cls_cov = np.array(per_cls_cov)
# per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls])
cov = np.average(per_cls_cov, axis=0, weights=self.local_num_per_cls[idx]) # covariance for feature dimension, shape: e.g., (128, 128)
# pre-generate deviation
divider = 500
pointer = 0
augs = torch.from_numpy(np.random.multivariate_normal(
mean = np.zeros(cov.shape[0]),
cov = cov, # covariance for feature dimension, shape: e.g., (128, 128)
size = divider)).float().to(self.device)
with torch.set_grad_enabled(True):
losses_cls = 0
losses_kd = 0
##########################
#### stage 1 training ####
##########################
for epoch in range(self.local_ep):
"""
model update
"""
                if self.local_ep > 10:  # local training mode
print(epoch, end=' ')
if self.balanced_loader:
tmp_loader = self.train_loader_balanced[idx]
else:
tmp_loader = self.train_loaders[idx]
for (imgs, labels, indexs) in tmp_loader:
# to device
imgs = imgs.to(self.device)
# forward
feat = self.networks[idx_in_all]['feat_model'](imgs)
logits = self.networks[idx_in_all]['classifier'](feat)
# do feature space augmentation with a likelihood
if self.feat_aug:
# prob = torch.tensor([1.0 for i in self.local_num_per_cls[idx]])
rand_list = torch.rand(len(labels))
mask = rand_list < prob[torch.tensor([unique_labels.index(i) for i in labels])]
degree = 1
aug_num = sum(mask).item()
feat_aug = feat.clone()
if aug_num > 0:
if pointer + aug_num >= divider:
pointer = 0
feat_aug[mask] = feat_aug[mask] + augs[pointer: pointer+aug_num]*degree
pointer = pointer + aug_num
logits_aug = self.networks[idx_in_all]['classifier'](feat_aug)
# teacher
with torch.no_grad():
feat_teacher = teacher['feat_model'](imgs)
pred_teacher = teacher['classifier'](feat_teacher)
# loss
labels = labels.to(self.device)
if self.config["criterions"]["def_file"].find("LwF") > 0:
if self.feat_aug:
if len(labels) != len(logits_aug):
raise RuntimeError
loss, loss_cls, loss_kd = self.criterion(labels, pred_teacher, logits, logits_aug)
else:
loss, loss_cls, loss_kd = self.criterion(labels, pred_teacher, logits)
elif self.config["criterions"]["def_file"].find("KDLoss") > 0:
loss, loss_cls, loss_kd = self.criterion(
logits, labels, feat, feat_teacher,
classfier_weight=self.networks[idx_in_all]['classifier'].fc.weight
)
# fedprox loss: https://epione.gitlabpages.inria.fr/flhd/federated_learning/FedAvg_FedProx_MNIST_iid_and_noniid.html#federated-training-with-fedprox
if self.fedprox:
prox_loss = difference_models_norm_2(self.networks[idx_in_all], teacher)
# print("FedProx Loss: ", prox_loss, loss)
loss += self.mu/2 * prox_loss
# backward
for optimizer in self.optimizers[idx_in_all].values():
optimizer.zero_grad()
loss.backward()
for optimizer in self.optimizers[idx_in_all].values():
optimizer.step()
# classifier L2-norm
if self.networks[idx_in_all]['classifier'].l2_norm:
self.networks[idx_in_all]['classifier'].weight_norm()
losses_cls += loss_cls.item()
losses_kd += loss_kd.item()
self.losses_cls[idx_in_all] = losses_cls/len(self.train_loaders[idx])/self.local_ep
self.losses_kd[idx_in_all] = losses_kd/len(self.train_loaders[idx])/self.local_ep
##########################
#### stage 2 training ####
##########################
if self.crt:
self.networks[idx_in_all]['feat_model'].eval()
if self.feat_aug:
# obtain features and labels
feat_list = []
label_list = []
for (imgs, labels, indexs) in self.train_loaders[idx]:
imgs = imgs.to(self.device)
with torch.no_grad():
feat_list.append(self.networks[idx_in_all]['feat_model'](imgs).cpu())
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor([unique_labels.index(i) for i in label_list]) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feat, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feat)
# determine the extra sample number for every existing samples
num_per_cls = np.array([len(np.where(label_list==t)[0]) for t in unique_labels]) # e.g., size (6, )
max_num = max(num_per_cls)
gen_nums = [ np.array([max_num//num_per_cls[i]-1 for _ in feats_per_cls[i]]) for i in range(len(unique_labels))]
for cls_i, nums in enumerate(gen_nums):
nums[:max_num % num_per_cls[cls_i]] = nums[:max_num % num_per_cls[cls_i]] + 1
# generate samples
sampled_data, sample_label = [], []
per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls])
cov = np.average(per_cls_cov, axis=0, weights=num_per_cls)
# print([np.mean(i) for i in per_cls_cov])
for cls_i, nums in enumerate(gen_nums):
for sample_i, num in enumerate(nums):
if num > 0:
sampled_data.append(
torch.from_numpy(np.random.multivariate_normal(
mean = feats_per_cls[cls_i][sample_i],
cov = cov, # covariance for feature dimension, shape: e.g., (128, 128)
size = num)).float())
sample_label.append(torch.full((num, ), cls_i).long())
# add generated fetaures to training data
feat_list = torch.cat([feat_list, *sampled_data], 0)
label_list = torch.cat([transformed_label_list, *sample_label], 0)
# build new dataloader
feats_dataset = local_client_dataset(feat_list, label_list, self.config)
feats_loader = torch.utils.data.DataLoader(
feats_dataset, batch_size=self.local_bs, shuffle=True, num_workers=0, pin_memory=False)
# train classifier
for epoch in range(5):
for (feats, labels, indexs) in feats_loader:
feats = feats.to(self.device)
labels = labels.to(self.device)
logits = self.networks[idx_in_all]['classifier'](feats)
loss = torch.nn.CrossEntropyLoss()(logits[:, unique_labels], labels)
self.optimizers_stage2[idx_in_all].zero_grad()
loss.backward()
self.optimizers_stage2[idx_in_all].step()
# print(loss)
# re-sampling without feature augmentation
else:
for epoch in range(5):
for (imgs, labels, indexs) in self.train_loader_balanced[idx]:
# to device
imgs = imgs.to(self.device)
# forward
with torch.no_grad():
feat = self.networks[idx_in_all]['feat_model'](imgs)
logits = self.networks[idx_in_all]['classifier'](feat)
pos_cls = torch.unique(labels).tolist()
transformed_labels = torch.tensor([pos_cls.index(i) for i in labels]).to(self.device)
loss = torch.nn.CrossEntropyLoss()(logits[:, pos_cls], transformed_labels)
self.optimizers_stage2[idx_in_all].zero_grad()
loss.backward()
self.optimizers_stage2[idx_in_all].step()
# print(loss)
print("=> ", end="")
def fedavg(w):
w_avg = copy.deepcopy(w[0])
for k in w_avg.keys():
for i in range(1, len(w)):
w_avg[k] += w[i][k]
w_avg[k] = torch.div(w_avg[k]*1.0, len(w))
return w_avg
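# Editor's note: the demo below is an illustrative addition (not part of the original code);
# it only shows how `fedavg` averages client state_dicts key by key on two toy models.
def _fedavg_demo():
    w1 = OrderedDict({"fc.weight": torch.tensor([2.0])})
    w2 = OrderedDict({"fc.weight": torch.tensor([4.0])})
    return fedavg([w1, w2])  # -> OrderedDict with fc.weight == tensor([3.0])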
# See: https://arxiv.org/abs/1909.06335
def fedavgm(new_ws, old_w, vel, args):
"""
fedavg + momentum
- new_ws (list of OrderedDict): The new calculated global model
- old_w (OrderedDict) : Initial state of the global model (which needs to be updated here)
"""
global_lr = 1
beta1 = 0
new_w = fedavg(new_ws)
# For the first round: initialize old_w, create an Orderdict to store velocity
if old_w is None:
old_w = new_w
new_v = OrderedDict()
for key in old_w.keys():
new_v[key] = torch.zeros(old_w[key].shape, dtype=old_w[key].dtype).to(args.device)
else:
new_v = copy.deepcopy(vel)
for key in new_w.keys():
delta_w_tmp = old_w[key] - new_w[key]
new_v[key] = beta1*new_v[key] + torch.mul(delta_w_tmp, global_lr)
old_w[key] -= new_v[key]
return old_w, new_v
def fedavgw(new_ws, old_w, args, round_i):
"""
fedavg + adaptive updating parameter
- new_ws (list of OrderedDict): The new calculated global model
- old_w (OrderedDict) : Initial state of the global model (which needs to be updated here)
"""
new_w = fedavg(new_ws)
# For the first round: initialize old_w
if old_w is None:
old_w = new_w
for key in new_w.keys():
old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1))
# for key in new_w.keys():
# if key == "classifier.fc.weight":
# old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1))
# else:
# old_w[key] = new_w[key]
return old_w
| import copy
import time
from collections import OrderedDict
import torch
from data.dataloader import local_client_dataset, test_dataset
from models.utils import *
from utils.train_helper import validate_one_model
from utils.sampling import *
import numpy as np
from multiprocessing import Process
import time
def return_state_dict(network):
"""
save model to state_dict
"""
feat_model = {k: v.cpu() for k, v in network["feat_model"].state_dict().items()}
classifier = {k: v.cpu() for k, v in network["classifier"].state_dict().items()}
return {"feat_model": feat_model, "classifier": classifier}
def load_state_dict(network, state_dict):
"""
restore model from state_dict
"""
network["feat_model"].load_state_dict(state_dict["feat_model"])
network["classifier"].load_state_dict(state_dict["classifier"])
# for name, param in state_dict["feat_model"].items():
# print(name, "\t", param.size())
return network
def check_status(status_list, selected_idx, target_status):
"""
0. original status (1st FL round)
1. server finished sending: server_network --> mp_list
2. client received, and returned the model: mp_list --> networks[i] --> local_update --> mp_list
3. server received: mp_list --> networks[i]
--> 1. aggregation finished. networks[i] --> aggregate --> server_network --> mp_list, the status change to 1
---
Return True: when all clients meet conditions, else False
"""
tmp = np.array(status_list)
if (tmp[selected_idx] == target_status).all() == True:
return True
else:
return False
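# Example: check_status(status_list, [0, 3], 2) turns True only once clients 0 and 3
# have both written their locally updated models back to mp_list (status 2).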
def set_status(status_list, selected_idx, target_status):
"""
see function: check_status
"""
if type(selected_idx) is int:
selected_idx = [selected_idx]
for i in selected_idx:
status_list[i] = target_status
# print(f"set_status {target_status}")
def difference_models_norm_2(model_1, model_2):
"""
Return the norm 2 difference between the two model parameters. Used in FedProx.
"""
tensor_1_backbone = list(model_1["feat_model"].parameters())
tensor_1_classifier = list(model_1["classifier"].parameters())
tensor_2_backbone = list(model_2["feat_model"].parameters())
tensor_2_classifier = list(model_2["classifier"].parameters())
diff_list = [torch.sum((tensor_1_backbone[i] - tensor_2_backbone[i])**2) for i in range(len(tensor_1_backbone))]
diff_list.extend([torch.sum((tensor_1_classifier[i] - tensor_2_classifier[i])**2) for i in range(len(tensor_1_classifier))])
norm = sum(diff_list)
return norm
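# difference_models_norm_2() provides the proximal term ||w - w_global||^2 of FedProx;
# train_lt() adds it to the loss scaled by mu/2 when config["fl_opt"]["aggregation"] == "fedprox".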
class Fed_server(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self, init_network, criterion, config, per_client_data,
per_client_label, idx_per_client_train,
test_data, test_label, state_list=None, state_dict_list=None, idx=None
):
super(Fed_server, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = [], [], [], []
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights/self.client_weights.sum()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.server_network = copy.deepcopy(init_network)
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Server (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
""" Server does not need
# list of optimizer_dict. One optimizer for one network
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = \
local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, shuffle=True,
num_workers=num_workers, pin_memory=False)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers, pin_memory=False)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
"""
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)
self.test_dataset = test_dataset(test_data, test_label, config)
def local_train(self, selected_idx):
"""
server-side code
"""
# self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(self.server_network) # model transfer
set_status(self.status_list, selected_idx, 1)
if self.local_ep > 10: # is local training
print("Waiting")
# wait until all clients returning the model
while check_status(self.status_list, selected_idx, 2) is False:
time.sleep(0.1)
        # mp_list --> self.networks (copies of client models on the server). Prepare for aggregation.
for i in selected_idx:
load_state_dict(self.networks[i], self.state_dict_list[i]) # model transfer
print("===> Local training finished")
def aggregation(self, selected_idx, mode):
"""
server-side code: aggregation
"""
if mode in ["fedavg", "fedavgm", "fedbn", "fedprox"]:
self.aggregate_layers(selected_idx, mode, backbone_only=False)
elif mode == "fedavg_fs":
opt = self.config["fl_opt"]
backbone_only, imprint, spread_out = opt["backbone_only"], opt["imprint"], opt["spread_out"]
self.aggregate_layers(selected_idx, "fedavg", backbone_only=backbone_only)
if imprint:
self.imprint(selected_idx)
if spread_out:
self.spread_out()
# model: self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(self.server_network) # model transfer
set_status(self.status_list, selected_idx, 0) # back to original
print("===> Aggregation finished")
def aggregate_layers(self, selected_idx, mode, backbone_only):
"""
backbone_only: choose to only aggregate backbone
"""
weights_sum = self.client_weights[selected_idx].sum()
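        # Each selected client is weighted by its normalized entry of client_weights
        # (computed in __init__ from idx_per_client_train), so clients with larger
        # weights contribute proportionally more to the averaged parameters.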
with torch.no_grad():
if mode in ["fedavg", "fedprox"]:
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if 'num_batches_tracked' in key:
                                # num_batches_tracked is a non-trainable LongTensor and is
                                # identical across clients for the given datasets, so it is
                                # simply copied from client 0's model
layer.data.copy_(self.networks[0][net_name].state_dict()[key])
else:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx]/weights_sum
temp += weight * self.networks[idx][net_name].state_dict()[key]
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedbn": # https://openreview.net/pdf?id=6YEQUn0QICG
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if 'bn' not in key:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx]/weights_sum
temp += weight * self.networks[idx][net_name].state_dict()[key]
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedavgm":
raise NotImplementedError
def evaluate_global(self, train_dataset=None, test_dataset=None):
"""
Accuracy of the global model and all classes
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.server_network, train_dataset, self.device, per_cls_acc=True)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.server_network, test_dataset, self.device, per_cls_acc=True)
print("===> Evaluation finished\n")
return train_loss_per_cls, train_acc_per_cls, test_loss_per_cls, test_acc_per_cls
def evaluate_global_all(self, train_dataset=None, test_dataset=None):
"""
Accuracy of models of all nodes and all classes
Return: all_results
shape: (4, num_client, num_cls), 4 for (train_loss, train_acc, test_loss, test_acc)
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
all_results = [None for i in range(self.num_clients)]
for idx in range(self.num_clients):
# evaluate on test set: per-class loss/acc
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.networks[idx], train_dataset, self.device, per_cls_acc=True)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.networks[idx], test_dataset, self.device, per_cls_acc=True)
all_results[idx] = train_loss_per_cls, train_acc_per_cls, test_loss_per_cls, test_acc_per_cls
print(f"===> Evaluation finished{idx}\n")
all_results = np.array(all_results).transpose(1,0,2)
return all_results
class Fed_client(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self, init_network, criterion, config, per_client_data,
per_client_label, idx_per_client_train,
test_data, test_label, state_list=None, state_dict_list=None, idx=None
):
super(Fed_client, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = [], [], [], []
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.device = config["device_client"][idx]
self.server_network = copy.deepcopy(init_network)
self.balanced_loader = config["fl_opt"]["balanced_loader"]
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
if config["fl_opt"]["aggregation"] == "fedprox":
self.fedprox = True
else:
self.fedprox = False
self.mu = 0.05
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights/self.client_weights.sum()
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Client {idx} (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
# list of network and optimizer_dict. One optimizer for one network.
if client_i != self.client_idx:
self.networks.append(None)
self.optimizers.append(None)
self.optimizers_stage2.append(None)
else:
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = \
local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, shuffle=True,
num_workers=num_workers, pin_memory=False)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers, pin_memory=False)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
""" clients do not need
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)
self.test_dataset = test_dataset(test_data, test_label, config)
"""
def run(self):
"""
client-side code
"""
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
self.networks[self.client_idx]["feat_model"].to(self.device)
self.networks[self.client_idx]["classifier"].to(self.device)
while(1):
while check_status(self.status_list, self.client_idx, 1) is False:
time.sleep(0.1)
# model: mp_list --> server_network
load_state_dict(self.server_network, self.state_dict_list[self.client_idx]) # model transfer
self.train_lt(self.client_idx) # local model updating
# self.networks[i] --> mp_list
self.state_dict_list[self.client_idx] = return_state_dict(self.networks[self.client_idx]) # model transfer
set_status(self.status_list, self.client_idx, 2)
def train_lt(self, idx):
"""
client-side code
---
        Args:
            - idx: the index in all clients (e.g., 50) or selected clients (e.g., 10).
              If self.prefetch is True, it is the index within the selected clients;
              otherwise it is the index within all clients.
"""
idx_in_all = idx
# server broadcast the model to clients
"""
# optimizer will not work if use this, because optimizer needs the params from the model
# self.networks[idx_in_all] = copy.deepcopy(self.server_network)
"""
for net_name, net in self.server_network.items(): # feat_model, classifier
state_dict = self.networks[idx_in_all][net_name].state_dict()
for key, layer in net.state_dict().items():
state_dict[key].data.copy_(layer.data)
for net in self.networks[idx_in_all].values():
net.train()
for net in self.server_network.values():
net.train()
teacher = self.server_network
# torch.cuda.empty_cache()
"""
(Per-cls) Covariance Calculation
"""
if self.feat_aug:
# probability for augmentation for every class
max_num = max(self.local_num_per_cls[idx])
prob = torch.tensor([1.0-i/max_num for i in self.local_num_per_cls[idx]])
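            # A class with n_c local samples is augmented with probability 1 - n_c / max_c,
            # so the rarest classes receive synthetic feature noise most often.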
# obtain features and labels under eval mode
feat_list, label_list = [], []
# self.networks[idx_in_all]['feat_model'].eval()
for (imgs, labels, indexs) in self.train_loaders[idx]:
with torch.no_grad():
imgs = imgs.to(self.device)
feat_list.append(teacher['feat_model'](imgs).cpu())
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
# self.networks[idx_in_all]['feat_model'].train()
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor([unique_labels.index(i) for i in label_list]) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feats, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feats)
# calculate the variance
sampled_data, sample_label = [], []
per_cls_cov = []
for feats in feats_per_cls:
if len(feats) > 1:
per_cls_cov.append(np.cov(torch.stack(feats, 1).numpy()))
else:
per_cls_cov.append(np.zeros((feats[0].shape[0], feats[0].shape[0])))
per_cls_cov = np.array(per_cls_cov)
# per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls])
cov = np.average(per_cls_cov, axis=0, weights=self.local_num_per_cls[idx]) # covariance for feature dimension, shape: e.g., (128, 128)
# pre-generate deviation
divider = 500
pointer = 0
augs = torch.from_numpy(np.random.multivariate_normal(
mean = np.zeros(cov.shape[0]),
cov = cov, # covariance for feature dimension, shape: e.g., (128, 128)
size = divider)).float().to(self.device)
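            # Pre-draws `divider` zero-mean Gaussian offsets with the averaged per-class
            # covariance; they are consumed in chunks during training and reused from the
            # start once the pointer wraps around.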
with torch.set_grad_enabled(True):
losses_cls = 0
losses_kd = 0
##########################
#### stage 1 training ####
##########################
for epoch in range(self.local_ep):
"""
model update
"""
                if self.local_ep > 10:  # local training mode
print(epoch, end=' ')
if self.balanced_loader:
tmp_loader = self.train_loader_balanced[idx]
else:
tmp_loader = self.train_loaders[idx]
for (imgs, labels, indexs) in tmp_loader:
# to device
imgs = imgs.to(self.device)
# forward
feat = self.networks[idx_in_all]['feat_model'](imgs)
logits = self.networks[idx_in_all]['classifier'](feat)
# do feature space augmentation with a likelihood
if self.feat_aug:
# prob = torch.tensor([1.0 for i in self.local_num_per_cls[idx]])
rand_list = torch.rand(len(labels))
mask = rand_list < prob[torch.tensor([unique_labels.index(i) for i in labels])]
degree = 1
aug_num = sum(mask).item()
feat_aug = feat.clone()
if aug_num > 0:
if pointer + aug_num >= divider:
pointer = 0
feat_aug[mask] = feat_aug[mask] + augs[pointer: pointer+aug_num]*degree
pointer = pointer + aug_num
logits_aug = self.networks[idx_in_all]['classifier'](feat_aug)
# teacher
with torch.no_grad():
feat_teacher = teacher['feat_model'](imgs)
pred_teacher = teacher['classifier'](feat_teacher)
# loss
labels = labels.to(self.device)
if self.config["criterions"]["def_file"].find("LwF") > 0:
if self.feat_aug:
if len(labels) != len(logits_aug):
raise RuntimeError
loss, loss_cls, loss_kd = self.criterion(labels, pred_teacher, logits, logits_aug)
else:
loss, loss_cls, loss_kd = self.criterion(labels, pred_teacher, logits)
elif self.config["criterions"]["def_file"].find("KDLoss") > 0:
loss, loss_cls, loss_kd = self.criterion(
logits, labels, feat, feat_teacher,
classfier_weight=self.networks[idx_in_all]['classifier'].fc.weight
)
# fedprox loss: https://epione.gitlabpages.inria.fr/flhd/federated_learning/FedAvg_FedProx_MNIST_iid_and_noniid.html#federated-training-with-fedprox
if self.fedprox:
prox_loss = difference_models_norm_2(self.networks[idx_in_all], teacher)
# print("FedProx Loss: ", prox_loss, loss)
loss += self.mu/2 * prox_loss
# backward
for optimizer in self.optimizers[idx_in_all].values():
optimizer.zero_grad()
loss.backward()
for optimizer in self.optimizers[idx_in_all].values():
optimizer.step()
# classifier L2-norm
if self.networks[idx_in_all]['classifier'].l2_norm:
self.networks[idx_in_all]['classifier'].weight_norm()
losses_cls += loss_cls.item()
losses_kd += loss_kd.item()
self.losses_cls[idx_in_all] = losses_cls/len(self.train_loaders[idx])/self.local_ep
self.losses_kd[idx_in_all] = losses_kd/len(self.train_loaders[idx])/self.local_ep
##########################
#### stage 2 training ####
##########################
if self.crt:
self.networks[idx_in_all]['feat_model'].eval()
if self.feat_aug:
# obtain features and labels
feat_list = []
label_list = []
for (imgs, labels, indexs) in self.train_loaders[idx]:
imgs = imgs.to(self.device)
with torch.no_grad():
feat_list.append(self.networks[idx_in_all]['feat_model'](imgs).cpu())
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor([unique_labels.index(i) for i in label_list]) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feat, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feat)
# determine the extra sample number for every existing samples
num_per_cls = np.array([len(np.where(label_list==t)[0]) for t in unique_labels]) # e.g., size (6, )
max_num = max(num_per_cls)
gen_nums = [ np.array([max_num//num_per_cls[i]-1 for _ in feats_per_cls[i]]) for i in range(len(unique_labels))]
for cls_i, nums in enumerate(gen_nums):
nums[:max_num % num_per_cls[cls_i]] = nums[:max_num % num_per_cls[cls_i]] + 1
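                # Each real sample thus spawns max_num // n_c - 1 synthetic features, with the
                # remainder spread over the first samples, so every class ends up with exactly
                # max_num features once the generation below completes.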
# generate samples
sampled_data, sample_label = [], []
per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls])
cov = np.average(per_cls_cov, axis=0, weights=num_per_cls)
# print([np.mean(i) for i in per_cls_cov])
for cls_i, nums in enumerate(gen_nums):
for sample_i, num in enumerate(nums):
if num > 0:
sampled_data.append(
torch.from_numpy(np.random.multivariate_normal(
mean = feats_per_cls[cls_i][sample_i],
cov = cov, # covariance for feature dimension, shape: e.g., (128, 128)
size = num)).float())
sample_label.append(torch.full((num, ), cls_i).long())
                # add generated features to the training data
feat_list = torch.cat([feat_list, *sampled_data], 0)
label_list = torch.cat([transformed_label_list, *sample_label], 0)
# build new dataloader
feats_dataset = local_client_dataset(feat_list, label_list, self.config)
feats_loader = torch.utils.data.DataLoader(
feats_dataset, batch_size=self.local_bs, shuffle=True, num_workers=0, pin_memory=False)
# train classifier
for epoch in range(5):
for (feats, labels, indexs) in feats_loader:
feats = feats.to(self.device)
labels = labels.to(self.device)
logits = self.networks[idx_in_all]['classifier'](feats)
loss = torch.nn.CrossEntropyLoss()(logits[:, unique_labels], labels)
self.optimizers_stage2[idx_in_all].zero_grad()
loss.backward()
self.optimizers_stage2[idx_in_all].step()
# print(loss)
# re-sampling without feature augmentation
else:
for epoch in range(5):
for (imgs, labels, indexs) in self.train_loader_balanced[idx]:
# to device
imgs = imgs.to(self.device)
# forward
with torch.no_grad():
feat = self.networks[idx_in_all]['feat_model'](imgs)
logits = self.networks[idx_in_all]['classifier'](feat)
pos_cls = torch.unique(labels).tolist()
transformed_labels = torch.tensor([pos_cls.index(i) for i in labels]).to(self.device)
loss = torch.nn.CrossEntropyLoss()(logits[:, pos_cls], transformed_labels)
self.optimizers_stage2[idx_in_all].zero_grad()
loss.backward()
self.optimizers_stage2[idx_in_all].step()
# print(loss)
print("=> ", end="")
def fedavg(w):
w_avg = copy.deepcopy(w[0])
for k in w_avg.keys():
for i in range(1, len(w)):
w_avg[k] += w[i][k]
w_avg[k] = torch.div(w_avg[k]*1.0, len(w))
return w_avg
# See: https://arxiv.org/abs/1909.06335
def fedavgm(new_ws, old_w, vel, args):
"""
fedavg + momentum
    - new_ws (list of OrderedDict): state dicts of the newly trained client models, whose average forms the new global model
- old_w (OrderedDict) : Initial state of the global model (which needs to be updated here)
"""
global_lr = 1
beta1 = 0
new_w = fedavg(new_ws)
    # For the first round: initialize old_w, create an OrderedDict to store the velocity
if old_w is None:
old_w = new_w
new_v = OrderedDict()
for key in old_w.keys():
new_v[key] = torch.zeros(old_w[key].shape, dtype=old_w[key].dtype).to(args.device)
else:
new_v = copy.deepcopy(vel)
for key in new_w.keys():
delta_w_tmp = old_w[key] - new_w[key]
new_v[key] = beta1*new_v[key] + torch.mul(delta_w_tmp, global_lr)
old_w[key] -= new_v[key]
return old_w, new_v
def fedavgw(new_ws, old_w, args, round_i):
"""
fedavg + adaptive updating parameter
    - new_ws (list of OrderedDict): state dicts of the newly trained client models, whose average forms the new global model
- old_w (OrderedDict) : Initial state of the global model (which needs to be updated here)
"""
new_w = fedavg(new_ws)
# For the first round: initialize old_w
if old_w is None:
old_w = new_w
for key in new_w.keys():
old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1))
# for key in new_w.keys():
# if key == "classifier.fc.weight":
# old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1))
# else:
# old_w[key] = new_w[key]
return old_w
| en | 0.629402 | save model to state_dict restore model from state_dict # for name, param in state_dict["feat_model"].items(): # print(name, "\t", param.size()) 0. original status (1st FL round) 1. server finished sending: server_network --> mp_list 2. client received, and returned the model: mp_list --> networks[i] --> local_update --> mp_list 3. server received: mp_list --> networks[i] --> 1. aggregation finished. networks[i] --> aggregate --> server_network --> mp_list, the status change to 1 --- Return True: when all clients meet conditions, else False see function: check_status # print(f"set_status {target_status}") Return the norm 2 difference between the two model parameters. Used in FedProx. Class for client updating and model aggregation # include dataloader or pre-loaded dataset # balanced-sampling dataloader # list to store local data number per class # physical idx of clients (hardcoded) # per-client accuracy and loss ######## init backbone, classifier, optimizer and dataloader ######## Server does not need # list of optimizer_dict. One optimizer for one network self.optimizers.append(init_optimizers(self.networks[client_i], config)) optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0} self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],)) # dataloader num_workers = 0 local_dataset = \ local_client_dataset(per_client_data[client_i], per_client_label[client_i], config) self.train_loaders.append( torch.utils.data.DataLoader( local_dataset, batch_size=self.local_bs, shuffle=True, num_workers=num_workers, pin_memory=False) ) self.train_loader_balanced.append( torch.utils.data.DataLoader( local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(), num_workers=num_workers, pin_memory=False) ) self.local_num_per_cls.append(local_dataset.class_sample_count) # centralized train dataset server-side code # self.server_network --> mp_list # model transfer # is local training # wait until all clients returning the model # mp_list --> self.networks (copys of client models on the server). Prepare for aggregation. # model transfer server-side code: aggregation # model: self.server_network --> mp_list # model transfer # back to original backbone_only: choose to only aggregate backbone # num_batches_tracked is a non trainable LongTensor # and num_batches_tracked are the same for # all clients for the given datasets # Fedavg # update client models # for idx in selected_idx: # self.networks[idx][net_name].state_dict()[key].data.copy_(layer) # https://openreview.net/pdf?id=6YEQUn0QICG # Fedavg # update client models # for idx in selected_idx: # self.networks[idx][net_name].state_dict()[key].data.copy_(layer) Accuracy of the global model and all classes # evaluate on training set # evaluate on test set: per-class loss/acc Accuracy of models of all nodes and all classes Return: all_results shape: (4, num_client, num_cls), 4 for (train_loss, train_acc, test_loss, test_acc) # evaluate on training set # evaluate on test set: per-class loss/acc # evaluate on test set: per-class loss/acc Class for client updating and model aggregation # include dataloader or pre-loaded dataset # balanced-sampling dataloader # list to store local data number per class # physical idx of clients (hardcoded) # per-client accuracy and loss ######## init backbone, classifier, optimizer and dataloader ######## # list of network and optimizer_dict. One optimizer for one network. 
# dataloader clients do not need # centralized train dataset train_data_all, train_label_all = [], [] for client_i in range(len(per_client_label)): train_data_all = train_data_all + per_client_data[client_i] train_label_all = train_label_all + per_client_label[client_i] self.train_dataset = local_client_dataset(train_data_all, train_label_all, config) self.test_dataset = test_dataset(test_data, test_label, config) client-side code # model: mp_list --> server_network # model transfer # local model updating # self.networks[i] --> mp_list # model transfer client-side code --- Argus: - idx: the index in all clients (e.g., 50) or selected clients (e.g., 10). If self.prefetch is true: the index in selected clients, If self.prefetch is true: the index in all clients # server broadcast the model to clients # optimizer will not work if use this, because optimizer needs the params from the model # self.networks[idx_in_all] = copy.deepcopy(self.server_network) # feat_model, classifier # torch.cuda.empty_cache() (Per-cls) Covariance Calculation # probability for augmentation for every class # obtain features and labels under eval mode # self.networks[idx_in_all]['feat_model'].eval() # self.networks[idx_in_all]['feat_model'].train() # e.g., size (6, ) # e.g., size (n, ) # per-cls features # calculate the variance # per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls]) # covariance for feature dimension, shape: e.g., (128, 128) # pre-generate deviation # covariance for feature dimension, shape: e.g., (128, 128) ########################## #### stage 1 training #### ########################## model update # locla training mode # to device # forward # do feature space augmentation with a likelihood # prob = torch.tensor([1.0 for i in self.local_num_per_cls[idx]]) # teacher # loss # fedprox loss: https://epione.gitlabpages.inria.fr/flhd/federated_learning/FedAvg_FedProx_MNIST_iid_and_noniid.html#federated-training-with-fedprox # print("FedProx Loss: ", prox_loss, loss) # backward # classifier L2-norm ########################## #### stage 2 training #### ########################## # obtain features and labels # e.g., size (6, ) # e.g., size (n, ) # per-cls features # determine the extra sample number for every existing samples # e.g., size (6, ) # generate samples # print([np.mean(i) for i in per_cls_cov]) # covariance for feature dimension, shape: e.g., (128, 128) # add generated fetaures to training data # build new dataloader # train classifier # print(loss) # re-sampling without feature augmentation # to device # forward # print(loss) # See: https://arxiv.org/abs/1909.06335 fedavg + momentum - new_ws (list of OrderedDict): The new calculated global model - old_w (OrderedDict) : Initial state of the global model (which needs to be updated here) # For the first round: initialize old_w, create an Orderdict to store velocity fedavg + adaptive updating parameter - new_ws (list of OrderedDict): The new calculated global model - old_w (OrderedDict) : Initial state of the global model (which needs to be updated here) # For the first round: initialize old_w # for key in new_w.keys(): # if key == "classifier.fc.weight": # old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1)) # else: # old_w[key] = new_w[key] | 2.26222 | 2 |
datasets/wmt20_mlqe_task3/wmt20_mlqe_task3.py | PierreColombo/datasets | 1 | 6632429 | <reponame>PierreColombo/datasets
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WMT MLQE Shared task 3."""
import csv
import glob
import os
import datasets
_CITATION = """
Not available.
"""
_DESCRIPTION = """\
This shared task (part of WMT20) will build on its previous editions
to further examine automatic methods for estimating the quality
of neural machine translation output at run-time, without relying
on reference translations. As in previous years, we cover estimation
at various levels. Important elements introduced this year include: a new
task where sentences are annotated with Direct Assessment (DA)
scores instead of labels based on post-editing; a new multilingual
sentence-level dataset mainly from Wikipedia articles, where the
source articles can be retrieved for document-wide context; the
availability of NMT models to explore system-internal information for the task.
The goal of this task 3 is to predict document-level quality scores as well as fine-grained annotations.
"""
_HOMEPAGE = "http://www.statmt.org/wmt20/quality-estimation-task.html"
_LICENSE = "Unknown"
_URLs = {
"train+dev": "https://github.com/deep-spin/deep-spin.github.io/raw/master/docs/data/wmt2020_qe/qe-task3-enfr-traindev.tar.gz",
"test": "https://github.com/deep-spin/deep-spin.github.io/raw/master/docs/data/wmt2020_qe/qe-enfr-blindtest.tar.gz",
}
_ANNOTATION_CATEGORIES = [
"Addition",
"Agreement",
"Ambiguous Translation",
"Capitalization",
"Character Encoding",
"Company Terminology",
"Date/Time",
"Diacritics",
"Duplication",
"False Friend",
"Grammatical Register",
"Hyphenation",
"Inconsistency",
"Lexical Register",
"Lexical Selection",
"Named Entity",
"Number",
"Omitted Auxiliary Verb",
"Omitted Conjunction",
"Omitted Determiner",
"Omitted Preposition",
"Omitted Pronoun",
"Orthography",
"Other POS Omitted",
"Over-translation",
"Overly Literal",
"POS",
"Punctuation",
"Shouldn't Have Been Translated",
"Shouldn't have been translated",
"Spelling",
"Tense/Mood/Aspect",
"Under-translation",
"Unidiomatic",
"Unintelligible",
"Unit Conversion",
"Untranslated",
"Whitespace",
"Word Order",
"Wrong Auxiliary Verb",
"Wrong Conjunction",
"Wrong Determiner",
"Wrong Language Variety",
"Wrong Preposition",
"Wrong Pronoun",
]
class Wmt20MlqeTask3(datasets.GeneratorBasedBuilder):
"""WMT MLQE Shared task 3."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
version=datasets.Version("1.1.0"),
description="Plain text",
)
]
def _info(self):
features = datasets.Features(
{
"document_id": datasets.Value("string"),
"source_segments": datasets.Sequence(datasets.Value("string")),
"source_tokenized": datasets.Sequence(datasets.Value("string")),
"mt_segments": datasets.Sequence(datasets.Value("string")),
"mt_tokenized": datasets.Sequence(datasets.Value("string")),
"annotations": datasets.Sequence(
{
"segment_id": datasets.Sequence(datasets.Value("int32")),
"annotation_start": datasets.Sequence(datasets.Value("int32")),
"annotation_length": datasets.Sequence(datasets.Value("int32")),
"severity": datasets.ClassLabel(names=["minor", "major", "critical"]),
"severity_weight": datasets.Value("float32"),
"category": datasets.ClassLabel(names=_ANNOTATION_CATEGORIES),
}
),
"token_annotations": datasets.Sequence(
{
"segment_id": datasets.Sequence(datasets.Value("int32")),
"first_token": datasets.Sequence(datasets.Value("int32")),
"last_token": datasets.Sequence(datasets.Value("int32")),
"token_after_gap": datasets.Sequence(datasets.Value("int32")),
"severity": datasets.ClassLabel(names=["minor", "major", "critical"]),
"category": datasets.ClassLabel(names=_ANNOTATION_CATEGORIES),
}
),
"token_index": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("int32")))),
"total_words": datasets.Value("int32"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = dl_manager.download_and_extract(_URLs)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir["train+dev"], "task3", "train"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir["test"], "test-blind"),
"split": "test",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir["train+dev"], "task3", "dev"),
"split": "dev",
},
),
]
def _generate_examples(self, filepath, split):
""" Yields examples. """
def open_and_read(fp):
with open(fp, encoding="utf-8") as f:
return f.read().splitlines()
for id_, folder in enumerate(sorted(glob.glob(os.path.join(filepath, "*")))):
source_segments = open_and_read(os.path.join(folder, "source.segments"))
source_tokenized = open_and_read(os.path.join(folder, "source.tokenized"))
mt_segments = open_and_read(os.path.join(folder, "mt.segments"))
mt_tokenized = open_and_read(os.path.join(folder, "mt.tokenized"))
if split in ["train", "dev"] and not os.path.exists(os.path.join(folder, "token_index")):
token_index = []
else:
token_index = [
[idx.split(" ") for idx in line.split("\t")]
for line in open_and_read(os.path.join(folder, "token_index"))
if line != ""
]
total_words = open_and_read(os.path.join(folder, "total_words"))[0]
if split in ["train", "dev"]:
with open(os.path.join(folder, "annotations.tsv"), encoding="utf-8") as f:
reader = csv.DictReader(f, delimiter="\t")
annotations = [
{
"segment_id": row["segment_id"].split(" "),
"annotation_start": row["annotation_start"].split(" "),
"annotation_length": row["annotation_length"].split(" "),
"severity": row["severity"],
"severity_weight": row["severity_weight"],
"category": row["category"],
}
for row in reader
]
with open(os.path.join(folder, "token_annotations.tsv"), encoding="utf-8") as f:
reader = csv.DictReader(f, delimiter="\t")
token_annotations = [
{
"segment_id": row["segment_id"].split(" "),
"first_token": row["first_token"].replace("-", "-1").split(" "),
"last_token": row["last_token"].replace("-", "-1").split(" "),
"token_after_gap": row["token_after_gap"].replace("-", "-1").split(" "),
"severity": row["severity"],
"category": row["category"],
}
for row in reader
]
else:
annotations = []
token_annotations = []
yield id_, {
"document_id": os.path.basename(folder),
"source_segments": source_segments,
"source_tokenized": source_tokenized,
"mt_segments": mt_segments,
"mt_tokenized": mt_tokenized,
"annotations": annotations,
"token_annotations": token_annotations,
"token_index": token_index,
"total_words": total_words,
}
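# Hypothetical usage once this script is visible to the `datasets` library, e.g.:
#   ds = datasets.load_dataset("wmt20_mlqe_task3", "plain_text")
#   ds["train"][0]["document_id"]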
| # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WMT MLQE Shared task 3."""
import csv
import glob
import os
import datasets
_CITATION = """
Not available.
"""
_DESCRIPTION = """\
This shared task (part of WMT20) will build on its previous editions
to further examine automatic methods for estimating the quality
of neural machine translation output at run-time, without relying
on reference translations. As in previous years, we cover estimation
at various levels. Important elements introduced this year include: a new
task where sentences are annotated with Direct Assessment (DA)
scores instead of labels based on post-editing; a new multilingual
sentence-level dataset mainly from Wikipedia articles, where the
source articles can be retrieved for document-wide context; the
availability of NMT models to explore system-internal information for the task.
The goal of this task 3 is to predict document-level quality scores as well as fine-grained annotations.
"""
_HOMEPAGE = "http://www.statmt.org/wmt20/quality-estimation-task.html"
_LICENSE = "Unknown"
_URLs = {
"train+dev": "https://github.com/deep-spin/deep-spin.github.io/raw/master/docs/data/wmt2020_qe/qe-task3-enfr-traindev.tar.gz",
"test": "https://github.com/deep-spin/deep-spin.github.io/raw/master/docs/data/wmt2020_qe/qe-enfr-blindtest.tar.gz",
}
_ANNOTATION_CATEGORIES = [
"Addition",
"Agreement",
"Ambiguous Translation",
"Capitalization",
"Character Encoding",
"Company Terminology",
"Date/Time",
"Diacritics",
"Duplication",
"False Friend",
"Grammatical Register",
"Hyphenation",
"Inconsistency",
"Lexical Register",
"Lexical Selection",
"Named Entity",
"Number",
"Omitted Auxiliary Verb",
"Omitted Conjunction",
"Omitted Determiner",
"Omitted Preposition",
"Omitted Pronoun",
"Orthography",
"Other POS Omitted",
"Over-translation",
"Overly Literal",
"POS",
"Punctuation",
"Shouldn't Have Been Translated",
"Shouldn't have been translated",
"Spelling",
"Tense/Mood/Aspect",
"Under-translation",
"Unidiomatic",
"Unintelligible",
"Unit Conversion",
"Untranslated",
"Whitespace",
"Word Order",
"Wrong Auxiliary Verb",
"Wrong Conjunction",
"Wrong Determiner",
"Wrong Language Variety",
"Wrong Preposition",
"Wrong Pronoun",
]
class Wmt20MlqeTask3(datasets.GeneratorBasedBuilder):
"""WMT MLQE Shared task 3."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
version=datasets.Version("1.1.0"),
description="Plain text",
)
]
def _info(self):
features = datasets.Features(
{
"document_id": datasets.Value("string"),
"source_segments": datasets.Sequence(datasets.Value("string")),
"source_tokenized": datasets.Sequence(datasets.Value("string")),
"mt_segments": datasets.Sequence(datasets.Value("string")),
"mt_tokenized": datasets.Sequence(datasets.Value("string")),
"annotations": datasets.Sequence(
{
"segment_id": datasets.Sequence(datasets.Value("int32")),
"annotation_start": datasets.Sequence(datasets.Value("int32")),
"annotation_length": datasets.Sequence(datasets.Value("int32")),
"severity": datasets.ClassLabel(names=["minor", "major", "critical"]),
"severity_weight": datasets.Value("float32"),
"category": datasets.ClassLabel(names=_ANNOTATION_CATEGORIES),
}
),
"token_annotations": datasets.Sequence(
{
"segment_id": datasets.Sequence(datasets.Value("int32")),
"first_token": datasets.Sequence(datasets.Value("int32")),
"last_token": datasets.Sequence(datasets.Value("int32")),
"token_after_gap": datasets.Sequence(datasets.Value("int32")),
"severity": datasets.ClassLabel(names=["minor", "major", "critical"]),
"category": datasets.ClassLabel(names=_ANNOTATION_CATEGORIES),
}
),
"token_index": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("int32")))),
"total_words": datasets.Value("int32"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = dl_manager.download_and_extract(_URLs)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir["train+dev"], "task3", "train"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir["test"], "test-blind"),
"split": "test",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir["train+dev"], "task3", "dev"),
"split": "dev",
},
),
]
def _generate_examples(self, filepath, split):
""" Yields examples. """
def open_and_read(fp):
with open(fp, encoding="utf-8") as f:
return f.read().splitlines()
for id_, folder in enumerate(sorted(glob.glob(os.path.join(filepath, "*")))):
source_segments = open_and_read(os.path.join(folder, "source.segments"))
source_tokenized = open_and_read(os.path.join(folder, "source.tokenized"))
mt_segments = open_and_read(os.path.join(folder, "mt.segments"))
mt_tokenized = open_and_read(os.path.join(folder, "mt.tokenized"))
if split in ["train", "dev"] and not os.path.exists(os.path.join(folder, "token_index")):
token_index = []
else:
token_index = [
[idx.split(" ") for idx in line.split("\t")]
for line in open_and_read(os.path.join(folder, "token_index"))
if line != ""
]
total_words = open_and_read(os.path.join(folder, "total_words"))[0]
if split in ["train", "dev"]:
with open(os.path.join(folder, "annotations.tsv"), encoding="utf-8") as f:
reader = csv.DictReader(f, delimiter="\t")
annotations = [
{
"segment_id": row["segment_id"].split(" "),
"annotation_start": row["annotation_start"].split(" "),
"annotation_length": row["annotation_length"].split(" "),
"severity": row["severity"],
"severity_weight": row["severity_weight"],
"category": row["category"],
}
for row in reader
]
with open(os.path.join(folder, "token_annotations.tsv"), encoding="utf-8") as f:
reader = csv.DictReader(f, delimiter="\t")
token_annotations = [
{
"segment_id": row["segment_id"].split(" "),
"first_token": row["first_token"].replace("-", "-1").split(" "),
"last_token": row["last_token"].replace("-", "-1").split(" "),
"token_after_gap": row["token_after_gap"].replace("-", "-1").split(" "),
"severity": row["severity"],
"category": row["category"],
}
for row in reader
]
else:
annotations = []
token_annotations = []
yield id_, {
"document_id": os.path.basename(folder),
"source_segments": source_segments,
"source_tokenized": source_tokenized,
"mt_segments": mt_segments,
"mt_tokenized": mt_tokenized,
"annotations": annotations,
"token_annotations": token_annotations,
"token_index": token_index,
"total_words": total_words,
} | en | 0.867997 | # coding=utf-8 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. WMT MLQE Shared task 3. Not available. \ This shared task (part of WMT20) will build on its previous editions to further examine automatic methods for estimating the quality of neural machine translation output at run-time, without relying on reference translations. As in previous years, we cover estimation at various levels. Important elements introduced this year include: a new task where sentences are annotated with Direct Assessment (DA) scores instead of labels based on post-editing; a new multilingual sentence-level dataset mainly from Wikipedia articles, where the source articles can be retrieved for document-wide context; the availability of NMT models to explore system-internal information for the task. The goal of this task 3 is to predict document-level quality scores as well as fine-grained annotations. WMT MLQE Shared task 3. Returns SplitGenerators. Yields examples. | 1.344911 | 1 |
demo/put_example.py | untwisted/websnake | 22 | 6632430 | <gh_stars>10-100
from websnake import Put, ResponseHandle, core, die, FormData, TokenAuth
def on_done(con, response):
print('Headers:', response.headers)
print('Code:', response.code)
print('Version:', response.version)
print('Reason:', response.reason)
print('Data:', response.fd.read())
die()
if __name__ == '__main__':
url = 'http://httpbin.org/put'
data = {'somekey': 'somevalue'}
request = Put(url, payload=FormData(data))
request.add_map(ResponseHandle.DONE, on_done)
core.gear.mainloop()
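    # The event loop blocks here until on_done() calls die() after the PUT response arrives.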
| from websnake import Put, ResponseHandle, core, die, FormData, TokenAuth
def on_done(con, response):
print('Headers:', response.headers)
print('Code:', response.code)
print('Version:', response.version)
print('Reason:', response.reason)
print('Data:', response.fd.read())
die()
if __name__ == '__main__':
url = 'http://httpbin.org/put'
data = {'somekey': 'somevalue'}
request = Put(url, payload=FormData(data))
request.add_map(ResponseHandle.DONE, on_done)
core.gear.mainloop() | none | 1 | 2.302923 | 2 |
|
ClienteWSDL/Ambiente/Cliente/ListarBancos/admin.py | argotty2010/ClienteWSDL | 10 | 6632431 | <gh_stars>1-10
from django.contrib import admin
from .models import Comment
admin.site.register(Comment)
| from django.contrib import admin
from .models import Comment
admin.site.register(Comment) | none | 1 | 1.196863 | 1 |
|
torchplasma/conversion/xyz.py | hdkai/Plasma | 0 | 6632432 | #
# Plasma
# Copyright (c) 2021 <NAME>.
#
from torch import tensor, Tensor
def rgb_to_xyz (input: Tensor) -> Tensor:
"""
Convert linear RGB to D65 XYZ.
Parameters:
input (Tensor): Input image with shape (N,3,...) in range [-1., 1.].
Returns:
Tensor: XYZ image with shape (N,3,...) in range [0., 1.].
"""
input = (input + 1.) / 2.
RGB_TO_XYZ = tensor([
[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]
]).float().to(input.device)
xyz = RGB_TO_XYZ.matmul(input.flatten(start_dim=2)).view_as(input)
return xyz
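# The 3x3 matrix above is the standard sRGB -> XYZ (D65) transform applied over the
# flattened spatial dimensions; xyz_to_rgb below uses its inverse, so
# xyz_to_rgb(rgb_to_xyz(x)) approximately recovers x for inputs in [-1, 1].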
def xyz_to_rgb (input: Tensor) -> Tensor:
"""
Convert D65 XYZ to linear RGB.
Parameters:
input (Tensor): Input image with shape (N,3,...) in range [0., 1.].
Returns:
Tensor: RGB image with shape (N,3,...) in range [-1., 1.].
"""
XYZ_TO_RGB = tensor([
[3.240479, -1.53715, -0.498535],
[-0.969256, 1.875991, 0.041556],
[0.055648, -0.204043, 1.057311]
]).float().to(input.device)
rgb = XYZ_TO_RGB.matmul(input.flatten(start_dim=2)).view_as(input)
rgb = 2. * rgb - 1.
rgb = rgb.clamp(min=-1., max=1.)
return rgb | #
# Plasma
# Copyright (c) 2021 <NAME>.
#
from torch import tensor, Tensor
def rgb_to_xyz (input: Tensor) -> Tensor:
"""
Convert linear RGB to D65 XYZ.
Parameters:
input (Tensor): Input image with shape (N,3,...) in range [-1., 1.].
Returns:
Tensor: XYZ image with shape (N,3,...) in range [0., 1.].
"""
input = (input + 1.) / 2.
RGB_TO_XYZ = tensor([
[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]
]).float().to(input.device)
xyz = RGB_TO_XYZ.matmul(input.flatten(start_dim=2)).view_as(input)
return xyz
def xyz_to_rgb (input: Tensor) -> Tensor:
"""
Convert D65 XYZ to linear RGB.
Parameters:
input (Tensor): Input image with shape (N,3,...) in range [0., 1.].
Returns:
Tensor: RGB image with shape (N,3,...) in range [-1., 1.].
"""
XYZ_TO_RGB = tensor([
[3.240479, -1.53715, -0.498535],
[-0.969256, 1.875991, 0.041556],
[0.055648, -0.204043, 1.057311]
]).float().to(input.device)
rgb = XYZ_TO_RGB.matmul(input.flatten(start_dim=2)).view_as(input)
rgb = 2. * rgb - 1.
rgb = rgb.clamp(min=-1., max=1.)
return rgb | en | 0.600116 | # # Plasma # Copyright (c) 2021 <NAME>. # Convert linear RGB to D65 XYZ. Parameters: input (Tensor): Input image with shape (N,3,...) in range [-1., 1.]. Returns: Tensor: XYZ image with shape (N,3,...) in range [0., 1.]. Convert D65 XYZ to linear RGB. Parameters: input (Tensor): Input image with shape (N,3,...) in range [0., 1.]. Returns: Tensor: RGB image with shape (N,3,...) in range [-1., 1.]. | 2.728464 | 3 |
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/calculators/calc_ber.py | PascalGuenther/gecko_sdk | 82 | 6632433 | from pyradioconfig.parts.ocelot.calculators.calc_ber import CALC_Ber_Ocelot
class Calc_BER_Bobcat(CALC_Ber_Ocelot):
pass | from pyradioconfig.parts.ocelot.calculators.calc_ber import CALC_Ber_Ocelot
class Calc_BER_Bobcat(CALC_Ber_Ocelot):
pass | none | 1 | 1.174688 | 1 |
|
TAO/Firewall/EXPLOITS/ELCO/fosho/requests/packages/oreos/core.py | dendisuhubdy/grokmachine | 46 | 6632434 | # -*- coding: utf-8 -*-
"""
oreos.core
~~~~~~~~~~
The creamy white center.
"""
from .monkeys import SimpleCookie
def dict_from_string(s):
    '''Parse a cookie header string into a plain dict mapping cookie names to values.'''
cookies = dict()
c = SimpleCookie()
c.load(s)
for k,v in c.items():
cookies.update({k: v.value})
return cookies | # -*- coding: utf-8 -*-
"""
oreos.core
~~~~~~~~~~
The creamy white center.
"""
from .monkeys import SimpleCookie
def dict_from_string(s):
    '''Parse a cookie header string into a plain dict mapping cookie names to values.'''
cookies = dict()
c = SimpleCookie()
c.load(s)
for k,v in c.items():
cookies.update({k: v.value})
return cookies | en | 0.596644 | # -*- coding: utf-8 -*- oreos.core ~~~~~~~~~~ The creamy white center. | 2.330609 | 2 |
lessons/lesson_3/T01/Asym.py | wouter-ham/blockchain | 0 | 6632435 | from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
def generate_keys():
private = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
public = private.public_key()
return private, public
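# Note: with a 2048-bit key and OAEP/SHA-256 padding, encrypt() below accepts at most
# 256 - 2*32 - 2 = 190 bytes of plaintext; longer messages raise inside the try block
# and the function returns False.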
def encrypt(message, key):
try:
encrypted = key.encrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return encrypted
except:
return False
def decrypt(ciphertext, key):
try:
decrypted = key.decrypt(
ciphertext,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return decrypted
except:
return False
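# Example round trip (messages must be bytes):
#   private, public = generate_keys()
#   ciphertext = encrypt(b"hello", public)
#   assert decrypt(ciphertext, private) == b"hello"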
| from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
def generate_keys():
private = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
public = private.public_key()
return private, public
def encrypt(message, key):
try:
encrypted = key.encrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return encrypted
except:
return False
def decrypt(ciphertext, key):
try:
decrypted = key.decrypt(
ciphertext,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return decrypted
except:
return False
| none | 1 | 3.19233 | 3 |
|
django_auth_ldap3/conf.py | intelie/django_auth_ldap3 | 0 | 6632436 | <reponame>intelie/django_auth_ldap3
from django.conf import settings as django_settings
class LDAPSettings(object):
"""
Class that provides access to the LDAP settings specified in Django's
settings, with defaults set if they are missing.
Settings are prefixed in Django's settings, but are used here without prefix.
So `AUTH_LDAP_URI` becomes `settings.URI`.
"""
prefix = 'AUTH_LDAP_'
defaults = {
'ADMIN_GROUP': None,
'BASE_DN': 'dc=example,dc=com',
'BIND_AS_AUTHENTICATING_USER': True,
'BIND_DN': '',
'BIND_PASSWORD': '',
'BIND_TEMPLATE': 'uid={username},{base_dn}',
'USER_DN_FILTER_TEMPLATE': '(&(objectclass=person)(uid={username}))',
'GROUP_MAP': None,
'LOGIN_GROUP': '*',
'UID_ATTRIB': 'uid',
'USERNAME_PREFIX': None,
'USERNAME_SUFFIX': None,
'URI': 'ldap://localhost',
'TLS': False,
'TLS_CA_CERTS': None,
'TLS_VALIDATE': True,
'TLS_PRIVATE_KEY': None,
'TLS_LOCAL_CERT': None,
}
def __init__(self):
for name, default in self.defaults.items():
v = getattr(django_settings, self.prefix + name, default)
setattr(self, name, v)
@property
def settings_dict(self):
return {k: getattr(self, k) for k in self.defaults.keys()}
settings = LDAPSettings()
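# Example: with AUTH_LDAP_URI = "ldaps://ldap.example.com" in Django settings (a made-up value),
#   settings.URI       -> "ldaps://ldap.example.com"
#   settings.BASE_DN   -> "dc=example,dc=com"  (the default, since it was not overridden)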
| from django.conf import settings as django_settings
class LDAPSettings(object):
"""
Class that provides access to the LDAP settings specified in Django's
settings, with defaults set if they are missing.
Settings are prefixed in Django's settings, but are used here without prefix.
So `AUTH_LDAP_URI` becomes `settings.URI`.
"""
prefix = 'AUTH_LDAP_'
defaults = {
'ADMIN_GROUP': None,
'BASE_DN': 'dc=example,dc=com',
'BIND_AS_AUTHENTICATING_USER': True,
'BIND_DN': '',
'BIND_PASSWORD': '',
'BIND_TEMPLATE': 'uid={username},{base_dn}',
'USER_DN_FILTER_TEMPLATE': '(&(objectclass=person)(uid={username}))',
'GROUP_MAP': None,
'LOGIN_GROUP': '*',
'UID_ATTRIB': 'uid',
'USERNAME_PREFIX': None,
'USERNAME_SUFFIX': None,
'URI': 'ldap://localhost',
'TLS': False,
'TLS_CA_CERTS': None,
'TLS_VALIDATE': True,
'TLS_PRIVATE_KEY': None,
'TLS_LOCAL_CERT': None,
}
def __init__(self):
for name, default in self.defaults.items():
v = getattr(django_settings, self.prefix + name, default)
setattr(self, name, v)
@property
def settings_dict(self):
return {k: getattr(self, k) for k in self.defaults.keys()}
settings = LDAPSettings() | en | 0.867161 | Class that provides access to the LDAP settings specified in Django's settings, with defaults set if they are missing. Settings are prefixed in Django's settings, but are used here without prefix. So `AUTH_LDAP_URI` becomes `settings.URI`. | 2.335215 | 2 |
cohesity_management_sdk/models/update_sources_for_principal_parameters.py | sachinthakare-cohesity/management-sdk-python | 0 | 6632437 | <filename>cohesity_management_sdk/models/update_sources_for_principal_parameters.py
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.source_for_principal_parameters
class UpdateSourcesForPrincipalParameters(object):
"""Implementation of the 'Update Sources for Principal Parameters.' model.
Set Access Permissions for Principals.
Specifies a list of principals to set access permissions for.
For each principal, set the Protection Sources and View names
that the specified principal has permissions to access.
Attributes:
sources_for_principals (list of SourceForPrincipalParameters): Array
of Principals, Sources and Views. Specifies a list of principals.
For each principal, specify the Protection Sources and Views that
the principal has permissions to access.
"""
# Create a mapping from Model property names to API property names
_names = {
"sources_for_principals":'sourcesForPrincipals'
}
def __init__(self,
sources_for_principals=None):
"""Constructor for the UpdateSourcesForPrincipalParameters class"""
# Initialize members of the class
self.sources_for_principals = sources_for_principals
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
sources_for_principals = None
if dictionary.get('sourcesForPrincipals') != None:
sources_for_principals = list()
for structure in dictionary.get('sourcesForPrincipals'):
sources_for_principals.append(cohesity_management_sdk.models.source_for_principal_parameters.SourceForPrincipalParameters.from_dictionary(structure))
# Return an object of this model
return cls(sources_for_principals)
| <filename>cohesity_management_sdk/models/update_sources_for_principal_parameters.py
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.source_for_principal_parameters
class UpdateSourcesForPrincipalParameters(object):
"""Implementation of the 'Update Sources for Principal Parameters.' model.
Set Access Permissions for Principals.
Specifies a list of principals to set access permissions for.
For each principal, set the Protection Sources and View names
that the specified principal has permissions to access.
Attributes:
sources_for_principals (list of SourceForPrincipalParameters): Array
of Principals, Sources and Views. Specifies a list of principals.
For each principal, specify the Protection Sources and Views that
the principal has permissions to access.
"""
# Create a mapping from Model property names to API property names
_names = {
"sources_for_principals":'sourcesForPrincipals'
}
def __init__(self,
sources_for_principals=None):
"""Constructor for the UpdateSourcesForPrincipalParameters class"""
# Initialize members of the class
self.sources_for_principals = sources_for_principals
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
sources_for_principals = None
if dictionary.get('sourcesForPrincipals') != None:
sources_for_principals = list()
for structure in dictionary.get('sourcesForPrincipals'):
sources_for_principals.append(cohesity_management_sdk.models.source_for_principal_parameters.SourceForPrincipalParameters.from_dictionary(structure))
# Return an object of this model
return cls(sources_for_principals)
| en | 0.775785 | # -*- coding: utf-8 -*- # Copyright 2019 Cohesity Inc. Implementation of the 'Update Sources for Principal Parameters.' model. Set Access Permissions for Principals. Specifies a list of principals to set access permissions for. For each principal, set the Protection Sources and View names that the specified principal has permissions to access. Attributes: sources_for_principals (list of SourceForPrincipalParameters): Array of Principals, Sources and Views. Specifies a list of principals. For each principal, specify the Protection Sources and Views that the principal has permissions to access. # Create a mapping from Model property names to API property names Constructor for the UpdateSourcesForPrincipalParameters class # Initialize members of the class Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class. # Extract variables from the dictionary # Return an object of this model | 2.313383 | 2 |
KthFromLast.py | ropso/AlgorithmDesignAndAnalysis-Python | 0 | 6632438 | import linklist as lk1
class linklist(lk1):
def __init__(self):
super().__init__()
def KthFromLast(self,indexk,node=12):
if node==12:
node=self._head
if node==None:
return [0,node]
else:
tmp=KthFromLast(indexk,node._next)
if tmp[0]+1==indexk:
return [indexk,node]
else:
return [tmp[0]+1,node]
if __name__=="__main__":
a=linklist()
a.insert(23)
a.insert(34)
a.insert(23)
a.insert(45)
a.insert(565)
a.insert(43)
print(a.KthFromLast(3)) | import linklist as lk1
class linklist(lk1):
def __init__(self):
super().__init__()
def KthFromLast(self,indexk,node=12):
if node==12:
node=self._head
if node==None:
return [0,node]
else:
tmp=KthFromLast(indexk,node._next)
if tmp[0]+1==indexk:
return [indexk,node]
else:
return [tmp[0]+1,node]
if __name__=="__main__":
a=linklist()
a.insert(23)
a.insert(34)
a.insert(23)
a.insert(45)
a.insert(565)
a.insert(43)
print(a.KthFromLast(3)) | none | 1 | 3.690998 | 4 |
|
test/datasets/VQuAnDa/results/old/utils/utils.py | librairy/explainable-qa | 1 | 6632439 | <reponame>librairy/explainable-qa
import json
import re
import csv
import pandas
def nthOfChar(string, char, n):
'''
Funcion auxiliar que extrae un substring hasta el n-esimo caracter
'''
regex=r'^((?:[^%c]*%c){%d}[^%c]*)%c(.*)' % (char,char,n-1,char,char)
regexGroups = re.match(regex, string)
if regexGroups is not None:
return regexGroups.group(1)
else:
return ""
def jsonToDict(route) -> dict:
'''
Funcion auxiliar que dada la ruta de un json, lo abre y lo convierte a diccionario
'''
with open(route, encoding="utf-8") as f:
return json.load(f)
def questionNotInCsv(JSONroute, csvRoute):
'''
Funcion auxiliar que dado un csv y un json con preguntas y respuestas, ve que preguntas no estan en el csv.
Es utilizado para ver que preguntas no estan siendo ver respondidas por el sistema.
'''
VQuandaData = jsonToDict(JSONroute)
with open('exceptionQuestions.csv','w', newline='', encoding="utf-8") as f:
firstColumnValues = ((pandas.read_csv(csvRoute,sep=';')).iloc[:, 0]).unique()
csvwriter = csv.writer(f,delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
csvwriter.writerow(["Question","Answer","Is Answered"])
for i in VQuandaData:
if i['question'] not in firstColumnValues:
isAnswered = "YES"
modelAnswerLongGroups = re.search(r"\[([^\)]+)\]", i['verbalized_answer'])
if(modelAnswerLongGroups is not None):
modelAnswer = modelAnswerLongGroups.group(1)
else:
modelAnswer = "None"
isAnswered = "NO"
csvwriter.writerow([i['question'], modelAnswer, isAnswered])
f.close()
questionNotInCsv("test.json","results/VQuanda.csv") | import json
import re
import csv
import pandas
def nthOfChar(string, char, n):
'''
Funcion auxiliar que extrae un substring hasta el n-esimo caracter
'''
regex=r'^((?:[^%c]*%c){%d}[^%c]*)%c(.*)' % (char,char,n-1,char,char)
regexGroups = re.match(regex, string)
if regexGroups is not None:
return regexGroups.group(1)
else:
return ""
def jsonToDict(route) -> dict:
'''
Funcion auxiliar que dada la ruta de un json, lo abre y lo convierte a diccionario
'''
with open(route, encoding="utf-8") as f:
return json.load(f)
def questionNotInCsv(JSONroute, csvRoute):
'''
Funcion auxiliar que dado un csv y un json con preguntas y respuestas, ve que preguntas no estan en el csv.
Es utilizado para ver que preguntas no estan siendo ver respondidas por el sistema.
'''
VQuandaData = jsonToDict(JSONroute)
with open('exceptionQuestions.csv','w', newline='', encoding="utf-8") as f:
firstColumnValues = ((pandas.read_csv(csvRoute,sep=';')).iloc[:, 0]).unique()
csvwriter = csv.writer(f,delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
csvwriter.writerow(["Question","Answer","Is Answered"])
for i in VQuandaData:
if i['question'] not in firstColumnValues:
isAnswered = "YES"
modelAnswerLongGroups = re.search(r"\[([^\)]+)\]", i['verbalized_answer'])
if(modelAnswerLongGroups is not None):
modelAnswer = modelAnswerLongGroups.group(1)
else:
modelAnswer = "None"
isAnswered = "NO"
csvwriter.writerow([i['question'], modelAnswer, isAnswered])
f.close()
questionNotInCsv("test.json","results/VQuanda.csv") | es | 0.948507 | Funcion auxiliar que extrae un substring hasta el n-esimo caracter Funcion auxiliar que dada la ruta de un json, lo abre y lo convierte a diccionario Funcion auxiliar que dado un csv y un json con preguntas y respuestas, ve que preguntas no estan en el csv. Es utilizado para ver que preguntas no estan siendo ver respondidas por el sistema. | 3.235064 | 3 |
tests/test_xasx_calendar.py | rajeshyogeshwar/exchange_calendars | 0 | 6632440 | <gh_stars>0
import pytest
import pandas as pd
from exchange_calendars.exchange_calendar_xasx import XASXExchangeCalendar
from .test_exchange_calendar import ExchangeCalendarTestBaseNew
class TestXASXCalendar(ExchangeCalendarTestBaseNew):
@pytest.fixture(scope="class")
def calendar_cls(self):
yield XASXExchangeCalendar
@pytest.fixture
def max_session_hours(self):
yield 6
@pytest.fixture
def regular_holidays_sample(self):
yield [
# 2018
"2018-01-01", # New Year's Day
"2018-01-26", # Australia Day
"2018-03-30", # Good Friday
"2018-04-02", # Easter Monday
"2018-04-25", # Anzac Day
"2018-06-11", # Queen's Birthday
"2018-12-25", # Christmas
"2018-12-26", # Boxing Day
#
# Holidays made up when fall on weekend.
# Anzac Day is observed on the following Monday only when falling
# on a Sunday. In years where Anzac Day falls on a Saturday, there
# is no make-up.
"2017-01-02", # New Year's Day on a Sunday, observed on Monday.
"2014-01-27", # Australia Day on a Sunday, observed on Monday (from 2010).
"2010-04-26", # Anzac Day on a Sunday, observed on Monday.
# Christmas/Boxing Day are special cases, whereby if Christmas is a
# Saturday and Boxing Day is a Sunday, the next Monday and Tuesday will
# be holidays. If Christmas is a Sunday and Boxing Day is a Monday then
# Monday and Tuesday will still both be holidays.
# Christmas on a Sunday, Boxing Day on Monday.
"2016-12-26",
"2016-12-27",
# Christmas on a Saturday, Boxing Day on Sunday.
"2010-12-27",
"2010-12-28",
]
@pytest.fixture
def non_holidays_sample(self):
# Anzac Day on a Saturday, does not have a make-up (prior to 2010).
yield ["2015-04-27", "2004-04-26"]
@pytest.fixture
def early_closes_sample(self):
yield [
# In 2018, the last trading days before Christmas and New Year's
# are on Mondays, so they should be early closes.
"2018-12-24",
"2018-12-31",
# In 2017, Christmas and New Year's fell on Mondays, so the last
# trading days before them were Fridays, which should be early closes.
"2017-12-22",
"2017-12-29",
# In 2016, Christmas and New Year's fell on Sundays, so the last
# trading days before them were Fridays, which should be early closes.
"2016-12-23",
"2016-12-30",
]
@pytest.fixture
def early_closes_sample_time(self):
yield pd.Timedelta(hours=14, minutes=10)
@pytest.fixture
def non_early_closes_sample(self):
# In 2009 the early close rules should not be in effect yet.
yield ["2009-12-24", "2009-12-31"]
@pytest.fixture
def non_early_closes_sample_time(self):
yield pd.Timedelta(16, "H")
| import pytest
import pandas as pd
from exchange_calendars.exchange_calendar_xasx import XASXExchangeCalendar
from .test_exchange_calendar import ExchangeCalendarTestBaseNew
class TestXASXCalendar(ExchangeCalendarTestBaseNew):
@pytest.fixture(scope="class")
def calendar_cls(self):
yield XASXExchangeCalendar
@pytest.fixture
def max_session_hours(self):
yield 6
@pytest.fixture
def regular_holidays_sample(self):
yield [
# 2018
"2018-01-01", # New Year's Day
"2018-01-26", # Australia Day
"2018-03-30", # Good Friday
"2018-04-02", # Easter Monday
"2018-04-25", # Anzac Day
"2018-06-11", # Queen's Birthday
"2018-12-25", # Christmas
"2018-12-26", # Boxing Day
#
# Holidays made up when fall on weekend.
# Anzac Day is observed on the following Monday only when falling
# on a Sunday. In years where Anzac Day falls on a Saturday, there
# is no make-up.
"2017-01-02", # New Year's Day on a Sunday, observed on Monday.
"2014-01-27", # Australia Day on a Sunday, observed on Monday (from 2010).
"2010-04-26", # Anzac Day on a Sunday, observed on Monday.
# Christmas/Boxing Day are special cases, whereby if Christmas is a
# Saturday and Boxing Day is a Sunday, the next Monday and Tuesday will
# be holidays. If Christmas is a Sunday and Boxing Day is a Monday then
# Monday and Tuesday will still both be holidays.
# Christmas on a Sunday, Boxing Day on Monday.
"2016-12-26",
"2016-12-27",
# Christmas on a Saturday, Boxing Day on Sunday.
"2010-12-27",
"2010-12-28",
]
@pytest.fixture
def non_holidays_sample(self):
# Anzac Day on a Saturday, does not have a make-up (prior to 2010).
yield ["2015-04-27", "2004-04-26"]
@pytest.fixture
def early_closes_sample(self):
yield [
# In 2018, the last trading days before Christmas and New Year's
# are on Mondays, so they should be early closes.
"2018-12-24",
"2018-12-31",
# In 2017, Christmas and New Year's fell on Mondays, so the last
# trading days before them were Fridays, which should be early closes.
"2017-12-22",
"2017-12-29",
# In 2016, Christmas and New Year's fell on Sundays, so the last
# trading days before them were Fridays, which should be early closes.
"2016-12-23",
"2016-12-30",
]
@pytest.fixture
def early_closes_sample_time(self):
yield pd.Timedelta(hours=14, minutes=10)
@pytest.fixture
def non_early_closes_sample(self):
# In 2009 the early close rules should not be in effect yet.
yield ["2009-12-24", "2009-12-31"]
@pytest.fixture
def non_early_closes_sample_time(self):
yield pd.Timedelta(16, "H") | en | 0.944329 | # 2018 # New Year's Day # Australia Day # Good Friday # Easter Monday # Anzac Day # Queen's Birthday # Christmas # Boxing Day # # Holidays made up when fall on weekend. # Anzac Day is observed on the following Monday only when falling # on a Sunday. In years where Anzac Day falls on a Saturday, there # is no make-up. # New Year's Day on a Sunday, observed on Monday. # Australia Day on a Sunday, observed on Monday (from 2010). # Anzac Day on a Sunday, observed on Monday. # Christmas/Boxing Day are special cases, whereby if Christmas is a # Saturday and Boxing Day is a Sunday, the next Monday and Tuesday will # be holidays. If Christmas is a Sunday and Boxing Day is a Monday then # Monday and Tuesday will still both be holidays. # Christmas on a Sunday, Boxing Day on Monday. # Christmas on a Saturday, Boxing Day on Sunday. # Anzac Day on a Saturday, does not have a make-up (prior to 2010). # In 2018, the last trading days before Christmas and New Year's # are on Mondays, so they should be early closes. # In 2017, Christmas and New Year's fell on Mondays, so the last # trading days before them were Fridays, which should be early closes. # In 2016, Christmas and New Year's fell on Sundays, so the last # trading days before them were Fridays, which should be early closes. # In 2009 the early close rules should not be in effect yet. | 2.311334 | 2 |
2021/day05/main.py | Kwarf/adventofcode | 0 | 6632441 | <filename>2021/day05/main.py
from collections import defaultdict
part1 = defaultdict(int)
part2 = defaultdict(int)
def step(d): return 1 if d > 0 else -1 if d < 0 else 0
for line in open("input.txt"):
[x1, y1, x2, y2] = map(int, line.replace(" -> ", ",").split(","))
dx = x2 - x1
dy = y2 - y1
for i in range(max(abs(dx), abs(dy)) + 1):
x = x1 + step(dx) * i
y = y1 + step(dy) * i
if dx == 0 or dy == 0:
part1[(x, y)] += 1
part2[(x, y)] += 1
print("The answer to the first part is: " +
str(len([x for x in part1 if part1[x] > 1])))
print("The answer to the second part is: " +
str(len([x for x in part2 if part2[x] > 1])))
| <filename>2021/day05/main.py
from collections import defaultdict
part1 = defaultdict(int)
part2 = defaultdict(int)
def step(d): return 1 if d > 0 else -1 if d < 0 else 0
for line in open("input.txt"):
[x1, y1, x2, y2] = map(int, line.replace(" -> ", ",").split(","))
dx = x2 - x1
dy = y2 - y1
for i in range(max(abs(dx), abs(dy)) + 1):
x = x1 + step(dx) * i
y = y1 + step(dy) * i
if dx == 0 or dy == 0:
part1[(x, y)] += 1
part2[(x, y)] += 1
print("The answer to the first part is: " +
str(len([x for x in part1 if part1[x] > 1])))
print("The answer to the second part is: " +
str(len([x for x in part2 if part2[x] > 1])))
| none | 1 | 3.264095 | 3 |
|
training_api/src/configuration_module/network_setter.py | michaelnguyen11/BMW-Classification-Training-GUI | 69 | 6632442 | <gh_stars>10-100
import json
import mxnet as mx
from DTO.Configuration import Configuration
from gluoncv import model_zoo
from mxnet import gluon
import os
import sys
from DTO.Checkpoint import Checkpoint
from checkpoint_module.checkpoint_facade import CheckpointFacade
"""
This class is responsible for specifying the network to use
get_network is mandatory. Feel free to add to the class any methods you think are necessary.
weights folder: folder storing all the pretrained weights from the model zoo
checkpoints folder: previous trained jobs weights
models folder: networks from scratch (specific to gluoncv)
"""
class NetworkSetter():
"""
Method that loads local weights for a model, used for transfer leaning or resuming a checkpoint
Input:
-------
- Model name : string
- Checkpoint object.
- number of classes for the model : int
- transfer_learning : a bool that is True when doing transfer learning
Output:
-------
-Network
"""
def get_local_weights(self,model_name,Checkpoint,nmbrofclasses,transfer_learning):
if transfer_learning:
print("TRANSFER LEARNING ... \n")
modelname=open(Checkpoint.network_name_file,'r').read()
if modelname != model_name:
sys.exit(modelname+" is different from "+model_name+",model from checkpoint and chosen model should be the same")
classesname=open(Checkpoint.classes_file,'r').read()
nmbrofclasses=len(classesname.split(","))
net = model_zoo.get_model(modelname, pretrained=False,classes=nmbrofclasses)
net.load_parameters(Checkpoint.params_file)
return net
else:
net = model_zoo.get_model(model_name, pretrained=False,classes=nmbrofclasses)
net.load_parameters(Checkpoint.params_file)
return net
"""
Method that initializes a model without it's weights, in order to train from scratch.
Input:
------
-model_name : string
-number of classes for the model: int
Output:
-------
-Network
"""
def get_scratch_model(self, model_name,nmbrofclasses):
all_models = model_zoo.get_model_list()
if model_name in all_models:
net = model_zoo.get_model(model_name, pretrained=False,classes=nmbrofclasses)
else:
print("Model not found, Please refer to the defined models")
return None
return net
"""
Method that loads a model with it's online pretrained weights
Input:
------
-model_name : string
-path :string that chooses where to download the online weights (default is inside the docker image)
Output:
-------
-Network
"""
def get_model_zoo_weights(self, path, model_name):
all_models = model_zoo.get_model_list()
if model_name not in all_models:
print("the model name is not found, please choose a model name from the list below ")
print(all_models)
return None
else:
net = model_zoo.get_model(str(model_name), pretrained=True)
return net
"""
Method that decides wether the model is being built from scratch, with local weights, or with online
weights.
Input:
------
-Configuration object
-number of classes : int
Output:
-------
-Network
"""
def get_network(self, config: Configuration,nmbrofclasses):
checkpoint_path = '/checkpoints/'
pre_trained_path = '../weights/'
weight_type = config.weights_type
model_name = config.weights_name
processor = config.processor
num_gpu = config.gpus_count
num_workers = config.num_workers
checkpoint_model_name = config.model_name
transfer_learning=False
if weight_type == 'pretrained_offline':
transfer_learning=True
if (weight_type == 'checkpoint') or (weight_type == 'pretrained_offline'):
Checkpoint=CheckpointFacade().create_checkpoint(model_name, checkpoint_model_name)
net = self.get_local_weights(model_name,Checkpoint,nmbrofclasses,transfer_learning)
elif weight_type == 'from_scratch':
net = self.get_scratch_model(model_name,nmbrofclasses)
elif weight_type == 'pre_trained':
net = self.get_model_zoo_weights(pre_trained_path, model_name)
return net
| import json
import mxnet as mx
from DTO.Configuration import Configuration
from gluoncv import model_zoo
from mxnet import gluon
import os
import sys
from DTO.Checkpoint import Checkpoint
from checkpoint_module.checkpoint_facade import CheckpointFacade
"""
This class is responsible for specifying the network to use
get_network is mandatory. Feel free to add to the class any methods you think are necessary.
weights folder: folder storing all the pretrained weights from the model zoo
checkpoints folder: previous trained jobs weights
models folder: networks from scratch (specific to gluoncv)
"""
class NetworkSetter():
"""
Method that loads local weights for a model, used for transfer leaning or resuming a checkpoint
Input:
-------
- Model name : string
- Checkpoint object.
- number of classes for the model : int
- transfer_learning : a bool that is True when doing transfer learning
Output:
-------
-Network
"""
def get_local_weights(self,model_name,Checkpoint,nmbrofclasses,transfer_learning):
if transfer_learning:
print("TRANSFER LEARNING ... \n")
modelname=open(Checkpoint.network_name_file,'r').read()
if modelname != model_name:
sys.exit(modelname+" is different from "+model_name+",model from checkpoint and chosen model should be the same")
classesname=open(Checkpoint.classes_file,'r').read()
nmbrofclasses=len(classesname.split(","))
net = model_zoo.get_model(modelname, pretrained=False,classes=nmbrofclasses)
net.load_parameters(Checkpoint.params_file)
return net
else:
net = model_zoo.get_model(model_name, pretrained=False,classes=nmbrofclasses)
net.load_parameters(Checkpoint.params_file)
return net
"""
Method that initializes a model without it's weights, in order to train from scratch.
Input:
------
-model_name : string
-number of classes for the model: int
Output:
-------
-Network
"""
def get_scratch_model(self, model_name,nmbrofclasses):
all_models = model_zoo.get_model_list()
if model_name in all_models:
net = model_zoo.get_model(model_name, pretrained=False,classes=nmbrofclasses)
else:
print("Model not found, Please refer to the defined models")
return None
return net
"""
Method that loads a model with it's online pretrained weights
Input:
------
-model_name : string
-path :string that chooses where to download the online weights (default is inside the docker image)
Output:
-------
-Network
"""
def get_model_zoo_weights(self, path, model_name):
all_models = model_zoo.get_model_list()
if model_name not in all_models:
print("the model name is not found, please choose a model name from the list below ")
print(all_models)
return None
else:
net = model_zoo.get_model(str(model_name), pretrained=True)
return net
"""
Method that decides wether the model is being built from scratch, with local weights, or with online
weights.
Input:
------
-Configuration object
-number of classes : int
Output:
-------
-Network
"""
def get_network(self, config: Configuration,nmbrofclasses):
checkpoint_path = '/checkpoints/'
pre_trained_path = '../weights/'
weight_type = config.weights_type
model_name = config.weights_name
processor = config.processor
num_gpu = config.gpus_count
num_workers = config.num_workers
checkpoint_model_name = config.model_name
transfer_learning=False
if weight_type == 'pretrained_offline':
transfer_learning=True
if (weight_type == 'checkpoint') or (weight_type == 'pretrained_offline'):
Checkpoint=CheckpointFacade().create_checkpoint(model_name, checkpoint_model_name)
net = self.get_local_weights(model_name,Checkpoint,nmbrofclasses,transfer_learning)
elif weight_type == 'from_scratch':
net = self.get_scratch_model(model_name,nmbrofclasses)
elif weight_type == 'pre_trained':
net = self.get_model_zoo_weights(pre_trained_path, model_name)
return net | en | 0.840808 | This class is responsible for specifying the network to use get_network is mandatory. Feel free to add to the class any methods you think are necessary. weights folder: folder storing all the pretrained weights from the model zoo checkpoints folder: previous trained jobs weights models folder: networks from scratch (specific to gluoncv) Method that loads local weights for a model, used for transfer leaning or resuming a checkpoint Input: ------- - Model name : string - Checkpoint object. - number of classes for the model : int - transfer_learning : a bool that is True when doing transfer learning Output: ------- -Network Method that initializes a model without it's weights, in order to train from scratch. Input: ------ -model_name : string -number of classes for the model: int Output: ------- -Network Method that loads a model with it's online pretrained weights Input: ------ -model_name : string -path :string that chooses where to download the online weights (default is inside the docker image) Output: ------- -Network Method that decides wether the model is being built from scratch, with local weights, or with online weights. Input: ------ -Configuration object -number of classes : int Output: ------- -Network | 2.634589 | 3 |
test.py | nobodyzxc/colorization-pytorch | 0 | 6632443 | <gh_stars>0
import os
from options.train_options import TrainOptions
from models import create_model
from util.visualizer import save_images
from util import html
import string
import torch
import torchvision
import torchvision.transforms as transforms
from util import util
from IPython import embed
import numpy as np
if __name__ == '__main__':
sample_ps = [1., .125, .03125]
to_visualize = ['gray', 'hint', 'hint_ab', 'fake_entr', 'real', 'fake_reg', 'real_ab', 'fake_ab_reg', ]
S = len(sample_ps)
opt = TrainOptions().parse()
opt.load_model = True
opt.nThreads = 1 # test code only supports nThreads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.display_id = -1 # no visdom display
opt.phase = 'val'
opt.dataroot = './dataset/ilsvrc2012/%s/' % opt.phase
opt.serial_batches = True
opt.aspect_ratio = 1.
dataset = torchvision.datasets.ImageFolder(opt.dataroot,
transform=transforms.Compose([
transforms.Resize((opt.loadSize, opt.loadSize)),
transforms.ToTensor()]))
dataset_loader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=not opt.serial_batches)
model = create_model(opt)
model.setup(opt)
model.eval()
# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
# statistics
psnrs = np.zeros((opt.how_many, S))
entrs = np.zeros((opt.how_many, S))
for i, data_raw in enumerate(dataset_loader):
data_raw[0] = data_raw[0].cuda()
data_raw[0] = util.crop_mult(data_raw[0], mult=8)
# with no points
for (pp, sample_p) in enumerate(sample_ps):
#img_path = [string.replace('%08d_%.3f' % (i, sample_p), '.', 'p')]
img_path = [('%08d_%.3f' % (i, sample_p)).replace('.', 'p')]
data = util.get_colorization_data(data_raw, opt, ab_thresh=0., p=sample_p)
model.set_input(data)
model.test(True) # True means that losses will be computed
visuals = util.get_subset_dict(model.get_current_visuals(), to_visualize)
psnrs[i, pp] = util.calculate_psnr_np(util.tensor2im(visuals['real']), util.tensor2im(visuals['fake_reg']))
entrs[i, pp] = model.get_current_losses()['G_entr']
save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
if i % 5 == 0:
print('processing (%04d)-th image... %s' % (i, img_path))
if i == opt.how_many - 1:
break
webpage.save()
# Compute and print some summary statistics
psnrs_mean = np.mean(psnrs, axis=0)
psnrs_std = np.std(psnrs, axis=0) / np.sqrt(opt.how_many)
entrs_mean = np.mean(entrs, axis=0)
entrs_std = np.std(entrs, axis=0) / np.sqrt(opt.how_many)
for (pp, sample_p) in enumerate(sample_ps):
print('p=%.3f: %.2f+/-%.2f' % (sample_p, psnrs_mean[pp], psnrs_std[pp]))
| import os
from options.train_options import TrainOptions
from models import create_model
from util.visualizer import save_images
from util import html
import string
import torch
import torchvision
import torchvision.transforms as transforms
from util import util
from IPython import embed
import numpy as np
if __name__ == '__main__':
sample_ps = [1., .125, .03125]
to_visualize = ['gray', 'hint', 'hint_ab', 'fake_entr', 'real', 'fake_reg', 'real_ab', 'fake_ab_reg', ]
S = len(sample_ps)
opt = TrainOptions().parse()
opt.load_model = True
opt.nThreads = 1 # test code only supports nThreads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.display_id = -1 # no visdom display
opt.phase = 'val'
opt.dataroot = './dataset/ilsvrc2012/%s/' % opt.phase
opt.serial_batches = True
opt.aspect_ratio = 1.
dataset = torchvision.datasets.ImageFolder(opt.dataroot,
transform=transforms.Compose([
transforms.Resize((opt.loadSize, opt.loadSize)),
transforms.ToTensor()]))
dataset_loader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=not opt.serial_batches)
model = create_model(opt)
model.setup(opt)
model.eval()
# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
# statistics
psnrs = np.zeros((opt.how_many, S))
entrs = np.zeros((opt.how_many, S))
for i, data_raw in enumerate(dataset_loader):
data_raw[0] = data_raw[0].cuda()
data_raw[0] = util.crop_mult(data_raw[0], mult=8)
# with no points
for (pp, sample_p) in enumerate(sample_ps):
#img_path = [string.replace('%08d_%.3f' % (i, sample_p), '.', 'p')]
img_path = [('%08d_%.3f' % (i, sample_p)).replace('.', 'p')]
data = util.get_colorization_data(data_raw, opt, ab_thresh=0., p=sample_p)
model.set_input(data)
model.test(True) # True means that losses will be computed
visuals = util.get_subset_dict(model.get_current_visuals(), to_visualize)
psnrs[i, pp] = util.calculate_psnr_np(util.tensor2im(visuals['real']), util.tensor2im(visuals['fake_reg']))
entrs[i, pp] = model.get_current_losses()['G_entr']
save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
if i % 5 == 0:
print('processing (%04d)-th image... %s' % (i, img_path))
if i == opt.how_many - 1:
break
webpage.save()
# Compute and print some summary statistics
psnrs_mean = np.mean(psnrs, axis=0)
psnrs_std = np.std(psnrs, axis=0) / np.sqrt(opt.how_many)
entrs_mean = np.mean(entrs, axis=0)
entrs_std = np.std(entrs, axis=0) / np.sqrt(opt.how_many)
for (pp, sample_p) in enumerate(sample_ps):
print('p=%.3f: %.2f+/-%.2f' % (sample_p, psnrs_mean[pp], psnrs_std[pp])) | en | 0.799134 | # test code only supports nThreads = 1 # test code only supports batch_size = 1 # no visdom display # create website # statistics # with no points #img_path = [string.replace('%08d_%.3f' % (i, sample_p), '.', 'p')] # True means that losses will be computed # Compute and print some summary statistics | 2.107786 | 2 |
model/procedure.py | beda-software/fhir-py-experements | 0 | 6632444 | <filename>model/procedure.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/Procedure) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .age import Age
from .annotation import Annotation
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .identifier import Identifier
from .period import Period
from .range import Range
@dataclass
class ProcedurePerformer(BackboneElement):
""" The people who performed the procedure.
Limited to "real" people rather than equipment.
"""
resource_type: ClassVar[str] = "ProcedurePerformer"
function: Optional[CodeableConcept] = None
actor: FHIRReference = None
onBehalfOf: Optional[FHIRReference] = None
@dataclass
class ProcedureFocalDevice(BackboneElement):
""" Manipulated, implanted, or removed device.
A device that is implanted, removed or otherwise manipulated (calibration,
battery replacement, fitting a prosthesis, attaching a wound-vac, etc.) as
a focal portion of the Procedure.
"""
resource_type: ClassVar[str] = "ProcedureFocalDevice"
action: Optional[CodeableConcept] = None
manipulated: FHIRReference = None
@dataclass
class Procedure(DomainResource):
""" An action that is being or was performed on a patient.
An action that is or was performed on or for a patient. This can be a
physical intervention like an operation, or less invasive like long term
services, counseling, or hypnotherapy.
"""
resource_type: ClassVar[str] = "Procedure"
identifier: Optional[List[Identifier]] = None
instantiatesCanonical: Optional[List[str]] = None
instantiatesUri: Optional[List[str]] = None
basedOn: Optional[List[FHIRReference]] = None
partOf: Optional[List[FHIRReference]] = None
status: str = None
statusReason: Optional[CodeableConcept] = None
category: Optional[CodeableConcept] = None
code: Optional[CodeableConcept] = None
subject: FHIRReference = None
encounter: Optional[FHIRReference] = None
performedDateTime: Optional[FHIRDate] = field(default=None, metadata=dict(one_of_many='performed',))
performedPeriod: Optional[Period] = field(default=None, metadata=dict(one_of_many='performed',))
performedString: Optional[str] = field(default=None, metadata=dict(one_of_many='performed',))
performedAge: Optional[Age] = field(default=None, metadata=dict(one_of_many='performed',))
performedRange: Optional[Range] = field(default=None, metadata=dict(one_of_many='performed',))
recorder: Optional[FHIRReference] = None
asserter: Optional[FHIRReference] = None
performer: Optional[List[ProcedurePerformer]] = None
location: Optional[FHIRReference] = None
reasonCode: Optional[List[CodeableConcept]] = None
reasonReference: Optional[List[FHIRReference]] = None
bodySite: Optional[List[CodeableConcept]] = None
outcome: Optional[CodeableConcept] = None
report: Optional[List[FHIRReference]] = None
complication: Optional[List[CodeableConcept]] = None
complicationDetail: Optional[List[FHIRReference]] = None
followUp: Optional[List[CodeableConcept]] = None
note: Optional[List[Annotation]] = None
focalDevice: Optional[List[ProcedureFocalDevice]] = None
usedReference: Optional[List[FHIRReference]] = None
usedCode: Optional[List[CodeableConcept]] = None | <filename>model/procedure.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/Procedure) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .age import Age
from .annotation import Annotation
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .identifier import Identifier
from .period import Period
from .range import Range
@dataclass
class ProcedurePerformer(BackboneElement):
""" The people who performed the procedure.
Limited to "real" people rather than equipment.
"""
resource_type: ClassVar[str] = "ProcedurePerformer"
function: Optional[CodeableConcept] = None
actor: FHIRReference = None
onBehalfOf: Optional[FHIRReference] = None
@dataclass
class ProcedureFocalDevice(BackboneElement):
""" Manipulated, implanted, or removed device.
A device that is implanted, removed or otherwise manipulated (calibration,
battery replacement, fitting a prosthesis, attaching a wound-vac, etc.) as
a focal portion of the Procedure.
"""
resource_type: ClassVar[str] = "ProcedureFocalDevice"
action: Optional[CodeableConcept] = None
manipulated: FHIRReference = None
@dataclass
class Procedure(DomainResource):
""" An action that is being or was performed on a patient.
An action that is or was performed on or for a patient. This can be a
physical intervention like an operation, or less invasive like long term
services, counseling, or hypnotherapy.
"""
resource_type: ClassVar[str] = "Procedure"
identifier: Optional[List[Identifier]] = None
instantiatesCanonical: Optional[List[str]] = None
instantiatesUri: Optional[List[str]] = None
basedOn: Optional[List[FHIRReference]] = None
partOf: Optional[List[FHIRReference]] = None
status: str = None
statusReason: Optional[CodeableConcept] = None
category: Optional[CodeableConcept] = None
code: Optional[CodeableConcept] = None
subject: FHIRReference = None
encounter: Optional[FHIRReference] = None
performedDateTime: Optional[FHIRDate] = field(default=None, metadata=dict(one_of_many='performed',))
performedPeriod: Optional[Period] = field(default=None, metadata=dict(one_of_many='performed',))
performedString: Optional[str] = field(default=None, metadata=dict(one_of_many='performed',))
performedAge: Optional[Age] = field(default=None, metadata=dict(one_of_many='performed',))
performedRange: Optional[Range] = field(default=None, metadata=dict(one_of_many='performed',))
recorder: Optional[FHIRReference] = None
asserter: Optional[FHIRReference] = None
performer: Optional[List[ProcedurePerformer]] = None
location: Optional[FHIRReference] = None
reasonCode: Optional[List[CodeableConcept]] = None
reasonReference: Optional[List[FHIRReference]] = None
bodySite: Optional[List[CodeableConcept]] = None
outcome: Optional[CodeableConcept] = None
report: Optional[List[FHIRReference]] = None
complication: Optional[List[CodeableConcept]] = None
complicationDetail: Optional[List[FHIRReference]] = None
followUp: Optional[List[CodeableConcept]] = None
note: Optional[List[Annotation]] = None
focalDevice: Optional[List[ProcedureFocalDevice]] = None
usedReference: Optional[List[FHIRReference]] = None
usedCode: Optional[List[CodeableConcept]] = None | en | 0.927338 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/Procedure) on 2020-02-03. # 2020, SMART Health IT. The people who performed the procedure. Limited to "real" people rather than equipment. Manipulated, implanted, or removed device. A device that is implanted, removed or otherwise manipulated (calibration, battery replacement, fitting a prosthesis, attaching a wound-vac, etc.) as a focal portion of the Procedure. An action that is being or was performed on a patient. An action that is or was performed on or for a patient. This can be a physical intervention like an operation, or less invasive like long term services, counseling, or hypnotherapy. | 2.391583 | 2 |
ParsingProject/instagram/migrations/0002_auto_20210414_2056.py | rzhvn1/Parsing-APi | 0 | 6632445 | # Generated by Django 3.2 on 2021-04-14 14:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('instagram', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='instagram.instagram'),
preserve_default=False,
),
migrations.DeleteModel(
name='PNC',
),
]
| # Generated by Django 3.2 on 2021-04-14 14:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('instagram', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='instagram.instagram'),
preserve_default=False,
),
migrations.DeleteModel(
name='PNC',
),
]
| en | 0.833513 | # Generated by Django 3.2 on 2021-04-14 14:56 | 1.717413 | 2 |
Chapter1/noreminderdev.py | galinadychko/IntroToPython | 0 | 6632446 | <filename>Chapter1/noreminderdev.py
import sys
import stdio
a, b = map(int, sys.argv[1:3])
stdio.writeln(a % b == 0) | <filename>Chapter1/noreminderdev.py
import sys
import stdio
a, b = map(int, sys.argv[1:3])
stdio.writeln(a % b == 0) | none | 1 | 3.074503 | 3 |
|
bdragon/constants.py | bangingheads/BDragon | 3 | 6632447 | <reponame>bangingheads/BDragon<filename>bdragon/constants.py
cdragon_url = 'http://raw.communitydragon.org'
ddragon_url = 'http://ddragon.leagueoflegends.com'
languages = {
'cs_CZ': 'cs_cz',
'de_DE': 'de_de',
'el_GR': 'el_gr',
'en_AU': 'en_au',
'en_GB': 'en_gb',
'en_PH': 'en_ph',
'en_SG': 'en_sg',
'en_US': 'default',
'es_AR': 'es_ar',
'es_ES': 'es_es',
'es_MX': 'es_mx',
'fr_FR': 'fr_fr',
'hu_HU': 'hu_hu',
'it_IT': 'it_it',
'ja_JP': 'ja_jp',
'ko_KR': 'ko_kr',
'pl_PL': 'pl_pl',
'pt_BR': 'pt_br',
'ro_RO': 'ro_ro',
'ru_RU': 'ru_ru',
'th_TH': 'th_th',
'tr_TR': 'tr_tr',
'vn_VN': 'vn_vn',
'zh_CN': 'zh_cn',
'zh_MY': 'zh_my',
'zh_TW': 'zh_tw',
}
realms = {
"br": "pt_BR",
"eune": "en_GB",
"euw": "en_GB",
"jp": "ja_JP",
"kr": "ko_KR",
"lan": "es_MX",
"las": "es_AR",
"na": "en_US",
"oce": "en_AU",
"tr": "tr_TR",
"ru": "ru_RU",
}
| cdragon_url = 'http://raw.communitydragon.org'
ddragon_url = 'http://ddragon.leagueoflegends.com'
languages = {
'cs_CZ': 'cs_cz',
'de_DE': 'de_de',
'el_GR': 'el_gr',
'en_AU': 'en_au',
'en_GB': 'en_gb',
'en_PH': 'en_ph',
'en_SG': 'en_sg',
'en_US': 'default',
'es_AR': 'es_ar',
'es_ES': 'es_es',
'es_MX': 'es_mx',
'fr_FR': 'fr_fr',
'hu_HU': 'hu_hu',
'it_IT': 'it_it',
'ja_JP': 'ja_jp',
'ko_KR': 'ko_kr',
'pl_PL': 'pl_pl',
'pt_BR': 'pt_br',
'ro_RO': 'ro_ro',
'ru_RU': 'ru_ru',
'th_TH': 'th_th',
'tr_TR': 'tr_tr',
'vn_VN': 'vn_vn',
'zh_CN': 'zh_cn',
'zh_MY': 'zh_my',
'zh_TW': 'zh_tw',
}
realms = {
"br": "pt_BR",
"eune": "en_GB",
"euw": "en_GB",
"jp": "ja_JP",
"kr": "ko_KR",
"lan": "es_MX",
"las": "es_AR",
"na": "en_US",
"oce": "en_AU",
"tr": "tr_TR",
"ru": "ru_RU",
} | none | 1 | 1.942969 | 2 |
|
circleguard/exceptions.py | wmpmiles/circleguard | 0 | 6632448 | <reponame>wmpmiles/circleguard
class CircleguardException(Exception):
"""Base class for exceptions in the Circleguard program."""
class InvalidArgumentsException(CircleguardException):
"""Indicates an invalid argument was passed to one of the flags."""
class APIException(CircleguardException):
"""Indicates some error on the API's end that we were not prepared to handle."""
| class CircleguardException(Exception):
"""Base class for exceptions in the Circleguard program."""
class InvalidArgumentsException(CircleguardException):
"""Indicates an invalid argument was passed to one of the flags."""
class APIException(CircleguardException):
"""Indicates some error on the API's end that we were not prepared to handle.""" | en | 0.947462 | Base class for exceptions in the Circleguard program. Indicates an invalid argument was passed to one of the flags. Indicates some error on the API's end that we were not prepared to handle. | 2.590197 | 3 |
_imputation/convert.Minimac3ToOxford.py | swvanderlaan/HerculesToolKit | 0 | 6632449 | <gh_stars>0
#!/hpc/local/CentOS7/common/lang/python/2.7.10/bin/python
# coding=UTF-8
# Alternative shebang for local Mac OS X: #!/usr/bin/python
# Linux version for HPC: #!/hpc/local/CentOS7/common/lang/python/2.7.10/bin/python
### ADD-IN:
### - flag to *optinally* determine which "COLUMNS_TO_KEEP"
#
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print " MINIMAC3 VCF TO OXFOR GEN CONVERTER "
print ""
print "* Version : v1.0.1"
print ""
print "* Last update : 2018-08-23"
print "* Written by : <NAME> "
print " University Medical Center Utrecht | University Utrecht"
print " t.bezemer[at]]umcutrecht[dot]nl"
print "* Designed by : <NAME> "
print " University Medical Center Utrecht | University Utrecht | s.w.vanderlaan[at]]gmail[dot]com"
print " s.w.vanderlaan-2[at]]umcutrecht[dot]nl"
print ""
print "* Description : This script will convert Michigan Imputation Server Minimax3-style VCF-files to "
print " Oxford-style GEN-files."
print ""
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
### ADD-IN:
### - requirement check
### - if not present install
### - report that the requirements are met
import gzip
import io
from sys import argv, exit
import sys
from time import strftime
import argparse
COLUMNS_TO_KEEP = ["ID", "ID", "POS", "REF", "ALT", "UPID"]
# SOME_FILE=argv[1]
parser = argparse.ArgumentParser(description="Convert Minimac3-style VCF to Oxford-style GEN-files.")
## parser.add_argument("-s", "--subsample", help="Number of rows to use for subsampling, when determining the optimal VariantID (default = 100,000)", type=int)
#
requiredNamed = parser.add_argument_group('required named arguments')
#
requiredNamed.add_argument("-f", "--file", help="The Minimac3 VCF-file to convert.", type=str)
requiredNamed.add_argument("-o", "--output", help="The path to the output file.", type=str)
args = parser.parse_args()
#
if not args.file:
print "Usage: " + argv[0] + " --help"
print "Exiting..."
exit()
try:
#print "Opening the VCF file."
with gzip.open(args.file, "rb") as gz:
f = io.BufferedReader(gz)
colmap = []
with open(args.output "w") as output_file:
for line in f.readlines():
# print "Reading lines, while ignoring lines starting with '##'."
line = line.strip()
if line.startswith("##"):
continue;
if line.startswith("#"):
colmap = line.split('\t')
continue
fields = line.split("\t")
for col in COLUMNS_TO_KEEP:
#print "Extracting relevant data."
for index, field in enumerate(fields):
if colmap[index] == col:
#print " - writing variant information ..."
output_file.write(field + " ")
elif col == "UPID" and colmap[index].startswith("UPID"):
#print " - extracting and writing the genotype probilities ..."
UPID_GP = field.split(":")[2]
UPID_GP = UPID_GP.split(",")
output_file.write(" ".join(UPID_GP)+ " ")
output_file.write('\n')
except IOError:
exit()
print "\t ..." + strftime("%a, %H:%M:%S") + " All done converting. Let's have a beer, buddy!"
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print "+ The MIT License (MIT) +"
print "+ Copyright (c) 2018 <NAME>, <NAME> | UMC Utrecht, Utrecht, the Netherlands +"
print "+ +"
print "+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +"
print "+ associated documentation files (the \"Software\"), to deal in the Software without restriction, including +"
print "+ without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +"
print "+ copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the +"
print "+ following conditions: +"
print "+ +"
print "+ The above copyright notice and this permission notice shall be included in all copies or substantial +"
print "+ portions of the Software. +"
print "+ +"
print "+ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +"
print "+ LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO +"
print "+ EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +"
print "+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR +"
print "+ THE USE OR OTHER DEALINGS IN THE SOFTWARE. +"
print "+ +"
print "+ Reference: http://opensource.org. +"
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" | #!/hpc/local/CentOS7/common/lang/python/2.7.10/bin/python
# coding=UTF-8
# Alternative shebang for local Mac OS X: #!/usr/bin/python
# Linux version for HPC: #!/hpc/local/CentOS7/common/lang/python/2.7.10/bin/python
### ADD-IN:
### - flag to *optinally* determine which "COLUMNS_TO_KEEP"
#
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print " MINIMAC3 VCF TO OXFOR GEN CONVERTER "
print ""
print "* Version : v1.0.1"
print ""
print "* Last update : 2018-08-23"
print "* Written by : <NAME> "
print " University Medical Center Utrecht | University Utrecht"
print " t.bezemer[at]]umcutrecht[dot]nl"
print "* Designed by : <NAME> "
print " University Medical Center Utrecht | University Utrecht | s.w.vanderlaan[at]]gmail[dot]com"
print " s.w.vanderlaan-2[at]]umcutrecht[dot]nl"
print ""
print "* Description : This script will convert Michigan Imputation Server Minimax3-style VCF-files to "
print " Oxford-style GEN-files."
print ""
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
### ADD-IN:
### - requirement check
### - if not present install
### - report that the requirements are met
import gzip
import io
from sys import argv, exit
import sys
from time import strftime
import argparse
COLUMNS_TO_KEEP = ["ID", "ID", "POS", "REF", "ALT", "UPID"]
# SOME_FILE=argv[1]
parser = argparse.ArgumentParser(description="Convert Minimac3-style VCF to Oxford-style GEN-files.")
## parser.add_argument("-s", "--subsample", help="Number of rows to use for subsampling, when determining the optimal VariantID (default = 100,000)", type=int)
#
requiredNamed = parser.add_argument_group('required named arguments')
#
requiredNamed.add_argument("-f", "--file", help="The Minimac3 VCF-file to convert.", type=str)
requiredNamed.add_argument("-o", "--output", help="The path to the output file.", type=str)
args = parser.parse_args()
#
if not args.file:
print "Usage: " + argv[0] + " --help"
print "Exiting..."
exit()
try:
#print "Opening the VCF file."
with gzip.open(args.file, "rb") as gz:
f = io.BufferedReader(gz)
colmap = []
with open(args.output "w") as output_file:
for line in f.readlines():
# print "Reading lines, while ignoring lines starting with '##'."
line = line.strip()
if line.startswith("##"):
continue;
if line.startswith("#"):
colmap = line.split('\t')
continue
fields = line.split("\t")
for col in COLUMNS_TO_KEEP:
#print "Extracting relevant data."
for index, field in enumerate(fields):
if colmap[index] == col:
#print " - writing variant information ..."
output_file.write(field + " ")
elif col == "UPID" and colmap[index].startswith("UPID"):
#print " - extracting and writing the genotype probilities ..."
UPID_GP = field.split(":")[2]
UPID_GP = UPID_GP.split(",")
output_file.write(" ".join(UPID_GP)+ " ")
output_file.write('\n')
except IOError:
exit()
print "\t ..." + strftime("%a, %H:%M:%S") + " All done converting. Let's have a beer, buddy!"
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print "+ The MIT License (MIT) +"
print "+ Copyright (c) 2018 <NAME>, <NAME> | UMC Utrecht, Utrecht, the Netherlands +"
print "+ +"
print "+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +"
print "+ associated documentation files (the \"Software\"), to deal in the Software without restriction, including +"
print "+ without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +"
print "+ copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the +"
print "+ following conditions: +"
print "+ +"
print "+ The above copyright notice and this permission notice shall be included in all copies or substantial +"
print "+ portions of the Software. +"
print "+ +"
print "+ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +"
print "+ LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO +"
print "+ EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +"
print "+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR +"
print "+ THE USE OR OTHER DEALINGS IN THE SOFTWARE. +"
print "+ +"
print "+ Reference: http://opensource.org. +"
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" | en | 0.560018 | #!/hpc/local/CentOS7/common/lang/python/2.7.10/bin/python # coding=UTF-8 # Alternative shebang for local Mac OS X: #!/usr/bin/python # Linux version for HPC: #!/hpc/local/CentOS7/common/lang/python/2.7.10/bin/python ### ADD-IN: ### - flag to *optinally* determine which "COLUMNS_TO_KEEP" # ### ADD-IN: ### - requirement check ### - if not present install ### - report that the requirements are met # SOME_FILE=argv[1] ## parser.add_argument("-s", "--subsample", help="Number of rows to use for subsampling, when determining the optimal VariantID (default = 100,000)", type=int) # # # #print "Opening the VCF file." # print "Reading lines, while ignoring lines starting with '##'." #"): #print "Extracting relevant data." #print " - writing variant information ..." #print " - extracting and writing the genotype probilities ..." | 2.049821 | 2 |
workflows/rivm/data_rivm_dashboard.py | vmenger/CoronaWatchNL | 0 | 6632450 | <filename>workflows/rivm/data_rivm_dashboard.py<gh_stars>0
from datetime import date
from pathlib import Path
import numpy as np
import pandas as pd
import requests
DATA_FOLDER = Path("data-dashboard")
URL = "https://coronadashboard.rijksoverheid.nl/json/NL.json"
def export_date(df, data_folder, prefix, data_date=None, label=None):
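    """Write ``df`` (filtered to ``data_date`` when given) to
    DATA_FOLDER/<data_folder>/<prefix>[_<label>].csv."""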
if data_date:
df_date = df.loc[df["Datum"] == data_date, :]
else:
df_date = df
    # build the export path, optionally suffixed with a label (e.g. a date stamp or "latest")
if label is not None:
export_path = Path(DATA_FOLDER, data_folder, f"{prefix}_{label}.csv")
else:
export_path = Path(DATA_FOLDER, data_folder, f"{prefix}.csv")
print(f"Export {export_path}")
df_date.to_csv(export_path, index=False)
def _get_data(key, mapping, date_key):
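    """Download the dashboard feed and return one of its sections in long format.

    ``mapping`` maps source field names to Dutch column labels; the result has
    the columns ``Datum``, ``Type`` and ``Waarde``. For example, a single-series
    variant of the call made in ``main_rep`` would be::

        _get_data("reproduction_index",
                  {"reproduction_index_avg": "Reproductie index"},
                  "date_of_report_unix")
    """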
r = requests.get(URL)
data = r.json()[key]["values"]
df = pd.DataFrame(data)
df["Datum"] = pd.to_datetime(df[date_key], unit='s').dt.date
df.rename(mapping, axis=1, inplace=True)
df_selection = ["Datum"] + list(mapping.values())
df = df[df_selection] \
.set_index("Datum") \
.dropna(how="all") \
.fillna(-1) \
.stack() \
.replace(-1, np.nan) \
.to_frame() \
.reset_index()
df.columns = ["Datum", "Type", "Waarde"]
return df
def main_rep():
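    """Export the estimated reproduction number (average plus min/max bounds)."""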
df = _get_data(
"reproduction_index",
{
"reproduction_index_low": "Minimum",
"reproduction_index_high": "Maximum",
"reproduction_index_avg": "Reproductie index",
},
"date_of_report_unix"
)
Path(DATA_FOLDER, "data-reproduction").mkdir(exist_ok=True)
export_date(
df,
"data-reproduction",
"RIVM_NL_reproduction_index",
data_date=None,
label=None)
def main_infectious():
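    """Export the estimated number of contagious people, both the normalized
    and the absolute series, each with min/max bounds."""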
df_normalized = _get_data(
"infectious_people_count_normalized",
{
"infectious_low_normalized": "Minimum",
"infectious_high_normalized": "Maximum",
"infectious_avg_normalized": "Geschat aantal besmettelijke mensen",
},
"date_of_report_unix"
)
df = _get_data(
"infectious_people_count",
{
"infectious_low": "Minimum",
"infectious_high": "Maximum",
"infectious_avg": "Geschat aantal besmettelijke mensen",
},
"date_of_report_unix"
)
df["Waarde"] = df["Waarde"].astype(pd.Int64Dtype())
Path(DATA_FOLDER,
"data-contagious/data-contagious_estimates").mkdir(exist_ok=True)
export_date(
df_normalized,
"data-contagious",
"RIVM_NL_contagious_estimate_normalized",
data_date=None,
label=None)
export_date(
df,
"data-contagious",
"RIVM_NL_contagious_estimate",
data_date=None,
label=None)
def main_nursery():
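    """Export daily and cumulative counts of positively tested and deceased
    nursing home residents."""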
df_pos = _get_data(
"nursing_home",
{
"newly_infected_people": "Positief geteste bewoners",
},
"date_of_report_unix"
)
    # NOTE: the original reused "newly_infected_people" here, which would simply
    # duplicate df_pos; "deceased_daily" is an assumed (unverified) field name
    # for the deceased-residents series in the dashboard feed.
    df_deceased = _get_data(
        "nursing_home",
        {
            "deceased_daily": "Overleden besmette bewoners",
        },
        "date_of_report_unix"
    )
df = df_pos.append(df_deceased) \
.rename({"Waarde": "Aantal"}, axis=1) \
.sort_values(by=['Datum', 'Type'], ascending=[True, False])
df['AantalCumulatief'] = df.groupby('Type')['Aantal'].transform(
pd.Series.cumsum)
df['Aantal'] = df["Aantal"].astype(pd.Int64Dtype())
df['AantalCumulatief'] = df["AantalCumulatief"].astype(pd.Int64Dtype())
Path(DATA_FOLDER,
"data-nursery/data-nursery_residents").mkdir(exist_ok=True)
export_date(
df,
"data-nursery/data-nursery_residents",
"RIVM_NL_nursery_residents",
data_date=None,
label=None)
def main_nurseryhomes():
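    """Export the total and newly reported number of infected nursing home locations."""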
df_new = _get_data(
"nursing_home",
{
"newly_infected_locations": "Besmette verpleeghuizen",
},
"date_of_report_unix"
).rename({"Waarde": "NieuwAantal"}, axis=1)
df_total = _get_data(
"nursing_home",
{
"infected_locations_total": "Besmette verpleeghuizen",
},
"date_of_report_unix"
).rename({"Waarde": "Aantal"}, axis=1)
df = df_total.merge(df_new, on=["Datum", "Type"])
Path(DATA_FOLDER, "data-nursery/data-nursery_homes").mkdir(exist_ok=True)
export_date(
df,
"data-nursery/data-nursery_homes",
"RIVM_NL_nursery_counts",
data_date=None,
label=None)
def main_national():
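    # Daily national case totals plus the hospital-intake moving average;
    # currently disabled in __main__ (see the commented-out call below).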
df_total = _get_data(
"infected_people_total",
{
"infected_daily_total": "Totaal",
},
"date_of_report_unix"
).rename({"Waarde": "Aantal"}, axis=1)
df_total['AantalCumulatief'] = df_total['Aantal'].transform(pd.Series.cumsum)
df_ma = _get_data(
"intake_hospital_ma",
{
"moving_average_hospital": "Ziekenhuisopname",
},
"date_of_report_unix"
).rename({"Waarde": "Aantal"}, axis=1)
df_ma['AantalCumulatief'] = df_ma['Aantal'].transform(pd.Series.cumsum)
df = df_total.append(df_ma)
df['Aantal'] = df["Aantal"].astype(pd.Int64Dtype())
df['AantalCumulatief'] = df["AantalCumulatief"].astype(pd.Int64Dtype())
df = df.sort_values(by=['Datum', 'Type'], ascending=[True, True])
df = df.reset_index(drop=True)
Path(DATA_FOLDER, "data-cases").mkdir(exist_ok=True)
dates = sorted(df["Datum"].unique())
for data_date in dates:
export_date(df, "data-cases", "RIVM_NL_national_dashboard", data_date,
str(data_date).replace("-", ""))
export_date(
df,
"data-cases",
"RIVM_NL_national_dashboard",
data_date=dates[-1],
label="latest")
export_date(
df,
"data-cases",
"RIVM_NL_national_dashboard",
data_date=None,
label=None)
def main_suspects():
df = _get_data(
"verdenkingen_huisartsen",
{
"incidentie": "Verdachte patienten",
},
"week_unix"
).rename({"Waarde": "Aantal"}, axis=1)
Path(DATA_FOLDER, "data-suspects").mkdir(exist_ok=True)
export_date(
df, "data-suspects", "RIVM_NL_suspects", data_date=None, label=None)
def main_riool():
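    # Weekly sewage measurements: average virus particles per ml of sewage and
    # the number of participating installations, merged on the week date.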
df_ml = _get_data(
"sewer",
{
"average": "Virusdeeltjes per ml rioolwater",
},
"week_unix"
).rename({"Waarde": "Aantal"}, axis=1)
df_installations = _get_data(
"sewer",
{
"total_installation_count": "Installaties",
},
"week_unix"
).rename({"Waarde": "Installaties"}, axis=1).drop("Type", axis=1)
df = df_ml.merge(df_installations, on=["Datum"])
Path(DATA_FOLDER, "data-sewage").mkdir(exist_ok=True)
export_date(
df, "data-sewage", "RIVM_NL_sewage_counts", data_date=None, label=None)
def main_descriptive():
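    # Append today's age distribution of reported cases to the running
    # RIVM_NL_age_distribution.csv, skipping report dates already present.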
r = requests.get(URL)
data = r.json()["intake_share_age_groups"]["values"]
df = pd.DataFrame(data)
df["Datum"] = pd.to_datetime(df["date_of_report_unix"], unit='s').dt.date
df.rename({
"agegroup": "LeeftijdGroep",
"infected_per_agegroup_increase": "Aantal"
}, axis=1, inplace=True)
df = df[["Datum", "LeeftijdGroep", "Aantal"]]
df_total = pd.read_csv(
Path("data-dashboard/data-descriptive",
"RIVM_NL_age_distribution.csv"),
parse_dates=["Datum"])
df_total["Datum"] = df_total["Datum"].dt.date
    # Only append when this report date is not already in the historical file
    if df['Datum'][0] not in list(df_total['Datum']):
        df_total = df_total.append(df, ignore_index=True)
df_total = df_total.sort_values(by=['Datum', 'LeeftijdGroep'])
df_total = df_total.reset_index(drop=True)
dates = sorted(df_total["Datum"].unique())
Path(DATA_FOLDER, "data-descriptive").mkdir(exist_ok=True)
for data_date in dates:
export_date(df_total, "data-descriptive", "RIVM_NL_age_distribution",
data_date,
str(data_date).replace("-", ""))
export_date(
df_total,
"data-descriptive",
"RIVM_NL_age_distribution",
data_date=None,
label=None)
export_date(
df_total,
"data-descriptive",
"RIVM_NL_age_distribution",
data_date=dates[-1],
label="latest")
if __name__ == '__main__':
DATA_FOLDER.mkdir(exist_ok=True)
main_rep()
main_infectious()
main_nursery()
main_nurseryhomes()
main_riool()
# main_national()
main_suspects()
main_descriptive()
| <filename>workflows/rivm/data_rivm_dashboard.py<gh_stars>0
from datetime import date
from pathlib import Path
import numpy as np
import pandas as pd
import requests
DATA_FOLDER = Path("data-dashboard")
URL = "https://coronadashboard.rijksoverheid.nl/json/NL.json"
def export_date(df, data_folder, prefix, data_date=None, label=None):
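    # Export the frame as CSV; when `data_date` is given only rows for that
    # report date are written, and `label` is appended to the filename.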
if data_date:
df_date = df.loc[df["Datum"] == data_date, :]
else:
df_date = df
# export with data date
if label is not None:
export_path = Path(DATA_FOLDER, data_folder, f"{prefix}_{label}.csv")
else:
export_path = Path(DATA_FOLDER, data_folder, f"{prefix}.csv")
print(f"Export {export_path}")
df_date.to_csv(export_path, index=False)
def _get_data(key, mapping, date_key):
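    # Generic helper: fetch the dashboard JSON, keep the keys listed in
    # `mapping`, and reshape them into long format (Datum, Type, Waarde).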
r = requests.get(URL)
data = r.json()[key]["values"]
df = pd.DataFrame(data)
df["Datum"] = pd.to_datetime(df[date_key], unit='s').dt.date
df.rename(mapping, axis=1, inplace=True)
df_selection = ["Datum"] + list(mapping.values())
df = df[df_selection] \
.set_index("Datum") \
.dropna(how="all") \
.fillna(-1) \
.stack() \
.replace(-1, np.nan) \
.to_frame() \
.reset_index()
df.columns = ["Datum", "Type", "Waarde"]
return df
def main_rep():
df = _get_data(
"reproduction_index",
{
"reproduction_index_low": "Minimum",
"reproduction_index_high": "Maximum",
"reproduction_index_avg": "Reproductie index",
},
"date_of_report_unix"
)
Path(DATA_FOLDER, "data-reproduction").mkdir(exist_ok=True)
export_date(
df,
"data-reproduction",
"RIVM_NL_reproduction_index",
data_date=None,
label=None)
def main_infectious():
df_normalized = _get_data(
"infectious_people_count_normalized",
{
"infectious_low_normalized": "Minimum",
"infectious_high_normalized": "Maximum",
"infectious_avg_normalized": "Geschat aantal besmettelijke mensen",
},
"date_of_report_unix"
)
df = _get_data(
"infectious_people_count",
{
"infectious_low": "Minimum",
"infectious_high": "Maximum",
"infectious_avg": "Geschat aantal besmettelijke mensen",
},
"date_of_report_unix"
)
df["Waarde"] = df["Waarde"].astype(pd.Int64Dtype())
Path(DATA_FOLDER,
"data-contagious/data-contagious_estimates").mkdir(exist_ok=True)
export_date(
df_normalized,
"data-contagious",
"RIVM_NL_contagious_estimate_normalized",
data_date=None,
label=None)
export_date(
df,
"data-contagious",
"RIVM_NL_contagious_estimate",
data_date=None,
label=None)
def main_nursery():
df_pos = _get_data(
"nursing_home",
{
"newly_infected_people": "Positief geteste bewoners",
},
"date_of_report_unix"
)
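    # NOTE: the call below reuses the "newly_infected_people" key, so the
    # "Overleden besmette bewoners" series duplicates the positive-test counts;
    # the feed presumably exposes a separate key (e.g. "deceased_daily") for this.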
df_deceased = _get_data(
"nursing_home",
{
"newly_infected_people": "Overleden besmette bewoners",
},
"date_of_report_unix"
)
df = df_pos.append(df_deceased) \
.rename({"Waarde": "Aantal"}, axis=1) \
.sort_values(by=['Datum', 'Type'], ascending=[True, False])
df['AantalCumulatief'] = df.groupby('Type')['Aantal'].transform(
pd.Series.cumsum)
df['Aantal'] = df["Aantal"].astype(pd.Int64Dtype())
df['AantalCumulatief'] = df["AantalCumulatief"].astype(pd.Int64Dtype())
Path(DATA_FOLDER,
"data-nursery/data-nursery_residents").mkdir(exist_ok=True)
export_date(
df,
"data-nursery/data-nursery_residents",
"RIVM_NL_nursery_residents",
data_date=None,
label=None)
def main_nurseryhomes():
df_new = _get_data(
"nursing_home",
{
"newly_infected_locations": "Besmette verpleeghuizen",
},
"date_of_report_unix"
).rename({"Waarde": "NieuwAantal"}, axis=1)
df_total = _get_data(
"nursing_home",
{
"infected_locations_total": "Besmette verpleeghuizen",
},
"date_of_report_unix"
).rename({"Waarde": "Aantal"}, axis=1)
df = df_total.merge(df_new, on=["Datum", "Type"])
Path(DATA_FOLDER, "data-nursery/data-nursery_homes").mkdir(exist_ok=True)
export_date(
df,
"data-nursery/data-nursery_homes",
"RIVM_NL_nursery_counts",
data_date=None,
label=None)
def main_national():
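    # Daily national case totals plus the hospital-intake moving average;
    # currently disabled in __main__ (see the commented-out call below).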
df_total = _get_data(
"infected_people_total",
{
"infected_daily_total": "Totaal",
},
"date_of_report_unix"
).rename({"Waarde": "Aantal"}, axis=1)
df_total['AantalCumulatief'] = df_total['Aantal'].transform(pd.Series.cumsum)
df_ma = _get_data(
"intake_hospital_ma",
{
"moving_average_hospital": "Ziekenhuisopname",
},
"date_of_report_unix"
).rename({"Waarde": "Aantal"}, axis=1)
df_ma['AantalCumulatief'] = df_ma['Aantal'].transform(pd.Series.cumsum)
df = df_total.append(df_ma)
df['Aantal'] = df["Aantal"].astype(pd.Int64Dtype())
df['AantalCumulatief'] = df["AantalCumulatief"].astype(pd.Int64Dtype())
df = df.sort_values(by=['Datum', 'Type'], ascending=[True, True])
df = df.reset_index(drop=True)
Path(DATA_FOLDER, "data-cases").mkdir(exist_ok=True)
dates = sorted(df["Datum"].unique())
for data_date in dates:
export_date(df, "data-cases", "RIVM_NL_national_dashboard", data_date,
str(data_date).replace("-", ""))
export_date(
df,
"data-cases",
"RIVM_NL_national_dashboard",
data_date=dates[-1],
label="latest")
export_date(
df,
"data-cases",
"RIVM_NL_national_dashboard",
data_date=None,
label=None)
def main_suspects():
df = _get_data(
"verdenkingen_huisartsen",
{
"incidentie": "Verdachte patienten",
},
"week_unix"
).rename({"Waarde": "Aantal"}, axis=1)
Path(DATA_FOLDER, "data-suspects").mkdir(exist_ok=True)
export_date(
df, "data-suspects", "RIVM_NL_suspects", data_date=None, label=None)
def main_riool():
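    # Weekly sewage measurements: average virus particles per ml of sewage and
    # the number of participating installations, merged on the week date.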
df_ml = _get_data(
"sewer",
{
"average": "Virusdeeltjes per ml rioolwater",
},
"week_unix"
).rename({"Waarde": "Aantal"}, axis=1)
df_installations = _get_data(
"sewer",
{
"total_installation_count": "Installaties",
},
"week_unix"
).rename({"Waarde": "Installaties"}, axis=1).drop("Type", axis=1)
df = df_ml.merge(df_installations, on=["Datum"])
Path(DATA_FOLDER, "data-sewage").mkdir(exist_ok=True)
export_date(
df, "data-sewage", "RIVM_NL_sewage_counts", data_date=None, label=None)
def main_descriptive():
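    # Append today's age distribution of reported cases to the running
    # RIVM_NL_age_distribution.csv, skipping report dates already present.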
r = requests.get(URL)
data = r.json()["intake_share_age_groups"]["values"]
df = pd.DataFrame(data)
df["Datum"] = pd.to_datetime(df["date_of_report_unix"], unit='s').dt.date
df.rename({
"agegroup": "LeeftijdGroep",
"infected_per_agegroup_increase": "Aantal"
}, axis=1, inplace=True)
df = df[["Datum", "LeeftijdGroep", "Aantal"]]
df_total = pd.read_csv(
Path("data-dashboard/data-descriptive",
"RIVM_NL_age_distribution.csv"),
parse_dates=["Datum"])
df_total["Datum"] = df_total["Datum"].dt.date
    # Only append when this report date is not already in the historical file
    if df['Datum'][0] not in list(df_total['Datum']):
        df_total = df_total.append(df, ignore_index=True)
df_total = df_total.sort_values(by=['Datum', 'LeeftijdGroep'])
df_total = df_total.reset_index(drop=True)
dates = sorted(df_total["Datum"].unique())
Path(DATA_FOLDER, "data-descriptive").mkdir(exist_ok=True)
for data_date in dates:
export_date(df_total, "data-descriptive", "RIVM_NL_age_distribution",
data_date,
str(data_date).replace("-", ""))
export_date(
df_total,
"data-descriptive",
"RIVM_NL_age_distribution",
data_date=None,
label=None)
export_date(
df_total,
"data-descriptive",
"RIVM_NL_age_distribution",
data_date=dates[-1],
label="latest")
if __name__ == '__main__':
DATA_FOLDER.mkdir(exist_ok=True)
main_rep()
main_infectious()
main_nursery()
main_nurseryhomes()
main_riool()
# main_national()
main_suspects()
main_descriptive()
| en | 0.760022 | # export with data date # main_national() | 2.891003 | 3 |