id (int64, 0–300k) | label (stringlengths 1–74, ⌀) | text (stringlengths 4k–8k) |
---|---|---|
600 | build tfidf matrix | import argparse
import logging
import os
import pickle
import time
from scipy.sparse import save_npz, csr_matrix
import sklearn.preprocessing
import numpy as np
from utils import TfidfTgzReader
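# Topic ids whose ranked documents are kept when building the test matrix (see build_docid_idx_dict below).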
topic_list = [
'321', '336', '341',
'347', '350', '362',
'363', '367', '375', '378', '393',
'397', '400', '408', '414',
'422', '426', '427', '433',
'439', '442', '445', '626', '646',
'690'
]
def read_vocab(path):
"""
"""
logging.info("loading vocabulary dictionary...")
with open(path, 'rb') as f:
vocab = pickle.load(f)
return vocab
def build_docid_idx_dict(rank_file):
"""
"""
logging.info('building docid idx dict...')
cur_idx = 0
docid_idx_dict = {}
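# The dict stores both docid -> idx and idx -> docid, which is why the matrix builder below counts documents as len(docid_idx_dict) // 2.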
with open(rank_file, 'r') as f:
for line in f:
topic, _, docid, _, _, _ = line.split(' ')
if topic in topic_list and docid not in docid_idx_dict:
docid_idx_dict[docid] = cur_idx
docid_idx_dict[cur_idx] = docid
cur_idx += 1
return docid_idx_dict
def write_docid_idx_dict(docid_idx_dict, filename):
"""
"""
logging.info(f"writting docid-idx-dict to {filename}")
with open(filename, 'wb') as f:
pickle.dump(docid_idx_dict, f)
def METHOD_NAME(tfidf_raw, docid_idx_dict, vocab_idx_dict):
"""
"""
num_docs, num_vocabs = len(docid_idx_dict) // 2, len(vocab_idx_dict) // 2
logging.info(f'start building tfidf sparse matrix with {num_docs} docs and {num_vocabs} vocabs...')
tfidf_dict = {}
count = 0
reader = TfidfTgzReader(tfidf_raw)
while reader.hasnextdoc():
docid = reader.getnextdoc().strip()
count += 1
if count % 100000 == 0:
logging.info(f'{count} files have been processed...')
if docid not in docid_idx_dict:
reader.skipdoc()
continue
doc_idx = docid_idx_dict[docid]
while reader.hasnexttfidf():
word, tfidf = reader.getnexttfidf()
if word in vocab_idx_dict:
vocab_idx = vocab_idx_dict[word]
tfidf_dict[(doc_idx, vocab_idx)] = float(tfidf)
logging.info(f'finish building tfidf dict, {count} files in total.')
indices = tuple(zip(*tfidf_dict.keys()))
values = list(tfidf_dict.values())
tfidf_sp = csr_matrix((values, indices), shape=(num_docs, num_vocabs), dtype=np.float32)
logging.info(f'finish building tfidf sparse matrix.')
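# L2-normalizing each row gives unit-length tf-idf vectors, so a dot product between two rows equals their cosine similarity.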
return sklearn.preprocessing.normalize(tfidf_sp, norm='l2')
def _safe_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S ')
start_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument("--tfidf-file", '-t', type=str,
help='path to tfidf file', required=True)
parser.add_argument("--rank-file", '-r', type=str,
help='path to qrels_file', required=True)
parser.add_argument("--vocab-folder", '-v', type=str,
help='folder contains vocab-idx-dict.pkl', required=True)
parser.add_argument("--output-folder", '-o', type=str,
help='output folder to dump training data for each topic', required=True)
args = parser.parse_args()
tfidf_raw = args.tfidf_file
rank_file = args.rank_file
out_folder = args.output_folder
vocab_folder = args.vocab_folder
# sanity check
assert os.path.isdir(vocab_folder)
# constant
vocab_path = os.path.join(vocab_folder, 'vocab-idx-dict.pkl')
out_docid_idx_file = os.path.join(out_folder, 'test-docid-idx-dict.pkl')
out_feature_file = os.path.join(out_folder, 'test.npz')
# preprocessing
_safe_mkdir(out_folder)
# pipeline from here
logging.info(f'start building test...')
vocab_dict = read_vocab(vocab_path)
docid_idx_dict = build_docid_idx_dict(rank_file)
tfidf_sp = METHOD_NAME(tfidf_raw, docid_idx_dict, vocab_dict)
write_docid_idx_dict(docid_idx_dict, out_docid_idx_file)
logging.info(f'writing test data to {out_feature_file}...')
save_npz(out_feature_file, tfidf_sp)
logging.info(f'build test finished in {time.time() - start_time} seconds')
|
601 | test norm object array | """ Test functions for linalg module
"""
import warnings
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_array_less
)
class TestRegression:
def test_eig_build(self):
# Ticket #652
rva = array([1.03221168e+02 + 0.j,
-1.91843603e+01 + 0.j,
-6.04004526e-01 + 15.84422474j,
-6.04004526e-01 - 15.84422474j,
-1.13692929e+01 + 0.j,
-6.57612485e-01 + 10.41755503j,
-6.57612485e-01 - 10.41755503j,
1.82126812e+01 + 0.j,
1.06011014e+01 + 0.j,
7.80732773e+00 + 0.j,
-7.65390898e-01 + 0.j,
1.51971555e-15 + 0.j,
-1.51308713e-15 + 0.j])
a = arange(13 * 13, dtype=float64)
a.shape = (13, 13)
a = a % 17
va, ve = linalg.eig(a)
va.sort()
rva.sort()
assert_array_almost_equal(va, rva)
def test_eigh_build(self):
# Ticket 662.
rvals = [68.60568999, 89.57756725, 106.67185574]
cov = array([[77.70273908, 3.51489954, 15.64602427],
[3.51489954, 88.97013878, -1.07431931],
[15.64602427, -1.07431931, 98.18223512]])
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
def test_svd_build(self):
# Ticket 627.
a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
m, n = a.shape
u, s, vh = linalg.svd(a)
b = dot(transpose(u[:, n:]), a)
assert_array_almost_equal(b, np.zeros((2, 2)))
def test_norm_vector_badarg(self):
# Regression for #786: Frobenius norm for vectors raises
# ValueError.
assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
# For bug #1482
a = array([[5.7998084, -2.1825367],
[-2.1825367, 9.85910595]], dtype='>f8')
b = array(a, dtype='<f8')
ap = linalg.cholesky(a)
bp = linalg.cholesky(b)
assert_array_equal(ap, bp)
def test_large_svd_32bit(self):
# See gh-4442, 64bit would require very large/slow matrices.
x = np.eye(1000, 66)
np.linalg.svd(x)
def test_svd_no_uv(self):
# gh-4733
for shape in (3, 4), (4, 4), (4, 3):
for t in float, complex:
a = np.ones(shape, dtype=t)
w = linalg.svd(a, compute_uv=False)
c = np.count_nonzero(np.absolute(w) > 0.5)
assert_equal(c, 1)
assert_equal(np.linalg.matrix_rank(a), 1)
assert_array_less(1, np.linalg.norm(a, ord=2))
def METHOD_NAME(self):
# gh-7575
testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
norm = linalg.norm(testvector)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testvector, ord=1)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype != np.dtype('float64'))
norm = linalg.norm(testvector, ord=2)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
assert_raises(ValueError, linalg.norm, testvector, ord='fro')
assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
assert_raises(ValueError, linalg.norm, testvector, ord=0)
assert_raises(ValueError, linalg.norm, testvector, ord=-1)
assert_raises(ValueError, linalg.norm, testvector, ord=-2)
testmatrix = np.array([[np.array([0, 1]), 0, 0],
[0, 0, 0]], dtype=object)
norm = linalg.norm(testmatrix)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testmatrix, ord='fro')
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
def test_lstsq_complex_larger_rhs(self):
# gh-9891
size = 20
n_rhs = 70
G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
b = G.dot(u)
# This should work without segmentation fault.
u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
# check results just in case
assert_array_almost_equal(u_lstsq, u) |
602 | test optional alternative | # coding=utf-8
import unittest
import string
from dragonfly.parsing.parse import spec_parser, CompoundTransformer
from dragonfly import Compound, Literal, Sequence, Optional, Empty, Alternative
# ===========================================================================
extras = {"an_extra": Alternative([Literal(u"1"), Literal(u"2")])}
def check_parse_tree(spec, expected):
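# Parse `spec` with the Lark grammar, build dragonfly elements via CompoundTransformer, and compare against `expected` using the element-tree string representation.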
tree = spec_parser.parse(spec)
output = CompoundTransformer(extras).transform(tree)
assert output.element_tree_string() == expected.element_tree_string()
return output
class TestLarkParser(unittest.TestCase):
def test_literal(self):
check_parse_tree("test ", Literal(u"test"))
def test_multiple_literals(self):
check_parse_tree("test hello world ", Literal(u"test hello world"))
def test_parens(self):
check_parse_tree("(test ) ", Literal(u"test"))
def test_punctuation(self):
check_parse_tree(",", Literal(u","))
check_parse_tree("test's ", Literal(u"test's"))
check_parse_tree("cul-de-sac ", Literal(u"cul-de-sac"))
def test_sequence(self):
check_parse_tree(
" test <an_extra> [op]",
Sequence([Literal(u"test"), extras["an_extra"], Optional(Literal(u"op"))]),
)
def test_alternative_no_parens(self):
check_parse_tree(
" test |[op] <an_extra>",
Alternative(
[
Literal(u"test"),
Sequence([Optional(Literal(u"op")), extras["an_extra"]]),
]
),
)
def test_alternative_parens(self):
check_parse_tree(
"( test |[op] <an_extra>)",
Alternative(
[
Literal(u"test"),
Sequence([Optional(Literal(u"op")), extras["an_extra"]]),
]
),
)
def METHOD_NAME(self):
check_parse_tree("[test|test's]", Optional(Alternative([Literal(u"test"), Literal(u"test's")])))
def test_digit_in_word(self):
check_parse_tree("F2", Literal(u"F2"))
def test_unicode(self):
check_parse_tree(u"touché", Literal(u"touché"))
def test_bool_special_in_sequence(self):
output = check_parse_tree(
" test <an_extra> [op] {test_special}",
Sequence([Literal(u"test"), extras["an_extra"], Optional(Literal(u"op"))]),
)
assert output.test_special == True
assert all(getattr(child, 'test_special', None) == None for child in output.children)
def test_other_special_in_sequence(self):
output = check_parse_tree(
" test <an_extra> [op] {test_special=4}",
Sequence([Literal(u"test"), extras["an_extra"], Optional(Literal(u"op"))]),
)
assert output.test_special == 4
assert all(getattr(child, 'test_special', None) == None for child in output.children)
def test_bool_special_in_alternative(self):
output = check_parse_tree(
"foo | bar {test_special} | baz",
Alternative([
Literal(u"foo"),
Literal(u"bar"),
Literal(u"baz"),
]),
)
assert getattr(output.children[0], 'test_special', None) == None
assert output.children[1].test_special == True
assert getattr(output.children[2], 'test_special', None) == None
def test_other_special_in_alternative(self):
output = check_parse_tree(
"foo | bar {test_special=4} | baz",
Alternative([
Literal(u"foo"),
Literal(u"bar"),
Literal(u"baz"),
]),
)
assert getattr(output.children[0], 'test_special', None) == None
assert output.children[1].test_special == 4
assert getattr(output.children[2], 'test_special', None) == None
# ===========================================================================
if __name__ == "__main__":
unittest.main() |
603 | main | #!/usr/bin/env python
#
#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
ClangFormat Diff Reformatter
============================
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-format-diff.py -p1 -i
svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i
"""
import argparse
import difflib
import re
import string
import subprocess
import StringIO
import sys
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
def METHOD_NAME():
parser = argparse.ArgumentParser(description=
'Reformat changed lines in diff. Without -i '
'option just output the diff that would be '
'introduced.')
parser.add_argument('-i', action='store_true', default=False,
help='apply edits to files instead of displaying a diff')
parser.add_argument('-p', metavar='NUM', default=0,
help='strip the smallest prefix containing P slashes')
parser.add_argument('-regex', metavar='PATTERN', default=None,
help='custom pattern selecting file paths to reformat '
'(case sensitive, overrides -iregex)')
parser.add_argument('-iregex', metavar='PATTERN', default=
r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto'
r'|protodevel|java)',
help='custom pattern selecting file paths to reformat '
'(case insensitive, overridden by -regex)')
parser.add_argument('-v', '--verbose', action='store_true',
help='be more verbose, ineffective without -i')
parser.add_argument(
'-style',
help=
'formatting style to apply (LLVM, Google, Chromium, Mozilla, WebKit)')
args = parser.parse_args()
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
match = re.search('^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
if match:
filename = match.group(2)
if filename == None:
continue
if args.regex is not None:
if not re.match('^%s$' % args.regex, filename):
continue
else:
if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
continue
match = re.search('^@@.*\+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
end_line = start_line + line_count - 1;
lines_by_file.setdefault(filename, []).extend(
['-lines', str(start_line) + ':' + str(end_line)])
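# Each hunk contributes a '-lines <start>:<end>' argument pair; all ranges for a file are passed to clang-format in a single invocation below.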
# Reformat files containing changes in place.
for filename, lines in lines_by_file.iteritems():
if args.i and args.verbose:
print 'Formatting', filename
command = [binary, filename]
if args.i:
command.append('-i')
command.extend(lines)
if args.style:
command.extend(['-style', args.style])
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=None, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
sys.exit(p.returncode);
if not args.i:
with open(filename) as f:
code = f.readlines()
formatted_code = StringIO.StringIO(stdout).readlines()
diff = difflib.unified_diff(code, formatted_code,
filename, filename,
'(before formatting)', '(after formatting)')
diff_string = string.join(diff, '')
if len(diff_string) > 0:
sys.stdout.write(diff_string)
if __name__ == '__main__':
METHOD_NAME() |
604 | left index | #!/usr/bin/env python2
"""
arith_parse.py: Parse shell-like and C-like arithmetic.
"""
from __future__ import print_function
import sys
import tdop
from tdop import CompositeNode
import demo_asdl
arith_expr = demo_asdl.arith_expr
op_id_e = demo_asdl.op_id_e
#
# Null Denotation -- token that takes nothing on the left
#
def NullConstant(p, token, bp):
if token.type == 'number':
return arith_expr.Const(token.val)
# We have to wrap a string in some kind of variant.
if token.type == 'name':
return arith_expr.ArithVar(token.val)
raise AssertionError(token.type)
def NullParen(p, token, bp):
""" Arithmetic grouping """
r = p.ParseUntil(bp)
p.Eat(')')
return r
def NullPrefixOp(p, token, bp):
"""Prefix operator.
Low precedence: return, raise, etc.
return x+y is return (x+y), not (return x) + y
High precedence: logical negation, bitwise complement, etc.
!x && y is (!x) && y, not !(x && y)
"""
r = p.ParseUntil(bp)
return CompositeNode(token, [r])
def NullIncDec(p, token, bp):
""" ++x or ++x[1] """
right = p.ParseUntil(bp)
if right.token.type not in ('name', 'get'):
raise tdop.ParseError("Can't assign to %r (%s)" % (right, right.token))
return CompositeNode(token, [right])
#
# Left Denotation -- token that takes an expression on the left
#
def LeftIncDec(p, token, left, rbp):
""" For i++ and i--
"""
if left.token.type not in ('name', 'get'):
raise tdop.ParseError("Can't assign to %r (%s)" % (left, left.token))
token.type = 'post' + token.type
return CompositeNode(token, [left])
def METHOD_NAME(p, token, left, unused_bp):
""" index f[x+1] """
# f[x] or f[x][y]
if not isinstance(left, demo_asdl.ArithVar):
raise tdop.ParseError("%s can't be indexed" % left)
index = p.ParseUntil(0)
if p.AtToken(':'):
p.Next()
end = p.ParseUntil(0)
else:
end = None
p.Eat(']')
# TODO: If you see ], then
# 1:4
# 1:4:2
# Both end and step are optional
if end:
return demo_asdl.Slice(left, index, end, None)
else:
return demo_asdl.Index(left, index)
def LeftTernary(p, token, left, bp):
""" e.g. a > 1 ? x : y """
true_expr = p.ParseUntil(bp)
p.Eat(':')
false_expr = p.ParseUntil(bp)
children = [left, true_expr, false_expr]
return CompositeNode(token, children)
def LeftBinaryOp(p, token, left, rbp):
""" Normal binary operator like 1+2 or 2*3, etc. """
if token.val == '+':
op_id_ = op_id_e.Plus
elif token.val == '-':
op_id_ = op_id_e.Minus
elif token.val == '*':
op_id_ = op_id_e.Star
else:
raise AssertionError(token.val)
return arith_expr.ArithBinary(op_id_, left, p.ParseUntil(rbp))
def LeftAssign(p, token, left, rbp):
""" Normal binary operator like 1+2 or 2*3, etc. """
# x += 1, or a[i] += 1
if left.token.type not in ('name', 'get'):
raise tdop.ParseError("Can't assign to %r (%s)" % (left, left.token))
return CompositeNode(token, [left, p.ParseUntil(rbp)])
def LeftComma(p, token, left, rbp):
""" foo, bar, baz
Could be sequencing operator, or tuple without parens
"""
r = p.ParseUntil(rbp)
if left.token.type == ',': # Keep adding more children
left.children.append(r)
return left
children = [left, r]
return CompositeNode(token, children)
# For overloading of , inside function calls
COMMA_PREC = 1
def LeftFuncCall(p, token, left, unused_bp):
""" Function call f(a, b). """
args = []
# f(x) or f[i](x)
if not isinstance(left, demo_asdl.ArithVar):
raise tdop.ParseError("%s can't be called" % left)
func_name = left.name # get a string
while not p.AtToken(')'):
# We don't want to grab the comma here, i.e. it is NOT the sequencing operator,
# so parse each argument at COMMA_PREC.
args.append(p.ParseUntil(COMMA_PREC))
if p.AtToken(','):
p.Next()
p.Eat(")")
return demo_asdl.FuncCall(func_name, args)
def MakeShellParserSpec():
"""
Create a parser.
Compare the code below with this table of C operator precedence:
http://en.cppreference.com/w/c/language/operator_precedence
"""
spec = tdop.ParserSpec()
spec.Left(31, LeftIncDec, ['++', '--'])
spec.Left(31, LeftFuncCall, ['('])
spec.Left(31, METHOD_NAME, ['['])
# 29 -- binds to everything except function call, indexing, postfix ops
spec.Null(29, NullIncDec, ['++', '--'])
spec.Null(29, NullPrefixOp, ['+', '!', '~', '-'])
# Right associative: 2 ** 3 ** 2 == 2 ** (3 ** 2)
spec.LeftRightAssoc(27, LeftBinaryOp, ['**'])
spec.Left(25, LeftBinaryOp, ['*', '/', '%'])
spec.Left(23, LeftBinaryOp, ['+', '-'])
spec.Left(21, LeftBinaryOp, ['<<', '>>'])
spec.Left(19, LeftBinaryOp, ['<', '>', '<=', '>='])
spec.Left(17, LeftBinaryOp, ['!=', '=='])
spec.Left(15, LeftBinaryOp, ['&'])
spec.Left(13, LeftBinaryOp, ['^'])
spec.Left(11, LeftBinaryOp, ['|'])
spec.Left(9, LeftBinaryOp, ['&&'])
spec.Left(7, LeftBinaryOp, ['||'])
spec.LeftRightAssoc(5, LeftTernary, ['?'])
# Right associative: a = b = 2 is a = (b = 2)
spec.LeftRightAssoc(3, LeftAssign, [
'=',
'+=', '-=', '*=', '/=', '%=',
'<<=', '>>=', '&=', '^=', '|='])
spec.Left(COMMA_PREC, LeftComma, [','])
# 0 precedence -- doesn't bind until )
spec.Null(0, NullParen, ['(']) # for grouping
# -1 precedence -- never used
spec.Null(-1, NullConstant, ['name', 'number'])
spec.Null(-1, tdop.NullError, [')', ']', ':', 'eof'])
return spec
def MakeParser(s):
"""Used by tests."""
spec = MakeShellParserSpec()
lexer = tdop.Tokenize(s)
p = tdop.Parser(spec, lexer)
return p
def ParseShell(s, expected=None):
"""Used by tests."""
p = MakeParser(s)
tree = p.Parse()
sexpr = repr(tree)
if expected is not None:
assert sexpr == expected, '%r != %r' % (sexpr, expected)
#print('%-40s %s' % (s, sexpr))
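# Example (sketch): ParseShell('1 + 2 * 3') should produce a tree equivalent to
# 1 + (2 * 3), since '*' is registered at precedence 25 and '+' at 23.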
return tree
def main(argv):
try:
s = argv[1]
except IndexError:
print('Usage: ./arith_parse.py EXPRESSION')
else:
try:
tree = ParseShell(s)
except tdop.ParseError as e:
print('Error parsing %r: %s' % (s, e), file=sys.stderr)
print(tree)
if __name__ == '__main__':
main(sys.argv) |
605 | disable session cookies | import json
import secrets
from pathlib import Path
from typing import Any, Dict
from flask.sessions import SecureCookieSessionInterface
from flask_mongoengine import MongoEngine
from flask_security import ConfirmRegisterForm, MongoEngineUserDatastore, Security, UserDatastore
from wtforms import StringField
from common.utils.file_utils import open_new_securely_permissioned_file
from monkey_island.cc.mongo_consts import MONGO_DB_HOST, MONGO_DB_NAME, MONGO_DB_PORT, MONGO_URL
from . import AccountRole
from .role import Role
from .user import User
SECRET_FILE_NAME = ".flask_security_configuration.json"
ACCESS_TOKEN_TTL = 15 * 60 # 15 minutes
def configure_flask_security(app, data_dir: Path) -> Security:
_setup_flask_mongo(app)
flask_security_config = _generate_flask_security_configuration(data_dir)
app.config["SECRET_KEY"] = flask_security_config["secret_key"]
app.config["SECURITY_PASSWORD_SALT"] = flask_security_config["password_salt"]
app.config["SECURITY_USERNAME_ENABLE"] = True
app.config["SECURITY_USERNAME_REQUIRED"] = True
app.config["SECURITY_REGISTERABLE"] = True
app.config["SECURITY_SEND_REGISTER_EMAIL"] = False
app.config["SECURITY_TOKEN_MAX_AGE"] = ACCESS_TOKEN_TTL
app.config["SECURITY_RETURN_GENERIC_RESPONSES"] = True
# Ignore CSRF, because we don't store tokens in cookies
app.config["WTF_CSRF_CHECK_DEFAULT"] = False
app.config["SECURITY_CSRF_IGNORE_UNAUTH_ENDPOINTS"] = True
# Forbid sending authentication token in URL parameters
app.config["SECURITY_TOKEN_AUTHENTICATION_KEY"] = None
# The database object needs to be created after we configure the flask application
db = MongoEngine(app)
user_datastore = MongoEngineUserDatastore(db, User, Role)
_create_roles(user_datastore)
class CustomConfirmRegisterForm(ConfirmRegisterForm):
# We don't use the email, but the field is required by ConfirmRegisterForm.
# Email validators need to be overridden, otherwise an error about invalid email is raised.
email = StringField("Email", default="[email protected]", validators=[])
def to_dict(self, only_user):
registration_dict = super().to_dict(only_user)
registration_dict.update({"roles": [AccountRole.ISLAND_INTERFACE.name]})
return registration_dict
app.security = Security(
app,
user_datastore,
confirm_register_form=CustomConfirmRegisterForm,
register_blueprint=False,
)
# Force Security to always respond as an API rather than HTTP server
# This will cause 401 response instead of 301 for unauthorized requests for example
app.security._want_json = lambda _request: True
app.session_interface = METHOD_NAME()
return app.security
def _setup_flask_mongo(app):
app.config["MONGO_URI"] = MONGO_URL
app.config["MONGODB_SETTINGS"] = [
{
"db": MONGO_DB_NAME,
"host": MONGO_DB_HOST,
"port": MONGO_DB_PORT,
}
]
def _generate_flask_security_configuration(data_dir: Path) -> Dict[str, Any]:
secret_file_path = str(data_dir / SECRET_FILE_NAME)
try:
with open(secret_file_path, "r") as secret_file:
return json.load(secret_file)
except FileNotFoundError:
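# First run: generate a fresh secret key and password salt and persist them with restrictive permissions so they are reused on subsequent startups.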
with open_new_securely_permissioned_file(secret_file_path, "w") as secret_file:
secret_key = secrets.token_urlsafe(32)
password_salt = str(secrets.SystemRandom().getrandbits(128))
security_options = {"secret_key": secret_key, "password_salt": password_salt}
json.dump(security_options, secret_file)
return security_options
def _create_roles(user_datastore: UserDatastore):
user_datastore.find_or_create_role(name=AccountRole.ISLAND_INTERFACE.name)
user_datastore.find_or_create_role(name=AccountRole.AGENT.name)
def METHOD_NAME() -> SecureCookieSessionInterface:
class CustomSessionInterface(SecureCookieSessionInterface):
"""Prevent creating session from API requests."""
def should_set_cookie(self, *args, **kwargs):
return False
def save_session(self, *args, **kwargs):
return
return CustomSessionInterface() |
606 | get boozer surface | from pathlib import Path
import numpy as np
from simsopt.configs import get_ncsx_data
from simsopt.field import coils_via_symmetries, BiotSavart
from simsopt.geo import Volume, Area, ToroidalFlux, SurfaceXYZFourier, SurfaceRZFourier, SurfaceXYZTensorFourier, BoozerSurface, MajorRadius
TEST_DIR = Path(__file__).parent / ".." / "test_files"
def get_surface(surfacetype, stellsym, phis=None, thetas=None, mpol=5, ntor=5,
nphi=None, ntheta=None, full=False, nfp=3):
if nphi is None:
nphi = 11 if surfacetype == "SurfaceXYZTensorFourier" else 15
if ntheta is None:
ntheta = 11 if surfacetype == "SurfaceXYZTensorFourier" else 15
if phis is None:
phis = np.linspace(0, 1/nfp, nphi, endpoint=False)
if thetas is None:
if (surfacetype == "SurfaceXYZTensorFourier" or full == True):
thetas = np.linspace(0, 1, ntheta, endpoint=False)
else:
thetas = np.linspace(0, 1/(1. + int(stellsym)), ntheta, endpoint=False)
if surfacetype == "SurfaceXYZFourier":
s = SurfaceXYZFourier(mpol=mpol, ntor=ntor, nfp=nfp, stellsym=stellsym,
quadpoints_phi=phis, quadpoints_theta=thetas)
elif surfacetype == "SurfaceRZFourier":
s = SurfaceRZFourier(mpol=mpol, ntor=ntor, nfp=nfp, stellsym=stellsym,
quadpoints_phi=phis, quadpoints_theta=thetas)
elif surfacetype == "SurfaceXYZTensorFourier":
s = SurfaceXYZTensorFourier(mpol=mpol, ntor=ntor, nfp=nfp, stellsym=stellsym,
clamped_dims=[False, False, False],
quadpoints_phi=phis, quadpoints_theta=thetas
)
else:
raise Exception("surface type not implemented")
return s
def get_exact_surface(surface_type='SurfaceXYZFourier'):
"""
Returns a boozer exact surface that will be used in unit tests.
"""
filename_X = TEST_DIR / 'NCSX_test_data'/'X.dat'
filename_Y = TEST_DIR / 'NCSX_test_data'/'Y.dat'
filename_Z = TEST_DIR / 'NCSX_test_data'/'Z.dat'
X = np.loadtxt(filename_X)
Y = np.loadtxt(filename_Y)
Z = np.loadtxt(filename_Z)
xyz = np.concatenate((X[:, :, None], Y[:, :, None], Z[:, :, None]), axis=2)
ntor = 16
mpol = 10
nfp = 1
stellsym = False
nphi = 33
ntheta = 21
phis = np.linspace(0, 1, nphi, endpoint=False)
thetas = np.linspace(0, 1, ntheta, endpoint=False)
if surface_type == 'SurfaceXYZFourier':
s = SurfaceXYZFourier(mpol=mpol, ntor=ntor, nfp=nfp, stellsym=stellsym,
quadpoints_phi=phis, quadpoints_theta=thetas)
elif surface_type == 'SurfaceXYZTensorFourier':
s = SurfaceXYZTensorFourier(mpol=mpol, ntor=ntor, nfp=nfp, stellsym=stellsym,
quadpoints_phi=phis, quadpoints_theta=thetas)
else:
raise Exception("surface type not implemented")
s.least_squares_fit(xyz)
return s
def METHOD_NAME(label="Volume", nphi=None, ntheta=None):
"""
Returns a boozer surface that will be used in unit tests.
"""
assert label == "Volume" or label == "ToroidalFlux" or label == "Area"
base_curves, base_currents, ma = get_ncsx_data()
coils = coils_via_symmetries(base_curves, base_currents, 3, True)
bs = BiotSavart(coils)
current_sum = sum(abs(c.current.get_value()) for c in coils)
G0 = 2. * np.pi * current_sum * (4 * np.pi * 10**(-7) / (2 * np.pi))
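# Numerically G0 equals mu0 * (sum of |coil currents|): the 2*pi factors cancel. It is only used as an initial guess for G in the Boozer solve below.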
## RESOLUTION DETAILS OF SURFACE ON WHICH WE OPTIMIZE FOR QA
mpol = 6
ntor = 6
stellsym = True
nfp = 3
phis = np.linspace(0, 1/nfp, 2*ntor+1, endpoint=False)
thetas = np.linspace(0, 1, 2*mpol+1, endpoint=False)
s = SurfaceXYZTensorFourier(
mpol=mpol, ntor=ntor, stellsym=stellsym, nfp=nfp, quadpoints_phi=phis, quadpoints_theta=thetas)
s.fit_to_curve(ma, 0.1, flip_theta=True)
iota = -0.406
if label == "Volume":
lab = Volume(s, nphi=nphi, ntheta=ntheta)
lab_target = lab.J()
elif label == "ToroidalFlux":
bs_tf = BiotSavart(coils)
lab = ToroidalFlux(s, bs_tf, nphi=nphi, ntheta=ntheta)
lab_target = lab.J()
elif label == "Area":
lab = Area(s, nphi=nphi, ntheta=ntheta)
lab_target = lab.J()
## COMPUTE THE SURFACE
boozer_surface = BoozerSurface(bs, s, lab, lab_target)
res = boozer_surface.solve_residual_equation_exactly_newton(tol=1e-13, maxiter=20, iota=iota, G=G0)
print(f"NEWTON {res['success']}: iter={res['iter']}, iota={res['iota']:.3f}, vol={s.volume():.3f}")
return bs, boozer_surface |
607 | setup ui | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'rare/ui/components/tabs/store/wishlist.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Wishlist(object):
def METHOD_NAME(self, Wishlist):
Wishlist.setObjectName("Wishlist")
Wishlist.resize(736, 398)
Wishlist.setWindowTitle("StackedWidget")
self.page = QtWidgets.QWidget()
self.page.setObjectName("page")
self.verticalLayout = QtWidgets.QVBoxLayout(self.page)
self.verticalLayout.setObjectName("verticalLayout")
self.scroll_area = QtWidgets.QScrollArea(self.page)
self.scroll_area.setWidgetResizable(True)
self.scroll_area.setObjectName("scroll_area")
self.scroll_widget = QtWidgets.QWidget()
self.scroll_widget.setGeometry(QtCore.QRect(0, 0, 716, 378))
self.scroll_widget.setObjectName("scroll_widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.scroll_widget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.title_label = QtWidgets.QLabel(self.scroll_widget)
font = QtGui.QFont()
font.setPointSize(15)
self.title_label.setFont(font)
self.title_label.setObjectName("title_label")
self.verticalLayout_2.addWidget(self.title_label)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.sort_label = QtWidgets.QLabel(self.scroll_widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sort_label.sizePolicy().hasHeightForWidth())
self.sort_label.setSizePolicy(sizePolicy)
self.sort_label.setObjectName("sort_label")
self.horizontalLayout.addWidget(self.sort_label)
self.sort_cb = QtWidgets.QComboBox(self.scroll_widget)
self.sort_cb.setObjectName("sort_cb")
self.sort_cb.addItem("")
self.sort_cb.addItem("")
self.sort_cb.addItem("")
self.sort_cb.addItem("")
self.horizontalLayout.addWidget(self.sort_cb)
self.reverse = QtWidgets.QCheckBox(self.scroll_widget)
self.reverse.setObjectName("reverse")
self.horizontalLayout.addWidget(self.reverse)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.filter_label = QtWidgets.QLabel(self.scroll_widget)
self.filter_label.setObjectName("filter_label")
self.horizontalLayout.addWidget(self.filter_label)
self.filter_cb = QtWidgets.QComboBox(self.scroll_widget)
self.filter_cb.setObjectName("filter_cb")
self.filter_cb.addItem("")
self.filter_cb.addItem("")
self.horizontalLayout.addWidget(self.filter_cb)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.reload_button = QtWidgets.QPushButton(self.scroll_widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.reload_button.sizePolicy().hasHeightForWidth())
self.reload_button.setSizePolicy(sizePolicy)
self.reload_button.setText("")
self.reload_button.setObjectName("reload_button")
self.horizontalLayout.addWidget(self.reload_button)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.list_layout = QtWidgets.QVBoxLayout()
self.list_layout.setObjectName("list_layout")
self.verticalLayout_2.addLayout(self.list_layout)
self.no_games_label = QtWidgets.QLabel(self.scroll_widget)
self.no_games_label.setObjectName("no_games_label")
self.verticalLayout_2.addWidget(self.no_games_label)
spacerItem2 = QtWidgets.QSpacerItem(379, 218, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem2)
self.scroll_area.setWidget(self.scroll_widget)
self.verticalLayout.addWidget(self.scroll_area)
Wishlist.addWidget(self.page)
self.retranslateUi(Wishlist)
def retranslateUi(self, Wishlist):
_translate = QtCore.QCoreApplication.translate
self.title_label.setText(_translate("Wishlist", "Wishlist"))
self.sort_label.setText(_translate("Wishlist", "Sort by"))
self.sort_cb.setItemText(0, _translate("Wishlist", "Name"))
self.sort_cb.setItemText(1, _translate("Wishlist", "Price"))
self.sort_cb.setItemText(2, _translate("Wishlist", "Developer"))
self.sort_cb.setItemText(3, _translate("Wishlist", "Discount"))
self.reverse.setText(_translate("Wishlist", "Reverse"))
self.filter_label.setText(_translate("Wishlist", "Filter:"))
self.filter_cb.setItemText(0, _translate("Wishlist", "None"))
self.filter_cb.setItemText(1, _translate("Wishlist", "Discount"))
self.no_games_label.setText(_translate("Wishlist", "No games matching your filter"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Wishlist = QtWidgets.QStackedWidget()
ui = Ui_Wishlist()
ui.METHOD_NAME(Wishlist)
Wishlist.show()
sys.exit(app.exec_()) |
608 | warn | # Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from enum import Enum
from sys import stdout
from traceback import format_exception
from typing import IO, Optional, Union
from mlrun.config import config
class JSONFormatter(logging.Formatter):
def __init__(self):
super(JSONFormatter, self).__init__()
self._json_encoder = json.JSONEncoder()
def format(self, record):
record_with = getattr(record, "with", {})
if record.exc_info:
record_with.update(exc_info=format_exception(*record.exc_info))
record_fields = {
"datetime": self.formatTime(record, self.datefmt),
"level": record.levelname.lower(),
"message": record.getMessage(),
"with": record_with,
}
return self._json_encoder.encode(record_fields)
class HumanReadableFormatter(logging.Formatter):
def format(self, record):
record_with = self._record_with(record)
more = f": {record_with}" if record_with else ""
return f"> {self.formatTime(record, self.datefmt)} [{record.levelname.lower()}] {record.getMessage()}{more}"
def _record_with(self, record):
record_with = getattr(record, "with", {})
if record.exc_info:
record_with.update(exc_info=format_exception(*record.exc_info))
return record_with
class HumanReadableExtendedFormatter(HumanReadableFormatter):
def format(self, record):
record_with = self._record_with(record)
more = f": {record_with}" if record_with else ""
return (
"> "
f"{self.formatTime(record, self.datefmt)} "
f"[{record.name}:{record.levelname.lower()}] "
f"{record.getMessage()}{more}"
)
class Logger(object):
def __init__(
self,
level,
name="mlrun",
propagate=True,
logger: Optional[logging.Logger] = None,
):
self._logger = logger or logging.getLogger(name)
self._logger.propagate = propagate
self._logger.setLevel(level)
self._bound_variables = {}
for log_level_func in [
self.exception,
self.error,
self.METHOD_NAME,
self.warning,
self.info,
self.debug,
]:
setattr(self, f"{log_level_func.__name__}_with", log_level_func)
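# Exposes e.g. self.info_with / self.error_with as aliases of the plain methods; both accept structured keyword arguments via _update_bound_vars_and_log.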
def set_handler(
self, handler_name: str, file: IO[str], formatter: logging.Formatter
):
# check if there's a handler by this name
for handler in self._logger.handlers:
if handler.name == handler_name:
self._logger.removeHandler(handler)
break
# create a stream handler from the file
stream_handler = logging.StreamHandler(file)
stream_handler.name = handler_name
# set the formatter
stream_handler.setFormatter(formatter)
# add the handler to the logger
self._logger.addHandler(stream_handler)
def get_child(self, suffix):
"""
Get a child logger with the given suffix.
This is useful for when you want to have a logger for a specific component.
Once the formatter will support logger name, it will be easier to understand
which component logged the message.
:param suffix: The suffix to add to the logger name.
"""
return Logger(
self.level,
# name is not set as it is provided by the "getChild"
name="",
# allowing child to delegate events logged to ancestor logger
# not doing so, will leave log lines not being handled
propagate=True,
logger=self._logger.getChild(suffix),
)
@property
def level(self):
return self._logger.level
def set_logger_level(self, level: Union[str, int]):
self._logger.setLevel(level)
def replace_handler_stream(self, handler_name: str, file: IO[str]):
for handler in self._logger.handlers:
if handler.name == handler_name:
handler.stream = file
return
raise ValueError(f"Logger does not have a handler named '{handler_name}'")
def debug(self, message, *args, **kw_args):
self._update_bound_vars_and_log(logging.DEBUG, message, *args, **kw_args)
def info(self, message, *args, **kw_args):
self._update_bound_vars_and_log(logging.INFO, message, *args, **kw_args)
def METHOD_NAME(self, message, *args, **kw_args):
self._update_bound_vars_and_log(logging.WARNING, message, *args, **kw_args)
def warning(self, message, *args, **kw_args):
self.METHOD_NAME(message, *args, **kw_args)
def error(self, message, *args, **kw_args):
self._update_bound_vars_and_log(logging.ERROR, message, *args, **kw_args)
def exception(self, message, *args, exc_info=True, **kw_args):
self._update_bound_vars_and_log(
logging.ERROR, message, *args, exc_info=exc_info, **kw_args
)
def bind(self, **kw_args):
self._bound_variables.update(kw_args)
def _update_bound_vars_and_log(
self, level, message, *args, exc_info=None, **kw_args
):
kw_args.update(self._bound_variables)
if kw_args:
self._logger.log(
level, message, *args, exc_info=exc_info, extra={"with": kw_args}
)
return
self._logger.log(level, message, *args, exc_info=exc_info)
class FormatterKinds(Enum):
HUMAN = "human"
HUMAN_EXTENDED = "human_extended"
JSON = "json"
def _create_formatter_instance(formatter_kind: FormatterKinds) -> logging.Formatter:
return {
FormatterKinds.HUMAN: HumanReadableFormatter(),
FormatterKinds.HUMAN_EXTENDED: HumanReadableExtendedFormatter(),
FormatterKinds.JSON: JSONFormatter(),
}[formatter_kind]
def create_logger(
level: str = None,
formatter_kind: str = FormatterKinds.HUMAN.name,
name: str = "mlrun",
stream=stdout,
):
level = level or config.log_level or "info"
level = logging.getLevelName(level.upper())
# create logger instance
logger_instance = Logger(level, name=name, propagate=False)
# resolve formatter
formatter_instance = _create_formatter_instance(
FormatterKinds(formatter_kind.lower())
)
# set handler
logger_instance.set_handler("default", stream or stdout, formatter_instance)
return logger_instance |
609 | configure | #!/usr/bin/env python
# encoding: utf-8
# Hans-Martin von Gaudecker, 2012
"""
Run a Stata do-script in the directory specified by **ctx.bldnode**. The
first and only argument will be the name of the do-script (no extension),
which can be accessed inside the do-script by the local macro `1'. Useful
for keeping a log file.
The tool uses the log file that is automatically kept by Stata only
for error-catching purposes; it will be destroyed if the task finishes
without error. In case of an error in **some_script.do**, you can inspect
it as **some_script.log** in the **ctx.bldnode** directory.
Note that Stata will not return an error code if it exits abnormally --
catching errors relies on parsing the log file mentioned before. Should
the parser behave incorrectly please send an email to hmgaudecker [at] gmail.
**WARNING**
The tool will not work if multiple do-scripts of the same name---but in
different directories---are run at the same time! Avoid this situation.
Usage::
ctx(features='run_do_script',
source='some_script.do',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv')
"""
import os, re, sys
from waflib import Task, TaskGen, Logs
if sys.platform == 'darwin':
STATA_COMMANDS = ['Stata64MP', 'StataMP',
'Stata64SE', 'StataSE',
'Stata64', 'Stata']
STATAFLAGS = '-e -q do'
STATAENCODING = 'MacRoman'
elif sys.platform.startswith('linux'):
STATA_COMMANDS = ['stata-mp', 'stata-se', 'stata']
STATAFLAGS = '-b -q do'
# Not sure whether this is correct...
STATAENCODING = 'Latin-1'
elif sys.platform.lower().startswith('win'):
STATA_COMMANDS = ['StataMP-64', 'StataMP-ia',
'StataMP', 'StataSE-64',
'StataSE-ia', 'StataSE',
'Stata-64', 'Stata-ia',
'Stata.e', 'WMPSTATA',
'WSESTATA', 'WSTATA']
STATAFLAGS = '/e do'
STATAENCODING = 'Latin-1'
else:
raise Exception("Unknown sys.platform: %s " % sys.platform)
def METHOD_NAME(ctx):
ctx.find_program(STATA_COMMANDS, var='STATACMD', errmsg="""\n
No Stata executable found!\n\n
If Stata is needed:\n
1) Check the settings of your system path.
2) Note we are looking for Stata executables called: %s
If yours has a different name, please report to hmgaudecker [at] gmail\n
Else:\n
Do not load the 'run_do_script' tool in the main wscript.\n\n""" % STATA_COMMANDS)
ctx.env.STATAFLAGS = STATAFLAGS
ctx.env.STATAENCODING = STATAENCODING
class run_do_script_base(Task.Task):
"""Run a Stata do-script from the bldnode directory."""
run_str = '"${STATACMD}" ${STATAFLAGS} "${SRC[0].abspath()}" "${DOFILETRUNK}"'
shell = True
class run_do_script(run_do_script_base):
"""Use the log file automatically kept by Stata for error-catching.
Erase it if the task finished without error. If not, it will show
up as do_script.log in the bldnode directory.
"""
def run(self):
run_do_script_base.run(self)
ret, log_tail = self.check_erase_log_file()
if ret:
Logs.error("""Running Stata on %r failed with code %r.\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""",
self.inputs[0], ret, self.env.LOGFILEPATH, log_tail)
return ret
def check_erase_log_file(self):
"""Parse Stata's default log file and erase it if everything okay.
Parser is based on Brendan Halpin's shell script found here:
http://teaching.sociology.ul.ie/bhalpin/wordpress/?p=122
"""
if sys.version_info.major >= 3:
kwargs = {'file': self.env.LOGFILEPATH, 'mode': 'r', 'encoding': self.env.STATAENCODING}
else:
kwargs = {'name': self.env.LOGFILEPATH, 'mode': 'r'}
with open(**kwargs) as log:
log_tail = log.readlines()[-10:]
for line in log_tail:
error_found = re.match(r"r\(([0-9]+)\)", line)
if error_found:
return error_found.group(1), ''.join(log_tail)
else:
pass
# Only end up here if the parser did not identify an error.
os.remove(self.env.LOGFILEPATH)
return None, None
@TaskGen.feature('run_do_script')
@TaskGen.before_method('process_source')
def apply_run_do_script(tg):
"""Task generator customising the options etc. to call Stata in batch
mode for running a do-script.
"""
# Convert sources and targets to nodes
src_node = tg.path.find_resource(tg.source)
tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
tsk = tg.create_task('run_do_script', src=src_node, tgt=tgt_nodes)
tsk.env.DOFILETRUNK = os.path.splitext(src_node.name)[0]
tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s.log' % (tsk.env.DOFILETRUNK))
# dependencies (if the attribute 'deps' changes, trigger a recompilation)
for x in tg.to_list(getattr(tg, 'deps', [])):
node = tg.path.find_resource(x)
if not node:
tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath()))
tsk.dep_nodes.append(node)
Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath())
# Bypass the execution of process_source by setting the source to an empty list
tg.source = []
|
610 | datetime to pretty str | # -*- coding: utf-8
# utility
# *******
#
# Utility Functions
import re
import uuid
from datetime import datetime, timedelta
from twisted.internet import reactor
from twisted.internet.defer import Deferred
def get_distribution_codename():
try:
with open("/etc/os-release", "r") as fd:
for line in fd:
key, value = line.split("=")
if key == "VERSION_CODENAME":
return value.strip().strip("\"")
except:
pass
return ""
def uuid4():
"""
This function returns a uuid4.
"""
return str(uuid.uuid4())
def sum_dicts(*dicts):
ret = {}
for d in dicts:
for k, v in d.items():
ret[k] = v
return ret
def deferred_sleep(timeout):
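# Non-blocking sleep for Twisted: returns a Deferred that fires with True after `timeout` seconds via reactor.callLater.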
d = Deferred()
reactor.callLater(timeout, d.callback, True)
return d
def msdos_encode(s):
"""
This function returns a new string with all occurrences of newlines
prepended with a carriage return.
"""
return re.sub(r'(\r\n)|(\n)', '\r\n', s)
def iso_strf_time(d):
return d.strftime("%Y-%m-%d %H:%M:%S.%f")
def datetime_null():
"""
:return: a utc datetime object representing a null date
"""
return datetime(1970, 1, 1, 0, 0)
def datetime_now():
"""
:return: a utc datetime object representing the current time
"""
return datetime.utcnow()
def datetime_never():
"""
:return: a utc datetime object representing the 1st January 3000
"""
return datetime(3000, 1, 1, 0, 0)
def get_expiration(days):
"""
:return: a utc datetime object representing an expiration time calculated as the current date + N days
"""
date = datetime.utcnow()
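# Anchor to midnight UTC of the current day and add days+1, so an expiration of N days lasts until the end of the Nth full day.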
return datetime(year=date.year, month=date.month, day=date.day, hour=00, minute=00, second=00) + timedelta(days+1)
def is_expired(check_date, seconds=0, minutes=0, hours=0, days=0):
"""
"""
total_hours = (days * 24) + hours
check = check_date + timedelta(seconds=seconds, minutes=minutes, hours=total_hours)
return datetime_now() > check
def datetime_to_ISO8601(date):
"""
Convert a datetime into an ISO8601 date
"""
if date is None:
date = datetime_null()
return date.isoformat() + "Z" # Z means that the date is in UTC
def METHOD_NAME(date):
"""
Print a datetime in pretty formatted str format
"""
return date.strftime("%A %d %B %Y %H:%M (UTC)")
def datetime_to_day_str(date, tz=0):
"""
Print a datetime in DD/MM/YYYY formatted str
"""
if tz != 0:
tz_i, tz_d = divmod(tz, 1)
tz_d, _ = divmod(tz_d * 100, 1)
date += timedelta(hours=tz_i, minutes=tz_d)
return date.strftime("%d/%m/%Y")
def ISO8601_to_pretty_str(isodate, tz=0):
"""
Convert an ISO8601 date into a pretty formatted str
"""
if isodate is None:
isodate = datetime_null().isoformat()
date = datetime(year=int(isodate[0:4]),
month=int(isodate[5:7]),
day=int(isodate[8:10]),
hour=int(isodate[11:13]),
minute=int(isodate[14:16]),
second=int(isodate[17:19]))
if tz != 0:
tz_i, tz_d = divmod(tz, 1)
tz_d, _ = divmod(tz_d * 100, 1)
date += timedelta(hours=tz_i, minutes=tz_d)
return date.strftime("%A %d %B %Y %H:%M")
return METHOD_NAME(date)
def iso_year_start(iso_year):
"""Returns the gregorian calendar date of the first day of the given ISO year"""
fourth_jan = datetime.strptime('{0}-01-04'.format(iso_year), '%Y-%m-%d')
delta = timedelta(fourth_jan.isoweekday() - 1)
return fourth_jan - delta
def iso_to_gregorian(iso_year, iso_week, iso_day):
"""Returns gregorian calendar date for the given ISO year, week and day"""
year_start = iso_year_start(iso_year)
return year_start + timedelta(days=iso_day - 1, weeks=iso_week - 1)
def bytes_to_pretty_str(b):
if isinstance(b, str):
b = int(b)
if b >= 1000000000:
return "%dGB" % int(b / 1000000000)
if b >= 1000000:
return "%dMB" % int(b / 1000000)
return "%dKB" % int(b / 1000) |
611 | exprdouble | import sys
from typing import Any, ClassVar
from typing_extensions import Literal, final
# _tkinter is meant to be only used internally by tkinter, but some tkinter
# functions e.g. return _tkinter.Tcl_Obj objects. Tcl_Obj represents a Tcl
# object that hasn't been converted to a string.
#
# There are not many ways to get Tcl_Objs from tkinter, and I'm not sure if the
# only existing ways are supposed to return Tcl_Objs as opposed to returning
# strings. Here's one of these things that return Tcl_Objs:
#
# >>> import tkinter
# >>> text = tkinter.Text()
# >>> text.tag_add('foo', '1.0', 'end')
# >>> text.tag_ranges('foo')
# (<textindex object: '1.0'>, <textindex object: '2.0'>)
@final
class Tcl_Obj:
@property
def string(self) -> str: ...
@property
def typename(self) -> str: ...
__hash__: ClassVar[None] # type: ignore[assignment]
def __eq__(self, __value): ...
def __ge__(self, __value): ...
def __gt__(self, __value): ...
def __le__(self, __value): ...
def __lt__(self, __value): ...
def __ne__(self, __value): ...
class TclError(Exception): ...
# This class allows running Tcl code. Tkinter uses it internally a lot, and
# it's often handy to drop a piece of Tcl code into a tkinter program. Example:
#
# >>> import tkinter, _tkinter
# >>> tkapp = tkinter.Tk().tk
# >>> isinstance(tkapp, _tkinter.TkappType)
# True
# >>> tkapp.call('set', 'foo', (1,2,3))
# (1, 2, 3)
# >>> tkapp.eval('return $foo')
# '1 2 3'
# >>>
#
# call args can be pretty much anything. Also, call(some_tuple) is same as call(*some_tuple).
#
# eval always returns str because _tkinter_tkapp_eval_impl in _tkinter.c calls
# Tkapp_UnicodeResult, and it returns a string when it succeeds.
@final
class TkappType:
# Please keep in sync with tkinter.Tk
def adderrorinfo(self, __msg): ...
def call(self, __command: Any, *args: Any) -> Any: ...
def createcommand(self, __name, __func): ...
if sys.platform != "win32":
def createfilehandler(self, __file, __mask, __func): ...
def deletefilehandler(self, __file): ...
def createtimerhandler(self, __milliseconds, __func): ...
def deletecommand(self, __name): ...
def dooneevent(self, __flags: int = 0): ...
def eval(self, __script: str) -> str: ...
def evalfile(self, __fileName): ...
def exprboolean(self, __s): ...
def METHOD_NAME(self, __s): ...
def exprlong(self, __s): ...
def exprstring(self, __s): ...
def getboolean(self, __arg): ...
def getdouble(self, __arg): ...
def getint(self, __arg): ...
def getvar(self, *args, **kwargs): ...
def globalgetvar(self, *args, **kwargs): ...
def globalsetvar(self, *args, **kwargs): ...
def globalunsetvar(self, *args, **kwargs): ...
def interpaddr(self): ...
def loadtk(self) -> None: ...
def mainloop(self, __threshold: int = 0): ...
def quit(self): ...
def record(self, __script): ...
def setvar(self, *args, **kwargs): ...
if sys.version_info < (3, 11):
def split(self, __arg): ...
def splitlist(self, __arg): ...
def unsetvar(self, *args, **kwargs): ...
def wantobjects(self, *args, **kwargs): ...
def willdispatch(self): ...
# These should be kept in sync with tkinter.tix constants, except ALL_EVENTS which doesn't match TCL_ALL_EVENTS
ALL_EVENTS: Literal[-3]
FILE_EVENTS: Literal[8]
IDLE_EVENTS: Literal[32]
TIMER_EVENTS: Literal[16]
WINDOW_EVENTS: Literal[4]
DONT_WAIT: Literal[2]
EXCEPTION: Literal[8]
READABLE: Literal[2]
WRITABLE: Literal[4]
TCL_VERSION: str
TK_VERSION: str
@final
class TkttType:
def deletetimerhandler(self): ...
if sys.version_info >= (3, 8):
def create(
__screenName: str | None = None,
__baseName: str = "",
__className: str = "Tk",
__interactive: bool = False,
__wantobjects: bool = False,
__wantTk: bool = True,
__sync: bool = False,
__use: str | None = None,
): ...
else:
def create(
__screenName: str | None = None,
__baseName: str | None = None,
__className: str = "Tk",
__interactive: bool = False,
__wantobjects: bool = False,
__wantTk: bool = True,
__sync: bool = False,
__use: str | None = None,
): ...
def getbusywaitinterval(): ...
def setbusywaitinterval(__new_val): ... |
612 | set up | from decimal import Decimal as D
from django.urls import reverse
from oscar.apps.dashboard.views import IndexView
from oscar.apps.order.models import Order
from oscar.core import prices
from oscar.core.loading import get_model
from oscar.test.factories import (
UserFactory,
create_basket,
create_order,
create_product,
)
from oscar.test.testcases import WebTestCase
StockAlert = get_model("partner", "StockAlert")
GENERIC_STATS_KEYS = (
"total_orders_last_day",
"total_lines_last_day",
"average_order_costs",
"total_revenue_last_day",
"hourly_report_dict",
"total_customers_last_day",
"total_open_baskets_last_day",
"total_products",
"total_open_stock_alerts",
"total_closed_stock_alerts",
"total_customers",
"total_open_baskets",
"total_orders",
"total_lines",
"total_revenue",
"order_status_breakdown",
)
STAFF_STATS_KEYS = (
"offer_maps",
"total_vouchers",
)
class TestDashboardIndexForAnonUser(WebTestCase):
is_anonymous = True
def test_is_not_available(self):
response = self.get(reverse("dashboard:index")).follow()
self.assertContains(response, "username", status_code=200)
class TestDashboardIndexForStaffUser(WebTestCase):
is_staff = True
def test_is_available(self):
urls = (
"dashboard:index",
"dashboard:order-list",
"dashboard:users-index",
)
for name in urls:
response = self.get(reverse(name))
self.assertContains(response, "Welcome")
def test_includes_hourly_report_with_no_orders(self):
report = IndexView().get_hourly_report(Order.objects.all())
self.assertEqual(len(report), 3)
keys = ["max_revenue", "order_total_hourly", "y_range"]
for i, j in zip(sorted(report.keys()), keys):
self.assertEqual(i, j)
self.assertEqual(len(report["order_total_hourly"]), 12)
self.assertEqual(len(report["y_range"]), 0)
self.assertEqual(report["max_revenue"], 0)
def test_includes_hourly_report_with_orders(self):
create_order(total=prices.Price("GBP", excl_tax=D("34.05"), tax=D("0.00")))
create_order(total=prices.Price("GBP", excl_tax=D("21.90"), tax=D("0.00")))
report = IndexView().get_hourly_report(Order.objects.all())
self.assertEqual(len(report["order_total_hourly"]), 12)
self.assertEqual(len(report["y_range"]), 11)
self.assertEqual(report["max_revenue"], D("60"))
def test_has_stats_vars_in_context(self):
response = self.get(reverse("dashboard:index"))
for key in GENERIC_STATS_KEYS + STAFF_STATS_KEYS:
self.assertInContext(response, key)
def test_login_redirects_to_dashboard_index(self):
page = self.get(reverse("dashboard:login"))
form = page.forms["dashboard_login_form"]
form["username"] = self.email
form["password"] = self.password
response = form.submit("login_submit")
self.assertRedirectsTo(response, "dashboard:index")
class TestDashboardIndexForPartnerUser(WebTestCase):
permissions = ["partner.dashboard_access"]
def test_is_available(self):
urls = ("dashboard:index", "dashboard:order-list")
for name in urls:
response = self.get(reverse(name))
self.assertContains(response, "Welcome")
def test_is_not_available(self):
urls = (
"dashboard:users-index",
"dashboard:partner-list",
"dashboard:partner-create",
"dashboard:offer-list",
"dashboard:reports-index",
)
for name in urls:
response = self.get(reverse(name), expect_errors=True)
self.assertContains(response, "Permission denied!", status_code=403)
def test_stats(self):
response = self.get(reverse("dashboard:index"))
for key in GENERIC_STATS_KEYS:
self.assertInContext(response, key)
for key in STAFF_STATS_KEYS:
self.assertNotInContext(response, key)
class TestDashboardIndexStatsForNonStaffUser(WebTestCase):
permissions = ["partner.dashboard_access"]
def METHOD_NAME(self):
super().METHOD_NAME()
customer = UserFactory()
product1 = create_product(partner_name="Partner 1", price=D(5))
product2 = create_product(partner_name="Partner 2", price=D(10))
create_product(partner_name="Partner 2", price=D(15))
basket1 = create_basket(empty=True)
basket1.add_product(product1)
create_order(basket=basket1, user=customer)
basket2 = create_basket(empty=True)
basket2.add_product(product1)
basket2 = create_basket(empty=True)
basket2.add_product(product2)
for i in range(9):
create_order(basket=basket2, user=customer, number="1000%s" % i)
stockrecord1 = product1.stockrecords.first()
stockrecord2 = product2.stockrecords.first()
self.partner1 = stockrecord1.partner
self.partner2 = stockrecord2.partner
StockAlert.objects.create(stockrecord=stockrecord1, threshold=10)
StockAlert.objects.create(stockrecord=stockrecord2, threshold=5)
def test_partner1(self):
user = self.create_user(username="user", email="[email protected]")
self.partner1.users.add(self.user)
self.partner2.users.add(user)
response = self.get(reverse("dashboard:index"))
context = response.context
self.assertEqual(context["total_orders_last_day"], 1)
self.assertEqual(context["total_lines_last_day"], 1)
self.assertEqual(context["total_revenue_last_day"], D(27))
self.assertEqual(context["total_customers_last_day"], 1)
self.assertEqual(context["total_open_baskets_last_day"], 1)
self.assertEqual(context["total_products"], 1)
self.assertEqual(context["total_open_stock_alerts"], 1)
self.assertEqual(context["total_closed_stock_alerts"], 0)
self.assertEqual(context["total_customers"], 1)
self.assertEqual(context["total_open_baskets"], 1)
self.assertEqual(context["total_orders"], 1)
self.assertEqual(context["total_lines"], 1)
self.assertEqual(context["total_revenue"], D(27))
def test_partner2(self):
        user = self.create_user(username="user", email="user@example.com")
self.partner1.users.add(user)
self.partner2.users.add(self.user)
response = self.get(reverse("dashboard:index"))
context = response.context
self.assertEqual(context["total_orders_last_day"], 9)
self.assertEqual(context["total_lines_last_day"], 9)
self.assertEqual(context["total_revenue_last_day"], D(288))
self.assertEqual(context["total_customers_last_day"], 1)
self.assertEqual(context["total_open_baskets_last_day"], 0)
self.assertEqual(context["total_products"], 2)
self.assertEqual(context["total_open_stock_alerts"], 1)
self.assertEqual(context["total_closed_stock_alerts"], 0)
self.assertEqual(context["total_customers"], 1)
self.assertEqual(context["total_open_baskets"], 0)
self.assertEqual(context["total_orders"], 9)
self.assertEqual(context["total_lines"], 9)
self.assertEqual(context["total_revenue"], D(288)) |
613 | test list ordering version | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cm.models import Action, ActionType, Bundle, Prototype
from django.urls import reverse
from rest_framework.response import Response
from adcm.tests.base import BaseTestCase
class TestClusterPrototypeAPI(BaseTestCase):
def setUp(self) -> None:
super().setUp()
self.bundle_1 = Bundle.objects.create(name="test_bundle_1")
self.bundle_2 = Bundle.objects.create(name="test_bundle_2")
self.prototype_1 = Prototype.objects.create(
bundle=self.bundle_1,
type="cluster",
name="test_prototype_1",
display_name="test_prototype_1",
version_order=1,
version=1,
)
self.prototype_2 = Prototype.objects.create(
bundle=self.bundle_2,
type="cluster",
name="test_prototype_2",
display_name="test_prototype_2",
version_order=2,
version=2,
)
self.action = Action.objects.create(
display_name="test_adcm_action",
prototype=self.prototype_1,
type=ActionType.JOB,
state_available="any",
)
def test_list(self):
response: Response = self.client.get(path=reverse(viewname="v1:cluster-prototype-list"))
self.assertEqual(len(response.data["results"]), 2)
def test_list_filter_name(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"), data={"name": "test_prototype_2"}
)
self.assertEqual(len(response.data["results"]), 1)
self.assertEqual(response.data["results"][0]["id"], self.prototype_2.pk)
def test_list_filter_bundle_id(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"),
data={"bundle_id": self.bundle_1.pk},
)
self.assertEqual(len(response.data["results"]), 1)
self.assertEqual(response.data["results"][0]["id"], self.prototype_1.pk)
def test_list_filter_display_name(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"),
data={"display_name": "test_prototype_2"},
)
self.assertEqual(len(response.data["results"]), 1)
self.assertEqual(response.data["results"][0]["id"], self.prototype_2.pk)
def test_list_ordering_display_name(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"), data={"ordering": "display_name"}
)
self.assertEqual(len(response.data["results"]), 2)
self.assertEqual(response.data["results"][0]["id"], self.prototype_1.pk)
self.assertEqual(response.data["results"][1]["id"], self.prototype_2.pk)
def test_list_ordering_display_name_reverse(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"), data={"ordering": "-display_name"}
)
self.assertEqual(len(response.data["results"]), 2)
self.assertEqual(response.data["results"][0]["id"], self.prototype_2.pk)
self.assertEqual(response.data["results"][1]["id"], self.prototype_1.pk)
def test_list_ordering_version_order(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"), data={"ordering": "version_order"}
)
self.assertEqual(len(response.data["results"]), 2)
self.assertEqual(response.data["results"][0]["id"], self.prototype_1.pk)
self.assertEqual(response.data["results"][1]["id"], self.prototype_2.pk)
def test_list_ordering_version_order_reverse(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"), data={"ordering": "-version_order"}
)
self.assertEqual(len(response.data["results"]), 2)
self.assertEqual(response.data["results"][0]["id"], self.prototype_2.pk)
self.assertEqual(response.data["results"][1]["id"], self.prototype_1.pk)
def METHOD_NAME(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"), data={"ordering": "version"}
)
self.assertEqual(len(response.data["results"]), 2)
self.assertEqual(response.data["results"][0]["id"], self.prototype_1.pk)
self.assertEqual(response.data["results"][1]["id"], self.prototype_2.pk)
def test_list_ordering_version_reverse(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-list"), data={"ordering": "-version"}
)
self.assertEqual(len(response.data["results"]), 2)
self.assertEqual(response.data["results"][0]["id"], self.prototype_2.pk)
self.assertEqual(response.data["results"][1]["id"], self.prototype_1.pk)
def test_retrieve(self):
response: Response = self.client.get(
path=reverse(viewname="v1:cluster-prototype-detail", kwargs={"prototype_pk": self.prototype_2.pk}),
)
self.assertEqual(response.data["id"], self.prototype_2.pk) |
614 | test train with empty input | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
import numpy
from pt.learner_with_tb import PTLearner
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
class TestPTLearner:
@patch.object(PTLearner, "save_local_model")
def test_train_empty_input(self, mock_save_local_model):
fl_ctx = FLContext()
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
data = Shareable()
result = learner.train(data, fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(PTLearner, "save_local_model")
def METHOD_NAME(self, mock_save_local_model):
fl_ctx = FLContext()
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
data = Shareable()
result = learner.train(data, fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(PTLearner, "save_local_model")
def test_train_with_invalid_data_kind(self, mock_save_local_model):
fl_ctx = FLContext()
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
dxo = DXO(DataKind.WEIGHT_DIFF, data={"x": numpy.array([1, 2, 3])})
result = learner.train(dxo.to_shareable(), fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(PTLearner, "save_local_model")
def test_train(self, mock_save_local_model):
fl_ctx = FLContext()
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
dxo = DXO(data_kind=DataKind.WEIGHTS, data=learner.model.state_dict())
result = learner.train(dxo.to_shareable(), fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.OK
@patch.object(FLContext, "get_engine")
def test_validate_with_empty_input(self, mock_get_engine):
mock_get_engine.get_workspace = Mock()
fl_ctx = FLContext()
fl_ctx.set_prop(ReservedKey.RUN_NUM, 100)
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
data = Shareable()
result = learner.validate(data, fl_ctx=fl_ctx, abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(FLContext, "get_engine")
def test_validate_with_invalid_data_kind(self, mock_get_engine):
mock_get_engine.get_workspace = Mock()
fl_ctx = FLContext()
fl_ctx.set_prop(ReservedKey.RUN_NUM, 100)
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
dxo = DXO(DataKind.WEIGHT_DIFF, data={"x": numpy.array([1, 2, 3])})
result = learner.validate(dxo.to_shareable(), fl_ctx=fl_ctx, abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(FLContext, "get_engine")
def test_validate(self, mock_get_engine):
mock_get_engine.get_workspace = Mock()
fl_ctx = FLContext()
fl_ctx.set_prop(ReservedKey.RUN_NUM, 100)
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
dxo = DXO(data_kind=DataKind.WEIGHTS, data=learner.model.state_dict())
        result = learner.validate(dxo.to_shareable(), fl_ctx=fl_ctx, abort_signal=Signal())
assert result.get_return_code() == ReturnCode.OK |
615 | serialize | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
# Copyright (C) 2023 Graz University of Technology.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Record response serializers."""
from babel_edtf import format_edtf
from invenio_i18n import gettext as _
from marshmallow import fields
from ....records.systemfields.access.field.record import AccessStatusEnum
class UIAccessStatus(object):
"""Access status properties to display in the UI."""
def __init__(self, access_status):
"""Build access status object."""
self.access_status = AccessStatusEnum(access_status)
@property
def id(self):
"""Access status id."""
return self.access_status.value
@property
def title(self):
"""Access status title."""
return {
AccessStatusEnum.OPEN: _("Open"),
AccessStatusEnum.EMBARGOED: _("Embargoed"),
AccessStatusEnum.RESTRICTED: _("Restricted"),
AccessStatusEnum.METADATA_ONLY: _("Metadata-only"),
}.get(self.access_status)
@property
def icon(self):
"""Access status icon."""
return {
AccessStatusEnum.OPEN: "unlock",
AccessStatusEnum.EMBARGOED: "outline clock",
AccessStatusEnum.RESTRICTED: "ban",
AccessStatusEnum.METADATA_ONLY: "tag",
}.get(self.access_status)
class UIObjectAccessStatus(UIAccessStatus):
"""Record or draft access status UI properties."""
def __init__(self, record_access_dict, has_files):
"""Build access status object."""
self.record_access_dict = record_access_dict
self.has_files = has_files
super().__init__(record_access_dict.get("status"))
@property
def description(self):
"""Record access status description."""
options = {
AccessStatusEnum.OPEN: _("The record and files are publicly accessible."),
AccessStatusEnum.METADATA_ONLY: _(
"No files are available for this record."
),
}
if self.record_access_dict.get("record") == "restricted":
if self.has_files:
options.update(
{
AccessStatusEnum.EMBARGOED: _(
"The record and files will be made publicly available "
"on %(date)s."
)
% {"date": self.embargo_date},
AccessStatusEnum.RESTRICTED: _(
"The record and files are restricted to users with "
"access."
),
}
)
else:
options.update(
{
AccessStatusEnum.EMBARGOED: _(
"The record will be made publicly available on %(date)s."
)
% {"date": self.embargo_date},
AccessStatusEnum.RESTRICTED: _(
"The record is restricted to users with access."
),
}
)
else:
options.update(
{
AccessStatusEnum.EMBARGOED: _(
"The files will be made publicly available on %(date)s."
)
% {"date": self.embargo_date},
AccessStatusEnum.RESTRICTED: _(
"The record is publicly accessible, but files are "
"restricted to users with access."
),
}
)
return options.get(self.access_status)
@property
def embargo_date(self):
"""Embargo date."""
until = self.record_access_dict.get("embargo").get("until")
if until:
return format_edtf(until, format="long")
return until
@property
def message_class(self):
"""UI message class name."""
return {
AccessStatusEnum.OPEN: "",
AccessStatusEnum.EMBARGOED: "warning",
AccessStatusEnum.RESTRICTED: "negative",
AccessStatusEnum.METADATA_ONLY: "",
}.get(self.access_status)
class AccessStatusField(fields.Field):
"""Record access status."""
def METHOD_NAME(self, value, attr, obj, **kwargs):
"""Serialise access status."""
record_access_dict = obj.get("access")
has_files = obj.get("files").get("enabled", False)
if record_access_dict:
record_access_status_ui = UIObjectAccessStatus(
record_access_dict, has_files
)
return {
"id": record_access_status_ui.id,
"title_l10n": record_access_status_ui.title,
"description_l10n": record_access_status_ui.description,
"icon": record_access_status_ui.icon,
"embargo_date_l10n": record_access_status_ui.embargo_date,
"message_class": record_access_status_ui.message_class,
} |
616 | configure | #!/usr/bin/env python
from distutils.version import LooseVersion
from sys import exit
import argparse
import errno
import glob
import logging
import os
import pipes
import psutil
import shutil
import signal
import subprocess
import tarfile
CONTROLLER_DIR = "/tmp/yugabyte/controller"
CONTROLLER_PID_FILE = CONTROLLER_DIR + "/yb-controller.pid"
PV_CONTROLLER_DIR = "/mnt/disk0/yw-data/controller"
CONTROLLER_DIR_LOGS = "/mnt/disk0/ybc-data/controller/logs"
class YBC:
def __init__(self):
self.args = []
self.parse_arguments()
def parse_arguments(self):
parser = argparse.ArgumentParser()
parser.add_argument(
'command', choices=['configure', 'start', 'status', 'stop'],
help='Configure, start, stop, get the status of YBC.')
self.args = parser.parse_args()
def identify_latest_pkg(self, path):
if not os.path.exists(path):
return ""
dir_list = os.listdir(path)
dir_list.sort(reverse=True, key=LooseVersion)
if len(dir_list) == 0:
return ""
else:
return dir_list[0]
def quote_cmd_line_for_bash(self, cmd_line):
if not isinstance(cmd_line, list) and not isinstance(cmd_line, tuple):
raise Exception("Expected a list/tuple, got: [[ {} ]]".format(cmd_line))
return ' '.join([pipes.quote(str(arg)) for arg in cmd_line])
def move_contents(self, root_src_dir, root_dst_dir):
for src_dir, dirs, files in os.walk(root_src_dir):
dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file):
# in case of the src and dst are the same file
if os.path.samefile(src_file, dst_file):
continue
os.remove(dst_file)
shutil.move(src_file, dst_dir)
def run_program(self, args):
cmd_as_str = self.quote_cmd_line_for_bash(args)
try:
proc_env = os.environ.copy()
subprocess_result = subprocess.Popen(
args, stderr=subprocess.STDOUT,
env=proc_env)
logging.info(
"Output from running command [[ {} ]]:\n{}\n[[ END OF OUTPUT ]]".format(
cmd_as_str, subprocess_result))
return subprocess_result
except subprocess.CalledProcessError as e:
logging.error("Failed to run command [[ {} ]]: code={} output={}".format(
cmd_as_str, e.returncode, str(e.output.decode('utf-8', errors='replace')
.encode("ascii", "ignore")
.decode("ascii"))))
raise e
except Exception as ex:
logging.error("Failed to run command [[ {} ]]: {}".format(cmd_as_str, ex))
raise ex
def read_pid_file(self, pid_file_path):
if os.path.exists(pid_file_path):
pidfile = open(pid_file_path, 'r')
line = pidfile.readline().strip()
pid = int(line)
return pid
else:
return None
# Fetch the latest ybc package from the PV_CONTROLLER_DIR + "/tmp", untar it and
# move it to the PV_CONTROLLER_DIR + "/bin" directory.
def METHOD_NAME(self):
latest_ybc_pkg = self.identify_latest_pkg(PV_CONTROLLER_DIR + "/tmp")
# latest_ybc_pkg is the latest package among the all the packages in the
# PV_CONTROLLER_DIR + "/tmp" directory.
# ybc-1.0.0-b9-linux-x86_64.tar.gz is an example of latest_ybc_pkg if
# there are ybc packages in the PV_CONTROLLER_DIR + "/tmp" directory. If the
# directory is empty, then latest_ybc_pkg is empty.
# ybc-1.0.0-b9-linux-x86_64 is an example of latest_ybc_prefix for the
# above latest_ybc_pkg
if latest_ybc_pkg:
latest_ybc_prefix = latest_ybc_pkg[0:-7]
tar = tarfile.open(PV_CONTROLLER_DIR + "/tmp/" + latest_ybc_pkg)
tar.extractall(PV_CONTROLLER_DIR)
self.move_contents(PV_CONTROLLER_DIR + "/" + latest_ybc_prefix + "/bin",
PV_CONTROLLER_DIR + "/bin")
shutil.rmtree(PV_CONTROLLER_DIR + "/" + latest_ybc_prefix)
def run(self):
self.METHOD_NAME()
if not os.path.isfile(CONTROLLER_PID_FILE) and \
os.path.exists(PV_CONTROLLER_DIR + '/bin/yb-controller-server'):
arguments = [CONTROLLER_DIR + '/bin/yb-controller-server',
'--flagfile',
PV_CONTROLLER_DIR + '/conf/server.conf']
subprocess_result = self.run_program(arguments)
pidfile = open(CONTROLLER_PID_FILE, 'w')
pidfile.write(str(subprocess_result.pid))
pidfile.close()
def stop(self):
if os.path.exists(CONTROLLER_PID_FILE):
pid = self.read_pid_file(CONTROLLER_PID_FILE)
try:
parent = psutil.Process(pid)
children = parent.children(recursive=True)
for p in children:
try:
os.kill(p.pid, signal.SIGKILL)
                        os.waitpid(p.pid, 0)
except Exception:
pass
try:
os.kill(parent.pid, signal.SIGKILL)
                    os.waitpid(parent.pid, 0)
except Exception:
pass
except Exception:
pass
os.remove(CONTROLLER_PID_FILE)
def status(self):
if os.path.exists(CONTROLLER_PID_FILE):
pid = None
try:
pid = self.read_pid_file(CONTROLLER_PID_FILE)
if pid is not None and psutil.pid_exists(pid):
exit(0)
else:
os.remove(CONTROLLER_PID_FILE)
exit(1)
except Exception:
os.remove(CONTROLLER_PID_FILE)
exit(1)
else:
exit(1)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(filename)s: %(message)s",
level=logging.INFO,
)
ybc = YBC()
if ybc.args.command == 'configure':
ybc.METHOD_NAME()
elif ybc.args.command == 'start':
ybc.run()
elif ybc.args.command == 'stop':
ybc.stop()
elif ybc.args.command == 'status':
ybc.status()
else:
logging.error('Incorrect command provided') |
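# Illustrative command-line usage for this controller script; the file name
# "ybc.py" below is an assumption, not taken from the source.
#
#   python ybc.py configure   # unpack the newest ybc package into the bin dir
#   python ybc.py start       # launch yb-controller-server and record its pid
#   python ybc.py status      # exit 0 if the pid file points at a live process
#   python ybc.py stop        # SIGKILL the server process and its children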
617 | stub add communication to case | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Stub functions that are used by the AWS Support unit tests.
"""
from datetime import datetime, timedelta
import json
from test_tools.example_stubber import ExampleStubber
class SupportStubber(ExampleStubber):
"""
A class that implements stub functions used by AWS Support unit tests.
The stubbed functions expect certain parameters to be passed to them as
part of the tests, and raise errors if the parameters are not as expected.
"""
def __init__(self, client, use_stubs=True):
"""
Initializes the object with a specific client and configures it for
stubbing or AWS passthrough.
:param client: A Boto3 Support client.
:param use_stubs: When True, use stubs to intercept requests. Otherwise,
pass requests through to AWS.
"""
super().__init__(client, use_stubs)
def stub_describe_services(
self, language, services, error_code=None):
expected_params = {'language': language}
response = {'services': services}
self._stub_bifurcator(
'describe_services', expected_params, response, error_code=error_code)
def stub_describe_severity_levels(
self, language, severity_levels, error_code=None):
expected_params = {'language': language}
response = {'severityLevels': severity_levels}
self._stub_bifurcator(
'describe_severity_levels', expected_params, response, error_code=error_code)
def stub_create_case(self, service, category, severity, case_id, error_code=None):
expected_params = {
'subject': 'Example case for testing, ignore.',
'serviceCode': service['code'],
'severityCode': severity['code'],
'categoryCode': category['code'],
'communicationBody': 'Example support case body.',
'language': 'en',
'issueType': 'customer-service'}
response = {'caseId': case_id}
self._stub_bifurcator(
'create_case', expected_params, response, error_code=error_code)
def stub_add_attachments_to_set(self, set_id, error_code=None):
expected_params = {
'attachments': [{
'data': b'This is a sample file for attachment to a support case.',
'fileName': 'attachment_file.txt'}]}
response = {'attachmentSetId': set_id}
self._stub_bifurcator(
'add_attachments_to_set', expected_params, response, error_code=error_code)
def METHOD_NAME(
self, attachment_set_id, case_id, error_code=None):
expected_params = {
'caseId': case_id,
'communicationBody': "This is an example communication added to a support case.",
'attachmentSetId': attachment_set_id}
response = {}
self._stub_bifurcator(
'add_communication_to_case', expected_params, response, error_code=error_code)
def stub_describe_communications(
self, case_id, communications, error_code=None):
expected_params = {'caseId': case_id}
response = {'communications': communications}
self._stub_bifurcator(
'describe_communications', expected_params, response, error_code=error_code)
def stub_describe_attachment(
self, attachment_id, file_name, error_code=None):
expected_params = {'attachmentId': attachment_id}
response = {'attachment': {'fileName': file_name}}
self._stub_bifurcator(
'describe_attachment', expected_params, response, error_code=error_code)
def stub_resolve_case(self, case_id, error_code=None):
expected_params = {'caseId': case_id}
response = {'finalCaseStatus': 'resolved'}
self._stub_bifurcator(
'resolve_case', expected_params, response, error_code=error_code)
def stub_describe_cases(self, cases, resolved, error_code=None):
start_time = str(datetime.utcnow().date())
end_time = str(datetime.utcnow().date() + timedelta(days=1))
expected_params = {'afterTime': start_time, 'beforeTime': end_time,
'includeResolvedCases': resolved, 'language': 'en'}
response = {'cases': cases}
self._stub_bifurcator(
'describe_cases', expected_params, response, error_code=error_code)
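# Illustrative sketch of how a test might drive this stubber; the boto3 client
# setup and the response handling below are assumptions for demonstration only.
#
#   import boto3
#   client = boto3.client("support")
#   stubber = SupportStubber(client)
#   stubber.stub_describe_severity_levels("en", [{"code": "low", "name": "Low"}])
#   levels = client.describe_severity_levels(language="en")["severityLevels"]
#   assert levels[0]["code"] == "low"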
|
618 | conditional split merge reinterpret pipe | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import pipeline_def, Pipeline
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
from test_utils import check_batch, RandomlyShapedDataIterator
from nose_utils import assert_raises
from nose2.tools import params
test_iters = 4
def to_batch(tl, batch_size):
return [np.array(tl[i]) for i in range(batch_size)]
@pipeline_def
def rotate_pipe(dev):
input = fn.external_source(name="input", device=dev)
return fn.rotate(input, angle=15)
@pipeline_def
def flip_pipe(dev):
input = fn.external_source(name="input", device=dev)
return fn.flip(input, horizontal=True)
@pipeline_def
def conditional_split_merge_pipe(dev):
input = fn.external_source(name="input", device=dev)
pred = fn.external_source(name="predicate")
true_branch, false_branch = fn._conditional.split(input, predicate=pred)
true_rotated = fn.rotate(true_branch, angle=15)
false_flipped = fn.flip(false_branch, horizontal=True)
return fn._conditional.merge(true_rotated, false_flipped, predicate=pred)
def check_conditional_split_merge(dev, pred_gen):
bs = 10
kwargs = {
"batch_size": bs,
"num_threads": 4,
"device_id": 0,
"prefetch_queue_depth": 1 # so that it's easier to use external source
}
pipe_sm = conditional_split_merge_pipe(dev, **kwargs)
pipe_true = rotate_pipe(dev, **kwargs)
pipe_false = flip_pipe(dev, **kwargs)
pipe_sm.build()
pipe_true.build()
pipe_false.build()
data_iter = RandomlyShapedDataIterator(bs, min_shape=(20, 20, 3), max_shape=(40, 30, 3))
data_iter = iter(data_iter)
for _ in range(test_iters):
predicate = [pred_gen(i) for i in range(bs)]
data = next(data_iter)
data_true = [data[i] for i in range(bs) if predicate[i]]
data_false = [data[i] for i in range(bs) if not predicate[i]]
pipe_sm.feed_input("input", data)
pipe_sm.feed_input("predicate", predicate)
if data_true:
pipe_true.feed_input("input", data_true)
out_true, = pipe_true.run()
else:
out_true = []
if data_false:
pipe_false.feed_input("input", data_false)
out_false, = pipe_false.run()
else:
out_false = []
out, = pipe_sm.run()
out_baseline = []
idx_true = 0
idx_false = 0
for p in predicate:
if p:
out_baseline.append(out_true[idx_true])
idx_true = idx_true + 1
else:
out_baseline.append(out_false[idx_false])
idx_false = idx_false + 1
if dev == "gpu":
out = [out[i].as_cpu() for i in range(bs)]
out_baseline = [out_baseline[i].as_cpu() for i in range(bs)]
check_batch(out, out_baseline, bs)
def test_conditional_split_merge():
rng = np.random.default_rng()
for dev in ["cpu", "gpu"]:
for pred_gen in [
lambda x: np.array(x < 3), lambda x: np.array(x % 2 == 0),
lambda x: np.array(x % 3 == 0), lambda _: np.array(False),
lambda _: rng.choice([np.array(True), np.array(False)])
]:
yield check_conditional_split_merge, dev, pred_gen
@pipeline_def
def METHOD_NAME(dtype, layout, shape):
batch_size = Pipeline.current().max_batch_size
input = fn.external_source(
source=[[np.full((10, 10, 3), 42, dtype=np.int32) for _ in range(batch_size)]], cycle=True)
pred = fn.external_source(
source=[[np.array(i % 2 == 0, dtype=bool) for i in range(batch_size)]], cycle=True)
true_branch, false_branch = fn._conditional.split(input, predicate=pred)
false_changed = fn.reinterpret(false_branch, dtype=dtype, layout=layout, shape=shape)
return fn._conditional.merge(true_branch, false_changed, predicate=pred)
def run_conditional_split_merge_reinterpret(dtype, layout, shape):
bs = 10
kwargs = {
"batch_size": bs,
"num_threads": 4,
"device_id": 0,
"prefetch_queue_depth": 1 # so that it's easier to use external source
}
pipe = METHOD_NAME(dtype, layout, shape, **kwargs)
pipe.build()
pipe.run()
@params((types.UINT32, None, None, "types*"),
(None, "HWC", None, "layouts*"),
(None, None, [10, -1], "sample dimensions*"))
def test_fail_conditional_split_merge(dtype, layout, shape, err_glob):
base = ("Divergent data found in different branches of conditional operation. All paths in "
"conditional operation are merged into one batch which must have consistent type, "
"number of dimensions, layout and other metadata. Found distinct ")
with assert_raises(RuntimeError, glob=base + err_glob):
run_conditional_split_merge_reinterpret(dtype, layout, shape) |
619 | register | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms. abstractmethod() may be used to declare
abstract methods for properties and descriptors.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""A decorator indicating abstract classmethods.
Deprecated, use 'classmethod' with 'abstractmethod' instead:
class C(ABC):
@classmethod
@abstractmethod
def my_abstract_classmethod(cls, ...):
...
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""A decorator indicating abstract staticmethods.
Deprecated, use 'staticmethod' with 'abstractmethod' instead:
class C(ABC):
@staticmethod
@abstractmethod
def my_abstract_staticmethod(...):
...
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""A decorator indicating abstract properties.
Deprecated, use 'property' with 'abstractmethod' instead:
class C(ABC):
@property
@abstractmethod
def my_abstract_property(self):
...
"""
__isabstractmethod__ = True
try:
from _abc import (get_cache_token, _abc_init, _abc_register,
_abc_instancecheck, _abc_subclasscheck, _get_dump,
_reset_registry, _reset_caches)
except ImportError:
from _py_abc import ABCMeta, get_cache_token
ABCMeta.__module__ = 'abc'
else:
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
def __new__(mcls, name, bases, namespace, **kwargs):
cls = super().__new__(mcls, name, bases, namespace, **kwargs)
_abc_init(cls)
return cls
def METHOD_NAME(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
return _abc_register(cls, subclass)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
return _abc_instancecheck(cls, instance)
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
return _abc_subclasscheck(cls, subclass)
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file)
print(f"Inv. counter: {get_cache_token()}", file=file)
(_abc_registry, _abc_cache, _abc_negative_cache,
_abc_negative_cache_version) = _get_dump(cls)
print(f"_abc_registry: {_abc_registry!r}", file=file)
print(f"_abc_cache: {_abc_cache!r}", file=file)
print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file)
print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}",
file=file)
def _abc_registry_clear(cls):
"""Clear the registry (for debugging or testing)."""
_reset_registry(cls)
def _abc_caches_clear(cls):
"""Clear the caches (for debugging or testing)."""
_reset_caches(cls)
def update_abstractmethods(cls):
"""Recalculate the set of abstract methods of an abstract class.
If a class has had one of its abstract methods implemented after the
class was created, the method will not be considered implemented until
this function is called. Alternatively, if a new abstract method has been
added to the class, it will only be considered an abstract method of the
class after this function is called.
This function should be called before any use is made of the class,
usually in class decorators that add methods to the subject class.
Returns cls, to allow usage as a class decorator.
If cls is not an instance of ABCMeta, does nothing.
"""
if not hasattr(cls, '__abstractmethods__'):
# We check for __abstractmethods__ here because cls might by a C
# implementation or a python implementation (especially during
# testing), and we want to handle both cases.
return cls
abstracts = set()
# Check the existing abstract methods of the parents, keep only the ones
# that are not implemented.
for scls in cls.__bases__:
for name in getattr(scls, '__abstractmethods__', ()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
# Also add any other newly added abstract methods.
for name, value in cls.__dict__.items():
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
return cls
class ABC(metaclass=ABCMeta):
"""Helper class that provides a standard way to create an ABC using
inheritance.
"""
__slots__ = () |
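# Illustrative sketch (not part of the module): how virtual-subclass
# registration and update_abstractmethods() are typically used; the names
# below (MyABC, add_greet, Greeter) are hypothetical.
#
#   class MyABC(ABC):
#       @abstractmethod
#       def greet(self): ...
#
#   MyABC.register(dict)             # dict now passes issubclass(dict, MyABC)
#
#   def add_greet(cls):              # class decorator that fills in greet()
#       cls.greet = lambda self: "hi"
#       return update_abstractmethods(cls)
#
#   @add_greet
#   class Greeter(MyABC):
#       pass                         # instantiable once the decorator has run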
620 | initialize constants | '''
Author: car72
Date: 2020-12-09
Uses Felix Benz's circuit model combined with some of jjb's Igor code for combining cavity and antenna modes
"Generalized circuit model for coupled plasmonic systems"; Benz et al., Optics Express, 2015
"Hybridization of plasmonic antenna and cavity modes: Extreme optics of nanoparticle-on-mirror nanogaps"; Tserkezis et al., Phys. Rev. A., 2015
Just call VirtualDimer() with appropriate parameters for your model system (see comments in VirtualDimer.__init__() for details)
This returns a dimer Object with attributes relating to the dimer's physical properties
VirtualDimer.antenna_wl is the "coupled mode" for a spherical dimer using Felix's circuit model
VirtualDimer.cavity_wl is the cavity mode
VirtualDimer.coupled_wl is the final coupled mode taking antenna/cavity mixing into account
All output values are in nm
'''
import numpy as np
class VirtualDimer:
def __init__(self, np_size, gap_size, gap_ri, env_ri = 1, conductance = 0, inductance = None, facet_width = None):
self.np_size = np_size #AuNP diameter in nm
self.gap_size = gap_size #Gap size in nm
self.gap_ri = gap_ri #RI of nanocavity
self.env_ri = env_ri #RI of surrounding medium
self.conductance = conductance #in G0
self.inductance = inductance # some integer value; don't ask me, I'm not a physicist
        self.facet_width = facet_width if facet_width is not None else np_size / 4  # in nm
self.METHOD_NAME()
self.calc_chi()
self.calc_theta()
self.calc_eta()
self.calc_eps_inf()
self.calc_cavity_wl()
self.calc_antenna_wl()
self.calc_mixing()
def METHOD_NAME(self):
self.c = 2.99792458e8 # in m/s
self.plasma_wl = 146 # in nm, Constant for Au
self.plasma_freq = 2 * np.pi * 2.99792458e8 / (self.plasma_wl * 1e-9) #radial frequency, Constant for Au
self.eps_0 = pow((pow(2.9979e8, 2) * 4 * np.pi * 1e-7), -1) #Constant in F/m (SI units)
self.cond_quant = 7.74809173e-5 #Constant
def calc_chi(self):
self.chi = 1.0242 + self.np_size*0.012785 - 0.0001375*pow(self.np_size, 2)
def wl_to_ev(self, wl):
        return 1239.8419745831507 / wl  # converts between eV and nm
def calc_theta(self):
theta_degrees = 27.171 - 0.091802*self.np_size + 0.00096972*pow(self.np_size, 2) + 1.8442e-5*pow(self.np_size, 3)
self.theta = theta_degrees*(np.pi/180)
def calc_eta(self):
self.eta = (pow(self.gap_ri, self.chi)/self.env_ri)*np.log(1 + ((self.np_size/2)*pow(self.theta, 2)/(2*self.gap_size)))
def calc_eps_inf(self):
"calculates the correct eps_inf for a given NP size"
self.eps_inf = 9.38 - 0.00339*self.np_size + 0.00021*pow(self.np_size, 2)
def calc_cavity_wl(self): # calculates energies of lowest mode
wp = self.wl_to_ev(self.plasma_wl)
en = wp/np.sqrt(self.eps_inf + self.facet_width*(self.gap_ri**2)/(self.gap_size * 4.2)) # alpha antinodes: in NPoMv2 {4.2,9.4,15} to fit better?
self.cavity_wl = self.wl_to_ev(en)
def calc_antenna_wl(self):#calculates coupled mode for spherical dimer using Felix Benz's circuit model
wd = 1/np.sqrt(4*self.eta*self.gap_ri + 2*self.gap_ri + self.eps_inf)
if self.conductance == 0:
self.antenna_wl = self.plasma_wl/wd
else:
Cs = 2*np.pi*(self.np_size/2)*self.eps_0*self.gap_ri*1e-9
induct_fh = self.inductance*1e-15
gam = -induct_fh*Cs*pow(self.plasma_freq, 2)
condS = np.multiply(self.cond_quant, self.conductance)
de = 1/pow((condS*induct_fh*self.plasma_freq), 2)
bq = de + pow(wd, 2)*(4*self.env_ri/gam - 1)
cq = -de * pow(wd, 2)
sqt = np.sqrt(pow(bq, 2) - 4*cq)
self.antenna_wl = self.plasma_wl*np.sqrt(2/(sqt - bq))
def calc_mixing(self): # calculates coupled plasmon from cavity mode coupled to antenna mode
e1 = self.wl_to_ev(self.antenna_wl) # antenna mode, around 746nm
e2 = self.wl_to_ev(self.cavity_wl) # cavity mode
eo = 0.5*(e1 + e2) - np.sqrt(0.25*(e2 - e1)**2 + 0.11**2) # V=0.09 in NPoMv2 for 5 modes, increase to match
self.coupled_wl = self.wl_to_ev(eo) |
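# Minimal usage sketch based on the docstring above; the nanoparticle diameter,
# gap size and refractive indices below are illustrative values, not data.
if __name__ == '__main__':
    dimer = VirtualDimer(np_size=80, gap_size=0.9, gap_ri=1.45, env_ri=1.0)
    print(f"antenna mode: {dimer.antenna_wl:.1f} nm")
    print(f"cavity mode:  {dimer.cavity_wl:.1f} nm")
    print(f"coupled mode: {dimer.coupled_wl:.1f} nm")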
621 | test2 | # Python 3 positional, kwonly, varargs, and annotations. Ick.
# RUNNABLE!
def test1(args_1, c: int, w=4, *varargs: int, **kwargs: 'annotating kwargs') -> tuple:
return (args_1, c, w, kwargs)
def METHOD_NAME(args_1, args_2, c: int, w=4, *varargs: int, **kwargs: 'annotating kwargs'):
return (args_1, args_2, c, w, varargs, kwargs)
def test3(c: int, w=4, *varargs: int, **kwargs: 'annotating kwargs') -> float:
return 5.4
def test4(a: float, c: int, *varargs: int, **kwargs: 'annotating kwargs') -> float:
return 5.4
def test5(a: float, c: int = 5, *varargs: int, **kwargs: 'annotating kwargs') -> float:
return 5.4
def test6(a: float, c: int, test=None):
return (a, c, test)
def test7(*varargs: int, **kwargs):
return (varargs, kwargs)
def test8(x=55, *varargs: int, **kwargs) -> list:
return (x, varargs, kwargs)
def test9(arg_1=55, *varargs: int, y=5, **kwargs):
    return arg_1, varargs, int, y, kwargs
def test10(args_1, b: 'annotating b', c: int) -> float:
return 5.4
def test11(*, name):
return args, name
def test12(a, *args, name):
return a, args
pass
def test13(*args, name):
return args, name
def test14(*args, name: int=1, qname):
return args, name, qname
def test15(*args, name='S', fname, qname=4):
return args, name, fname, qname
# From 3.4 /asyncio/streams.py open_connection
_DEFAULT_LIMIT = 5
def test16(host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
return host, port, loop, limit, kwds
# Python 3.1 _pyio.py uses the -> "IOBase" annotation
def o(f, mode = "r", buffering = None) -> "IOBase":
return (f, mode, buffering)
def foo1(x: 'an argument that defaults to 5' = 5):
print(x)
def div(a: dict(type=float, help='the dividend'),
b: dict(type=float, help='the divisor (must be different than 0)')
) -> dict(type=float, help='the result of dividing a by b'):
"""Divide a by b"""
return a / b
# From 3.7.6 functools.py
# Bug is in picking up the annotation.
def f(a:"This is a new annotation"):
"""This is a test"""
assert f.__annotations__['a'] == "This is a new annotation"
f(5)
class TestSignatureObject1():
def test_signature_on_wkwonly(self):
def test(*, a:float, b:str, c:str = 'test', **kwargs: int) -> int:
pass
class TestSignatureObject2():
def test_signature_on_wkwonly(self):
def test(*, c='test', a:float, b:str="S", **kwargs: int) -> int:
pass
class TestSignatureObject3():
def test_signature_on_wkwonly(self):
def test(*, c='test', a:float, kwargs:str="S", **b: int) -> int:
pass
class TestSignatureObject4():
def test_signature_on_wkwonly(self):
def test(x=55, *args, c:str='test', a:float, kwargs:str="S", **b: int) -> int:
pass
class TestSignatureObject5():
def test_signature_on_wkwonly(self):
def test(x=55, *args: int, c='test', a:float, kwargs:str="S", **b: int) -> int:
pass
class TestSignatureObject6():
def test_signature_on_wkwonly(self):
def test(x:int=55, *args: (int, str), c='test', a:float, kwargs:str="S", **b: int) -> int:
pass
class TestSignatureObject7():
def test_signature_on_wkwonly(self):
def test(c='test', kwargs:str="S", **b: int) -> int:
pass
class TestSignatureObject8():
def test_signature_on_wkwonly(self):
def test(**b: int) -> int:
pass
class TestSignatureObject9():
def test_signature_on_wkwonly(self):
def test(a, **b: int) -> int:
pass
class SupportsInt():
def __int__(self) -> int:
pass
def ann1(args_1, b: 'annotating b', c: int, *varargs: str) -> float:
assert ann1.__annotations__['b'] == 'annotating b'
assert ann1.__annotations__['c'] == int
assert ann1.__annotations__['varargs'] == str
assert ann1.__annotations__['return'] == float
def ann2(args_1, b: int = 5, **kwargs: float) -> float:
assert ann2.__annotations__['b'] == int
assert ann2.__annotations__['kwargs'] == float
assert ann2.__annotations__['return'] == float
assert b == 5
class TestSignatureObject():
def test_signature_on_wkwonly(self):
def test(x:int=55, *args: (int, str), c='test', a:float, kwargs:str="S", **b: int) -> int:
pass
assert test1(1, 5) == (1, 5, 4, {})
assert test1(1, 5, 6, foo='bar') == (1, 5, 6, {'foo': 'bar'})
assert METHOD_NAME(2, 3, 4) == (2, 3, 4, 4, (), {})
assert test3(10, foo='bar') == 5.4
assert test4(9.5, 7, 6, 4, bar='baz') == 5.4
### FIXME: fill in...
assert test6(1.2, 3) == (1.2, 3, None)
assert test6(2.3, 4, 5) == (2.3, 4, 5)
ann1(1, 'test', 5)
ann2(1)
### FIXME: fill in...
assert test12(1, 2, 3, name='hi') == (1, (2, 3)), "a, *args, name"
assert test13(1, 2, 3, name='hi') == ((1, 2, 3), 'hi'), "*args, name"
assert test16('localhost', loop=2, limit=3, a='b') == ('localhost', None, 2, 3, {'a': 'b'})
# From test 3.5 test_pydoc.py.
# Bug was in 3.5 and earlier handling of the return type, typing.Tuple[...]
try:
import typing
def foo() -> typing.Iterator[typing.Tuple[int, typing.Any]]:
...
except:
pass |
622 | test latest version | import os
import pytest
import salt.modules.pkgin as pkgin
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules(tmp_path):
return {pkgin: {"__opts__": {"cachedir": str(tmp_path)}}}
def test_search():
"""
Test searching for a package
"""
pkgin_out = [
"somepkg-1.0 Some package description here",
"",
"=: package is installed and up-to-date",
"<: package is installed but newer version is available",
">: installed package has a greater version than available package",
]
pkgin__get_version_mock = MagicMock(return_value=["0", "9", "0"])
pkgin__check_pkgin_mock = MagicMock(return_value="/opt/pkg/bin/pkgin")
pkgin_search_cmd = MagicMock(return_value=os.linesep.join(pkgin_out))
with patch("salt.modules.pkgin._get_version", pkgin__get_version_mock), patch(
"salt.modules.pkgin._check_pkgin", pkgin__check_pkgin_mock
), patch.dict(pkgin.__salt__, {"cmd.run": pkgin_search_cmd}):
assert pkgin.search("somepkg") == {"somepkg": "1.0"}
pkgin_out = [
"somepkg-1.0 = Some package description here",
"",
"=: package is installed and up-to-date",
"<: package is installed but newer version is available",
">: installed package has a greater version than available package",
]
pkgin_search_cmd = MagicMock(return_value=os.linesep.join(pkgin_out))
with patch("salt.modules.pkgin._get_version", pkgin__get_version_mock), patch(
"salt.modules.pkgin._check_pkgin", pkgin__check_pkgin_mock
), patch.dict(pkgin.__salt__, {"cmd.run": pkgin_search_cmd}):
assert pkgin.search("somepkg") == {"somepkg": "1.0"}
def METHOD_NAME():
"""
Test getting the latest version of a package
"""
pkgin_out = [
"somepkg-1.0;;Some package description here",
"",
"=: package is installed and up-to-date",
"<: package is installed but newer version is available",
">: installed package has a greater version than available package",
]
pkgin__get_version_mock = MagicMock(return_value=["0", "9", "0"])
pkgin__check_pkgin_mock = MagicMock(return_value="/opt/pkg/bin/pkgin")
pkgin_refresh_db_mock = MagicMock(return_value=True)
pkgin_search_cmd = MagicMock(return_value=os.linesep.join(pkgin_out))
with patch("salt.modules.pkgin.refresh_db", pkgin_refresh_db_mock), patch(
"salt.modules.pkgin._get_version", pkgin__get_version_mock
), patch("salt.modules.pkgin._check_pkgin", pkgin__check_pkgin_mock), patch.dict(
pkgin.__salt__, {"cmd.run": pkgin_search_cmd}
):
assert pkgin.latest_version("somepkg") == "1.0"
pkgin_out = [
"somepkg-1.1;<;Some package description here",
"",
"=: package is installed and up-to-date",
"<: package is installed but newer version is available",
">: installed package has a greater version than available package",
]
pkgin_refresh_db_mock = MagicMock(return_value=True)
pkgin_search_cmd = MagicMock(return_value=os.linesep.join(pkgin_out))
with patch("salt.modules.pkgin.refresh_db", pkgin_refresh_db_mock), patch(
"salt.modules.pkgin._get_version", pkgin__get_version_mock
), patch("salt.modules.pkgin._check_pkgin", pkgin__check_pkgin_mock), patch.dict(
pkgin.__salt__, {"cmd.run": pkgin_search_cmd}
):
assert pkgin.latest_version("somepkg") == "1.1"
pkgin_out = [
"somepkg-1.2;=;Some package description here",
"",
"=: package is installed and up-to-date",
"<: package is installed but newer version is available",
">: installed package has a greater version than available package",
]
pkgin_refresh_db_mock = MagicMock(return_value=True)
pkgin_search_cmd = MagicMock(return_value=os.linesep.join(pkgin_out))
with patch("salt.modules.pkgin.refresh_db", pkgin_refresh_db_mock), patch(
"salt.modules.pkgin._get_version", pkgin__get_version_mock
), patch("salt.modules.pkgin._check_pkgin", pkgin__check_pkgin_mock), patch.dict(
pkgin.__salt__, {"cmd.run": pkgin_search_cmd}
):
assert pkgin.latest_version("somepkg") == "1.2"
pkgin_out = "No results found for ^boguspkg$"
pkgin_refresh_db_mock = MagicMock(return_value=True)
pkgin_search_cmd = MagicMock(return_value=pkgin_out)
with patch("salt.modules.pkgin.refresh_db", pkgin_refresh_db_mock), patch(
"salt.modules.pkgin._get_version", pkgin__get_version_mock
), patch("salt.modules.pkgin._check_pkgin", pkgin__check_pkgin_mock), patch.dict(
pkgin.__salt__, {"cmd.run": pkgin_search_cmd}
):
assert pkgin.latest_version("boguspkg") == {}
def test_file_dict():
"""
Test that file_dict doesn't crash
"""
pkg_info_stdout = [
"/opt/pkg/bin/pkgin",
"/opt/pkg/man/man1/pkgin.1",
"/opt/pkg/share/examples/pkgin/preferred.conf.example",
"/opt/pkg/share/examples/pkgin/repositories.conf.example",
]
pkg_info_out = {
"pid": 1234,
"retcode": 0,
"stderr": "",
"stdout": os.linesep.join(pkg_info_stdout),
}
pkg_info_cmd = MagicMock(return_value=pkg_info_out)
with patch.dict(pkgin.__salt__, {"cmd.run_all": pkg_info_cmd}):
assert pkgin.file_dict("pkgin") == {
"files": {
"pkgin": [
"/opt/pkg/bin/pkgin",
"/opt/pkg/man/man1/pkgin.1",
"/opt/pkg/share/examples/pkgin/preferred.conf.example",
"/opt/pkg/share/examples/pkgin/repositories.conf.example",
]
}
} |
623 | get private endpoint connection output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, group_ids=None, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, system_data=None, type=None):
if group_ids and not isinstance(group_ids, list):
raise TypeError("Expected argument 'group_ids' to be a list")
pulumi.set(__self__, "group_ids", group_ids)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="groupIds")
def group_ids(self) -> Optional[Sequence[str]]:
"""
The private endpoint connection group ids.
"""
return pulumi.get(self, "group_ids")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
group_ids=self.group_ids,
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
Azure REST API version: 2022-08-01.
:param str private_endpoint_connection_name: The private endpoint connection name of Azure Managed Grafana.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The workspace name of Azure Managed Grafana.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:dashboard:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
group_ids=pulumi.get(__ret__, 'group_ids'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def METHOD_NAME(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
The Private Endpoint Connection resource.
Azure REST API version: 2022-08-01.
:param str private_endpoint_connection_name: The private endpoint connection name of Azure Managed Grafana.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The workspace name of Azure Managed Grafana.
"""
... |
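# Illustrative usage sketch; the resource names below are placeholders, not
# values taken from this module.
#
#   conn = get_private_endpoint_connection_output(
#       private_endpoint_connection_name="example-connection",
#       resource_group_name="example-rg",
#       workspace_name="example-grafana")
#   pulumi.export("grafanaPecState", conn.provisioning_state)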
624 | test create or update indexer | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.core import MatchConditions
from azure.core.exceptions import HttpResponseError
from azure.search.documents.indexes.aio import SearchIndexClient, SearchIndexerClient
from azure.search.documents.indexes.models import (
SearchIndex,
SearchIndexer,
SearchIndexerDataContainer,
SearchIndexerDataSourceConnection,
)
from devtools_testutils import AzureRecordedTestCase
from devtools_testutils.aio import recorded_by_proxy_async
from search_service_preparer import SearchEnvVarPreparer, search_decorator
class TestSearchIndexerClientTestAsync(AzureRecordedTestCase):
@SearchEnvVarPreparer()
@search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json")
@recorded_by_proxy_async
async def test_search_indexers(self, endpoint, api_key, **kwargs):
storage_cs = kwargs.get("search_storage_connection_string")
container_name = kwargs.get("search_storage_container_name")
client = SearchIndexerClient(endpoint, api_key, retry_backoff_factor=60)
index_client = SearchIndexClient(endpoint, api_key, retry_backoff_factor=60)
async with client:
async with index_client:
await self._test_create_indexer(client, index_client, storage_cs, container_name)
await self._test_delete_indexer(client, index_client, storage_cs, container_name)
await self._test_get_indexer(client, index_client, storage_cs, container_name)
await self._test_list_indexer(client, index_client, storage_cs, container_name)
await self.METHOD_NAME(client, index_client, storage_cs, container_name)
await self._test_reset_indexer(client, index_client, storage_cs, container_name)
await self._test_run_indexer(client, index_client, storage_cs, container_name)
await self._test_get_indexer_status(client, index_client, storage_cs, container_name)
await self._test_create_or_update_indexer_if_unchanged(client, index_client, storage_cs, container_name)
await self._test_delete_indexer_if_unchanged(client, index_client, storage_cs, container_name)
async def _prepare_indexer(self, client, index_client, storage_cs, name, container_name):
data_source_connection = SearchIndexerDataSourceConnection(
name=f"{name}-ds",
type="azureblob",
connection_string=storage_cs,
container=SearchIndexerDataContainer(name=container_name),
)
ds = await client.create_data_source_connection(data_source_connection)
fields = [{"name": "hotelId", "type": "Edm.String", "key": True, "searchable": False}]
index = SearchIndex(name=f"{name}-hotels", fields=fields)
ind = await index_client.create_index(index)
return SearchIndexer(name=name, data_source_name=ds.name, target_index_name=ind.name)
async def _test_create_indexer(self, client, index_client, storage_cs, container_name):
name = "create"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
result = await client.create_indexer(indexer)
assert result.name == name
assert result.target_index_name == f"{name}-hotels"
assert result.data_source_name == f"{name}-ds"
async def _test_delete_indexer(self, client, index_client, storage_cs, container_name):
name = "delete"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
expected = len(await client.get_indexers()) - 1
await client.delete_indexer(name)
assert len(await client.get_indexers()) == expected
async def _test_get_indexer(self, client, index_client, storage_cs, container_name):
name = "get"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
result = await client.get_indexer(name)
assert result.name == name
async def _test_list_indexer(self, client, index_client, storage_cs, container_name):
name1 = "list1"
name2 = "list2"
indexer1 = await self._prepare_indexer(client, index_client, storage_cs, name1, container_name)
indexer2 = await self._prepare_indexer(client, index_client, storage_cs, name2, container_name)
await client.create_indexer(indexer1)
await client.create_indexer(indexer2)
result = await client.get_indexers()
assert isinstance(result, list)
assert set(x.name for x in result).intersection([name1, name2]) == set([name1, name2])
async def METHOD_NAME(self, client, index_client, storage_cs, container_name):
name = "cou"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
expected = len(await client.get_indexers())
indexer.description = "updated"
await client.create_or_update_indexer(indexer)
assert len(await client.get_indexers()) == expected
result = await client.get_indexer(name)
assert result.name == name
assert result.description == "updated"
async def _test_reset_indexer(self, client, index_client, storage_cs, container_name):
name = "reset"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
await client.reset_indexer(name)
assert (await client.get_indexer_status(name)).last_result.status.lower() in ("inprogress", "reset")
async def _test_run_indexer(self, client, index_client, storage_cs, container_name):
name = "run"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
await client.run_indexer(name)
assert (await client.get_indexer_status(name)).status == "running"
async def _test_get_indexer_status(self, client, index_client, storage_cs, container_name):
name = "get-status"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
status = await client.get_indexer_status(name)
assert status.status is not None
async def _test_create_or_update_indexer_if_unchanged(self, client, index_client, storage_cs, container_name):
name = "couunch"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
created = await client.create_indexer(indexer)
etag = created.e_tag
indexer.description = "updated"
await client.create_or_update_indexer(indexer)
indexer.e_tag = etag
with pytest.raises(HttpResponseError):
await client.create_or_update_indexer(indexer, match_condition=MatchConditions.IfNotModified)
async def _test_delete_indexer_if_unchanged(self, client, index_client, storage_cs, container_name):
name = "delunch"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
result = await client.create_indexer(indexer)
etag = result.e_tag
indexer.description = "updated"
await client.create_or_update_indexer(indexer)
indexer.e_tag = etag
with pytest.raises(HttpResponseError):
await client.delete_indexer(indexer, match_condition=MatchConditions.IfNotModified) |
625 | test execute program no args | from __future__ import annotations
import json
import os.path
import platform
import subprocess
import sys
import tempfile
import threading
import time
import psutil
import pytest
from simpleflow import execute
from simpleflow.exceptions import ExecutionError, ExecutionTimeoutError
@execute.program(path="ls")
def ls_nokwargs(*args):
"""
Only accepts a variable number of positional arguments.
"""
pass
def test_execute_program_no_kwargs():
with tempfile.NamedTemporaryFile() as f:
with pytest.raises(TypeError):
ls_nokwargs(hide=f.name)
@execute.program(path="ls")
def ls_noargs(**kwargs):
"""
Only accepts a variable number of keyword arguments.
"""
pass
def METHOD_NAME():
with tempfile.NamedTemporaryFile() as f:
with pytest.raises(TypeError):
ls_noargs(f.name)
@execute.program(path="ls")
def ls_restrict_named_arguments(*, hide=execute.RequiredArgument):
pass
def test_execute_program_restrict_named_arguments():
with tempfile.NamedTemporaryFile() as f:
with pytest.raises(TypeError):
ls_restrict_named_arguments(f.name)
@execute.program(path="ls")
def ls_optional_named_arguments(hide="", *args):
pass
@pytest.mark.xfail(platform.system() == "Darwin", reason="ls doesn't have a --hide option on MacOSX")
def test_execute_program_optional_named_arguments():
with tempfile.NamedTemporaryFile(suffix="\xe9") as f:
assert ls_optional_named_arguments(f.name).strip() == f.name
assert f.name not in ls_optional_named_arguments(hide=f.name)
@execute.program()
def ls(*args, **kwargs):
pass
def test_execute_program_with_positional_arguments():
with tempfile.NamedTemporaryFile() as f:
assert ls(f.name).strip() == f.name
@pytest.mark.xfail(platform.system() == "Darwin", reason="ls doesn't have a --hide option on MacOSX")
def test_execute_program_with_named_arguments():
with tempfile.NamedTemporaryFile() as f:
assert f.name not in (ls(os.path.dirname(f.name), hide=f.name).strip())
@execute.program()
def ls_2args(a, b):
pass
def test_ls_2args():
with pytest.raises(TypeError):
ls_2args(1, 2, 3)
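# The tests below exercise @execute.python(): as far as these tests show, the
# decorated callable (or class exposing an execute() method) presumably runs in
# a separate Python process, its return value is marshalled back, and raised
# exceptions surface as ExecutionError carrying a JSON payload.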
@execute.python()
def inc(xs):
return [x + 1 for x in xs]
def test_function_as_program():
assert inc([1, 2, 3]) == [2, 3, 4]
@execute.python()
def add(a, b=1):
return a + b
@execute.python()
class Add:
def __init__(self, a, b=1):
self.a = a
self.b = b
def execute(self):
return self.a + self.b
def test_function_as_program_with_default_kwarg():
assert add(4) == 5
assert Add(4) == 5
def test_function_as_program_with_kwargs():
assert add(3, 7) == 10
assert Add(3, 7) == 10
def test_function_as_program_raises_builtin_exception():
with pytest.raises(ExecutionError) as excinfo:
add("1")
assert '"error":"TypeError"' in str(excinfo.value)
with pytest.raises(ExecutionError) as excinfo:
Add("1")
assert '"error":"TypeError"' in str(excinfo.value)
@execute.python()
def print_string(s, retval):
print(s, end="")
return retval
@execute.python()
class PrintString:
def __init__(self, s, retval):
self.s = s
self.retval = retval
def execute(self):
print(self.s)
return self.retval
def test_function_with_print():
actual = print_string("This isn't part of the return value", None)
assert actual is None, actual
actual = PrintString("This isn't part of the return value", None)
assert actual is None, actual
def test_function_with_print_and_return():
assert print_string("This isn't part of the return value", 42) == 42
assert PrintString("This isn't part of the return value", 42) == 42
def test_function_returning_lf():
assert print_string("This isn't part of the return value", "a\nb") == "a\nb"
assert PrintString("This isn't part of the return value", "a\nb") == "a\nb"
class DummyException(Exception):
pass
@execute.python()
def raise_dummy_exception():
raise DummyException
@execute.python()
class RaiseDummyException:
def __init__(self):
pass
@staticmethod
def execute():
raise DummyException
def test_function_as_program_raises_custom_exception():
with pytest.raises(ExecutionError) as excinfo:
raise_dummy_exception()
assert '"error":"DummyException"' in str(excinfo.value)
with pytest.raises(ExecutionError) as excinfo:
RaiseDummyException()
assert '"error":"DummyException"' in str(excinfo.value)
@execute.python()
def raise_timeout_error():
from simpleflow.exceptions import TimeoutError
raise TimeoutError("timeout", 1)
def test_function_as_program_raises_module_exception():
with pytest.raises(ExecutionError) as excinfo:
raise_timeout_error()
assert '"error":"TimeoutError"' in str(excinfo.value)
@execute.python()
def warn():
import warnings
warnings.warn(
"The _posixsubprocess module is not being used. "
"Child process reliability may suffer if your "
"program uses threads.",
RuntimeWarning,
)
raise Exception("Fake Exception")
def test_function_with_warning():
try:
warn()
except Exception:
pass
else:
assert False
def test_function_returning_unicode():
assert print_string("", "ʘ‿ʘ") == "ʘ‿ʘ"
@execute.python()
def raise_dummy_exception_with_unicode():
raise DummyException("ʘ‿ʘ")
def test_exception_with_unicode():
with pytest.raises(ExecutionError) as excinfo:
raise_dummy_exception_with_unicode()
assert '"error":"DummyException"' in str(excinfo.value)
error = json.loads(excinfo.value.args[0])
assert error["message"] == "ʘ‿ʘ"
def sleep_and_return(seconds):
time.sleep(seconds)
return seconds
def test_timeout_execute():
timeout = 3 # TODO: the timeout should be smaller but as a workaround for Pypy slowness/overhead we set it to 3 sec
func = execute.python(timeout=timeout)(sleep_and_return)
# Normal case
result = func(0.25)
assert result == 0.25
# Timeout case
t = time.time()
with pytest.raises(ExecutionTimeoutError) as e:
func(10)
assert (time.time() - t) < 10.0
assert f"ExecutionTimeoutError after {timeout} seconds" in str(e.value)
def test_timeout_execute_from_thread():
# From a thread
t = threading.Thread(target=test_timeout_execute)
t.start()
t.join()
def create_sleeper_subprocess():
pid = subprocess.Popen(["sleep", "600"]).pid
return pid
@pytest.mark.xfail(
platform.system() == "Darwin" or "PyPy" in sys.version,
reason="psutil process statuses are buggy on OSX, and test randomly fails on PyPy",
)
def test_execute_dont_kill_children():
pid = execute.python()(create_sleeper_subprocess)()
sleeper = psutil.Process(pid)  # avoid shadowing the subprocess module
assert sleeper.status() == "sleeping"
sleeper.terminate()  # cleanup
def test_execute_kill_children():
pid = execute.python(kill_children=True)(create_sleeper_subprocess)()
with pytest.raises(psutil.NoSuchProcess):
psutil.Process(pid)
@execute.python()
def length(x):
return len(x)
def test_large_command_line():
x = "a" * 1024 * 1024
assert length(x) == len(x)
def test_large_command_line_unicode():
x = "ä" * 1024 * 1024
assert length(x) == len(x)
def test_large_command_line_utf8():
"""
UTF-8 bytes must be handled as Unicode, both in Python 2 and Python 3.
"""
x = "ä" * 1024 * 1024
assert length(x.encode("utf-8")) == len(x) |
626 | step | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.dataclass.utils import gen_parser_from_dataclass
class FairseqOptimizer(object):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
@classmethod
def add_args(cls, parser):
"""Add optimizer-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@property
def optimizer(self):
"""Return a torch.optim.optimizer.Optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer):
"""Reset optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
self._optimizer = optimizer
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
raise NotImplementedError
@property
def params(self):
"""Return an iterable of the parameters held by the optimizer."""
for param_group in self.param_groups:
for p in param_group["params"]:
yield p
@property
def param_groups(self):
return self.optimizer.param_groups
def __getstate__(self):
return self._optimizer.__getstate__()
def get_lr(self):
"""Return the current learning rate."""
return self.param_groups[0]["lr"]
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.param_groups:
param_group["lr"] = lr
def state_dict(self):
"""Return the optimizer's state dict."""
return self.optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
self.optimizer.load_state_dict(state_dict)
if optimizer_overrides is not None and len(optimizer_overrides) > 0:
# override learning rate, momentum, etc. with latest values
for group in self.param_groups:
group.update(optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
loss.backward()
def all_reduce_grads(self, module):
"""Manually all-reduce gradients (if required)."""
if hasattr(module, "all_reduce_grads"):
module.all_reduce_grads()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
for p in self.params:
if p.grad is not None:
if torch.is_tensor(c):
c = c.to(p.grad.device)
p.grad.data.mul_(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)
def METHOD_NAME(self, closure=None, scale=1.0, groups=None):
"""Performs a single optimization step."""
if self.supports_step_with_scale:
if self.supports_groups:
self.optimizer.METHOD_NAME(closure, scale=scale, groups=groups)
else:
self.optimizer.METHOD_NAME(closure, scale=scale)
else:
if scale != 1.0:
self.multiply_grads(1.0 / scale)
if self.supports_groups:
self.optimizer.METHOD_NAME(closure, groups=groups)
else:
self.optimizer.METHOD_NAME(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self.optimizer.zero_grad()
@property
def supports_memory_efficient_fp16(self):
if hasattr(self.optimizer, "supports_memory_efficient_fp16"):
return self.optimizer.supports_memory_efficient_fp16
return False
@property
def supports_step_with_scale(self):
if hasattr(self.optimizer, "supports_step_with_scale"):
return self.optimizer.supports_step_with_scale
return False
@property
def supports_groups(self):
if hasattr(self.optimizer, "supports_groups"):
return self.optimizer.supports_groups
return False
@property
def supports_flat_params(self):
"""
Whether the optimizer supports collapsing of the model
parameters/gradients into a single contiguous Tensor.
"""
if hasattr(self.optimizer, "supports_flat_params"):
return self.optimizer.supports_flat_params
return False
def average_params(self):
pass
def broadcast_global_state_dict(self, state_dict):
"""
Broadcasts a global state dict to all ranks.
Useful for optimizers that shard state between ranks.
"""
if hasattr(self.optimizer, "broadcast_global_state_dict"):
return self.optimizer.broadcast_global_state_dict(state_dict)
else:
return state_dict
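# Illustrative sketch (not part of fairseq): a concrete subclass would build a
# torch.optim optimizer and expose its kwargs through `optimizer_config`. The
# cfg fields used below (lr, adam_betas) are assumptions for the example.
#
# class ExampleAdamOptimizer(FairseqOptimizer):
#     def __init__(self, cfg, params):
#         super().__init__(cfg)
#         self._optimizer = torch.optim.Adam(params, lr=cfg.lr[0], betas=cfg.adam_betas)
#
#     @property
#     def optimizer_config(self):
#         return {"lr": self.cfg.lr[0], "betas": self.cfg.adam_betas}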
class LegacyFairseqOptimizer(FairseqOptimizer):
def __init__(self, args):
self.args = args |
627 | test single frame | # SPDX-License-Identifier: LGPL-3.0-or-later
import json
import os
import unittest
import numpy as np
from common import (
j_loader,
run_dp,
tests_path,
)
from deepmd.env import (
GLOBAL_NP_FLOAT_PRECISION,
tf,
)
from deepmd.train.run_options import (
RunOptions,
)
from deepmd.train.trainer import (
DPTrainer,
)
from deepmd.utils.argcheck import (
normalize,
)
from deepmd.utils.compat import (
update_deepmd_input,
)
from deepmd.utils.data_system import (
DeepmdDataSystem,
)
if GLOBAL_NP_FLOAT_PRECISION == np.float32:
default_places = 4
else:
default_places = 10
def _file_delete(file):
if os.path.isdir(file):
os.rmdir(file)
elif os.path.isfile(file):
os.remove(file)
def _init_models():
data_file = str(tests_path / os.path.join("init_frz_model", "data"))
frozen_model = str(tests_path / "init_frz_se_a.pb")
ckpt = str(tests_path / "init_frz_se_a.ckpt")
run_opt_ckpt = RunOptions(init_model=ckpt, log_level=20)
run_opt_frz = RunOptions(init_frz_model=frozen_model, log_level=20)
INPUT = str(tests_path / "input.json")
jdata = j_loader(str(tests_path / os.path.join("init_frz_model", "input.json")))
jdata["training"]["training_data"]["systems"] = data_file
jdata["training"]["validation_data"]["systems"] = data_file
jdata["training"]["save_ckpt"] = ckpt
with open(INPUT, "w") as fp:
json.dump(jdata, fp, indent=4)
ret = run_dp("dp train " + INPUT)
np.testing.assert_equal(ret, 0, "DP train failed!")
ret = run_dp("dp freeze -c " + str(tests_path) + " -o " + frozen_model)
np.testing.assert_equal(ret, 0, "DP freeze failed!")
jdata = update_deepmd_input(jdata, warning=True, dump="input_v2_compat.json")
jdata = normalize(jdata)
model_ckpt = DPTrainer(jdata, run_opt=run_opt_ckpt)
model_frz = DPTrainer(jdata, run_opt=run_opt_frz)
rcut = model_ckpt.model.get_rcut()
type_map = model_ckpt.model.get_type_map()
data = DeepmdDataSystem(
systems=[data_file],
batch_size=1,
test_size=1,
rcut=rcut,
type_map=type_map,
trn_all_set=True,
)
data_requirement = {
"energy": {
"ndof": 1,
"atomic": False,
"must": False,
"high_prec": True,
"type_sel": None,
"repeat": 1,
"default": 0.0,
},
"force": {
"ndof": 3,
"atomic": True,
"must": False,
"high_prec": False,
"type_sel": None,
"repeat": 1,
"default": 0.0,
},
"virial": {
"ndof": 9,
"atomic": False,
"must": False,
"high_prec": False,
"type_sel": None,
"repeat": 1,
"default": 0.0,
},
"atom_ener": {
"ndof": 1,
"atomic": True,
"must": False,
"high_prec": False,
"type_sel": None,
"repeat": 1,
"default": 0.0,
},
"atom_pref": {
"ndof": 1,
"atomic": True,
"must": False,
"high_prec": False,
"type_sel": None,
"repeat": 3,
"default": 0.0,
},
}
data.add_dict(data_requirement)
stop_batch = jdata["training"]["numb_steps"]
return INPUT, ckpt, frozen_model, model_ckpt, model_frz, data, stop_batch
(
INPUT,
CKPT,
FROZEN_MODEL,
CKPT_TRAINER,
FRZ_TRAINER,
VALID_DATA,
STOP_BATCH,
) = _init_models()
class TestInitFrzModelA(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dp_ckpt = CKPT_TRAINER
cls.dp_frz = FRZ_TRAINER
cls.valid_data = VALID_DATA
cls.stop_batch = STOP_BATCH
@classmethod
def tearDownClass(cls):
_file_delete(INPUT)
_file_delete(FROZEN_MODEL)
_file_delete("out.json")
_file_delete(str(tests_path / "checkpoint"))
_file_delete(CKPT + ".meta")
_file_delete(CKPT + ".index")
_file_delete(CKPT + ".data-00000-of-00001")
_file_delete(CKPT + "-0.meta")
_file_delete(CKPT + "-0.index")
_file_delete(CKPT + "-0.data-00000-of-00001")
_file_delete(CKPT + "-1.meta")
_file_delete(CKPT + "-1.index")
_file_delete(CKPT + "-1.data-00000-of-00001")
_file_delete("input_v2_compat.json")
_file_delete("lcurve.out")
def METHOD_NAME(self):
valid_batch = self.valid_data.get_batch()
natoms = valid_batch["natoms_vec"]
tf.reset_default_graph()
self.dp_ckpt.build(self.valid_data, self.stop_batch)
self.dp_ckpt._init_session()
feed_dict_ckpt = self.dp_ckpt.get_feed_dict(valid_batch, is_training=False)
ckpt_rmse_ckpt = self.dp_ckpt.loss.eval(
self.dp_ckpt.sess, feed_dict_ckpt, natoms
)
tf.reset_default_graph()
self.dp_frz.build(self.valid_data, self.stop_batch)
self.dp_frz._init_session()
feed_dict_frz = self.dp_frz.get_feed_dict(valid_batch, is_training=False)
ckpt_rmse_frz = self.dp_frz.loss.eval(self.dp_frz.sess, feed_dict_frz, natoms)
tf.reset_default_graph()
# check values
np.testing.assert_almost_equal(
ckpt_rmse_ckpt["rmse_e"], ckpt_rmse_frz["rmse_e"], default_places
)
np.testing.assert_almost_equal(
ckpt_rmse_ckpt["rmse_f"], ckpt_rmse_frz["rmse_f"], default_places
)
np.testing.assert_almost_equal(
ckpt_rmse_ckpt["rmse_v"], ckpt_rmse_frz["rmse_v"], default_places
) |
628 | new | # -*- coding: utf-8 -*-
#-----------------------
# Name: tmdb_request.py
# Python Library
# Author: Raymond Wagner
# Purpose: Wrapped urllib2.Request class pre-configured for accessing the
# TMDb v3 API
#-----------------------
from .tmdb_exceptions import *
from .locales import get_locale
from .cache import Cache
from urllib.parse import urlencode
from urllib.request import Request as _py3Request
from urllib.request import urlopen as _py3urlopen
from urllib.error import HTTPError as _py3HTTPError
import urllib.error
import urllib.parse
import json
import os
import time
DEBUG = False
cache = Cache(filename='pytmdb3.cache')
#DEBUG = True
#cache = Cache(engine='null')
def set_key(key):
"""
Specify the API key to use retrieving data from themoviedb.org.
This key must be set before any calls will function.
"""
if len(key) != 32:
raise TMDBKeyInvalid("Specified API key must be 128-bit hex")
try:
int(key, 16)
except:
raise TMDBKeyInvalid("Specified API key must be 128-bit hex")
Request._api_key = key
def set_cache(engine=None, *args, **kwargs):
"""Specify caching engine and properties."""
cache.configure(engine, *args, **kwargs)
class Request(_py3Request):
_api_key = None
_base_url = "http://api.themoviedb.org/3/"
@property
def api_key(self):
if self._api_key is None:
raise TMDBKeyMissing("API key must be specified before " +
"requests can be made")
return self._api_key
def __init__(self, url, **kwargs):
"""
Return a request object, using specified API path and
arguments.
"""
kwargs['api_key'] = self.api_key
self._url = url.lstrip('/')
self._kwargs = dict([(kwa, kwv) for kwa, kwv in list(kwargs.items())
if kwv is not None])
locale = get_locale()
kwargs = {}
for k, v in list(self._kwargs.items()):
kwargs[k] = locale.encode(v)
url = '{0}{1}?{2}'\
.format(self._base_url, self._url, urlencode(kwargs))
_py3Request.__init__(self, url)
self.add_header('Accept', 'application/json')
self.lifetime = 3600 # 1hr
def METHOD_NAME(self, **kwargs):
"""
Create a new instance of the request, with tweaked arguments.
"""
args = dict(self._kwargs)
for k, v in list(kwargs.items()):
if v is None:
if k in args:
del args[k]
else:
args[k] = v
obj = self.__class__(self._url, **args)
obj.lifetime = self.lifetime
return obj
def add_data(self, data):
"""Provide data to be sent with POST."""
self.data = urllib.parse.urlencode(data).encode('utf-8')
def open(self):
"""Open a file object to the specified URL."""
try:
if DEBUG:
print('loading '+self.get_full_url())
if self.data:
print(' '+self.data)
return _py3urlopen(self)
except _py3HTTPError as e:
raise TMDBHTTPError(e)
def read(self):
"""Return result from specified URL as a string."""
return self.open().read()
@cache.cached(_py3Request.get_full_url)
def readJSON(self):
"""Parse result from specified URL as JSON data."""
url = self.get_full_url()
tries = 0
while tries < 100:
try:
# catch HTTP error from open()
data = json.load(self.open())
break
except TMDBHTTPError as e:
try:
# try to load whatever was returned
data = json.loads(e.response)
except:
# cannot parse json, just raise existing error
raise e
else:
# Check for error code of 25 which means we are doing more than 40 requests per 10 seconds
if data.get('status_code', 1) == 25:
# Sleep and retry query.
if DEBUG:
print('Retry after {0} seconds'.format(max(float(e.headers['retry-after']),10)))
time.sleep(max(float(e.headers['retry-after']),10))
continue
else:
# response parsed, try to raise error from TMDB
handle_status(data, url)
# no error from TMDB, just raise existing error
raise e
handle_status(data, url)
if DEBUG:
import pprint
pprint.PrettyPrinter().pprint(data)
return data
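# Illustrative usage sketch (the key and path below are assumptions for the
# example, not part of this module):
#
# set_key('0123456789abcdef0123456789abcdef')
# popular = Request('movie/popular').readJSON()
# for movie in popular.get('results', []):
#     print(movie.get('title'))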
# See https://www.themoviedb.org/documentation/api/status-codes
status_handlers = {
1: None,
2: TMDBRequestInvalid('Invalid service - This service does not exist.'),
3: TMDBRequestError('Authentication Failed - You do not have ' +
'permissions to access this service.'),
4: TMDBRequestInvalid("Invalid format - This service doesn't exist " +
'in that format.'),
5: TMDBRequestInvalid('Invalid parameters - Your request parameters ' +
'are incorrect.'),
6: TMDBRequestInvalid('Invalid id - The pre-requisite id is invalid ' +
'or not found.'),
7: TMDBKeyInvalid('Invalid API key - You must be granted a valid key.'),
8: TMDBRequestError('Duplicate entry - The data you tried to submit ' +
'already exists.'),
9: TMDBOffline('This service is temporarily offline. Try again later.'),
10: TMDBKeyRevoked('Suspended API key - Access to your account has been ' +
'suspended, contact TMDB.'),
11: TMDBError('Internal error - Something went wrong. Contact TMDb.'),
12: None,
13: None,
14: TMDBRequestError('Authentication Failed.'),
15: TMDBError('Failed'),
16: TMDBError('Device Denied'),
17: TMDBError('Session Denied'),
# collection not found
34: TMDBRequestInvalid('The resource you requested could not be found.')}
def handle_status(data, query):
status = status_handlers[data.get('status_code', 1)]
if status is not None:
status.tmdberrno = data['status_code']
status.query = query
raise status |
629 | run write instructions | import os
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.exceptions import HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
import aimcore.transport.message_utils as utils
from aimcore.transport.router import ClientRouter
from aimcore.transport.tracking import TrackingRouter, ResourceTypeRegistry
from aimcore.transport.heartbeat import HeartbeatWatcher
from aimcore.transport.handlers import (
get_tree,
get_khash_array,
get_lock,
get_file_manager,
get_dev_package,
get_repo
)
from aimcore.transport.config import AIM_SERVER_BASE_PATH
from aim._core.storage.treeutils import encode_tree, decode_tree
def prepare_resource_registry():
registry = ResourceTypeRegistry()
registry.register('TreeView', get_tree)
registry.register('KhashArrayView', get_khash_array)
registry.register('Lock', get_lock)
registry.register('FileManager', get_file_manager)
registry.register('Package', get_dev_package)
registry.register('Repo', get_repo)
return registry
async def http_exception_handler(request, exc):
message = str(exc.detail)
detail = None
if isinstance(exc.detail, dict):
message = exc.detail.pop('message', message)
detail = exc.detail.pop('detail', None)
response = {'message': message}
if detail:
response.update({'detail': detail})
else:
response.update({'detail': str(exc)})
return JSONResponse(response, status_code=exc.status_code)
async def fallback_exception_handler(request, exc):
response = {
'message': f'\'{type(exc)}\' exception raised!',
'detail': str(exc)
}
return JSONResponse(response, status_code=500)
def create_app():
app = FastAPI(title=__name__)
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_methods=['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'HEAD'],
allow_headers=['Origin', 'X-Requested-With',
'Content-Type', 'Accept', 'Authorization', 'X-Timezone-Offset'],
allow_credentials=True,
max_age=86400
)
registry = prepare_resource_registry()
client_router = ClientRouter()
tracking_router = TrackingRouter(registry)
api_app = FastAPI()
api_app.include_router(client_router.router, prefix='/client')
api_app.include_router(tracking_router.router, prefix='/tracking')
api_app.add_exception_handler(HTTPException, http_exception_handler)
api_app.add_exception_handler(Exception, fallback_exception_handler)
base_path = os.environ.get(AIM_SERVER_BASE_PATH, '')
app.mount(f'{base_path}/', api_app)
heartbeat_watcher = HeartbeatWatcher(
ClientRouter.client_heartbeat_pool,
)
heartbeat_watcher.start()
@api_app.get('/status/')
async def status():
return {'status': 'OK'}
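# Sketch of the write-instruction format handled by the endpoint below (inferred
# from this handler, not a documented protocol): each decoded instruction is a
# (resource_handler, method_name, args) tuple, e.g.
#
# ("<handler-id>", "add", (1.0,))            # calls resource.add(1.0)
# ("<handler-id>", "name.setter", ("run",))  # sets resource.name = "run"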
@api_app.websocket('/tracking/{client_uri}/write-instruction/')
async def METHOD_NAME(websocket: WebSocket, client_uri: str):
await TrackingRouter.manager.connect(websocket)
try:
while True:
raw_message = await websocket.receive_bytes()
write_instructions = decode_tree(
utils.unpack_args(raw_message))
for instruction in write_instructions:
resource_handler, method_name, args = instruction
TrackingRouter._verify_resource_handler(
resource_handler, client_uri)
checked_args = []
for arg in args:
if isinstance(arg, utils.ResourceObject):
handler = arg.storage['handler']
TrackingRouter._verify_resource_handler(
handler, client_uri)
checked_args.append(
TrackingRouter.resource_pool[handler][1].ref)
else:
checked_args.append(arg)
resource = TrackingRouter.resource_pool[resource_handler][1].ref
if method_name.endswith('.setter'):
attr_name = method_name.split('.')[0]
setattr(resource, attr_name, checked_args[0])
else:
attr = getattr(resource, method_name)
assert callable(attr)
attr(*checked_args)
await websocket.send_bytes(b'OK')
except WebSocketDisconnect:
TrackingRouter.manager.disconnect(websocket)
except Exception as e:
await websocket.send_bytes(utils.pack_args(encode_tree(utils.build_exception(e))))
return app
app = create_app() |
630 | autograd log det | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import pytest
import torch
from torch.distributions import (
AffineTransform,
Beta,
TransformedDistribution,
biject_to,
transform_to,
)
from pyro.distributions import constraints, transforms
from pyro.distributions.torch import LKJCholesky
from tests.common import assert_equal, assert_tensors_equal
@pytest.mark.parametrize("value_shape", [(1, 1), (3, 3), (5, 5)])
def test_constraint(value_shape):
value = torch.randn(value_shape).clamp(-2, 2).tril()
value.diagonal(dim1=-2, dim2=-1).exp_()
value = value / value.norm(2, dim=-1, keepdim=True)
assert (constraints.corr_cholesky.check(value) == 1).all()
def METHOD_NAME(ys, x):
# computes log_abs_det_jacobian of y w.r.t. x
return (
torch.stack([torch.autograd.grad(y, (x,), retain_graph=True)[0] for y in ys])
.det()
.abs()
.log()
)
@pytest.mark.parametrize("y_shape", [(1,), (3, 1), (6,), (1, 6), (2, 6)])
def test_unconstrained_to_corr_cholesky_transform(y_shape):
transform = transforms.CorrCholeskyTransform()
y = torch.empty(y_shape).uniform_(-4, 4).requires_grad_()
x = transform(y)
# test codomain
assert (transform.codomain.check(x) == 1).all()
# test inv
y_prime = transform.inv(x)
assert_tensors_equal(y, y_prime, prec=1e-4)
# test domain
assert (transform.domain.check(y_prime) == 1).all()
# test log_abs_det_jacobian
log_det = transform.log_abs_det_jacobian(y, x)
assert log_det.shape == y_shape[:-1]
if len(y_shape) == 1:
triu_index = x.new_ones(x.shape).triu(diagonal=1).to(torch.bool)
x_tril_vector = x.t()[triu_index]
assert_tensors_equal(METHOD_NAME(x_tril_vector, y), log_det, prec=1e-4)
x_tril_vector = x_tril_vector.detach().requires_grad_()
x = x.new_zeros(x.shape)
x[triu_index] = x_tril_vector
x = x.t()
z = transform.inv(x)
assert_tensors_equal(METHOD_NAME(z, x_tril_vector), -log_det, prec=1e-4)
@pytest.mark.parametrize("x_shape", [(1,), (3, 1), (6,), (1, 6), (5, 6)])
@pytest.mark.parametrize("mapping", [biject_to, transform_to])
def test_corr_cholesky_transform(x_shape, mapping):
transform = mapping(constraints.corr_cholesky)
x = torch.randn(x_shape, requires_grad=True).clamp(-2, 2)
y = transform(x)
# test codomain
assert (transform.codomain.check(y) == 1).all()
# test inv
z = transform.inv(y)
assert_tensors_equal(x, z, prec=1e-4)
# test domain
assert (transform.domain.check(z) == 1).all()
# test log_abs_det_jacobian
log_det = transform.log_abs_det_jacobian(x, y)
assert log_det.shape == x_shape[:-1]
@pytest.mark.parametrize("dim", [2, 3, 4, 10])
def test_log_prob_conc1(dim):
dist = LKJCholesky(dim, torch.tensor([1.0]))
a_sample = dist.sample(torch.Size([100]))
lp = dist.log_prob(a_sample)
if dim == 2:
assert_equal(lp, lp.new_full(lp.size(), -math.log(2)))
else:
ladj = (
a_sample.diagonal(dim1=-2, dim2=-1)
.log()
.mul(
torch.linspace(
start=dim - 1,
end=0,
steps=dim,
device=a_sample.device,
dtype=a_sample.dtype,
)
)
.sum(-1)
)
lps_less_ladj = lp - ladj
assert (lps_less_ladj - lps_less_ladj.min()).abs().sum() < 1e-4
@pytest.mark.parametrize("concentration", [0.1, 0.5, 1.0, 2.0, 5.0])
def test_log_prob_d2(concentration):
dist = LKJCholesky(2, torch.tensor([concentration]))
test_dist = TransformedDistribution(
Beta(concentration, concentration), AffineTransform(loc=-1.0, scale=2.0)
)
samples = dist.sample(torch.Size([100]))
lp = dist.log_prob(samples)
x = samples[..., 1, 0]
tst = test_dist.log_prob(x)
# LKJ prevents inf values in log_prob
lp[tst == math.inf] = math.inf # substitute inf for comparison
assert_tensors_equal(lp, tst, prec=1e-3)
def test_sample_batch():
# Regression test for https://github.com/pyro-ppl/pyro/issues/2615
dist = LKJCholesky(3, concentration=torch.ones(())).expand([12])
# batch shape and event shape are as you'd expect
assert dist.batch_shape == torch.Size([12])
assert dist.event_shape == torch.Size([3, 3])
# samples have correct shape when sample_shape=()
assert dist.shape(()) == torch.Size([12, 3, 3])
assert dist.sample().shape == torch.Size([12, 3, 3])
# samples had the wrong shape when sample_shape is non-unit
assert dist.shape((4,)) == torch.Size([4, 12, 3, 3])
assert dist.sample((4,)).shape == torch.Size([4, 12, 3, 3]) |
631 | signal recompose meanfreq | import matplotlib.pyplot as plt
import numpy as np
import scipy.cluster
from .signal_zerocrossings import signal_zerocrossings
def signal_recompose(components, method="wcorr", threshold=0.5, keep_sd=None, **kwargs):
"""**Combine signal sources after decomposition**
Combine and reconstruct meaningful signal sources after signal decomposition.
Parameters
-----------
components : array
Array of components obtained via :func:`.signal_decompose`.
method : str
The decomposition method. Can be one of ``"wcorr"``.
threshold : float
The threshold used to group components together.
keep_sd : float
If a float is specified, will only keep the reconstructed components whose standard
deviation (SD) is greater than or equal to that percentage of the max SD of the components. For
instance, ``keep_sd=0.01`` will remove all components with SD lower than 1% of the
max SD. This can be used to filter out noise.
**kwargs
Other arguments used to override, for instance ``metric="chebyshev"``.
Returns
-------
Array
Components of the recomposed components.
Examples
--------
.. ipython:: python
import neurokit2 as nk
# Create complex signal
signal = nk.signal_simulate(duration=10, frequency=1, noise=0.01) # High freq
signal += 3 * nk.signal_simulate(duration=10, frequency=3, noise=0.01) # Higher freq
signal += 3 * np.linspace(0, 2, len(signal)) # Add baseline and trend
signal += 2 * nk.signal_simulate(duration=10, frequency=0.1, noise=0)
# Decompose signal
components = nk.signal_decompose(signal, method='emd')
# Recompose
recomposed = nk.signal_recompose(components, method='wcorr', threshold=0.90)
@savefig p_signal_recompose1.png scale=100%
nk.signal_plot(components) # Visualize components
@suppress
plt.close()
"""
# Apply method
method = method.lower()
if method in ["wcorr"]:
clusters = _signal_recompose_wcorr(components, threshold=threshold, **kwargs)
recomposed = _signal_recompose_sum(components, clusters)
else:
raise ValueError("NeuroKit error: signal_decompose(): 'method' should be one of 'emd'")
if keep_sd is not None:
recomposed = _signal_recompose_filter_sd(components, threshold=keep_sd)
return recomposed
# =============================================================================
# Recombination methods
# =============================================================================
def _signal_recompose_sum(components, clusters):
# Reorient components
components = components.T
# Reconstruct Time Series from correlated components
clusters = [np.where(clusters == cluster)[0] for cluster in np.unique(clusters)]
if len(clusters) == 0:
raise ValueError(
"Not enough clusters of components detected. Please decrease the " "`threshold`."
)
# Initialize components matrix
recomposed = np.zeros((len(components), len(clusters)))
for i, indices in enumerate(clusters):
recomposed[:, i] = components[:, indices].sum(axis=1)
return recomposed.T
# =============================================================================
# Clustering Methods
# =============================================================================
# Weighted Correlation
# ----------------------------------------------------------------------------
def _signal_recompose_wcorr(components, threshold=0.5, metric="chebyshev"):
""""""
# Calculate the w-correlation matrix.
wcorr = _signal_recompose_get_wcorr(components, show=False)
# Find clusters in correlation matrix
pairwise_distances = scipy.cluster.hierarchy.distance.pdist(wcorr, metric=metric)
linkage = scipy.cluster.hierarchy.linkage(pairwise_distances, method="complete")
threshold = threshold * pairwise_distances.max()
clusters = scipy.cluster.hierarchy.fcluster(linkage, threshold, "distance")
return clusters
def _signal_recompose_get_wcorr(components, show=False):
"""Calculates the weighted correlation matrix for the time series.
References
----------
- https://www.kaggle.com/jdarcy/introducing-ssa-for-time-series-decomposition
"""
# Reorient components
components = components.T
L = components.shape[1]
K = components.shape[0] - L + 1
# Calculate the weights
w = np.array(list(np.arange(L) + 1) + [L] * (K - L - 1) + list(np.arange(L) + 1)[::-1])
def w_inner(F_i, F_j):
return w.dot(F_i * F_j)
# Calculated weighted norms, ||F_i||_w, then invert.
F_wnorms = np.array([w_inner(components[:, i], components[:, i]) for i in range(L)])
F_wnorms = F_wnorms ** -0.5
# Calculate Wcorr.
Wcorr = np.identity(L)
for i in range(L):
for j in range(i + 1, L):
Wcorr[i, j] = abs(
w_inner(components[:, i], components[:, j]) * F_wnorms[i] * F_wnorms[j]
)
Wcorr[j, i] = Wcorr[i, j]
if show is True:
ax = plt.imshow(Wcorr)
plt.xlabel(r"$\tilde{F}_i$")
plt.ylabel(r"$\tilde{F}_j$")
plt.colorbar(ax.colorbar, fraction=0.045)
ax.colorbar.set_label("$W_{i,j}$")
plt.clim(0, 1)
# For plotting purposes:
min_range = 0
max_range = len(Wcorr) - 1
plt.xlim(min_range - 0.5, max_range + 0.5)
plt.ylim(max_range + 0.5, min_range - 0.5)
return Wcorr
# =============================================================================
# Filter method
# =============================================================================
def _signal_recompose_filter_sd(components, threshold=0.01):
"""Filter by standard deviation."""
SDs = [np.std(components[i, :], ddof=1) for i in range(len(components))]
indices = np.where(SDs >= threshold * np.max(SDs))
return components[indices]
def METHOD_NAME(components, sampling_rate=1000):
"""Get the mean frequency of components."""
duration = components.shape[1] / sampling_rate
n = len(components)
freqs = np.zeros(n)
for i in range(n):
c = components[i, :] - np.mean(components[i, :])
freqs[i] = len(signal_zerocrossings(c)) / duration
return freqs |
632 | merge | import numpy
import pandas as pd
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps.waterfall import (
_RESULT_COLUMN,
GROUP_WATERFALL_COLUMN,
LABEL_WATERFALL_COLUMN,
TYPE_WATERFALL_COLUMN,
WaterfallStep,
)
def execute_waterfall(
step: WaterfallStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
# first milestone
start_df = df[df[step.milestonesColumn] == step.start].rename(
columns={step.valueColumn: f"{step.valueColumn}_start"}
)
# second milestone
end_df = df[df[step.milestonesColumn] == step.end].rename(
columns={step.valueColumn: f"{step.valueColumn}_end"}
)
upper = compute_agg_milestone(
step,
df,
start_df.rename(columns={f"{step.valueColumn}_start": step.valueColumn}),
step.start,
)
mid = compute_mid(step, METHOD_NAME(step, start_df, end_df), df)
downer = compute_agg_milestone(
step, df, end_df.rename(columns={f"{step.valueColumn}_end": step.valueColumn}), step.end
)
return pd.concat([upper, mid, downer])
# start_df is the base dataframe filtered to contain only results at the start of the waterfall
# end_df is the base dataframe filtered to contain only results at the end of the waterfall
# this method computes the difference in value for every label between the end and the start
def METHOD_NAME(step: WaterfallStep, start_df: DataFrame, end_df: DataFrame) -> DataFrame:
group_by_columns = [step.labelsColumn] + step.groupby
if step.parentsColumn is not None:
group_by_columns = group_by_columns + [step.parentsColumn]
start_df = (
start_df.groupby(by=group_by_columns)
.agg({step.valueColumn + "_start": "sum"})
.METHOD_NAME(start_df[group_by_columns], on=group_by_columns)
.rename(columns={step.labelsColumn + "_x": step.labelsColumn})
.drop_duplicates()
)
end_df = (
end_df.groupby(by=group_by_columns)
.agg({step.valueColumn + "_end": "sum"})
.METHOD_NAME(end_df[group_by_columns], on=group_by_columns)
.rename(columns={step.labelsColumn + "_x": step.labelsColumn})
.drop_duplicates()
)
# we join the result to compare them
merged_df = start_df.METHOD_NAME(end_df, on=get_join_key(step))
merged_df[_RESULT_COLUMN] = (
merged_df[f"{step.valueColumn}_end"] - merged_df[f"{step.valueColumn}_start"]
)
merged_df = merged_df.drop(
columns=[
f"{step.valueColumn}_start",
f"{step.valueColumn}_end",
]
)
# if there is a parent column, we need to aggregate for them
if step.parentsColumn is not None:
parents_results = merged_df.groupby(
step.groupby + [step.parentsColumn], as_index=False
).agg({_RESULT_COLUMN: "sum"})
parents_results[step.labelsColumn] = parents_results[step.parentsColumn]
return pd.concat([merged_df, parents_results])
return merged_df
# the waterfall has a very specific structure.
# this method creates the top / bottom rows.
# these contain the sum of values for the whole milestone, regardless of label.
def compute_agg_milestone(
step: WaterfallStep, df: DataFrame, start_df: DataFrame, milestone
) -> pd.DataFrame:
if len(step.groupby) > 0:
groups = df[step.groupby].drop_duplicates()
group_by = step.groupby
else:
groups = pd.DataFrame({"_VQB_GROUP": [0]}) # pseudo group
start_df["_VQB_GROUP"] = 0
groups["_VQB_GROUP"] = 0
group_by = ["_VQB_GROUP"]
groups = groups.assign(**{step.labelsColumn: milestone})
agg = start_df.groupby(group_by).agg({f"{step.valueColumn}": "sum"})
agg = (
groups.METHOD_NAME(agg, on=group_by)
.sort_values(
by=step.valueColumn if step.sortBy == "value" else step.labelsColumn,
ascending=step.order == "asc",
)
.rename(columns={step.labelsColumn: LABEL_WATERFALL_COLUMN})
)
if step.parentsColumn is not None:
agg[GROUP_WATERFALL_COLUMN] = agg[LABEL_WATERFALL_COLUMN].astype(str)
agg[TYPE_WATERFALL_COLUMN] = None
agg[LABEL_WATERFALL_COLUMN] = agg[LABEL_WATERFALL_COLUMN].astype(str)
if len(step.groupby) == 0:
del start_df["_VQB_GROUP"]
del agg["_VQB_GROUP"]
return agg
# this method shapes the merged DF so it matches the waterfall format
def compute_mid(step: WaterfallStep, merged_df: DataFrame, df: DataFrame) -> DataFrame:
result_df = DataFrame(
{
LABEL_WATERFALL_COLUMN: merged_df.sort_values(
by=get_sort_column(step), ascending=step.order == "asc"
)[step.labelsColumn].astype(str)
}
)
result_df[step.groupby] = merged_df.sort_values(
by=get_sort_column(step), ascending=step.order == "asc"
)[step.groupby]
if step.parentsColumn is None:
result_df[TYPE_WATERFALL_COLUMN] = "Parent"
else:
result_df[GROUP_WATERFALL_COLUMN] = merged_df.sort_values(
by=get_sort_column(step), ascending=step.order == "asc"
)[step.parentsColumn]
result_df[TYPE_WATERFALL_COLUMN] = numpy.where(
result_df["LABEL_waterfall"] == (result_df[GROUP_WATERFALL_COLUMN]),
["parent"],
["child"],
)
result_df[step.valueColumn] = merged_df.sort_values(
by=get_sort_column(step), ascending=step.order == "asc"
)[_RESULT_COLUMN]
return result_df
def get_sort_column(step: WaterfallStep):
if step.sortBy == "value":
return _RESULT_COLUMN
else:
return step.labelsColumn
def get_join_key(step: WaterfallStep):
if step.parentsColumn is None:
return [step.labelsColumn] + step.groupby
else:
return [step.labelsColumn, step.parentsColumn] + step.groupby |
633 | test analyze package invalid data | import sys
import pytest
from frictionless import Package
pytestmark = pytest.mark.skipif(
sys.version_info < (3, 10),
reason="Supported on Python3.10+",
)
# General
def test_analyze_package():
package = Package("data/package-1067.json")
analysis = package.analyze()
assert len(analysis) == 3
assert analysis["capital-valid"]["rows"] == 5
assert list(analysis["capital-valid"].keys()) == [
"variableTypes",
"notNullRows",
"rowsWithNullValues",
"fieldStats",
"averageRecordSizeInBytes",
"timeTaken",
"md5",
"sha256",
"bytes",
"fields",
"rows",
]
assert analysis["capital-invalid"]["rows"] == 11
assert list(analysis["capital-invalid"].keys()) == [
"variableTypes",
"notNullRows",
"rowsWithNullValues",
"fieldStats",
"averageRecordSizeInBytes",
"timeTaken",
"md5",
"sha256",
"bytes",
"fields",
"rows",
]
assert analysis["analysis-data"]["rows"] == 9
assert list(analysis["analysis-data"].keys()) == [
"variableTypes",
"notNullRows",
"rowsWithNullValues",
"fieldStats",
"averageRecordSizeInBytes",
"timeTaken",
"md5",
"sha256",
"bytes",
"fields",
"rows",
]
def test_analyze_package_detailed():
package = Package("data/package-1067.json")
analysis = package.analyze(detailed=True)
assert analysis["capital-valid"]["rows"] == 5
assert list(analysis["capital-valid"].keys()) == [
"variableTypes",
"notNullRows",
"rowsWithNullValues",
"fieldStats",
"correlations",
"averageRecordSizeInBytes",
"timeTaken",
"md5",
"sha256",
"bytes",
"fields",
"rows",
]
assert analysis["capital-invalid"]["rows"] == 11
assert list(analysis["capital-invalid"].keys()) == [
"variableTypes",
"notNullRows",
"rowsWithNullValues",
"fieldStats",
"correlations",
"averageRecordSizeInBytes",
"timeTaken",
"md5",
"sha256",
"bytes",
"fields",
"rows",
]
assert analysis["analysis-data"]["rows"] == 9
assert list(analysis["analysis-data"].keys()) == [
"variableTypes",
"notNullRows",
"rowsWithNullValues",
"fieldStats",
"correlations",
"averageRecordSizeInBytes",
"timeTaken",
"md5",
"sha256",
"bytes",
"fields",
"rows",
]
def METHOD_NAME():
descriptor = {
"name": "capitals-and-schools",
"resources": [
{"name": "capital-invalid", "path": "data/invalid.csv"},
],
}
package = Package(descriptor)
analysis = package.analyze()
assert round(analysis["capital-invalid"]["averageRecordSizeInBytes"]) == 12
assert analysis["capital-invalid"]["fields"] == 4
assert analysis["capital-invalid"]["fieldStats"] == {}
assert analysis["capital-invalid"]["rows"] == 4
assert analysis["capital-invalid"]["rowsWithNullValues"] == 3
assert analysis["capital-invalid"]["notNullRows"] == 1
assert analysis["capital-invalid"]["variableTypes"] == {}
def test_analyze_package_detailed_variable_types():
package = Package("data/package-1067.json")
analysis = package.analyze(detailed=True)
assert len(analysis) == 3
assert analysis["capital-valid"]["variableTypes"] == {
"number": 1,
"string": 1,
}
assert analysis["capital-invalid"]["variableTypes"] == {
"integer": 1,
"string": 1,
}
assert analysis["analysis-data"]["variableTypes"] == {
"boolean": 2,
"integer": 2,
"number": 2,
"string": 5,
}
def test_analyze_package_detailed_non_numeric_values_summary():
package = Package("data/package-1067.json")
analysis = package.analyze(detailed=True)
assert list(analysis["capital-valid"]["fieldStats"]["name"].keys()) == [
"type",
"values",
]
assert list(analysis["capital-invalid"]["fieldStats"]["name"].keys()) == [
"type",
"values",
]
assert list(analysis["analysis-data"]["fieldStats"]["gender"].keys()) == [
"type",
"values",
]
def test_analyze_package_detailed_numeric_values_descriptive_summary():
package = Package("data/package-1067.json")
analysis = package.analyze(detailed=True)
assert list(analysis["analysis-data"]["fieldStats"]["parent_age"].keys()) == [
"type",
"mean",
"median",
"mode",
"variance",
"quantiles",
"stdev",
"max",
"min",
"bounds",
"uniqueValues",
"outliers",
"missingValues",
]
def test_analyze_package_detailed_numeric_descriptive_statistics():
package = Package("data/package-1067.json")
analysis = package.analyze(detailed=True)
name = "analysis-data"
assert analysis[name]["fieldStats"]["parent_age"]["bounds"] == [39, 67]
assert analysis[name]["fieldStats"]["parent_age"]["max"] == 57
assert analysis[name]["fieldStats"]["parent_age"]["mean"] == 52.666666666666664
assert analysis[name]["fieldStats"]["parent_age"]["median"] == 52
assert analysis[name]["fieldStats"]["parent_age"]["min"] == 48
assert analysis[name]["fieldStats"]["parent_age"]["missingValues"] == 0
assert analysis[name]["fieldStats"]["parent_age"]["mode"] == 57
assert analysis[name]["fieldStats"]["parent_age"]["quantiles"] == [49.5, 52.0, 56.5]
assert analysis[name]["fieldStats"]["parent_age"]["stdev"] == 3.391164991562634
assert analysis[name]["fieldStats"]["parent_age"]["uniqueValues"] == 7
assert analysis[name]["fieldStats"]["parent_age"]["variance"] == 11.5
assert analysis[name]["fieldStats"]["parent_age"]["outliers"] == []
def test_analyze_package_detailed_non_numeric_summary():
package = Package("data/package-1067.json")
analysis = package.analyze(detailed=True)
assert analysis["capital-valid"]["fieldStats"]["name"]["type"] == "categorical"
assert analysis["capital-valid"]["fieldStats"]["name"]["values"] == {
"Berlin",
"London",
"Madrid",
"Paris",
"Rome",
}
assert (
analysis["analysis-data"]["fieldStats"]["school_accreditation"]["type"]
== "categorical"
)
assert analysis["analysis-data"]["fieldStats"]["school_accreditation"]["values"] == {
"A",
"B",
}
def test_analyze_package_detailed_invalid_data():
descriptor = {
"name": "capitals-and-schools",
"resources": [
{"name": "capital-invalid", "path": "data/invalid.csv"},
],
}
package = Package(descriptor)
analysis = package.analyze(detailed=True)
name = "capital-invalid"
assert round(analysis[name]["averageRecordSizeInBytes"]) == 12
assert analysis[name]["fields"] == 4
assert list(analysis[name]["fieldStats"].keys()) == [
"id",
"name",
"field3",
"name2",
]
assert analysis[name]["rows"] == 4
assert analysis[name]["rowsWithNullValues"] == 3
assert analysis[name]["notNullRows"] == 1
assert analysis[name]["variableTypes"] == {"integer": 3, "string": 1} |
634 | get traces | from pathlib import Path
import numpy as np
from spikeinterface.core import BaseRecording, BaseRecordingSegment
from spikeinterface.core.core_tools import define_function_from_class
try:
import h5py
HAVE_MCSH5 = True
except ImportError:
HAVE_MCSH5 = False
class MCSH5RecordingExtractor(BaseRecording):
"""Load a MCS H5 file as a recording extractor.
Parameters
----------
file_path : str or Path
The path to the MCS h5 file.
stream_id : int, default: 0
The stream ID to load.
Returns
-------
recording : MCSH5RecordingExtractor
The loaded data.
"""
extractor_name = "MCSH5Recording"
installed = HAVE_MCSH5 # check at class level if installed or not
mode = "file"
installation_mesg = (
"To use the MCSH5RecordingExtractor install h5py: \n\n pip install h5py\n\n" # error message when not installed
)
name = "mcsh5"
def __init__(self, file_path, stream_id=0):
assert self.installed, self.installation_mesg
self._file_path = file_path
mcs_info = openMCSH5File(self._file_path, stream_id)
self._rf = mcs_info["filehandle"]
BaseRecording.__init__(
self,
sampling_frequency=mcs_info["sampling_frequency"],
channel_ids=mcs_info["channel_ids"],
dtype=mcs_info["dtype"],
)
self.extra_requirements.append("h5py")
recording_segment = MCSH5RecordingSegment(
self._rf, stream_id, mcs_info["num_frames"], sampling_frequency=mcs_info["sampling_frequency"]
)
self.add_recording_segment(recording_segment)
# set gain
self.set_channel_gains(mcs_info["gain"])
# set other properties
self.set_property("electrode_labels", mcs_info["electrode_labels"])
self._kwargs = {"file_path": str(Path(file_path).absolute()), "stream_id": stream_id}
def __del__(self):
self._rf.close()
class MCSH5RecordingSegment(BaseRecordingSegment):
def __init__(self, rf, stream_id, num_frames, sampling_frequency):
BaseRecordingSegment.__init__(self, sampling_frequency=sampling_frequency)
self._rf = rf
self._stream_id = stream_id
self._num_samples = int(num_frames)
self._stream = self._rf.require_group("/Data/Recording_0/AnalogStream/Stream_" + str(self._stream_id))
def get_num_samples(self):
return self._num_samples
def METHOD_NAME(self, start_frame=None, end_frame=None, channel_indices=None):
if isinstance(channel_indices, slice):
traces = self._stream.get("ChannelData")[channel_indices, start_frame:end_frame].T
else:
# channel_indices is np.ndarray
if np.array(channel_indices).size > 1 and np.any(np.diff(channel_indices) < 0):
# get around h5py constraint that it does not allow datasets
# to be indexed out of order
sorted_channel_indices = np.sort(channel_indices)
resorted_indices = np.array([list(sorted_channel_indices).index(ch) for ch in channel_indices])
recordings = self._stream.get("ChannelData")[sorted_channel_indices, start_frame:end_frame].T
traces = recordings[:, resorted_indices]
else:
traces = self._stream.get("ChannelData")[channel_indices, start_frame:end_frame].T
return traces
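# Illustrative usage sketch (the file name is an assumption for the example):
#
# recording = read_mcsh5("recording.h5", stream_id=0)
# traces = recording.get_traces(start_frame=0, end_frame=1000)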
def openMCSH5File(filename, stream_id):
"""Open an MCS hdf5 file, read and return the recording info."""
rf = h5py.File(filename, "r")
stream_name = "Stream_" + str(stream_id)
analog_stream_names = list(rf.require_group("/Data/Recording_0/AnalogStream").keys())
assert stream_name in analog_stream_names, (
f"Specified stream does not exist. " f"Available streams: {analog_stream_names}"
)
stream = rf.require_group("/Data/Recording_0/AnalogStream/" + stream_name)
data = stream.get("ChannelData")
timestamps = np.array(stream.get("ChannelDataTimeStamps"))
info = np.array(stream.get("InfoChannel"))
dtype = data.dtype
Unit = info["Unit"][0]
Tick = info["Tick"][0] / 1e6
exponent = info["Exponent"][0]
convFact = info["ConversionFactor"][0]
gain = convFact.astype(float) * (10.0**exponent)
nRecCh, nFrames = data.shape
channel_ids = [f"Ch{ch}" for ch in info["ChannelID"]]
assert len(np.unique(channel_ids)) == len(channel_ids), "Duplicate MCS channel IDs found"
electrodeLabels = [l.decode() for l in info["Label"]]
assert timestamps[0][0] < timestamps[0][2], "Please check the validity of 'ChannelDataTimeStamps' in the stream."
TimeVals = np.arange(timestamps[0][0], timestamps[0][2] + 1, 1) * Tick
if Unit != b"V":
print(f"Unexpected units found, expected volts, found {Unit.decode('UTF-8')}. Assuming Volts.")
timestep_avg = np.mean(TimeVals[1:] - TimeVals[0:-1])
timestep_min = np.min(TimeVals[1:] - TimeVals[0:-1])
timestep_max = np.max(TimeVals[1:] - TimeVals[0:-1])
assert all(
np.abs(np.array((timestep_min, timestep_max)) - timestep_avg) / timestep_avg < 1e-6
), "Time steps vary by more than 1 ppm"
samplingRate = 1.0 / timestep_avg
mcs_info = {
"filehandle": rf,
"num_frames": nFrames,
"sampling_frequency": samplingRate,
"num_channels": nRecCh,
"channel_ids": channel_ids,
"electrode_labels": electrodeLabels,
"gain": gain,
"dtype": dtype,
}
return mcs_info
read_mcsh5 = define_function_from_class(source_class=MCSH5RecordingExtractor, name="read_mcsh5") |
635 | test bootstrap for vector | """
This module is used to test the functionalities of the baseclass.
- test_pick_and_freeze_sampling:
Test the `generate_pick_and_test_samples` function.
- test_bootstrap_for_vector:
Test the bootstrap sampling for a vector.
- test_bootstrap_for_matrix:
Test the bootstrap sampling for a matrix.
"""
import numpy as np
import pytest
from UQpy.run_model.RunModel import RunModel
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.SobolSensitivity import SobolSensitivity
from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples
# Prepare
###############################################################################
# Prepare the input distribution
@pytest.fixture()
def ishigami_input_dist_object():
"""
This function returns the input distribution for the Ishigami function.
X1 ~ Uniform(-pi, pi)
X2 ~ Uniform(-pi, pi)
X3 ~ Uniform(-pi, pi)
"""
return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
@pytest.fixture()
def ishigami_model_object():
"""This function creates the Ishigami run_model_object"""
model = PythonModel(
model_script="ishigami.py",
model_object_name="evaluate",
var_names=[r"$X_1$", "$X_2$", "$X_3$"],
delete_files=True,
params=[7, 0.1],
)
runmodel_obj = RunModel(model=model)
return runmodel_obj
@pytest.fixture()
def sobol_object(ishigami_model_object, ishigami_input_dist_object):
"""This function returns the Sobol object."""
return SobolSensitivity(ishigami_model_object, ishigami_input_dist_object)
@pytest.fixture()
def sobol_object_input_samples_small(sobol_object):
"""This creates the Sobol object."""
SA = sobol_object
np.random.seed(12345) # set seed for reproducibility
SA.n_samples = 2
return generate_pick_freeze_samples(SA.dist_object, SA.n_samples)
# Generate N pick and free samples
@pytest.fixture()
def pick_and_freeze_samples_small():
"""
This function returns input matrices A, B and C_i with a small number
of samples for the Ishigami input distribution.
This is used to test the `generate_pick_and_freeze_samples` function.
The samples are generated as follows:
dist_1 = JointInd([Uniform(-np.pi, 2*np.pi)]*3)
np.random.seed(12345) #! set seed for reproducibility
n_samples = 2
n_vars = 3
samples = dist_1.rvs(n_samples*2)
# Split samples
A_samples = samples[:n_samples, :]
B_samples = samples[n_samples:, :]
def _get_C_i(i, A, B):
C_i = copy.deepcopy(B)
C_i[:, i] = A[:, i]
return C_i
C_samples = np.zeros((n_vars, n_samples, n_vars))
for i in range(3):
C_samples[i, :, :] = _get_C_i(i, A_samples, B_samples)
print(np.around(A_samples,3))
print(np.around(B_samples,3))
print(np.around(C_samples,3))
"""
A_samples = np.array([[2.699, 0.426, 1.564], [-1.154, 0.600, 0.965]])
B_samples = np.array([[-1.986, 2.919, 1.556], [-1.856, 0.962, 2.898]])
C_samples = np.array(
[
[[2.699, 2.919, 1.556], [-1.154, 0.962, 2.898]],
[[-1.986, 0.426, 1.556], [-1.856, 0.6, 2.898]],
[[-1.986, 2.919, 1.564], [-1.856, 0.962, 0.965]],
]
)
return A_samples, B_samples, C_samples
@pytest.fixture()
def random_f_A():
"""This function returns an A-like vector"""
rand_f_A = np.array([[100], [101], [102], [103], [104]])
return rand_f_A
@pytest.fixture()
def random_f_C_i():
"""This function returns a C_i-like vector"""
rand_f_C_i = np.array([[100, 200], [101, 201], [102, 202], [103, 203], [104, 204]])
return rand_f_C_i
@pytest.fixture()
def manual_bootstrap_samples_f_A():
"""This function bootstraps the A-like vector using random indices"""
    # Generated using np.random.randint(low=0, high=5, size=(5,1))
# with np.random.seed(12345)
# rand_indices_f_A = np.array([ [2],
# [1],
# [4],
# [1],
# [2]])
    # bootstrap_f_A = rand_f_A[rand_indices_f_A]
bootstrap_sample_A = np.array([[102], [101], [104], [101], [102]])
return bootstrap_sample_A
@pytest.fixture()
def manual_bootstrap_samples_f_C_i():
"""This function bootstraps the C_i-like vector using random indices"""
    # Generated using np.random.randint(low=0, high=5, size=(5,2))
# with np.random.seed(12345)
# rand_indices_C_i = np.array([ [2, 1],
# [4, 1],
# [2, 1],
# [1, 3],
# [1, 3]])
bootstrap_f_C_i = np.array(
[[102, 201], [104, 201], [102, 201], [101, 203], [101, 203]]
)
return bootstrap_f_C_i
# Unit tests
###############################################################################
def test_pick_and_freeze_sampling(
pick_and_freeze_samples_small, sobol_object_input_samples_small
):
    """Test the `generate_pick_freeze_samples` function."""
# Prepare
A_samples, B_samples, C_samples = pick_and_freeze_samples_small
A_test, B_test, C_test_generator, _ = sobol_object_input_samples_small
# Act
assert np.allclose(A_samples, np.around(A_test, 3))
assert np.allclose(B_samples, np.around(B_test, 3))
for i in range(3):
C_test = next(C_test_generator)
assert np.allclose(C_samples[i, :, :], np.around(C_test, 3))
def METHOD_NAME(random_f_A, manual_bootstrap_samples_f_A):
"""Test the bootstrap sampling for a vector."""
# Prepare
np.random.seed(12345) #! set seed for reproducibility
gen_f_A = SobolSensitivity.bootstrap_sample_generator_1D(random_f_A)
bootstrap_samples_f_A = next(gen_f_A)
# Act
assert np.array_equal(manual_bootstrap_samples_f_A, bootstrap_samples_f_A)
def test_bootstrap_for_matrix(random_f_C_i, manual_bootstrap_samples_f_C_i):
"""Test the bootstrap sampling for a matrix."""
# Prepare
np.random.seed(12345) #! set seed for reproducibility
gen_f_C_i = SobolSensitivity.bootstrap_sample_generator_2D(random_f_C_i)
bootstrap_samples_C_i = next(gen_f_C_i)
# Act
assert np.array_equal(manual_bootstrap_samples_f_C_i, bootstrap_samples_C_i) |
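# Illustrative sketch (not part of the original test module): how the hard-coded
# "manual" bootstrap fixtures above can be reproduced with plain NumPy, assuming
# np.random.seed(12345) yields the index arrays quoted in the fixture comments.
def _manual_bootstrap_sketch():
    np.random.seed(12345)
    rand_f_A = np.array([[100], [101], [102], [103], [104]])
    rand_indices_f_A = np.random.randint(low=0, high=5, size=(5, 1))
    # Row-wise resampling with replacement, mirroring the 1D bootstrap generator.
    return rand_f_A[rand_indices_f_A.ravel()]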
636 | server bind | # -*- coding: utf-8 -*-
"""
Helper module which exposes abstractions to write webservers easily
"""
from abc import ABC, abstractmethod
import socket
import http.server as http
from http import HTTPStatus
from urllib.parse import parse_qs, urlparse
import json
class Response():
""" Represents a HTTP `Response` object """
def __init__(self, status, body=None, headers=None):
if not isinstance(status, HTTPStatus):
raise TypeError('status has to be of type http.HTTPStatus')
if body and not isinstance(body, (str, dict)):
raise TypeError('body has to be of type str or dict')
if headers and not isinstance(headers, dict):
raise TypeError('headers has to be of type dict')
self.status = status
self.body = body
self.headers = headers
def get_body(self):
if not self.body:
return ''
if isinstance(self.body, dict):
return json.dumps(self.body)
return self.body
class Request():
""" Represents a HTTP `Request` object """
def __init__(self, path, qs=None, body=None, json=None, headers=None):
self.path = path
self.qs = qs
self.body = body
self.json = json
self.headers = headers
class RequestHandler(ABC):
"""
The class that users should sub-class and provide implementation. Each of
these functions **should** return an instance of the `Response` class
"""
@abstractmethod
def get(self, request):
pass
@abstractmethod
def post(self, request):
pass
def MkHandlers(handlers):
class HTTPHandler(http.BaseHTTPRequestHandler):
def not_found(self):
self.send_response(HTTPStatus.NOT_FOUND)
self.end_headers()
self.wfile.write('<h1> Not Found </h1>'.encode('utf-8'))
def parse_path(self):
return urlparse(self.path)
def append_headers(self, headers):
for k, v in headers.items():
self.send_header(k, v)
def do_GET(self):
try:
raw_path = self.parse_path()
path = raw_path.path
handler = handlers[path]()
qs = parse_qs(raw_path.query)
req = Request(path, qs, None, None, self.headers)
resp = handler.get(req)
self.send_response(resp.status)
if resp.headers:
self.append_headers(resp.headers)
self.end_headers()
self.wfile.write(resp.get_body().encode('utf-8'))
except KeyError:
self.not_found()
def do_POST(self):
try:
raw_path = self.parse_path()
path = raw_path.path
handler = handlers[path]()
content_len = self.headers.get('Content-Length')
qs = None
req_body = self.rfile.read(int(content_len)).decode("utf-8")
req_json = None
if self.headers.get('Content-Type') == 'application/json':
req_json = json.loads(req_body)
req = Request(self.path, qs, req_body, req_json, self.headers)
resp = handler.post(req)
self.send_response(resp.status)
if resp.headers:
self.append_headers(resp.headers)
#Required for graphiql to work with the graphQL test server
self.send_header('Access-Control-Allow-Origin', self.headers['Origin'])
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Methods', 'GET,POST,PUT,PATCH,DELETE,OPTIONS')
self.end_headers()
self.wfile.write(resp.get_body().encode('utf-8'))
except KeyError:
self.not_found()
def do_OPTIONS(self):
self.send_response(204)
#Required for graphiql to work with the graphQL test server
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header('Access-Control-Max-Age', '1728000')
self.send_header('Access-Control-Allow-Headers', 'content-type,x-apollo-tracing')
self.send_header('Content-Type', 'text/plain charset=UTF-8')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', self.headers['Origin'])
self.send_header('Access-Control-Allow-Methods', 'GET,POST,PUT,PATCH,DELETE,OPTIONS')
self.end_headers()
def log_message(self, format, *args):
return
return HTTPHandler
class WebServer(http.HTTPServer):
def __init__(self, server_address, handler):
super().__init__(server_address, handler)
def METHOD_NAME(self):
print('Running http server on {0}:{1}'.format(self.server_address[0],
self.server_address[1]))
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address) |
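# Illustrative sketch (not part of the original module): a minimal handler wired
# into MkHandlers/WebServer as defined above. The route and address below are
# arbitrary examples. The bind override above (METHOD_NAME in this listing) appears
# to implement HTTPServer's server_bind hook, which runs during construction, so it
# is not called explicitly here.
class _HelloHandler(RequestHandler):
    def get(self, request):
        return Response(HTTPStatus.OK, {'message': 'hello'})
    def post(self, request):
        # Echo back whatever JSON body was posted.
        return Response(HTTPStatus.OK, request.json or {})
# handlers = MkHandlers({'/hello': _HelloHandler})
# server = WebServer(('127.0.0.1', 5000), handlers)
# server.serve_forever()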
637 | cb | # Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
from __future__ import annotations
from typing import ContextManager, DefaultDict, Dict, Iterator, List, Tuple, Type, Union
try:
# Introduced in Python 3.8
from typing import Protocol
except ImportError:
from typing_extensions import Protocol # type: ignore
from collections import defaultdict
from contextlib import contextmanager
from enum import Enum
import trio
from structlog import get_logger
from parsec._parsec import BackendEvent
logger = get_logger()
class MetaEvent(Enum):
EVENT_CONNECTED = "event.connected"
EVENT_DISCONNECTED = "event.disconnected"
# `BackendEvent` is not an enum but a class with inheritance
EventTypes = Union[Enum, Type[BackendEvent]]
class EventCallback(Protocol):
def __call__(self, event: EventTypes, **kwargs: object) -> None:
...
class EventFilterCallback(Protocol):
def __call__(self, event: EventTypes, **kwargs: object) -> bool:
...
class EventWaiter:
def __init__(self, filter: EventFilterCallback | None):
self._filter = filter
self._event_occurred = trio.Event()
self._event_result: Tuple[EventTypes, Dict[str, object]] | None = None
def METHOD_NAME(self, event: EventTypes, **kwargs: object) -> None:
if self._event_occurred.is_set():
return
        if self._filter and not self._filter(event, **kwargs):
            return
self._event_result = (event, kwargs)
self._event_occurred.set()
async def wait(self) -> Tuple[EventTypes, Dict[str, object]]:
await self._event_occurred.wait()
assert self._event_result is not None
return self._event_result
def clear(self) -> None:
self._event_occurred = trio.Event()
self._event_result = None
class EventBus:
def __init__(self) -> None:
self._event_handlers: DefaultDict[EventTypes, List[EventCallback]] = defaultdict(list)
def stats(self) -> Dict[EventTypes, int]:
return {event: len(cbs) for event, cbs in self._event_handlers.items() if cbs}
def connection_context(self) -> "EventBusConnectionContext":
return EventBusConnectionContext(self)
def send(self, event: EventTypes, **kwargs: object) -> None:
# Do not log meta events (event.connected and event.disconnected)
if "event_type" not in kwargs:
logger.debug("Send event", event_type=event, **kwargs)
for cb in self._event_handlers[event]:
try:
cb(event, **kwargs)
except Exception:
logger.exception(
"Unhandled exception in event bus callback",
callback=cb,
event_type=event,
**kwargs,
)
@contextmanager
def waiter_on(
self, event: EventTypes, *, filter: EventFilterCallback | None = None
) -> Iterator[EventWaiter]:
ew = EventWaiter(filter)
self.connect(event, ew.METHOD_NAME)
try:
yield ew
finally:
self.disconnect(event, ew.METHOD_NAME)
@contextmanager
def waiter_on_first(
self, *events: EventTypes, filter: EventFilterCallback | None = None
) -> Iterator[EventWaiter]:
ew = EventWaiter(filter)
for event in events:
self.connect(event, ew.METHOD_NAME)
try:
yield ew
finally:
for event in events:
self.disconnect(event, ew.METHOD_NAME)
def connect(self, event: EventTypes, cb: EventCallback) -> None:
self._event_handlers[event].append(cb)
self.send(MetaEvent.EVENT_CONNECTED, event_type=event)
@contextmanager
def connect_in_context(self, *events: Tuple[EventTypes, EventCallback]) -> Iterator[None]:
for event, cb in events:
self.connect(event, cb)
try:
yield
finally:
for event, cb in events:
self.disconnect(event, cb)
def disconnect(self, event: EventTypes, cb: EventCallback) -> None:
self._event_handlers[event].remove(cb)
self.send(MetaEvent.EVENT_DISCONNECTED, event_type=event)
class EventBusConnectionContext:
def __init__(self, event_bus: EventBus):
self.event_bus = event_bus
self.to_disconnect: List[Tuple[EventTypes, EventCallback]] = []
def __enter__(self) -> "EventBusConnectionContext":
return self
def __exit__(self, exc_type, exc_value, traceback): # type: ignore
self.clear()
def clear(self) -> None:
for event, cb in self.to_disconnect:
self.event_bus.disconnect(event, cb)
self.to_disconnect.clear()
def send(self, event: EventTypes, **kwargs: object) -> None:
self.event_bus.send(event, **kwargs)
def waiter_on(self, event: EventTypes) -> ContextManager[EventWaiter]:
return self.event_bus.waiter_on(event)
def waiter_on_first(self, *events: EventTypes) -> ContextManager[EventWaiter]:
return self.event_bus.waiter_on_first(*events)
def connect(self, event: EventTypes, cb: EventCallback) -> None:
self.to_disconnect.append((event, cb))
self.event_bus.connect(event, cb)
def connect_in_context(self, *events: Tuple[EventTypes, EventCallback]) -> ContextManager[None]:
return self.event_bus.connect_in_context(*events)
def disconnect(self, event: EventTypes, cb: EventCallback) -> None:
self.event_bus.disconnect(event, cb)
self.to_disconnect.remove((event, cb)) |
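# Illustrative usage sketch (not part of the original module). "_ExampleEvent" and
# the callback below are made-up names used only to demonstrate the EventBus API.
class _ExampleEvent(Enum):
    PING = "example.ping"
def _example_usage() -> None:
    bus = EventBus()
    def _on_ping(event: EventTypes, **kwargs: object) -> None:
        logger.info("received example event", event_type=event, **kwargs)
    # Handlers connected through the context manager are disconnected on exit.
    with bus.connect_in_context((_ExampleEvent.PING, _on_ping)):
        bus.send(_ExampleEvent.PING, payload=42)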
638 | get udp ports | """
owtf.config
~~~~~~~~~~~
The Configuration object parses all configuration files, loads them into
memory, derives some settings and provides framework modules with a central
repository to get info.
"""
import logging
from collections import defaultdict
try: # PY3
from urllib.parse import urlparse
except ImportError: # PY2
from urlparse import urlparse
try:
import configparser as parser
except ImportError:
import ConfigParser as parser
from owtf.lib.exceptions import PluginAbortException
from owtf.settings import CONFIG_TYPES, REPLACEMENT_DELIMITER, ROOT_DIR
__all__ = ["config_handler"]
class Config(object):
target = None
def __init__(self):
self.root_dir = ROOT_DIR
self.config = defaultdict(list) # General configuration information.
for type in CONFIG_TYPES:
            self.config[type] = {}  # keys may consist of alphabets, numbers, hyphens and underscores
self.cli_options = {}
def is_set(self, key):
"""Checks if the key is set in the config dict
:param key: Key to check
:type key: `str`
:return: True if present, else False
:rtype: `bool`
"""
key = self.pad_key(key)
config = self.get_config_dict
for type in CONFIG_TYPES:
if key in config[type]:
return True
return False
def get_key_val(self, key):
"""Gets the right config for target / general.
:param key: The key
:type key: `str`
:return: Value for the key
"""
config = self.get_config_dict
for type in CONFIG_TYPES:
if key in config[type]:
return config[type][key]
def pad_key(self, key):
"""Add delimiters.
:param key: Key to pad
:type key: `str`
:return: Padded key string
:rtype: `str`
"""
return REPLACEMENT_DELIMITER + key + REPLACEMENT_DELIMITER
def strip_key(self, key):
"""Replaces key with empty space
:param key: Key to clear
:type key: `str`
:return: Empty key
:rtype: `str`
"""
return key.replace(REPLACEMENT_DELIMITER, "")
def get_val(self, key):
"""Transparently gets config info from target or General.
:param key: Key
:type key: `str`
:return: The value for the key
"""
try:
key = self.pad_key(key)
return self.get_key_val(key)
except KeyError:
message = "The configuration item: %s does not exist!" % key
# Raise plugin-level exception to move on to next plugin.
raise PluginAbortException(message)
def get_as_list(self, key_list):
"""Get values for keys in a list
:param key_list: List of keys
:type key_list: `list`
:return: List of corresponding values
:rtype: `list`
"""
value_list = []
for key in key_list:
value_list.append(self.get_val(key))
return value_list
def get_header_list(self, key):
"""Get list from a string of values for a key
:param key: Key
:type key: `str`
:return: List of values
:rtype: `list`
"""
return self.get_val(key).split(",")
def set_general_val(self, type, key, value):
""" Set value for a key in any config file
:param type: Type of config file, framework or general.cfg
:type type: `str`
:param key: The key
:type key: `str`
:param value: Value to be set
:type value:
:return: None
:rtype: None
"""
self.config[type][key] = value
def set_val(self, key, value):
"""set config items in target-specific or General config."""
# Store config in "replacement mode", that way we can multiple-replace
# the config on resources, etc.
key = REPLACEMENT_DELIMITER + key + REPLACEMENT_DELIMITER
type = "other"
# Only when value is a string, store in replacements config.
if isinstance(value, str):
type = "string"
return self.set_general_val(type, key, value)
@property
def get_framework_config_dict(self):
return self.get_config_dict["string"]
def __getitem__(self, key):
return self.get_val(key)
def __setitem__(self, key, value):
return self.set_val(key, value)
@property
def get_config_dict(self):
"""Get the global config dictionary
:return: None
:rtype: None
"""
return self.config
@property
def get_replacement_dict(self):
return {"FRAMEWORK_DIR": self.root_dir}
def show(self):
"""Print all keys and values from configuration dictionary
:return: None
:rtype: None
"""
logging.info("Configuration settings: ")
for k, v in list(self.get_config_dict.items()):
logging.info("%s => %s", str(k), str(v))
def get_tcp_ports(self, start_port, end_port):
"""Get TCP ports from the config file
:param start_port: Start port in a range
:type start_port: `str`
:param end_port: End port
:type end_port: `str`
        :return: Comma-separated string of TCP ports
:rtype: `str`
"""
return ",".join(
self.get_val("TCP_PORTS").split(",")[int(start_port):int(end_port)]
)
def METHOD_NAME(self, start_port, end_port):
"""Get UDP ports from the config file
        :param start_port: Start port in a range
:type start_port: `str`
:param end_port: End port
:type end_port: `str`
:return: Comma-separate string of udp ports
:rtype: `str`
"""
return ",".join(
self.get_val("UDP_PORTS").split(",")[int(start_port):int(end_port)]
)
config_handler = Config() |
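# Illustrative usage sketch (not part of the original module). "EXAMPLE_KEY" and its
# value are arbitrary placeholders used only to demonstrate the accessors above.
def _example_usage():
    config_handler.set_val("EXAMPLE_KEY", "example value")
    assert config_handler.is_set("EXAMPLE_KEY")
    assert config_handler.get_val("EXAMPLE_KEY") == "example value"
    # Item access goes through the same get_val/set_val methods.
    config_handler["EXAMPLE_KEY"] = "another value"
    return config_handler["EXAMPLE_KEY"]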
639 | test08 wrong os update | #!/usr/bin/env python3
import os
import tarfile
from gppylib.commands.base import ExecutionError
from gppylib.operations.test.regress.test_package import GppkgTestCase, unittest, GppkgSpec, RPMSpec, ARCH, OS, GPDB_VERSION
class SimpleNegativeTestCase(GppkgTestCase):
# @unittest.expectedFailure
def test00_wrong_os(self):
os = "abcde"
A_spec = self.A_spec
alpha_spec = GppkgSpec("alpha", "1.0", GPDB_VERSION, os)
gppkg_file = self.build(alpha_spec, A_spec)
with self.assertRaisesRegex(ExecutionError , "%s OS required. %s OS found" % (os, OS)):
self.install(gppkg_file)
def test02_wrong_gpdbversion(self):
gpdb_version = "4.6"
A_spec = self.A_spec
alpha_spec = GppkgSpec("alpha", "1.0", gpdb_version)
gppkg_file = self.build(alpha_spec, A_spec)
with self.assertRaisesRegex(ExecutionError, "requires Cloudberry Database version %s" % gpdb_version):
self.install(gppkg_file)
def test03_install_twice(self):
gppkg_file = self.build(self.alpha_spec, self.A_spec)
self.install(self.alpha_spec.get_filename())
with self.assertRaisesRegex(ExecutionError, "%s is already installed" % gppkg_file):
self.install(gppkg_file)
@unittest.expectedFailure
def test04_update_gppkg_lower(self):
"""
This test tries to update a gppkg which has a lower version,
        but whose main constituent RPM has a higher version than the one
already installed on the system.
"""
#Use gppkg from previous test
self.install(self.alpha_spec.get_filename())
#Use gppkg which has a lower version, but the rpm is > the one installed
update_rpm_spec = RPMSpec("A", "1", "2")
update_gppkg_spec = GppkgSpec("alpha", "0.1")
update_gppkg_file = self.build(update_gppkg_spec, update_rpm_spec)
with self.assertRaisesRegex(ExecutionError, "Newer version of %s already installed" % update_gppkg_spec.get_package_name()):
self.update(update_gppkg_file)
#Check that the original package is still installed and not updated
assert self.check_install(self.alpha_spec.get_filename())
def test05_update_rpm_lower(self):
"""
        This test tries to update a gppkg which has a higher version,
        but whose main constituent RPM has a lower version than the one
already installed on the system.
"""
#Use gppkg from previous test
self.install(self.alpha_spec.get_filename())
#Use gppkg with a lower RPM version but gppkg version is > the one installed
update_rpm_spec = RPMSpec("A", "1", "0")
update_gppkg_spec = GppkgSpec("alpha", "1.1")
update_gppkg_file = self.build(update_gppkg_spec, update_rpm_spec)
with self.assertRaisesRegex(ExecutionError, self.A_spec.get_filename()):
self.update(update_gppkg_file)
#Check that the original package is still installed and not updated
assert self.check_install(self.alpha_spec.get_filename())
def test06_uninstall_twice(self):
#Uses the gppkg from previous test
self.install(self.alpha_spec.get_filename())
#Uninstall gppkg
self.remove(self.alpha_spec.get_filename())
with self.assertRaisesRegex(ExecutionError, "%s has not been installed" % self.alpha_spec.get_package_name()):
self.remove(self.alpha_spec.get_filename())
def test07_invalid_gppkg_name(self):
invalid_gppkg_name = "abcde-abc"
with self.assertRaisesRegex(ExecutionError, "Cannot find package %s" % invalid_gppkg_name):
self.install(invalid_gppkg_name)
@unittest.expectedFailure
def METHOD_NAME(self):
os = "windows"
self.install(self.alpha_spec.get_filename())
invalid_os_gppkg = GppkgSpec("alpha", "1.1", GPDB_VERSION, os)
gppkg_file = self.build(invalid_os_gppkg, self.A_spec)
with self.assertRaisesRegex(ExecutionError, "%s os required. %s os found" % (os, OS)):
self.update(gppkg_file)
if __name__ == "__main__":
unittest.main() |
640 | get file name | import factory
from django.conf import settings
from django.core.files import File
from kitsune.questions.tests import QuestionFactory
from kitsune.sumo.tests import TestCase
from kitsune.upload.models import ImageAttachment
from kitsune.upload.storage import RenameFileStorage
from kitsune.upload.utils import FileTooLargeError, check_file_size, create_imageattachment
from kitsune.users.tests import UserFactory
class ImageAttachmentFactory(factory.django.DjangoModelFactory):
class Meta:
model = ImageAttachment
creator = factory.SubFactory(UserFactory)
content_object = factory.SubFactory(QuestionFactory)
file = factory.django.FileField()
def check_file_info(file_info, name, width, height, delete_url, url, thumbnail_url):
tc = TestCase()
tc.assertEqual(name, file_info["name"])
tc.assertEqual(width, file_info["width"])
tc.assertEqual(height, file_info["height"])
tc.assertEqual(delete_url, file_info["delete_url"])
tc.assertEqual(url, file_info["url"])
tc.assertEqual(thumbnail_url, file_info["thumbnail_url"])
def METHOD_NAME(name):
storage = RenameFileStorage()
return storage.get_available_name(name)
class CheckFileSizeTestCase(TestCase):
"""Tests for check_file_size"""
def test_check_file_size_under(self):
"""No exception should be raised"""
with open("kitsune/upload/tests/media/test.jpg", "rb") as f:
up_file = File(f)
check_file_size(up_file, settings.IMAGE_MAX_FILESIZE)
def test_check_file_size_over(self):
"""FileTooLargeError should be raised"""
with self.assertRaises(FileTooLargeError):
with open("kitsune/upload/tests/media/test.jpg", "rb") as f:
up_file = File(f)
# This should raise
check_file_size(up_file, 0)
class CreateImageAttachmentTestCase(TestCase):
def setUp(self):
super(CreateImageAttachmentTestCase, self).setUp()
self.user = UserFactory()
self.obj = QuestionFactory()
def tearDown(self):
ImageAttachment.objects.all().delete()
super(CreateImageAttachmentTestCase, self).tearDown()
def test_create_imageattachment(self):
"""
An image attachment is created from an uploaded file.
Verifies all appropriate fields are correctly set.
"""
with open("kitsune/upload/tests/media/test.jpg", "rb") as f:
up_file = File(f)
file_info = create_imageattachment({"image": up_file}, self.user, self.obj)
image = ImageAttachment.objects.all()[0]
check_file_info(
file_info,
name="test.png",
width=90,
height=120,
delete_url=image.get_delete_url(),
url=image.get_absolute_url(),
thumbnail_url=image.thumbnail.url,
)
def test_create_imageattachment_when_animated(self):
"""
An image attachment is created from an uploaded animated GIF file.
Verifies all appropriate fields are correctly set.
"""
filepath = "kitsune/upload/tests/media/animated.gif"
with open(filepath, "rb") as f:
up_file = File(f)
file_info = create_imageattachment({"image": up_file}, self.user, self.obj)
image = ImageAttachment.objects.all()[0]
check_file_info(
file_info,
name=filepath,
width=120,
height=120,
delete_url=image.get_delete_url(),
url=image.get_absolute_url(),
thumbnail_url=image.thumbnail.url,
)
class FileNameTestCase(TestCase):
def _match_file_name(self, name, name_end):
assert name.endswith(name_end), '"%s" does not end with "%s"' % (name, name_end)
def test_empty_file_name(self):
self._match_file_name("", "")
def test_empty_file_name_with_extension(self):
self._match_file_name(METHOD_NAME(".wtf"), "3f8242")
def test_ascii(self):
self._match_file_name(METHOD_NAME("some ascii.jpg"), "5959e0.jpg")
def test_ascii_dir(self):
self._match_file_name(METHOD_NAME("dir1/dir2/some ascii.jpg"), "5959e0.jpg")
def test_low_unicode(self):
self._match_file_name(METHOD_NAME("157d9383e6aeba7180378fd8c1d46f80.gif"), "bdaf1a.gif")
def test_high_unicode(self):
self._match_file_name(METHOD_NAME("\u6709\u52b9.jpeg"), "ce1518.jpeg")
def test_full_mixed(self):
self._match_file_name(
METHOD_NAME("123\xe5\xe5\xee\xe9\xf8\xe7\u6709\u52b9.png"), "686c11.png"
) |
641 | test pyrochem elemental sum | import unittest
import numpy as np
import pandas as pd
import pyrolite.geochem
from pyrolite.comp.codata import renormalise
from pyrolite.geochem.norm import get_reference_composition
from pyrolite.util.synthetic import normal_frame, normal_series
# [print("# " + i) for i in dir(df.pyrochem) if "__" not in i and not i.startswith("_")]
class TestPyrochem(unittest.TestCase):
"""
Test the pyrochem dataframe accessor interface to
pyrolite.geochem functions.
"""
def setUp(self):
cols = [
"MgO",
"SiO2",
"CaO",
"FeO",
"Ti",
"Hf",
"Zr",
"H2O",
"Sr87_Sr86",
"87Sr/86Sr",
"87Sr/86Sri",
] + pyrolite.geochem.REE()
self.df = normal_frame(size=4, columns=cols)
self.df = renormalise(self.df)
# pyrolite.geochem.ind functions
def test_pyrochem_indexes(self):
obj = self.df
for index in [
"list_elements",
"list_oxides",
"list_REE",
"list_isotope_ratios",
]:
with self.subTest(index=index):
out = getattr(obj.pyrochem, index)
self.assertIsInstance(out, list)
def test_pyrochem_subsetters(self):
obj = self.df
for subset in [
"REE",
"REY",
"elements",
"oxides",
"isotope_ratios",
"compositional",
]:
with self.subTest(subset=subset):
out = getattr(obj.pyrochem, subset)
self.assertIsInstance(out, obj.__class__) # in this case a dataframe
def test_pyrochem_subsetter_assignment(self):
obj = self.df
for subset in [
"REE",
"REY",
"elements",
"oxides",
"isotope_ratios",
"compositional",
]:
with self.subTest(subset=subset):
setattr(obj.pyrochem, subset, getattr(obj.pyrochem, subset) * 1.0)
# pyrolite.geochem.parse functions
def test_pyrochem_check_multiple_cation_inclusion(self):
obj = self.df.copy(deep=True)
cations = obj.pyrochem.check_multiple_cation_inclusion()
self.assertTrue(len(cations) == 0)
def test_pyochem_parse_chem(self):
obj = self.df.copy(deep=True)
start_cols = obj.columns
out = obj.pyrochem.parse_chem()
self.assertTrue(len(out.columns) == len(start_cols))
self.assertTrue(
all([a == b for (a, b) in zip(out.columns, start_cols) if "/" not in a])
)
# pyrolite.geochem.transform functions
def test_pyrochem_to_molecular(self):
obj = self.df.copy(deep=True).pyrochem.compositional
start = obj.values
out = obj.pyrochem.to_molecular()
self.assertFalse(np.isclose(out.values.flatten(), start.flatten()).any())
def test_pyrochem_to_weight(self):
obj = self.df.copy(deep=True).pyrochem.compositional
start = obj.values
out = obj.pyrochem.to_weight()
self.assertFalse(np.isclose(out.values.flatten(), start.flatten()).any())
def test_pyrochem_add_MgNo(self):
obj = self.df.copy(deep=True).pyrochem.compositional
obj.pyrochem.add_MgNo()
self.assertIn("Mg#", obj.columns)
def test_pyrochem_add_ratio(self):
obj = self.df.copy(deep=True)
for ratio in ["MgO/SiO2", "MgO/Ti", "Lu/Hf", "Mg/TiO2"]:
with self.subTest(ratio=ratio):
obj.pyrochem.add_ratio(ratio)
self.assertIn(ratio, obj.columns)
def test_pyrochem_aggregate_element(self):
obj = self.df.copy(deep=True)
target = "Fe"
out = obj.pyrochem.aggregate_element(target)
self.assertIsInstance(out, pd.DataFrame)
self.assertTrue(target in out.columns)
def test_pyrochem_devolatilise(self):
obj = self.df.copy(deep=True).pyrochem.compositional
out = obj.pyrochem.devolatilise()
self.assertNotIn("H2O", out.columns)
def METHOD_NAME(self):
obj = self.df.copy(deep=True)
Mg = obj.pyrochem.elemental_sum("Mg")
self.assertFalse(np.isclose(Mg, obj.MgO).any())
def test_pyrochem_lambda_lnREE(self):
obj = self.df.copy(deep=True)
lambdas = obj.pyrochem.lambda_lnREE()
self.assertIsInstance(lambdas, pd.DataFrame)
self.assertIn("λ0", lambdas.columns)
def test_pyrochem_convert_chemistry(self):
obj = self.df.copy(deep=True)
obj = obj.pyrochem.convert_chemistry(
to=["MgO", "Si", "Ti", "HfO2", "La2O3", dict(FeO=0.9, Fe2O3=0.1)]
)
self.assertIn("Fe2O3", obj.columns)
        self.assertIn("Si", obj.columns)
self.assertTrue((obj.FeO.values > obj.Fe2O3.values).all())
# pyrolite.geochem.norm functions
def test_pyrochem_normalize_to_str(self):
obj = self.df.copy(deep=True).pyrochem.compositional
out = obj.pyrochem.normalize_to("Chondrite_PON")
def test_pyrochem_normalize_to_composition(self):
obj = self.df.copy(deep=True).pyrochem.compositional
out = obj.pyrochem.normalize_to(get_reference_composition("Chondrite_PON"))
def test_pyrochem_normalize_to_array(self):
obj = self.df.copy(deep=True).pyrochem.compositional
out = obj.pyrochem.normalize_to(np.ones(obj.columns.size))
def test_pyrochem_denormalize_from_str(self):
obj = self.df.copy(deep=True).pyrochem.compositional
out = obj.pyrochem.denormalize_from("Chondrite_PON")
def test_pyrochem_denormalize_from_composition(self):
obj = self.df.copy(deep=True).pyrochem.compositional
out = obj.pyrochem.denormalize_from(get_reference_composition("Chondrite_PON"))
def test_pyrochem_denormalize_from_array(self):
obj = self.df.copy(deep=True).pyrochem.compositional
out = obj.pyrochem.denormalize_from(np.ones(obj.columns.size))
def test_pyrochem_scale(self):
obj = self.df.copy(deep=True).pyrochem.compositional
REEppm = obj.pyrochem.REE.pyrochem.scale("wt%", "ppm")
self.assertFalse(np.isclose(REEppm.values, obj.pyrochem.REE.values).any())
self.assertTrue((REEppm.values > obj.pyrochem.REE.values).all()) |
642 | system data | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetStartStopManagedInstanceScheduleResult',
'AwaitableGetStartStopManagedInstanceScheduleResult',
'get_start_stop_managed_instance_schedule',
'get_start_stop_managed_instance_schedule_output',
]
@pulumi.output_type
class GetStartStopManagedInstanceScheduleResult:
"""
Managed instance's Start/Stop schedule.
"""
def __init__(__self__, description=None, id=None, name=None, next_execution_time=None, next_run_action=None, schedule_list=None, METHOD_NAME=None, time_zone_id=None, type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if next_execution_time and not isinstance(next_execution_time, str):
raise TypeError("Expected argument 'next_execution_time' to be a str")
pulumi.set(__self__, "next_execution_time", next_execution_time)
if next_run_action and not isinstance(next_run_action, str):
raise TypeError("Expected argument 'next_run_action' to be a str")
pulumi.set(__self__, "next_run_action", next_run_action)
if schedule_list and not isinstance(schedule_list, list):
raise TypeError("Expected argument 'schedule_list' to be a list")
pulumi.set(__self__, "schedule_list", schedule_list)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", METHOD_NAME)
if time_zone_id and not isinstance(time_zone_id, str):
raise TypeError("Expected argument 'time_zone_id' to be a str")
pulumi.set(__self__, "time_zone_id", time_zone_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description of the schedule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nextExecutionTime")
def next_execution_time(self) -> str:
"""
Timestamp when the next action will be executed in the corresponding schedule time zone.
"""
return pulumi.get(self, "next_execution_time")
@property
@pulumi.getter(name="nextRunAction")
def next_run_action(self) -> str:
"""
Next action to be executed (Start or Stop)
"""
return pulumi.get(self, "next_run_action")
@property
@pulumi.getter(name="scheduleList")
def schedule_list(self) -> Sequence['outputs.ScheduleItemResponse']:
"""
Schedule list.
"""
return pulumi.get(self, "schedule_list")
@property
@pulumi.getter(name="systemData")
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
"""
System data of the scheduled resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="timeZoneId")
def time_zone_id(self) -> Optional[str]:
"""
The time zone of the schedule.
"""
return pulumi.get(self, "time_zone_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetStartStopManagedInstanceScheduleResult(GetStartStopManagedInstanceScheduleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetStartStopManagedInstanceScheduleResult(
description=self.description,
id=self.id,
name=self.name,
next_execution_time=self.next_execution_time,
next_run_action=self.next_run_action,
schedule_list=self.schedule_list,
METHOD_NAME=self.METHOD_NAME,
time_zone_id=self.time_zone_id,
type=self.type)
def get_start_stop_managed_instance_schedule(managed_instance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
start_stop_schedule_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStartStopManagedInstanceScheduleResult:
"""
Gets the managed instance's Start/Stop schedule.
:param str managed_instance_name: The name of the managed instance.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str start_stop_schedule_name: Name of the managed instance Start/Stop schedule.
"""
__args__ = dict()
__args__['managedInstanceName'] = managed_instance_name
__args__['resourceGroupName'] = resource_group_name
__args__['startStopScheduleName'] = start_stop_schedule_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql/v20230201preview:getStartStopManagedInstanceSchedule', __args__, opts=opts, typ=GetStartStopManagedInstanceScheduleResult).value
return AwaitableGetStartStopManagedInstanceScheduleResult(
description=pulumi.get(__ret__, 'description'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
next_execution_time=pulumi.get(__ret__, 'next_execution_time'),
next_run_action=pulumi.get(__ret__, 'next_run_action'),
schedule_list=pulumi.get(__ret__, 'schedule_list'),
METHOD_NAME=pulumi.get(__ret__, 'system_data'),
time_zone_id=pulumi.get(__ret__, 'time_zone_id'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_start_stop_managed_instance_schedule)
def get_start_stop_managed_instance_schedule_output(managed_instance_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
start_stop_schedule_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetStartStopManagedInstanceScheduleResult]:
"""
Gets the managed instance's Start/Stop schedule.
:param str managed_instance_name: The name of the managed instance.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str start_stop_schedule_name: Name of the managed instance Start/Stop schedule.
"""
... |
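# Illustrative usage sketch (not part of the generated module). The resource names
# below are placeholders, not real Azure resources.
#   result = get_start_stop_managed_instance_schedule(
#       managed_instance_name="example-mi",
#       resource_group_name="example-rg",
#       start_stop_schedule_name="default",
#   )
#   print(result.next_run_action, result.next_execution_time)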
643 | check scope | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""A builder to build Relax VM executable."""
from enum import IntEnum
from typing import Optional, Union, List
import tvm
from tvm.runtime import Object
from tvm.runtime.container import ShapeTuple
from .vm_build import Executable
from . import _ffi_api
class SpecialReg(IntEnum):
"""Magic numbers that represent special registers in vm."""
VOID_ARG = (1 << 54) + 0
VM_STATE = (1 << 54) + 1
class VMFuncKind(IntEnum):
"""VM function kind code."""
PACKED_FUNC = 0
VM_FUNC = 1
class VMFuncScope(object):
"""An object corresponds to each VM function, working as a context manager."""
stack: List["VMFuncScope"] = []
def __init__(self, exit_callback):
self.exit_callback = exit_callback
def __enter__(self):
VMFuncScope.stack.append(self)
return self
def __exit__(self, ptype, value, trace):
VMFuncScope.stack.pop()
self.exit_callback()
@tvm._ffi.register_object("relax.ExecBuilder")
class ExecBuilder(Object):
"""A builder to emit instructions and build executable for the virtual machine."""
def __init__(self) -> None:
self.__init_handle_by_constructor__(_ffi_api.ExecBuilderCreate) # type: ignore
def r(self, idx: int) -> int:
"""set instruction's argument as a register."""
return _ffi_api.ExecBuilderR(self, idx) # type: ignore
def imm(self, value: int) -> int:
"""set instruction's argument as an immediate."""
return _ffi_api.ExecBuilderImm(self, value) # type: ignore
def c(self, idx: int) -> int:
"""set instruction's argument as a constant."""
return _ffi_api.ExecBuilderC(self, idx) # type: ignore
def f(self, name: str) -> int:
"""set instruction's argument as a function."""
return _ffi_api.ExecBuilderF(self, name) # type: ignore
def void_arg(self) -> int:
return self.r(SpecialReg.VOID_ARG)
def vm_state(self) -> int:
return self.r(SpecialReg.VM_STATE)
def declare_function(self, func_name: str, kind: VMFuncKind = VMFuncKind.PACKED_FUNC) -> None:
"""Declare a function"""
_ffi_api.ExecBuilderDecalreFunction(self, func_name, kind) # type: ignore
def function(
self, func_name: str, num_inputs: Optional[int] = 0, param_names: List[str] = None
) -> VMFuncScope:
"""annotate a VM function."""
_ffi_api.ExecBuilderEmitFunction(self, func_name, num_inputs, param_names) # type: ignore
return VMFuncScope(lambda: _ffi_api.ExecBuilderEndFunction(self, func_name)) # type: ignore
def METHOD_NAME(self) -> None:
if len(VMFuncScope.stack) == 0:
raise ValueError("emit should happen in a function scope")
def convert_constant(self, const: object) -> int:
return _ffi_api.ExecBuilderConvertConstant(self, const) # type: ignore
def emit_call(
self,
name: str,
args: Optional[List[Union[tvm.nd.NDArray, tvm.DataType]]] = None,
dst: int = None,
) -> None:
"""emit a call instruction which calls a packed function."""
self.METHOD_NAME()
if dst is None:
dst = SpecialReg.VOID_ARG
args_ = []
if args is not None:
for arg in args:
if isinstance(arg, tuple):
shape_tuple = ShapeTuple(arg)
new_arg = self.convert_constant(shape_tuple)
args_.append(new_arg)
elif isinstance(arg, (tvm.nd.NDArray, tvm.DataType, ShapeTuple)):
new_arg = self.convert_constant(arg)
args_.append(new_arg)
else:
args_.append(arg)
_ffi_api.ExecBuilderEmitCall(self, name, args_, dst) # type: ignore
def emit_ret(self, result: int) -> None:
"""emit a return instruction"""
self.METHOD_NAME()
_ffi_api.ExecBuilderEmitRet(self, result) # type: ignore
def emit_goto(self, pc_offset):
"""emit a goto instruction"""
self.METHOD_NAME()
_ffi_api.ExecBuilderEmitGoto(self, pc_offset) # type: ignore
def emit_if(self, cond, false_offset):
"""emit an if instruction"""
self.METHOD_NAME()
_ffi_api.ExecBuilderEmitIf(self, cond, false_offset) # type: ignore
def get(self) -> Executable:
"""return the executable"""
return Executable(_ffi_api.ExecBuilderGet(self)) # type: ignore |
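# Illustrative sketch (not part of the original module): building a tiny VM function
# with the ExecBuilder defined above. "test.vm.add" is an example packed-function
# name; substitute one that is actually registered in your TVM build.
def _example_build() -> Executable:
    eb = ExecBuilder()
    with eb.function("main", num_inputs=2):
        eb.emit_call("test.vm.add", args=[eb.r(0), eb.r(1)], dst=eb.r(2))
        eb.emit_ret(eb.r(2))
    return eb.get()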
644 | configure | from __future__ import annotations
from .Layer import Layer
from .Node import Node
from .Printable import Printable
from .Emulator import Emulator
from .enums import NodeRole
from .Binding import Binding
from typing import Dict, List, Set, Tuple
from .BaseSystem import BaseSystem
class Server(Printable):
"""!
@brief Server class.
The Server class is the handler for installed services.
"""
__class_names: list
_base_system: BaseSystem
def __init__(self):
super().__init__()
self.__class_names = []
self._base_system = BaseSystem.DEFAULT
def install(self, node: Node):
"""!
@brief Install the server on node.
@param node node.
"""
raise NotImplementedError('install not implemented')
def setBaseSystem(self, base_system: BaseSystem) -> Server:
"""!
@brief Set a base_system of a server.
@param base_system base_system to use.
@returns self, for chaining API calls.
"""
        self._base_system = base_system
        return self
def getBaseSystem(self) -> BaseSystem:
"""!
@brief Get configured base system on this server.
@returns base system.
"""
return self._base_system
def getClassNames(self):
return self.__class_names
def appendClassName(self, class_name:str):
"""!
        @brief Append a class name.
        This method is intended to be called by the user.
@param class_name class name.
@return self.
"""
self.__class_names.append(class_name)
return self
class Service(Layer):
"""!
@brief Service base class.
The base class for all Services.
"""
_pending_targets: Dict[str, Server]
__targets: Set[Tuple[Server, Node]]
def __init__(self):
super().__init__()
self._pending_targets = {}
self.__targets = set()
def _createServer(self) -> Server:
"""!
@brief Create a new server.
"""
raise NotImplementedError('_createServer not implemented')
def _doInstall(self, node: Node, server: Server):
"""!
@brief install the server on node. This can be overridden by service
implementations.
@param node node.
@param server server.
"""
server.install(node)
def _doSetClassNames(self, node:Node, server:Server) -> Node:
"""!
@brief set the class names on node.
@param node node.
@param server server.
"""
server.setClassNames(node)
def _doConfigure(self, node: Node, server: Server):
"""!
        @brief configure the node. Some services may need to be configured
        before being rendered.
        This is currently used by the DNS layer to configure NS and glue
        records before the actual installation.
@param node node
@param server server
"""
return
def __configureServer(self, server: Server, node: Node):
"""!
@brief Configure the service on given node.
@param node node to configure the service on.
@throws AssertionError if node is not host node.
"""
assert node.getRole() == NodeRole.Host, 'node as{}/{} is not a host node'.format(node.getAsn(), node.getName())
servicesdb: Dict = node.getAttribute('services', {})
for (name, service_info) in servicesdb.items():
service: Service = service_info['__self']
assert name not in self.getConflicts(), '{} conflict with {} on as{}/{}.'.format(self.getName(), service.getName(), node.getAsn(), node.getName())
assert self.getName() not in service.getConflicts(), '{} conflict with {} on as{}/{}.'.format(self.getName(), service.getName(), node.getAsn(), node.getName())
m_name = self.getName()
if m_name not in servicesdb:
servicesdb[m_name] = {
'__self': self
}
node.setBaseSystem(server.getBaseSystem())
self._doConfigure(node, server)
self.__targets.add((server, node))
def addPrefix(self, prefix: str):
"""!
@brief add a prefix to all virtual nodes.
        This method prepends the given prefix to all virtual node names.
"""
new_dict = {}
for k, v in self._pending_targets.items():
new_dict[prefix + k] = v
self._pending_targets = new_dict
def install(self, vnode: str) -> Server:
"""!
@brief install the service on a node identified by given name.
"""
if vnode in self._pending_targets.keys(): return self._pending_targets[vnode]
s = self._createServer()
self._pending_targets[vnode] = s
return self._pending_targets[vnode]
def METHOD_NAME(self, emulator: Emulator):
for (vnode, server) in self._pending_targets.items():
pnode = emulator.getBindingFor(vnode)
self._log('looking for binding for {}...'.format(vnode))
self.__configureServer(server, pnode)
self._log('configure: bound {} to as{}/{}.'.format(vnode, pnode.getAsn(), pnode.getName()))
def render(self, emulator: Emulator):
for (server, node) in self.__targets:
self._doInstall(node, server)
for className in server.getClassNames():
node.appendClassName(className)
def getConflicts(self) -> List[str]:
"""!
@brief Get a list of conflicting services.
Override to change.
@return list of service names.
"""
return []
def getTargets(self) -> Set[Tuple[Server, Node]]:
"""!
@brief Get nodes and the server object associated with them. Note this
        only works after the layer is configured.
"""
return self.__targets
def setPendingTargets(self, targets: Dict[str, Server]):
"""!
@brief Overrides the pending vnode dict. Use with caution.
@param targets new targets.
"""
self._pending_targets = targets
def getPendingTargets(self) -> Dict[str, Server]:
"""!
        @brief Get the dict of pending vnodes to install the service on.
"""
return self._pending_targets
|
645 | materials2 | """
merge_materials
===============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class merge_materials(Operator):
"""Assembles a set of materials into a unique one.
Parameters
----------
materials1 : Materials
A vector of materials to merge or materials
from pin 0 to ...
materials2 : Materials
A vector of materials to merge or materials
from pin 0 to ...
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.utility.merge_materials()
>>> # Make input connections
>>> my_materials1 = dpf.Materials()
>>> op.inputs.materials1.connect(my_materials1)
>>> my_materials2 = dpf.Materials()
>>> op.inputs.materials2.connect(my_materials2)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.utility.merge_materials(
... materials1=my_materials1,
... materials2=my_materials2,
... )
>>> # Get output data
>>> result_merged_materials = op.outputs.merged_materials()
"""
def __init__(self, materials1=None, METHOD_NAME=None, config=None, server=None):
super().__init__(name="merge::materials", config=config, server=server)
self._inputs = InputsMergeMaterials(self)
self._outputs = OutputsMergeMaterials(self)
if materials1 is not None:
self.inputs.materials1.connect(materials1)
if METHOD_NAME is not None:
self.inputs.METHOD_NAME.connect(METHOD_NAME)
@staticmethod
def _spec():
description = """Assembles a set of materials into a unique one."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="materials",
type_names=["materials"],
optional=False,
document="""A vector of materials to merge or materials
from pin 0 to ...""",
),
1: PinSpecification(
name="materials",
type_names=["materials"],
optional=False,
document="""A vector of materials to merge or materials
from pin 0 to ...""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="merged_materials",
type_names=["materials"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="merge::materials", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsMergeMaterials
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsMergeMaterials
"""
return super().outputs
class InputsMergeMaterials(_Inputs):
"""Intermediate class used to connect user inputs to
merge_materials operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_materials()
>>> my_materials1 = dpf.Materials()
>>> op.inputs.materials1.connect(my_materials1)
>>> my_materials2 = dpf.Materials()
>>> op.inputs.materials2.connect(my_materials2)
"""
def __init__(self, op: Operator):
super().__init__(merge_materials._spec().inputs, op)
self._materials1 = Input(merge_materials._spec().input_pin(0), 0, op, 0)
self._inputs.append(self._materials1)
self._materials2 = Input(merge_materials._spec().input_pin(1), 1, op, 1)
self._inputs.append(self._materials2)
@property
def materials1(self):
"""Allows to connect materials1 input to the operator.
A vector of materials to merge or materials
from pin 0 to ...
Parameters
----------
my_materials1 : Materials
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_materials()
>>> op.inputs.materials1.connect(my_materials1)
>>> # or
>>> op.inputs.materials1(my_materials1)
"""
return self._materials1
@property
def METHOD_NAME(self):
"""Allows to connect materials2 input to the operator.
A vector of materials to merge or materials
from pin 0 to ...
Parameters
----------
my_materials2 : Materials
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_materials()
>>> op.inputs.materials2.connect(my_materials2)
>>> # or
>>> op.inputs.materials2(my_materials2)
"""
return self._materials2
class OutputsMergeMaterials(_Outputs):
"""Intermediate class used to get outputs from
merge_materials operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_materials()
>>> # Connect inputs : op.inputs. ...
>>> result_merged_materials = op.outputs.merged_materials()
"""
def __init__(self, op: Operator):
super().__init__(merge_materials._spec().outputs, op)
self._merged_materials = Output(merge_materials._spec().output_pin(0), 0, op)
self._outputs.append(self._merged_materials)
@property
def merged_materials(self):
"""Allows to get merged_materials output of the operator
Returns
----------
my_merged_materials : Materials
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_materials()
>>> # Connect inputs : op.inputs. ...
>>> result_merged_materials = op.outputs.merged_materials()
""" # noqa: E501
return self._merged_materials |
646 | extract data | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_invoice_request(billing_account_name: str, invoice_name: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/invoices/{invoiceName}/transactions",
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
"invoiceName": _SERIALIZER.url("invoice_name", invoice_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class TransactionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.billing.BillingManagementClient`'s
:attr:`transactions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_invoice(
self, billing_account_name: str, invoice_name: str, **kwargs: Any
) -> Iterable["_models.Transaction"]:
"""Lists the transactions for an invoice. Transactions include purchases, refunds and Azure usage
charges.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param invoice_name: The ID that uniquely identifies an invoice. Required.
:type invoice_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Transaction or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.billing.models.Transaction]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.TransactionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_invoice_request(
billing_account_name=billing_account_name,
invoice_name=invoice_name,
api_version=api_version,
template_url=self.list_by_invoice.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("TransactionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, METHOD_NAME)
list_by_invoice.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/invoices/{invoiceName}/transactions"} # type: ignore |
647 | datastore job | # encoding: utf-8
import unittest.mock as mock
import pytest
import ckan.lib.jobs as jobs
import ckan.plugins as p
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
import ckanext.datastore.backend as backend
import ckanext.datastore.backend.postgres as db
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("with_plugins")
class TestCreateIndexes(object):
def test_creates_fts_index_using_gist_by_default(self):
connection = mock.MagicMock()
context = {"connection": connection}
resource_id = "resource_id"
data_dict = {"resource_id": resource_id}
db.create_indexes(context, data_dict)
self._assert_created_index_on(
"_full_text", connection, resource_id, method="gist"
)
@pytest.mark.ckan_config("ckan.datastore.default_fts_index_method", "gin")
def test_default_fts_index_method_can_be_overwritten_by_config_var(self):
connection = mock.MagicMock()
context = {"connection": connection}
resource_id = "resource_id"
data_dict = {"resource_id": resource_id}
db.create_indexes(context, data_dict)
self._assert_created_index_on(
"_full_text", connection, resource_id, method="gin"
)
@mock.patch("ckanext.datastore.backend.postgres._get_fields")
def test_creates_fts_index_on_all_fields_except_dates_nested_and_arrays_with_english_as_default(
self, _get_fields
):
_get_fields.return_value = [
{"id": "text", "type": "text"},
{"id": "number", "type": "number"},
{"id": "nested", "type": "nested"},
{"id": "date", "type": "date"},
{"id": "text array", "type": "text[]"},
{"id": "timestamp", "type": "timestamp"},
]
connection = mock.MagicMock()
context = {"connection": connection}
resource_id = "resource_id"
data_dict = {"resource_id": resource_id}
db.create_indexes(context, data_dict)
self._assert_created_index_on(
"text", connection, resource_id, "english"
)
self._assert_created_index_on(
"number", connection, resource_id, "english", cast=True
)
@pytest.mark.ckan_config("ckan.datastore.default_fts_lang", "simple")
@mock.patch("ckanext.datastore.backend.postgres._get_fields")
def test_creates_fts_index_on_textual_fields_can_overwrite_lang_with_config_var(
self, _get_fields
):
_get_fields.return_value = [{"id": "foo", "type": "text"}]
connection = mock.MagicMock()
context = {"connection": connection}
resource_id = "resource_id"
data_dict = {"resource_id": resource_id}
db.create_indexes(context, data_dict)
self._assert_created_index_on("foo", connection, resource_id, "simple")
@pytest.mark.ckan_config("ckan.datastore.default_fts_lang", "simple")
@mock.patch("ckanext.datastore.backend.postgres._get_fields")
def test_creates_fts_index_on_textual_fields_can_overwrite_lang_using_lang_param(
self, _get_fields
):
_get_fields.return_value = [{"id": "foo", "type": "text"}]
connection = mock.MagicMock()
context = {"connection": connection}
resource_id = "resource_id"
data_dict = {"resource_id": resource_id, "language": "french"}
db.create_indexes(context, data_dict)
self._assert_created_index_on("foo", connection, resource_id, "french")
def _assert_created_index_on(
self,
field,
connection,
resource_id,
lang=None,
cast=False,
method="gist",
):
field = u'"{0}"'.format(field)
if cast:
field = u"cast({0} AS text)".format(field)
if lang is not None:
sql_str = (
u'ON "resource_id" '
u"USING {method}(to_tsvector('{lang}', {field}))"
)
sql_str = sql_str.format(method=method, lang=lang, field=field)
else:
sql_str = u"USING {method}({field})".format(
method=method, field=field
)
calls = connection.execute.call_args_list
was_called = [call for call in calls if call[0][0].find(sql_str) != -1]
assert was_called, (
"Expected 'connection.execute' to have been "
"called with a string containing '%s'" % sql_str
)
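# For example, with the defaults above (method="gist", lang="english") and a
# field named "text", the fragment searched for would be:
#   ON "resource_id" USING gist(to_tsvector('english', "text"))
# The helper only checks that some connection.execute() call contains that
# substring; it does not run any SQL itself.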
class TestGetAllResourcesIdsInDatastore(object):
@pytest.mark.ckan_config(u"ckan.plugins", u"datastore")
@pytest.mark.usefixtures(u"with_plugins", u"clean_db")
def test_get_all_resources_ids_in_datastore(self):
resource_in_datastore = factories.Resource()
resource_not_in_datastore = factories.Resource()
data = {"resource_id": resource_in_datastore["id"], "force": True}
helpers.call_action("datastore_create", **data)
resource_ids = backend.get_all_resources_ids_in_datastore()
assert resource_in_datastore["id"] in resource_ids
assert resource_not_in_datastore["id"] not in resource_ids
def METHOD_NAME(res_id, value):
"""
A background job that uses the Datastore.
"""
app = helpers._get_test_app()
if not p.plugin_loaded(u"datastore"):
p.load("datastore")
data = {
"resource_id": res_id,
"method": "insert",
"records": [{"value": value}],
}
with app.flask_app.test_request_context():
helpers.call_action("datastore_upsert", **data)
class TestBackgroundJobs(helpers.RQTestBase):
"""
Test correct interaction with the background jobs system.
"""
@pytest.mark.ckan_config(u"ckan.plugins", u"datastore")
@pytest.mark.usefixtures(u"with_plugins", u"clean_db")
def test_worker_datastore_access(self, app):
"""
Test DataStore access from within a worker.
"""
pkg = factories.Dataset()
data = {
"resource": {"package_id": pkg["id"]},
"fields": [{"id": "value", "type": "int"}],
}
table = helpers.call_action("datastore_create", **data)
res_id = table["resource_id"]
for i in range(3):
self.enqueue(METHOD_NAME, args=[res_id, i])
jobs.Worker().work(burst=True)
# Aside from ensuring that the job succeeded, this also checks
# that accessing the Datastore still works in the main process.
result = helpers.call_action("datastore_search", resource_id=res_id)
assert [0, 1, 2] == [r["value"] for r in result["records"]] |
648 | get chunks | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import csv
import os
import random
from typing import List
from paddle.io import Dataset
from tqdm import tqdm
from ..backends.soundfile_backend import soundfile_load as load_audio
from ..backends.soundfile_backend import soundfile_save as save_wav
from ..utils import DATA_HOME
from ..utils.download import download_and_decompress
from .dataset import feat_funcs
__all__ = ['OpenRIRNoise']
class OpenRIRNoise(Dataset):
    archives = [
{
'url': 'http://www.openslr.org/resources/28/rirs_noises.zip',
'md5': 'e6f48e257286e05de56413b4779d8ffb',
},
]
sample_rate = 16000
meta_info = collections.namedtuple('META_INFO', ('id', 'duration', 'wav'))
base_path = os.path.join(DATA_HOME, 'open_rir_noise')
wav_path = os.path.join(base_path, 'RIRS_NOISES')
csv_path = os.path.join(base_path, 'csv')
subsets = ['rir', 'noise']
def __init__(self,
subset: str='rir',
feat_type: str='raw',
target_dir=None,
random_chunk: bool=True,
chunk_duration: float=3.0,
seed: int=0,
**kwargs):
assert subset in self.subsets, \
'Dataset subset must be one in {}, but got {}'.format(self.subsets, subset)
self.subset = subset
self.feat_type = feat_type
self.feat_config = kwargs
self.random_chunk = random_chunk
self.chunk_duration = chunk_duration
OpenRIRNoise.csv_path = os.path.join(
target_dir, "open_rir_noise",
"csv") if target_dir else self.csv_path
self._data = self._get_data()
super(OpenRIRNoise, self).__init__()
# Set up a seed to reproduce training or predicting result.
# random.seed(seed)
def _get_data(self):
# Download audio files.
print(f"rirs noises base path: {self.base_path}")
if not os.path.isdir(self.base_path):
download_and_decompress(
                self.archives, self.base_path, decompress=True)
else:
print(
f"{self.base_path} already exists, we will not download and decompress again"
)
# Data preparation.
print(f"prepare the csv to {self.csv_path}")
if not os.path.isdir(self.csv_path):
os.makedirs(self.csv_path)
self.prepare_data()
data = []
with open(os.path.join(self.csv_path, f'{self.subset}.csv'), 'r') as rf:
for line in rf.readlines()[1:]:
audio_id, duration, wav = line.strip().split(',')
data.append(self.meta_info(audio_id, float(duration), wav))
random.shuffle(data)
return data
def _convert_to_record(self, idx: int):
sample = self._data[idx]
record = {}
# To show all fields in a namedtuple: `type(sample)._fields`
for field in type(sample)._fields:
record[field] = getattr(sample, field)
waveform, sr = load_audio(record['wav'])
assert self.feat_type in feat_funcs.keys(), \
f"Unknown feat_type: {self.feat_type}, it must be one in {list(feat_funcs.keys())}"
feat_func = feat_funcs[self.feat_type]
feat = feat_func(
waveform, sr=sr, **self.feat_config) if feat_func else waveform
record.update({'feat': feat})
return record
@staticmethod
def METHOD_NAME(seg_dur, audio_id, audio_duration):
        num_chunks = int(audio_duration / seg_dur)  # durations here are in seconds
chunk_lst = [
audio_id + "_" + str(i * seg_dur) + "_" + str(i * seg_dur + seg_dur)
for i in range(num_chunks)
]
return chunk_lst
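    # Illustrative example (the audio id below is hypothetical): for a
    # 10-second file split with the default 3.0-second chunks,
    #   METHOD_NAME(3.0, "noise/foo", 10.0)
    # would return
    #   ["noise/foo_0.0_3.0", "noise/foo_3.0_6.0", "noise/foo_6.0_9.0"],
    # i.e. the trailing second that does not fill a whole chunk is dropped.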
def _get_audio_info(self, wav_file: str,
split_chunks: bool) -> List[List[str]]:
waveform, sr = load_audio(wav_file)
audio_id = wav_file.split("/open_rir_noise/")[-1].split(".")[0]
audio_duration = waveform.shape[0] / sr
ret = []
if split_chunks and audio_duration > self.chunk_duration: # Split into pieces of self.chunk_duration seconds.
uniq_chunks_list = self.METHOD_NAME(self.chunk_duration, audio_id,
audio_duration)
for idx, chunk in enumerate(uniq_chunks_list):
s, e = chunk.split("_")[-2:] # Timestamps of start and end
start_sample = int(float(s) * sr)
end_sample = int(float(e) * sr)
new_wav_file = os.path.join(self.base_path,
audio_id + f'_chunk_{idx+1:02}.wav')
save_wav(waveform[start_sample:end_sample], sr, new_wav_file)
# id, duration, new_wav
ret.append([chunk, self.chunk_duration, new_wav_file])
else: # Keep whole audio.
ret.append([audio_id, audio_duration, wav_file])
return ret
def generate_csv(self,
wav_files: List[str],
output_file: str,
split_chunks: bool=True):
print(f'Generating csv: {output_file}')
header = ["id", "duration", "wav"]
infos = list(
tqdm(
map(self._get_audio_info, wav_files, [split_chunks] * len(
wav_files)),
total=len(wav_files)))
csv_lines = []
for info in infos:
csv_lines.extend(info)
with open(output_file, mode="w") as csv_f:
csv_writer = csv.writer(
csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(header)
for line in csv_lines:
csv_writer.writerow(line)
def prepare_data(self):
rir_list = os.path.join(self.wav_path, "real_rirs_isotropic_noises",
"rir_list")
rir_files = []
with open(rir_list, 'r') as f:
for line in f.readlines():
rir_file = line.strip().split(' ')[-1]
rir_files.append(os.path.join(self.base_path, rir_file))
noise_list = os.path.join(self.wav_path, "pointsource_noises",
"noise_list")
noise_files = []
with open(noise_list, 'r') as f:
for line in f.readlines():
noise_file = line.strip().split(' ')[-1]
noise_files.append(os.path.join(self.base_path, noise_file))
self.generate_csv(rir_files, os.path.join(self.csv_path, 'rir.csv'))
self.generate_csv(noise_files, os.path.join(self.csv_path, 'noise.csv'))
def __getitem__(self, idx):
return self._convert_to_record(idx)
def __len__(self):
return len(self._data) |
649 | main test | # Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
from typing import List, Dict
import pygal
from pygal import Graph
from pygal.style import DarkSolarizedStyle
from skytemple_files.data.level_bin_entry.model import LevelBinEntry
from skytemple_files.data.md.protocol import MdEntryProtocol
from skytemple_files.data.waza_p.protocol import MoveLearnsetProtocol
from skytemple_files.common.i18n_util import f, _
class LevelUpGraphProvider:
def __init__(self, monster: MdEntryProtocol, level_bin_entry: LevelBinEntry,
move_learnset: MoveLearnsetProtocol, move_strings: List[str]):
self.monster = monster
self.level_bin_entry = level_bin_entry
self.move_learnset = move_learnset
self.move_strings = move_strings
def provide(self, add_title=None, dark=False, disable_xml_declaration=False) -> Graph:
chart = pygal.XY(
xrange=(1, len(self.level_bin_entry.levels) + 1),
secondary_range=(0, max([x.experience_required for x in self.level_bin_entry.levels])),
disable_xml_declaration=disable_xml_declaration
)
if add_title:
chart.title = add_title
if dark:
chart.style = DarkSolarizedStyle
exps = []
hps = []
atks = []
sp_atks = []
defs = []
sp_defs = []
hp_accu = self.monster.base_hp
atk_accu = self.monster.base_atk
sp_atk_accu = self.monster.base_sp_atk
def_accu = self.monster.base_def
sp_def_accu = self.monster.base_sp_def
for i, level in enumerate(self.level_bin_entry.levels):
exps.append((i + 1, level.experience_required))
hp_accu += level.hp_growth # type: ignore
hps.append((i + 1, hp_accu))
atk_accu += level.attack_growth # type: ignore
atks.append((i + 1, atk_accu))
sp_atk_accu += level.special_attack_growth # type: ignore
sp_atks.append((i + 1, sp_atk_accu))
def_accu += level.defense_growth # type: ignore
defs.append((i + 1, def_accu))
sp_def_accu += level.special_defense_growth # type: ignore
sp_defs.append((i + 1, sp_def_accu))
max_val: int = max(hp_accu, atk_accu, sp_atk_accu, def_accu, sp_def_accu) # type: ignore
moves = []
processed_levels: Dict[int, int] = {}
for lum in self.move_learnset.level_up_moves:
if lum.level_id in processed_levels:
processed_levels[lum.level_id] += 1
else:
processed_levels[lum.level_id] = 1
count_so_far = processed_levels[lum.level_id] - 1
moves.append({
'value': (lum.level_id, max_val + 5 + (5 * count_so_far)),
'label': self.move_strings[lum.move_id]
})
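        # Each move is plotted as a point slightly above the highest stat curve
        # (max_val + 5); moves that share the same level are stacked a further
        # 5 units apart so their labels do not overlap.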
chart.add(_('Exp.'), exps, secondary=True) # TRANSLATORS: Experience
chart.add(_('HP'), hps) # TRANSLATORS: Health Points
chart.add(_('ATK'), atks) # TRANSLATORS: Attack
chart.add(_('Sp. ATK'), sp_atks) # TRANSLATORS: Special Attack
chart.add(_('DEF'), defs) # TRANSLATORS: Defense
chart.add(_('Sp. DEF'), sp_defs) # TRANSLATORS: Special Defense
chart.add(_('Moves'), moves, stroke=False,
formatter=lambda x: f(_('at level {x[0]}')))
return chart
if __name__ == '__main__':
def METHOD_NAME():
import os
from skytemple_files.common.types.file_types import FileType
from ndspy.rom import NintendoDSRom
from skytemple_files.common.util import get_ppmdu_config_for_rom
# Testing.
base_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
rom = NintendoDSRom.fromFile(os.path.join(base_dir, 'skyworkcopy_us.nds'))
config = get_ppmdu_config_for_rom(rom)
out_dir = '/tmp/monster_graphs'
os.makedirs(out_dir, exist_ok=True)
monster_md = FileType.MD.deserialize(rom.getFileByName('BALANCE/monster.md'))
level_bin = FileType.BIN_PACK.deserialize(rom.getFileByName('BALANCE/m_level.bin'))
waza_p = FileType.WAZA_P.deserialize(rom.getFileByName('BALANCE/waza_p.bin'))
move_string_block = config.string_index_data.string_blocks['Move Names']
monster_name_block = config.string_index_data.string_blocks['Pokemon Names']
strings = FileType.STR.deserialize(rom.getFileByName('MESSAGE/text_e.str'))
move_strings = strings.strings[move_string_block.begin:move_string_block.end]
monster_strings = strings.strings[monster_name_block.begin:monster_name_block.end]
level_bin = level_bin
# The level_bin has no entry for monster 0.
for monster, lbinentry_bin, waza_entry in zip(monster_md.entries[1:], level_bin, waza_p.learnsets[1:]):
level_bin_entry = FileType.LEVEL_BIN_ENTRY.deserialize(
FileType.COMMON_AT.deserialize(FileType.SIR0.deserialize(lbinentry_bin).content).decompress()
)
graph_provider = LevelUpGraphProvider(monster, level_bin_entry, waza_entry, move_strings)
g = graph_provider.provide(
f'{monster_strings[monster.md_index]}',
dark=True
)
g.render_to_file(os.path.join(out_dir, f'{monster.md_index}.svg'))
g.render_to_png(os.path.join(out_dir, f'{monster.md_index}.png'), dpi=92)
METHOD_NAME() |
650 | yotpo loyalty erasure data | from typing import Any, Dict, Generator
import pydash
import pytest
import requests
from faker import Faker
from requests import Response
from sqlalchemy.orm import Session
from fides.api.cryptography import cryptographic_util
from fides.api.models.connectionconfig import (
AccessLevel,
ConnectionConfig,
ConnectionType,
)
from fides.api.models.datasetconfig import DatasetConfig
from fides.api.models.sql_models import Dataset as CtlDataset
from fides.api.util.saas_util import (
load_config_with_replacement,
load_dataset_with_replacement,
)
from tests.ops.test_helpers.saas_test_utils import poll_for_existence
from tests.ops.test_helpers.vault_client import get_secrets
secrets = get_secrets("yotpo_loyalty")
@pytest.fixture(scope="session")
def yotpo_loyalty_secrets(saas_config):
return {
"domain": pydash.get(saas_config, "yotpo_loyalty.domain") or secrets["domain"],
"merchant_id": pydash.get(saas_config, "yotpo_loyalty.merchant_id")
or secrets["merchant_id"],
"guid": pydash.get(saas_config, "yotpo_loyalty.guid") or secrets["guid"],
"api_key": pydash.get(saas_config, "yotpo_loyalty.api_key")
or secrets["api_key"],
"store_id": pydash.get(saas_config, "yotpo_loyalty.store_id")
or secrets["store_id"],
"secret_key": pydash.get(saas_config, "yotpo_loyalty.secret_key")
or secrets["secret_key"],
}
@pytest.fixture(scope="session")
def yotpo_loyalty_identity_email(saas_config):
return (
pydash.get(saas_config, "yotpo_loyalty.identity_email")
or secrets["identity_email"]
)
@pytest.fixture(scope="session")
def yotpo_loyalty_identity_phone_number(saas_config):
return (
pydash.get(saas_config, "yotpo_loyalty.identity_phone_number")
or secrets["identity_phone_number"]
)
@pytest.fixture(scope="function")
def yotpo_loyalty_erasure_identity_email() -> str:
return f"{cryptographic_util.generate_secure_random_string(13)}@email.com"
@pytest.fixture
def yotpo_loyalty_config() -> Dict[str, Any]:
return load_config_with_replacement(
"data/saas/config/yotpo_loyalty_config.yml",
"<instance_fides_key>",
"yotpo_loyalty_instance",
)
@pytest.fixture
def yotpo_loyalty_dataset() -> Dict[str, Any]:
return load_dataset_with_replacement(
"data/saas/dataset/yotpo_loyalty_dataset.yml",
"<instance_fides_key>",
"yotpo_loyalty_instance",
)[0]
@pytest.fixture(scope="function")
def yotpo_loyalty_connection_config(
db: Session, yotpo_loyalty_config, yotpo_loyalty_secrets
) -> Generator:
fides_key = yotpo_loyalty_config["fides_key"]
connection_config = ConnectionConfig.create(
db=db,
data={
"key": fides_key,
"name": fides_key,
"connection_type": ConnectionType.saas,
"access": AccessLevel.write,
"secrets": yotpo_loyalty_secrets,
"saas_config": yotpo_loyalty_config,
},
)
yield connection_config
connection_config.delete(db)
@pytest.fixture
def yotpo_loyalty_dataset_config(
db: Session,
yotpo_loyalty_connection_config: ConnectionConfig,
yotpo_loyalty_dataset: Dict[str, Any],
) -> Generator:
fides_key = yotpo_loyalty_dataset["fides_key"]
yotpo_loyalty_connection_config.name = fides_key
yotpo_loyalty_connection_config.key = fides_key
yotpo_loyalty_connection_config.save(db=db)
ctl_dataset = CtlDataset.create_from_dataset_dict(db, yotpo_loyalty_dataset)
dataset = DatasetConfig.create(
db=db,
data={
"connection_config_id": yotpo_loyalty_connection_config.id,
"fides_key": fides_key,
"ctl_dataset_id": ctl_dataset.id,
},
)
yield dataset
dataset.delete(db=db)
ctl_dataset.delete(db=db)
class YotpoLoyaltyTestClient:
def __init__(self, connection_config: ConnectionConfig):
yotpo_loyalty_secrets = connection_config.secrets
self.domain = yotpo_loyalty_secrets["domain"]
self.headers = {
"x-guid": yotpo_loyalty_secrets["guid"],
"x-api-key": yotpo_loyalty_secrets["api_key"],
}
def create_customer(self, email: str) -> Response:
faker = Faker()
return requests.post(
url=f"https://{self.domain}/api/v2/customers",
headers=self.headers,
json={
"email": email,
"first_name": faker.first_name(),
"last_name": faker.last_name(),
},
)
def get_customer(self, email: str) -> Any:
response = requests.get(
url=f"https://{self.domain}/api/v2/customers",
headers=self.headers,
params={"customer_email": email},
)
if not response.status_code == 404:
return response
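    # Note: a 404 makes this return None implicitly, which is presumably what
    # lets poll_for_existence below keep retrying until the customer record
    # shows up.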
@pytest.fixture(scope="function")
def yotpo_loyalty_test_client(
yotpo_loyalty_connection_config: ConnectionConfig,
) -> Generator:
test_client = YotpoLoyaltyTestClient(yotpo_loyalty_connection_config)
yield test_client
@pytest.fixture(scope="function")
def METHOD_NAME(
yotpo_loyalty_test_client: YotpoLoyaltyTestClient,
yotpo_loyalty_erasure_identity_email,
) -> None:
# create customer
response = yotpo_loyalty_test_client.create_customer(
yotpo_loyalty_erasure_identity_email
)
assert response.ok
poll_for_existence(
yotpo_loyalty_test_client.get_customer,
(yotpo_loyalty_erasure_identity_email,),
interval=30,
verification_count=3,
) |
651 | estimate | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Amplitude Estimation interface."""
from __future__ import annotations
from abc import abstractmethod, ABC
from collections.abc import Callable
import numpy as np
from .estimation_problem import EstimationProblem
from ..algorithm_result import AlgorithmResult
class AmplitudeEstimator(ABC):
"""The Amplitude Estimation interface."""
@abstractmethod
def METHOD_NAME(self, estimation_problem: EstimationProblem) -> "AmplitudeEstimatorResult":
"""Run the amplitude estimation algorithm.
Args:
estimation_problem: An ``EstimationProblem`` containing all problem-relevant information
such as the state preparation and the objective qubits.
"""
raise NotImplementedError
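# A minimal usage sketch (the concrete estimator and its arguments are only an
# example): a subclass such as IterativeAmplitudeEstimation would be driven as
#
#   ae = IterativeAmplitudeEstimation(epsilon_target=0.01, alpha=0.05, sampler=sampler)
#   result = ae.METHOD_NAME(problem)   # `problem` is an EstimationProblem
#   print(result.estimation, result.confidence_interval)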
class AmplitudeEstimatorResult(AlgorithmResult):
"""The results object for amplitude estimation algorithms."""
def __init__(self) -> None:
super().__init__()
self._circuit_results: np.ndarray | dict[str, int] | None = None
self._shots: int | None = None
self._estimation: float | None = None
self._estimation_processed: float | None = None
self._num_oracle_queries: int | None = None
self._post_processing: Callable[[float], float] | None = None
self._confidence_interval: tuple[float, float] | None = None
self._confidence_interval_processed: tuple[float, float] | None = None
@property
def circuit_results(self) -> np.ndarray | dict[str, int] | None:
"""Return the circuit results. Can be a statevector or counts dictionary."""
return self._circuit_results
@circuit_results.setter
def circuit_results(self, value: np.ndarray | dict[str, int]) -> None:
"""Set the circuit results."""
self._circuit_results = value
@property
def shots(self) -> int:
"""Return the number of shots used. Is 1 for statevector-based simulations."""
return self._shots
@shots.setter
def shots(self, value: int) -> None:
"""Set the number of shots used."""
self._shots = value
@property
def estimation(self) -> float:
r"""Return the estimation for the amplitude in :math:`[0, 1]`."""
return self._estimation
@estimation.setter
def estimation(self, value: float) -> None:
r"""Set the estimation for the amplitude in :math:`[0, 1]`."""
self._estimation = value
@property
def estimation_processed(self) -> float:
"""Return the estimation for the amplitude after the post-processing has been applied."""
return self._estimation_processed
@estimation_processed.setter
def estimation_processed(self, value: float) -> None:
"""Set the estimation for the amplitude after the post-processing has been applied."""
self._estimation_processed = value
@property
def num_oracle_queries(self) -> int:
"""Return the number of Grover oracle queries."""
return self._num_oracle_queries
@num_oracle_queries.setter
def num_oracle_queries(self, value: int) -> None:
"""Set the number of Grover oracle queries."""
self._num_oracle_queries = value
@property
def post_processing(self) -> Callable[[float], float]:
"""Return a handle to the post processing function."""
return self._post_processing
@post_processing.setter
def post_processing(self, post_processing: Callable[[float], float]) -> None:
"""Set a handle to the post processing function."""
self._post_processing = post_processing
@property
def confidence_interval(self) -> tuple[float, float]:
"""Return the confidence interval for the amplitude (95% interval by default)."""
return self._confidence_interval
@confidence_interval.setter
def confidence_interval(self, confidence_interval: tuple[float, float]) -> None:
"""Set the confidence interval for the amplitude (95% interval by default)."""
self._confidence_interval = confidence_interval
@property
def confidence_interval_processed(self) -> tuple[float, float]:
"""Return the post-processed confidence interval (95% interval by default)."""
return self._confidence_interval_processed
@confidence_interval_processed.setter
def confidence_interval_processed(self, confidence_interval: tuple[float, float]) -> None:
"""Set the post-processed confidence interval (95% interval by default)."""
self._confidence_interval_processed = confidence_interval |
652 | stream slices | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import datetime
import decimal
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import requests
import source_adjust.model
from airbyte_cdk.models import AirbyteMessage, AirbyteStateMessage, SyncMode, Type
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import IncrementalMixin, Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.requests_native_auth import TokenAuthenticator
class AdjustReportStream(HttpStream, IncrementalMixin):
"""
Adjust reports service integration with support for incremental synchronization.
"""
def __init__(self, connector: "SourceAdjust", config: Mapping[str, Any], **kwargs):
super().__init__(**kwargs)
self.connector = connector
self.config = config
self._cursor: Optional[datetime.date] = None
@property
def url_base(self) -> str:
return "https://dash.adjust.com/control-center/reports-service/"
@property
def state(self):
if self._cursor is not None:
cursor = self._cursor.isoformat()
else:
cursor = self.config["ingest_start"]
return {
self.cursor_field: cursor,
}
@state.setter
def state(self, value: MutableMapping[str, Any]):
self._cursor = datetime.date.fromisoformat(value[self.cursor_field])
def read_records(
self,
sync_mode: SyncMode,
cursor_field: Optional[List[str]] = None,
stream_slice: Optional[Mapping[str, Any]] = None,
stream_state: Optional[Mapping[str, Any]] = None,
) -> Iterable[Mapping[str, Any]]:
fallback = datetime.date.fromisoformat(self.config["ingest_start"])
cf: str = self.cursor_field
for record in super().read_records(sync_mode, cursor_field, stream_slice, stream_state):
record_stamp = datetime.date.fromisoformat(record[cf])
self._cursor = max(record_stamp, self._cursor or fallback)
yield record
def path(
self,
stream_state: Optional[Mapping[str, Any]] = None,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> str:
"""
Report URL path suffix.
"""
return "report"
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
"""
Get query parameter definitions.
"""
required_dimensions = ["day"]
dimensions = required_dimensions + self.config["dimensions"]
metrics = self.config["metrics"] + self.config["additional_metrics"]
date = stream_slice[self.cursor_field]
return {
"date_period": ":".join([date, date]), # inclusive
"metrics": ",".join(metrics),
"dimensions": ",".join(dimensions),
}
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
def reshape(row: MutableMapping[str, Any]):
model = source_adjust.model.Report.__dict__["__fields__"]
row.pop("attr_dependency", None)
# Unfortunately all fields are returned as strings by the API
for k, v in list(row.items()):
if k in model:
type_ = model[k].type_
else: # Additional user-provided metrics are assumed to be decimal
type_ = decimal.Decimal
if type_ in (int, decimal.Decimal):
try:
row[k] = type_(v)
except TypeError:
self.logger.warning("Unable to convert field '%s': %s to %s, leaving '%s' as is", k, v, type_.__name__, k)
return row
body = response.json()
return (reshape(row) for row in body["rows"])
def METHOD_NAME(self, stream_state: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
cf: str = self.cursor_field
now = datetime.datetime.utcnow().date()
if self._cursor and self._cursor > now:
self.logger.warning("State ingest target date in future, setting cursor to today's date")
self._cursor = now
self.connector.checkpoint()
if stream_state is not None and stream_state.get(cf):
date = datetime.date.fromisoformat(stream_state[cf])
if now - date == datetime.timedelta(days=1):
return
else:
date = datetime.date.fromisoformat(self.config["ingest_start"])
if self.config["until_today"]:
end_date = now
else:
end_date = now + datetime.timedelta(days=1)
while date < end_date:
yield {cf: date.isoformat()}
date += datetime.timedelta(days=1)
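    # Illustrative example (dates are hypothetical): with no saved state,
    # ingest_start "2023-01-01", until_today enabled and "today" being
    # 2023-01-04, this yields {"day": "2023-01-01"}, {"day": "2023-01-02"} and
    # {"day": "2023-01-03"}, i.e. one single-day slice per report request.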
def get_json_schema(self):
"""
Prune the schema to only include selected fields to synchronize.
"""
schema = source_adjust.model.Report.schema()
properties = schema["properties"]
required = schema["required"]
selected = self.config["metrics"] + self.config["dimensions"]
retain = required + selected
for attr in list(properties.keys()):
if attr not in retain:
del properties[attr]
for attr in self.config["additional_metrics"]:
properties[attr] = {"type": "number"}
return schema
@property
def cursor_field(self) -> str:
"""
Name of the field in the API response body used as cursor.
"""
return "day"
@property
def primary_key(self):
return None
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
class SourceAdjust(AbstractSource):
check_endpoint = "https://dash.adjust.com/control-center/reports-service/filters_data"
def check_connection(self, logger, config) -> Tuple[bool, Any]:
"""
Verify the configuration supplied can be used to connect to the API.
:param config: config object as per definition in spec.yaml
:param logger: logger object
        :return: (True, None) if the connection to the API succeeds,
(False, error) otherwise.
"""
requests.get(
url=self.check_endpoint,
headers={"Authorization": f'Bearer {config["api_token"]:s}'},
).raise_for_status()
return True, None # Are we coding in go?
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
"""
Stream registry.
:param config: user input configuration as defined in the connector spec.
"""
auth = TokenAuthenticator(token=config["api_token"])
self._streams = [
AdjustReportStream(connector=self, config=config, authenticator=auth),
]
return self._streams
def checkpoint(self):
"""
Checkpoint state.
"""
state = AirbyteMessage(
type=Type.STATE,
state=AirbyteStateMessage(
data={stream.name: stream.state for stream in self._streams},
),
)
print(state.json(exclude_unset=True)) # Emit state |
653 | inner | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for common micro transports."""
import logging
import sys
import unittest
import pytest
import tvm.testing
# Implementing as a fixture so that the tvm.micro import doesn't occur
# until fixture setup time. This is necessary for pytest's collection
# phase to work when USE_MICRO=OFF, while still explicitly listing the
# tests as skipped.
@tvm.testing.fixture
def transport():
import tvm.micro
class MockTransport_Impl(tvm.micro.transport.Transport):
def __init__(self):
self.exc = None
self.to_return = None
def _raise_or_return(self):
if self.exc is not None:
to_raise = self.exc
self.exc = None
raise to_raise
elif self.to_return is not None:
to_return = self.to_return
self.to_return = None
return to_return
else:
assert False, "should not get here"
def open(self):
pass
def close(self):
pass
def timeouts(self):
raise NotImplementedError()
def read(self, n, timeout_sec):
return self._raise_or_return()
def write(self, data, timeout_sec):
return self._raise_or_return()
return MockTransport_Impl()
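# The mock above queues exactly one outcome per transport call: a test sets
# either `exc` (raised and cleared on the next read/write) or `to_return`
# (returned once), so every test case exercises a single transport operation.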
@tvm.testing.fixture
def transport_logger(transport):
logger = logging.getLogger("transport_logger_test")
return tvm.micro.transport.TransportLogger("foo", transport, logger=logger)
@tvm.testing.fixture
def get_latest_log(caplog):
def METHOD_NAME():
return caplog.records[-1].getMessage()
with caplog.at_level(logging.INFO, "transport_logger_test"):
yield METHOD_NAME
@tvm.testing.requires_micro
def test_open(transport_logger, get_latest_log):
transport_logger.open()
assert get_latest_log() == "foo: opening transport"
@tvm.testing.requires_micro
def test_close(transport_logger, get_latest_log):
transport_logger.close()
assert get_latest_log() == "foo: closing transport"
@tvm.testing.requires_micro
def test_read_normal(transport, transport_logger, get_latest_log):
transport.to_return = b"data"
transport_logger.read(23, 3.0)
assert get_latest_log() == (
"foo: read { 3.00s} 23 B -> [ 4 B]: 64 61 74 61"
" data"
)
@tvm.testing.requires_micro
def test_read_multiline(transport, transport_logger, get_latest_log):
transport.to_return = b"data" * 6
transport_logger.read(23, 3.0)
assert get_latest_log() == (
"foo: read { 3.00s} 23 B -> [ 24 B]:\n"
"0000 64 61 74 61 64 61 74 61 64 61 74 61 64 61 74 61 datadatadatadata\n"
"0010 64 61 74 61 64 61 74 61 datadata"
)
@tvm.testing.requires_micro
def test_read_no_timeout_prints(transport, transport_logger, get_latest_log):
transport.to_return = b"data"
transport_logger.read(15, None)
assert get_latest_log() == (
"foo: read { None } 15 B -> [ 4 B]: 64 61 74 61"
" data"
)
@tvm.testing.requires_micro
def test_read_io_timeout(transport, transport_logger, get_latest_log):
# IoTimeoutError includes the timeout value.
transport.exc = tvm.micro.transport.IoTimeoutError()
with pytest.raises(tvm.micro.transport.IoTimeoutError):
transport_logger.read(23, 0.0)
assert get_latest_log() == ("foo: read { 0.00s} 23 B -> [IoTimeoutError 0.00s]")
@tvm.testing.requires_micro
def test_read_other_exception(transport, transport_logger, get_latest_log):
# Other exceptions are logged by name.
transport.exc = tvm.micro.transport.TransportClosedError()
with pytest.raises(tvm.micro.transport.TransportClosedError):
transport_logger.read(8, 0.0)
assert get_latest_log() == ("foo: read { 0.00s} 8 B -> [err: TransportClosedError]")
@tvm.testing.requires_micro
def test_read_keyboard_interrupt(transport, transport_logger, get_latest_log):
# KeyboardInterrupt produces no log record.
transport.exc = KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
transport_logger.read(8, 0.0)
with pytest.raises(IndexError):
get_latest_log()
@tvm.testing.requires_micro
def test_write_normal(transport, transport_logger, get_latest_log):
transport.to_return = 3
transport_logger.write(b"data", 3.0)
assert get_latest_log() == (
"foo: write { 3.00s} <- [ 4 B]: 64 61 74 61"
" data"
)
@tvm.testing.requires_micro
def test_write_multiline(transport, transport_logger, get_latest_log):
# Normal log, multi-line data written.
transport.to_return = 20
transport_logger.write(b"data" * 6, 3.0)
assert get_latest_log() == (
"foo: write { 3.00s} <- [ 24 B]:\n"
"0000 64 61 74 61 64 61 74 61 64 61 74 61 64 61 74 61 datadatadatadata\n"
"0010 64 61 74 61 64 61 74 61 datadata"
)
@tvm.testing.requires_micro
def test_write_no_timeout_prints(transport, transport_logger, get_latest_log):
transport.to_return = 3
transport_logger.write(b"data", None)
assert get_latest_log() == (
"foo: write { None } <- [ 4 B]: 64 61 74 61"
" data"
)
@tvm.testing.requires_micro
def test_write_io_timeout(transport, transport_logger, get_latest_log):
# IoTimeoutError includes the timeout value.
transport.exc = tvm.micro.transport.IoTimeoutError()
with pytest.raises(tvm.micro.transport.IoTimeoutError):
transport_logger.write(b"data", 0.0)
assert get_latest_log() == ("foo: write { 0.00s} <- [ 4 B]: [IoTimeoutError 0.00s]")
@tvm.testing.requires_micro
def test_write_other_exception(transport, transport_logger, get_latest_log):
# Other exceptions are logged by name.
transport.exc = tvm.micro.transport.TransportClosedError()
with pytest.raises(tvm.micro.transport.TransportClosedError):
transport_logger.write(b"data", 0.0)
assert get_latest_log() == ("foo: write { 0.00s} <- [ 4 B]: [err: TransportClosedError]")
@tvm.testing.requires_micro
def test_write_keyboard_interrupt(transport, transport_logger, get_latest_log):
# KeyboardInterrupt produces no log record.
transport.exc = KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
transport_logger.write(b"data", 0.0)
with pytest.raises(IndexError):
get_latest_log()
if __name__ == "__main__":
tvm.testing.main() |
654 | create output header | # -*- coding:utf-8 -*-
# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""This is a tool to transform a crt file into a C/C++ header.
Usage:
generate_sslroots.py cert_file.crt [--verbose | -v] [--full_cert | -f]
Arguments:
-v Print output while running.
-f Add public key and certificate name. Default is to skip and reduce
generated file size.
"""
import commands
from optparse import OptionParser
import os
import re
import string
_GENERATED_FILE = 'ssl_roots.h'
_PREFIX = '__generated__'
_EXTENSION = '.crt'
_SUBJECT_NAME_ARRAY = 'subject_name'
_SUBJECT_NAME_VARIABLE = 'SubjectName'
_PUBLIC_KEY_ARRAY = 'public_key'
_PUBLIC_KEY_VARIABLE = 'PublicKey'
_CERTIFICATE_ARRAY = 'certificate'
_CERTIFICATE_VARIABLE = 'Certificate'
_CERTIFICATE_SIZE_VARIABLE = 'CertificateSize'
_INT_TYPE = 'size_t'
_CHAR_TYPE = 'const unsigned char*'
_VERBOSE = 'verbose'
def main():
"""The main entrypoint."""
parser = OptionParser('usage %prog FILE')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true')
parser.add_option('-f', '--full_cert', dest='full_cert', action='store_true')
options, args = parser.parse_args()
if len(args) < 1:
parser.error('No crt file specified.')
return
root_dir = _SplitCrt(args[0], options)
_GenCFiles(root_dir, options)
_Cleanup(root_dir)
def _SplitCrt(source_file, options):
sub_file_blocks = []
label_name = ''
root_dir = os.path.dirname(os.path.abspath(source_file)) + '/'
_PrintOutput(root_dir, options)
f = open(source_file)
for line in f:
if line.startswith('# Label: '):
sub_file_blocks.append(line)
label = re.search(r'\".*\"', line)
temp_label = label.group(0)
end = len(temp_label)-1
label_name = _SafeName(temp_label[1:end])
elif line.startswith('-----END CERTIFICATE-----'):
sub_file_blocks.append(line)
new_file_name = root_dir + _PREFIX + label_name + _EXTENSION
_PrintOutput('Generating: ' + new_file_name, options)
new_file = open(new_file_name, 'w')
for out_line in sub_file_blocks:
new_file.write(out_line)
new_file.close()
sub_file_blocks = []
else:
sub_file_blocks.append(line)
f.close()
return root_dir
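# _SplitCrt buffers lines until each '-----END CERTIFICATE-----' marker and
# writes the accumulated block to '__generated__<label>.crt' next to the source
# bundle, where <label> is the sanitized quoted name from the '# Label:' line.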
def _GenCFiles(root_dir, options):
output_header_file = open(root_dir + _GENERATED_FILE, 'w')
output_header_file.write(METHOD_NAME())
if options.full_cert:
subject_name_list = _CreateArraySectionHeader(_SUBJECT_NAME_VARIABLE,
_CHAR_TYPE, options)
public_key_list = _CreateArraySectionHeader(_PUBLIC_KEY_VARIABLE,
_CHAR_TYPE, options)
certificate_list = _CreateArraySectionHeader(_CERTIFICATE_VARIABLE,
_CHAR_TYPE, options)
certificate_size_list = _CreateArraySectionHeader(_CERTIFICATE_SIZE_VARIABLE,
_INT_TYPE, options)
for _, _, files in os.walk(root_dir):
for current_file in files:
if current_file.startswith(_PREFIX):
prefix_length = len(_PREFIX)
length = len(current_file) - len(_EXTENSION)
label = current_file[prefix_length:length]
filtered_output, cert_size = _CreateCertSection(root_dir, current_file,
label, options)
output_header_file.write(filtered_output + '\n\n\n')
if options.full_cert:
subject_name_list += _AddLabelToArray(label, _SUBJECT_NAME_ARRAY)
public_key_list += _AddLabelToArray(label, _PUBLIC_KEY_ARRAY)
certificate_list += _AddLabelToArray(label, _CERTIFICATE_ARRAY)
certificate_size_list += (' %s,\n') %(cert_size)
if options.full_cert:
subject_name_list += _CreateArraySectionFooter()
output_header_file.write(subject_name_list)
public_key_list += _CreateArraySectionFooter()
output_header_file.write(public_key_list)
certificate_list += _CreateArraySectionFooter()
output_header_file.write(certificate_list)
certificate_size_list += _CreateArraySectionFooter()
output_header_file.write(certificate_size_list)
output_header_file.close()
def _Cleanup(root_dir):
for f in os.listdir(root_dir):
if f.startswith(_PREFIX):
os.remove(root_dir + f)
def _CreateCertSection(root_dir, source_file, label, options):
command = 'openssl x509 -in %s%s -noout -C' %(root_dir, source_file)
_PrintOutput(command, options)
output = commands.getstatusoutput(command)[1]
renamed_output = output.replace('unsigned char XXX_',
'const unsigned char ' + label + '_')
filtered_output = ''
cert_block = '^const unsigned char.*?};$'
prog = re.compile(cert_block, re.IGNORECASE | re.MULTILINE | re.DOTALL)
if not options.full_cert:
filtered_output = prog.sub('', renamed_output, count=2)
else:
filtered_output = renamed_output
cert_size_block = r'\d\d\d+'
prog2 = re.compile(cert_size_block, re.MULTILINE | re.VERBOSE)
result = prog2.findall(renamed_output)
cert_size = result[len(result) - 1]
return filtered_output, cert_size
def METHOD_NAME():
output = ('// This file is the root certificates in C form that are needed to'
' connect to\n// Google.\n\n'
'// It was generated with the following command line:\n'
'// > python tools/certs/generate_sslroots.py'
'\n// https://pki.google.com/roots.pem\n\n')
return output
def _CreateArraySectionHeader(type_name, type_type, options):
output = ('const %s kSSLCert%sList[] = {\n') %(type_type, type_name)
_PrintOutput(output, options)
return output
def _AddLabelToArray(label, type_name):
return ' %s_%s,\n' %(label, type_name)
def _CreateArraySectionFooter():
return '};\n\n'
def _SafeName(original_file_name):
bad_chars = ' -./\\()áéíőú'
replacement_chars = ''
for _ in bad_chars:
replacement_chars += '_'
translation_table = string.maketrans(bad_chars, replacement_chars)
return original_file_name.translate(translation_table)
def _PrintOutput(output, options):
if options.verbose:
print output
if __name__ == '__main__':
main() |
655 | test delayed | from datetime import datetime, timedelta
from django.test.utils import override_settings
from olympia.amo.tests import (
TestCase,
addon_factory,
block_factory,
user_factory,
version_factory,
)
from ..models import BlocklistSubmission
class TestBlocklistSubmissionManager(TestCase):
def METHOD_NAME(self):
now = datetime.now()
BlocklistSubmission.objects.create(delayed_until=None)
BlocklistSubmission.objects.create(delayed_until=now - timedelta(days=1))
future = BlocklistSubmission.objects.create(
input_guids='future@', delayed_until=now + timedelta(days=1)
)
assert list(BlocklistSubmission.objects.delayed()) == [future]
class TestBlocklistSubmission(TestCase):
def test_is_submission_ready(self):
submitter = user_factory()
signoffer = user_factory()
block = BlocklistSubmission.objects.create()
# No signoff_by so not permitted
assert not block.signoff_state
assert not block.signoff_by
assert not block.is_submission_ready
# Except when the state is SIGNOFF_AUTOAPPROVED.
block.update(signoff_state=BlocklistSubmission.SIGNOFF_AUTOAPPROVED)
assert block.is_submission_ready
# But if the state is SIGNOFF_APPROVED we need to know the signoff user
block.update(signoff_state=BlocklistSubmission.SIGNOFF_APPROVED)
assert not block.is_submission_ready
# If different users update and signoff, it's permitted.
block.update(updated_by=submitter, signoff_by=signoffer)
assert block.is_submission_ready
# But not if submitter is also the sign off user.
block.update(signoff_by=submitter)
assert not block.is_submission_ready
# Except when that's not enforced locally
with override_settings(DEBUG=True):
assert block.is_submission_ready
def test_is_delayed_submission_ready(self):
now = datetime.now()
submission = BlocklistSubmission.objects.create(
signoff_state=BlocklistSubmission.SIGNOFF_AUTOAPPROVED
)
# auto approved submissions with no delay are ready
assert submission.is_submission_ready
# not when the submission is delayed though
submission.update(delayed_until=now + timedelta(days=1))
assert not submission.is_submission_ready
# it's ready when the delay date has passed though
submission.update(delayed_until=now)
assert submission.is_submission_ready
def test_get_submissions_from_guid(self):
addon = addon_factory(guid='guid@')
block_subm = BlocklistSubmission.objects.create(
input_guids='guid@\n{sdsd-dssd}'
)
# add another one which shouldn't match
BlocklistSubmission.objects.create(input_guids='gguid@\n{4545-986}')
assert block_subm.to_block == [
{
'id': None,
'guid': 'guid@',
'average_daily_users': addon.average_daily_users,
}
]
# The guid is in a BlocklistSubmission
assert list(BlocklistSubmission.get_submissions_from_guid('guid@')) == [
block_subm
]
        # But by default we ignore "finished" BlocklistSubmissions
block_subm.update(signoff_state=BlocklistSubmission.SIGNOFF_PUBLISHED)
assert list(BlocklistSubmission.get_submissions_from_guid('guid@')) == []
# Except when we override the states to exclude
assert list(
BlocklistSubmission.get_submissions_from_guid('guid@', excludes=())
) == [block_subm]
# And check that a guid that doesn't exist in any submissions is empty
assert list(BlocklistSubmission.get_submissions_from_guid('ggguid@')) == []
def test_all_adu_safe(self):
addon_factory(guid='zero@adu', average_daily_users=0)
addon_factory(guid='normal@adu', average_daily_users=500)
addon_factory(guid='high@adu', average_daily_users=999_999)
submission = BlocklistSubmission.objects.create(
input_guids='zero@adu\nnormal@adu'
)
submission.to_block = submission._serialize_blocks()
# 0 adu is safe when we have unlisted adu
assert submission.all_adu_safe()
# safe because just normal adu
submission.update(input_guids='normal@adu')
submission.update(to_block=submission._serialize_blocks())
assert submission.all_adu_safe()
# unsafe because just a high adu addon included
submission.update(input_guids='high@adu\nnormal@adu')
submission.update(to_block=submission._serialize_blocks())
assert not submission.all_adu_safe()
def test_has_version_changes(self):
addon = addon_factory(guid='guid@')
block_factory(addon=addon, updated_by=user_factory(), reason='things')
new_version = version_factory(addon=addon)
submission = BlocklistSubmission.objects.create(
input_guids=addon.guid, changed_version_ids=[]
)
submission.to_block = submission._serialize_blocks()
        # reason is changing, but no versions are being changed
assert not submission.has_version_changes()
submission.update(changed_version_ids=[new_version.id])
assert submission.has_version_changes()
def test_is_delayed(self):
now = datetime.now()
submission = BlocklistSubmission.objects.create(
signoff_state=BlocklistSubmission.SIGNOFF_AUTOAPPROVED
)
assert not submission.is_delayed
submission.update(delayed_until=now + timedelta(minutes=1))
assert submission.is_delayed
submission.update(delayed_until=now - timedelta(minutes=1))
assert not submission.is_delayed |
656 | active | # Copyright The IETF Trust 2016-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import io
import os
import sys
import time
from pathlib import Path
from textwrap import dedent
from xym import xym
from django.conf import settings
from django.core.management.base import BaseCommand
import debug # pyflakes:ignore
class Command(BaseCommand):
"""
Populate the yang module repositories from drafts and RFCs.
Extracts yang models from RFCs (found in settings.RFC_PATH and places
them in settings.SUBMIT_YANG_RFC_MODEL_DIR, and from active drafts, placed in
settings.SUBMIT_YANG_DRAFT_MODEL_DIR.
"""
help = dedent(__doc__).strip()
def add_arguments(self, parser):
parser.add_argument('--clean',
action='store_true', dest='clean', default=False,
help='Remove the current directory content before writing new models.')
def handle(self, *filenames, **options):
"""
* All yang modules from published RFCs should be extracted and be
available in an rfc-yang repository.
* All valid yang modules from active, not replaced, Internet-Drafts
should be extracted and be available in a draft-valid-yang repository.
* All, valid and invalid, yang modules from active, not replaced,
Internet-Drafts should be available in a draft-all-yang repository.
(Actually, given precedence ordering, it would be enough to place
non-validating modules in a draft-invalid-yang repository instead).
* In all cases, example modules should be excluded.
* Precedence is established by the search order of the repository as
provided to pyang.
* As drafts expire, models should be removed in order to catch cases
where a module being worked on depends on one which has slipped out
of the work queue.
"""
verbosity = int(options.get('verbosity'))
def extract_from(file, dir, strict=True):
saved_stdout = sys.stdout
saved_stderr = sys.stderr
xymerr = io.StringIO()
xymout = io.StringIO()
sys.stderr = xymerr
sys.stdout = xymout
model_list = []
try:
model_list = xym.xym(str(file), str(file.parent), str(dir), strict=strict, debug_level=verbosity-2)
for name in model_list:
modfile = moddir / name
mtime = file.stat().st_mtime
os.utime(str(modfile), (mtime, mtime))
if '"' in name:
name = name.replace('"', '')
modfile.rename(str(moddir/name))
model_list = [ n.replace('"','') for n in model_list ]
except Exception as e:
self.stderr.write("** Error when extracting from %s: %s" % (file, str(e)))
finally:
sys.stdout = saved_stdout
sys.stderr = saved_stderr
#
if verbosity > 1:
outmsg = xymout.getvalue()
if outmsg.strip():
self.stdout.write(outmsg)
if verbosity>2:
errmsg = xymerr.getvalue()
if errmsg.strip():
self.stderr.write(errmsg)
return model_list
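        # xym reports progress on stdout/stderr, so extract_from temporarily
        # swaps them for StringIO buffers and replays them afterwards according
        # to the verbosity level; it also copies the source document's mtime
        # onto each extracted module so the freshness check below keeps working.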
# Extract from new RFCs
rfcdir = Path(settings.RFC_PATH)
moddir = Path(settings.SUBMIT_YANG_RFC_MODEL_DIR)
if not moddir.exists():
moddir.mkdir(parents=True)
latest = 0
for item in moddir.iterdir():
if item.stat().st_mtime > latest:
latest = item.stat().st_mtime
if verbosity > 0:
self.stdout.write("Extracting to %s ..." % moddir)
for item in rfcdir.iterdir():
if item.is_file() and item.name.startswith('rfc') and item.name.endswith('.txt') and item.name[3:-4].isdigit():
if item.stat().st_mtime > latest:
model_list = extract_from(item, moddir)
for name in model_list:
if name.startswith('ietf') or name.startswith('iana'):
if verbosity > 1:
self.stdout.write(" Extracted from %s: %s" % (item, name))
elif verbosity > 0:
self.stdout.write('.', ending='')
self.stdout.flush()
else:
modfile = moddir / name
modfile.unlink()
if verbosity > 1:
self.stdout.write(" Skipped module from %s: %s" % (item, name))
if verbosity > 0:
self.stdout.write("")
# Extract valid modules from drafts
six_months_ago = time.time() - 6*31*24*60*60
def METHOD_NAME(item):
return item.stat().st_mtime > six_months_ago
draftdir = Path(settings.INTERNET_DRAFT_PATH)
moddir = Path(settings.SUBMIT_YANG_DRAFT_MODEL_DIR)
if not moddir.exists():
moddir.mkdir(parents=True)
if verbosity > 0:
self.stdout.write("Emptying %s ..." % moddir)
for item in moddir.iterdir():
item.unlink()
if verbosity > 0:
self.stdout.write("Extracting to %s ..." % moddir)
for item in draftdir.iterdir():
try:
if item.is_file() and item.name.startswith('draft') and item.name.endswith('.txt') and METHOD_NAME(item):
model_list = extract_from(item, moddir, strict=False)
for name in model_list:
if not name.startswith('example'):
if verbosity > 1:
self.stdout.write(" Extracted module from %s: %s" % (item, name))
elif verbosity > 0:
self.stdout.write('.', ending='')
self.stdout.flush()
else:
modfile = moddir / name
modfile.unlink()
if verbosity > 1:
self.stdout.write(" Skipped module from %s: %s" % (item, name))
except UnicodeDecodeError as e:
self.stderr.write('\nError: %s' % (e, ))
self.stderr.write(item.name)
self.stderr.write('')
if verbosity > 0:
self.stdout.write('')
|
657 | is initialized | from hvac.api.system_backend.system_backend_mixin import SystemBackendMixin
from hvac.exceptions import ParamValidationError
class Init(SystemBackendMixin):
def read_init_status(self):
"""Read the initialization status of Vault.
Supported methods:
GET: /sys/init. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
"""
api_path = "/v1/sys/init"
return self._adapter.get(
url=api_path,
)
def METHOD_NAME(self):
"""Determine is Vault is initialized or not.
:return: True if Vault is initialized, False otherwise.
:rtype: bool
"""
status = self.read_init_status()
return status["initialized"]
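    # Minimal usage sketch (assumes an hvac.Client named `client` pointing at a
    # brand-new Vault; the variable names are illustrative):
    #
    #   if not client.sys.METHOD_NAME():
    #       result = client.sys.initialize(secret_shares=5, secret_threshold=3)
    #       unseal_keys, root_token = result["keys"], result["root_token"]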
def initialize(
self,
secret_shares=5,
secret_threshold=3,
pgp_keys=None,
root_token_pgp_key=None,
stored_shares=None,
recovery_shares=None,
recovery_threshold=None,
recovery_pgp_keys=None,
):
"""Initialize a new Vault.
The Vault must not have been previously initialized. The recovery options, as well as the stored shares option,
are only available when using Vault HSM.
Supported methods:
PUT: /sys/init. Produces: 200 application/json
:param secret_shares: The number of shares to split the master key into.
:type secret_shares: int
:param secret_threshold: Specifies the number of shares required to reconstruct the master key. This must be
            less than or equal to secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as
secret_shares.
:type secret_threshold: int
:param pgp_keys: List of PGP public keys used to encrypt the output unseal keys.
Ordering is preserved. The keys must be base64-encoded from their original binary representation.
The size of this array must be the same as secret_shares.
:type pgp_keys: list
:param root_token_pgp_key: Specifies a PGP public key used to encrypt the initial root token. The
key must be base64-encoded from its original binary representation.
:type root_token_pgp_key: str | unicode
:param stored_shares: <enterprise only> Specifies the number of shares that should be encrypted by the HSM and
stored for auto-unsealing. Currently must be the same as secret_shares.
:type stored_shares: int
:param recovery_shares: <enterprise only> Specifies the number of shares to split the recovery key into.
:type recovery_shares: int
:param recovery_threshold: <enterprise only> Specifies the number of shares required to reconstruct the recovery
key. This must be less than or equal to recovery_shares.
:type recovery_threshold: int
:param recovery_pgp_keys: <enterprise only> Specifies an array of PGP public keys used to encrypt the output
recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary
representation. The size of this array must be the same as recovery_shares.
:type recovery_pgp_keys: list
:return: The JSON response of the request.
:rtype: dict
"""
params = {
"secret_shares": secret_shares,
"secret_threshold": secret_threshold,
"root_token_pgp_key": root_token_pgp_key,
}
if pgp_keys is not None:
if len(pgp_keys) != secret_shares:
raise ParamValidationError(
"length of pgp_keys list argument must equal secret_shares value"
)
params["pgp_keys"] = pgp_keys
if stored_shares is not None:
if stored_shares != secret_shares:
raise ParamValidationError(
"value for stored_shares argument must equal secret_shares argument"
)
params["stored_shares"] = stored_shares
if recovery_shares is not None:
params["recovery_shares"] = recovery_shares
if recovery_threshold is not None:
if recovery_threshold > recovery_shares:
error_msg = "value for recovery_threshold argument be less than or equal to recovery_shares argument"
raise ParamValidationError(error_msg)
params["recovery_threshold"] = recovery_threshold
if recovery_pgp_keys is not None:
if len(recovery_pgp_keys) != recovery_shares:
raise ParamValidationError(
"length of recovery_pgp_keys list argument must equal recovery_shares value"
)
params["recovery_pgp_keys"] = recovery_pgp_keys
api_path = "/v1/sys/init"
return self._adapter.put(
url=api_path,
json=params,
) |
658 | set up | # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.providers.aws.aws_dpb_emr."""
import copy
import json
from typing import Any, Optional
import unittest
from unittest import mock
from absl import flags
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import aws_dpb_glue
from perfkitbenchmarker.providers.aws import aws_dpb_glue_prices
from tests import pkb_common_test_case
TEST_RUN_URI = 'fakeru'
AWS_ZONE_US_EAST_1A = 'us-east-1a'
FLAGS = flags.FLAGS
_BASE_JOB_RUN_PAYLOAD = {
'JobRun': {
'Id': 'jr_01234567890abcdef',
'Attempt': 0,
'JobName': 'pkb-deadbeef-0',
'StartedOn': 1675103057.784,
'LastModifiedOn': 1675105738.096,
'CompletedOn': 1675105738.096,
'JobRunState': 'SUCCEEDED',
'Arguments': {
'--pkb_main': 'hello',
'--pkb_args': '[]'
},
'PredecessorRuns': [],
'AllocatedCapacity': 32,
'ExecutionTime': 2672,
'Timeout': 2880,
'MaxCapacity': 32.0,
'WorkerType': 'G.2X',
'NumberOfWorkers': 4,
'LogGroupName': '/aws-glue/jobs',
'GlueVersion': '3.0'
}
}
def _GetJobRunMockPayload(
dpu_seconds: Optional[float],
max_capacity: Optional[float],
execution_time: Optional[float]
) -> dict[str, Any]:
payload = copy.deepcopy(_BASE_JOB_RUN_PAYLOAD)
if dpu_seconds is not None:
payload['JobRun']['DPUSeconds'] = dpu_seconds
if max_capacity is not None:
payload['JobRun']['MaxCapacity'] = max_capacity
if execution_time is not None:
payload['JobRun']['ExecutionTime'] = execution_time
return payload
GLUE_SPEC = mock.Mock(
static_dpb_service_instance=None,
version='3.0',
worker_count=4,
worker_group=mock.Mock(vm_spec=mock.Mock(machine_type='G.2X')))
class AwsDpbEmrTestCase(pkb_common_test_case.PkbCommonTestCase):
def METHOD_NAME(self):
super(AwsDpbEmrTestCase, self).METHOD_NAME()
FLAGS.run_uri = TEST_RUN_URI
FLAGS.dpb_service_zone = AWS_ZONE_US_EAST_1A
FLAGS.zones = [AWS_ZONE_US_EAST_1A]
self.issue_cmd_mock = self.enter_context(
mock.patch.object(vm_util, 'IssueCommand', autospec=True))
def testGlueCalculateLastJobCost(self):
dpb_glue = aws_dpb_glue.AwsDpbGlue(GLUE_SPEC)
create_job_response = {'Name': 'pkb-deadbeef-0'}
start_job_run_response = {'JobRunId': 'jr_01234567890abcdef'}
self.issue_cmd_mock.side_effect = [
(json.dumps(create_job_response), '', 0),
(json.dumps(start_job_run_response), '', 0),
(json.dumps(
_GetJobRunMockPayload(dpu_seconds=None, max_capacity=32.0,
execution_time=2672)), '', 0)
]
with mock.patch.object(aws_dpb_glue_prices, 'GLUE_PRICES'):
# The actual prices are expected to change over time, but we don't want to
# update the test every time.
aws_dpb_glue_prices.GLUE_PRICES = {'us-east-1': 0.44}
dpb_glue.SubmitJob(
pyspark_file='s3://test/hello.py',
job_type=dpb_service.BaseDpbService.PYSPARK_JOB_TYPE,
job_arguments=[])
self.assertEqual(dpb_glue.CalculateLastJobCost(), 10.45048888888889)
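# Reader note (added, not original): the asserted figure follows from the
# mocked payload and the patched price table above:
# 32 DPUs * (2672 s / 3600 s per hour) * 0.44 $/DPU-hour == 10.45048888888889.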
def testGluePricesSchema(self):
for region, price in aws_dpb_glue_prices.GLUE_PRICES.items():
self.assertIsInstance(region, str)
self.assertIsInstance(price, float)
if __name__ == '__main__':
unittest.main() |
659 | stop | import sys
import datetime
import asyncio
import traceback
from aiohttp_json_rpc import JsonRpcClient
class WorkerClient(JsonRpcClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_methods(
("", self.start_job),
)
self.current_job = None
self._id = None
def set_id(self, worker_id):
self._id = worker_id
async def start_job(self, job_data):
if self.current_job is not None:
return False
print("Got new job {}".format(str(job_data)))
self.current_job = job_data
return True
def finish_job(self, success, message, data):
asyncio.ensure_future(
self._finish_job(success, message, data),
loop=self._loop
)
async def _finish_job(self, success, message, data):
print("Current job", self.current_job)
job_id = self.current_job["job_id"]
self.current_job = None
return await self.call(
"job_done", [self._id, job_id, success, message, data]
)
class WorkerJobsConnection:
"""WS connection to Job server.
Helper class to create a connection to process jobs from job server.
To receive jobs, create a connection and then register it
as a worker for a specific host.
"""
retry_time_seconds = 5
def __init__(self, server_url, host_name, loop=None):
self.client = None
self._loop = loop
self._host_name = host_name
self._server_url = server_url
self._is_running = False
self._connecting = False
self._connected = False
self._stopped = False
def METHOD_NAME(self):
print("Stopping worker")
self._stopped = True
@property
def is_running(self):
return self._is_running
@property
def current_job(self):
if self.client is not None:
return self.client.current_job
return None
def finish_job(self, success=True, message=None, data=None):
"""Worker finished job and sets the result which is send to server."""
if self.client is None:
print((
"Couldn't sent job status to server because"
" client is not connected."
))
else:
self.client.finish_job(success, message, data)
async def main_loop(self, register_worker=True):
"""Main loop of connection which keep connection to server alive."""
self._is_running = True
while not self._stopped:
start_time = datetime.datetime.now()
await self._connection_loop(register_worker)
delta = datetime.datetime.now() - start_time
print("Connection loop took {}s".format(str(delta)))
# Check if was stopped and stop while loop in that case
if self._stopped:
break
if delta.seconds < 60:
print((
"Can't connect to server will try in {} seconds."
).format(self.retry_time_seconds))
await asyncio.sleep(self.retry_time_seconds)
self._is_running = False
async def _connect(self):
self.client = WorkerClient()
print("Connecting to {}".format(self._server_url))
try:
await self.client.connect_url(self._server_url)
except KeyboardInterrupt:
raise
except Exception:
traceback.print_exception(*sys.exc_info())
async def _connection_loop(self, register_worker):
self._connecting = True
future = asyncio.run_coroutine_threadsafe(
self._connect(), loop=self._loop
)
while self._connecting:
if not future.done():
await asyncio.sleep(0.07)
continue
session = getattr(self.client, "_session", None)
ws = getattr(self.client, "_ws", None)
if session is not None:
if session.closed:
self._connecting = False
self._connected = False
break
elif ws is not None:
self._connecting = False
self._connected = True
if self._stopped:
break
await asyncio.sleep(0.07)
if not self._connected:
self.client = None
return
print("Connected to job queue server")
if register_worker:
self.register_as_worker()
while self._connected and self._loop.is_running():
if self._stopped or ws.closed:
break
await asyncio.sleep(0.3)
await self._stop_cleanup()
def register_as_worker(self):
"""Register as worker ready to work on server side."""
asyncio.ensure_future(self._register_as_worker(), loop=self._loop)
async def _register_as_worker(self):
worker_id = await self.client.call(
"register_worker", [self._host_name]
)
self.client.set_id(worker_id)
print(
"Registered as worker with id {}".format(worker_id)
)
async def disconnect(self):
await self._stop_cleanup()
async def _stop_cleanup(self):
print("Cleanup after stop")
if self.client is not None and hasattr(self.client, "_ws"):
await self.client.disconnect()
self.client = None
self._connecting = False
self._connected = False |
660 | parse statement |
# Copyright 2008-2014 Jaap Karssenberg <[email protected]>
'''This module contains the parser to parse expressions in templates
and returns an L{Expression} object.
'''
import re
import operator
import ast
from zim.parser import ParserError
from zim.templates.expression import \
ExpressionOperator, ExpressionUnaryOperator, \
ExpressionLiteral, ExpressionParameter, \
ExpressionList, ExpressionFunctionCall
class ExpressionSyntaxError(ParserError):
pass
class ExpressionParser(object):
'''Parser for expressions'''
# This parser does not use the Parser / Builder architecture
# as the expression format is not really suited for this kind
# of block parser
# Instead it is a simple parser that first splits text in tokens
# and then consumes those tokens left-to-right while building
# an object tree.
# TODO keep character count to raise meaningful errors
# Operator precedence: or -- and -- not -- <, <=, >, >=, <>, !=, ==
operators = {
'==': operator.eq,
'!=': operator.ne,
'>': operator.gt,
'>=': operator.ge,
'<': operator.lt,
'<=': operator.le,
'and': operator.and_,
'or': operator.or_,
'not': operator.not_, # special case - unary operator
}
tokens = [',', '[', ']', '(', ')'] \
+ [k for k in list(operators.keys()) if not k.isalnum()]
# Only including NON-alphanumeric operators here
_param_re = re.compile(r'^[^\W\d_]\w*(\.[^\W_]\w*)*$')
# like "name.name" but first char can not be "_"
# digits are allowed after dot, since template assumes dot notation for list index as well..
# FIXME for generic use make this configurable / subclass template specific version
def __init__(self):
tokens = list(map(re.escape, self.tokens))
self._word_re = re.compile(
r'''(
'(\\'|[^'])*' | # single quoted word
"(\\"|[^"])*" | # double quoted word
[^\s'"%s]+ | # word without spaces and token chars
%s # tokens are a word on their own
)''' % (''.join(tokens), '|'.join(tokens)), re.X
)
def parse(self, string):
'''Parse an expression
@param string: the expression text
@returns: an L{Expression} object
'''
tokens = self._tokenize(string)
expr = self._parse(tokens)
if tokens: # trailing stuff remaining
raise ExpressionSyntaxError('Unexpected text after expression: %s' % tokens)
return expr
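# Illustrative sketch (not in the original source): parsing a template
# expression returns an Expression tree that can later be evaluated against a
# parameter dict, e.g.
#
#     expr = ExpressionParser().parse('page.title == "Home" or loop.first')
#
# The dotted names "page.title" and "loop.first" are assumed examples of the
# parameter syntax accepted by _param_re, not names defined in this module.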
def _tokenize(self, string):
# custom version of split_quoted_strings
string = string.strip()
words = []
m = self._word_re.match(string)
while m:
words.append(m.group(0))
i = m.end()
string = string[i:].lstrip()
m = self._word_re.match(string)
assert not string, '>> %s' % string
return words
def _parse(self, tokens):
# Operator precedence: or, and, not, <, <=, >, >=, <>, !=, ==
# so we start with parsing " ... or .. or .." and descend from there
lexpr = self._parse_and(tokens)
if tokens and tokens[0] == 'or':
tokens.pop(0)
rexpr = self._parse(tokens) # recurs
return ExpressionOperator(operator.or_, lexpr, rexpr)
else:
return lexpr
def _parse_and(self, tokens):
# Handle "... and ... and ..."
lexpr = self._parse_not(tokens)
if tokens and tokens[0] == 'and':
tokens.pop(0)
rexpr = self._parse_and(tokens) # recurs
return ExpressionOperator(operator.and_, lexpr, rexpr)
else:
return lexpr
def _parse_not(self, tokens):
# Handle "not ..."
if not tokens:
raise ExpressionSyntaxError('Unexpected end of expression')
if tokens[0] == 'not':
tokens.pop(0)
rexpr = self._parse_comparison(tokens)
return ExpressionUnaryOperator(operator.not_, rexpr)
else:
return self._parse_comparison(tokens)
def _parse_comparison(self, tokens):
# Handle "... op ..." where op is: <, <=, >, >=, <>, !=, ==
lexpr = self.METHOD_NAME(tokens)
if tokens and tokens[0] in self.operators \
and tokens[0] not in ('or', 'and', 'not'):
op = tokens.pop(0)
rexpr = self.METHOD_NAME(tokens)
return ExpressionOperator(self.operators[op], lexpr, rexpr)
else:
return lexpr
def METHOD_NAME(self, tokens):
# Handle: param, func call or literal
if not tokens:
raise ExpressionSyntaxError('Unexpected end of expression')
if tokens[0] == '[':
return self._parse_list(tokens)
elif tokens[0] in self.tokens \
or tokens[0] in ('or', 'and', 'not'):
raise ExpressionSyntaxError('Unexpected token: "%s"' % tokens[0])
elif self._param_re.match(tokens[0]) \
and not tokens[0] in ('True', 'False', 'None'):
param = ExpressionParameter(tokens.pop(0))
if tokens and tokens[0] == '(':
args = self._parse_list(tokens)
return ExpressionFunctionCall(param, args)
else:
return param
else:
text = tokens.pop(0)
try:
value = ast.literal_eval(text)
except SyntaxError:
raise ExpressionSyntaxError('Invalid literal: %s' % text)
else:
return ExpressionLiteral(value)
def _parse_list(self, tokens):
# Process left to right, allow descending in sub lists
assert tokens[0] in ('[', '(')
delim = ']' if tokens.pop(0) == '[' else ')'
expr = ExpressionList()
while tokens and tokens[0] != delim:
item = self._parse(tokens)
if tokens and tokens[0] != delim:
if tokens.pop(0) != ',':
raise ExpressionSyntaxError('Expected: ","')
expr.append(item)
if not tokens or tokens[0] != delim:
raise ExpressionSyntaxError('Missing: "%s"' % delim)
else:
tokens.pop(0)
return expr
|
661 | test object equal self | import unittest
import collections
class TestSegment(unittest.TestCase):
def getSegment_line(self):
contour, unrequested = self.objectGenerator("contour")
unrequested.append(contour)
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
segment = contour[1]
return segment
# ----
# Type
# ----
def test_type_get(self):
segment = self.getSegment_line()
self.assertEqual(
segment.type,
"line"
)
def test_set_move(self):
segment = self.getSegment_line()
segment.type = "move"
self.assertEqual(
segment.type,
"move"
)
def test_len_move(self):
segment = self.getSegment_line()
segment.type = "move"
self.assertEqual(
len(segment.points),
1
)
def test_oncurve_type_move(self):
segment = self.getSegment_line()
segment.type = "move"
self.assertEqual(
segment.onCurve.type,
"move"
)
def test_oncurve_x_y(self):
segment = self.getSegment_line()
segment.type = "move"
self.assertEqual(
(segment.onCurve.x, segment.onCurve.y),
(101, 202)
)
def test_set_curve(self):
segment = self.getSegment_line()
segment.type = "curve"
self.assertEqual(
segment.type,
"curve"
)
def test_len_curve(self):
segment = self.getSegment_line()
segment.type = "curve"
self.assertEqual(
len(segment.points),
3
)
def test_curve_pt_types(self):
segment = self.getSegment_line()
segment.type = "curve"
types = tuple(point.type for point in segment.points)
self.assertEqual(
types,
("offcurve", "offcurve", "curve")
)
def test_curve_pt_x_y(self):
segment = self.getSegment_line()
segment.type = "curve"
coordinates = tuple((point.x, point.y) for point in segment.points)
self.assertEqual(
coordinates,
((0, 0), (101, 202), (101, 202))
)
def test_set_qcurve(self):
segment = self.getSegment_line()
segment.type = "qcurve"
self.assertEqual(
segment.type,
"qcurve"
)
def test_len_qcurve(self):
segment = self.getSegment_line()
segment.type = "qcurve"
self.assertEqual(
len(segment.points),
3
)
def test_qcurve_pt_types(self):
segment = self.getSegment_line()
segment.type = "qcurve"
types = tuple(point.type for point in segment.points)
self.assertEqual(
types,
("offcurve", "offcurve", "qcurve")
)
def test_qcurve_pt_x_y(self):
segment = self.getSegment_line()
segment.type = "qcurve"
coordinates = tuple((point.x, point.y) for point in segment.points)
self.assertEqual(
coordinates,
((0, 0), (101, 202), (101, 202))
)
def test_set_invalid_segment_type_string(self):
segment = self.getSegment_line()
with self.assertRaises(ValueError):
segment.type = "xxx"
def test_set_invalid_segment_type_int(self):
segment = self.getSegment_line()
with self.assertRaises(TypeError):
segment.type = 123
def test_offCurve_only_segment(self):
contour, unrequested = self.objectGenerator("contour")
unrequested.append(contour)
contour.appendPoint((0, 0), "offcurve")
contour.appendPoint((100, 0), "offcurve")
contour.appendPoint((100, 100), "offcurve")
contour.appendPoint((0, 100), "offcurve")
segment = contour[0]
self.assertEqual(
len(contour),
1
)
# onCurve is a dummy None value, indicating this is an on-curve-less quad blob
self.assertIsNone(
segment.onCurve,
)
self.assertEqual(
segment.points,
segment.offCurve
)
self.assertEqual(
segment.type,
"qcurve"
)
# ----
# Hash
# ----
def test_hash(self):
segment = self.getSegment_line()
self.assertEqual(
isinstance(segment, collections.abc.Hashable),
False
)
# --------
# Equality
# --------
def METHOD_NAME(self):
segment_one = self.getSegment_line()
self.assertEqual(
segment_one,
segment_one
)
def test_object_not_equal_other(self):
segment_one = self.getSegment_line()
segment_two = self.getSegment_line()
self.assertNotEqual(
segment_one,
segment_two
)
def test_object_equal_self_variable_assignment(self):
segment_one = self.getSegment_line()
a = segment_one
self.assertEqual(
segment_one,
a
)
def test_object_not_equal_other_variable_assignment(self):
segment_one = self.getSegment_line()
segment_two = self.getSegment_line()
a = segment_one
self.assertNotEqual(
segment_two,
a
)
# ---------
# Selection
# ---------
def test_selected_true(self):
segment = self.getSegment_line()
try:
segment.selected = False
except NotImplementedError:
return
segment.selected = True
self.assertEqual(
segment.selected,
True
)
def test_selected_false(self):
segment = self.getSegment_line()
try:
segment.selected = False
except NotImplementedError:
return
self.assertEqual(
segment.selected,
False
) |
662 | save | from copy import deepcopy
from os.path import join, realpath
from typing import Union, Dict
import shapely.wkb
from shapely.geometry import Point, Polygon, LineString, MultiLineString
from shapely.ops import unary_union
from aequilibrae.project.basic_table import BasicTable
from aequilibrae.project.project_creation import run_queries_from_sql_file
from aequilibrae.project.table_loader import TableLoader
from aequilibrae.utils.geo_index import GeoIndex
from aequilibrae.project.zone import Zone
class Zoning(BasicTable):
"""
Access to the API resources to manipulate the zones table in the project
.. code-block:: python
>>> from aequilibrae import Project
>>> project = Project.from_path("/tmp/test_project")
>>> zoning = project.zoning
>>> zone_downtown = zoning.get(1)
>>> zone_downtown.population = 637
>>> zone_downtown.employment = 10039
>>> zone_downtown.save()
# changing the value for an existing value/field
>>> project.about.scenario_name = 'Just a better scenario name'
>>> project.about.write_back()
# We can also add one more field to the table
>>> fields = zoning.fields
>>> fields.add('parking_spots', 'Total licensed parking spots', 'INTEGER')
"""
def __init__(self, network):
super().__init__(network.project)
self.__items: Dict[int, Zone] = {}
self.network = network
self.__table_type__ = "zones"
self.__fields = []
self.__geo_index = GeoIndex()
if self.__has_zoning():
self.__load()
def new(self, zone_id: int) -> Zone:
"""Creates a new zone
:Returns:
**zone** (:obj:`Zone`): A new zone object populated only with zone_id (but not saved in the model yet)
"""
if zone_id in self.__items:
raise Exception(f"Zone ID {zone_id} already exists")
data = {key: None for key in self.__fields}
data["zone_id"] = zone_id
self.project.logger.info(f"Zone with id {zone_id} was created")
return self.__create_return_zone(data)
def create_zoning_layer(self):
"""Creates the 'zones' table for project files that did not previously contain it"""
if not self.__has_zoning():
qry_file = join(realpath(__file__), "database_specification", "tables", "zones.sql")
run_queries_from_sql_file(self.conn, qry_file)
self.__load()
else:
self.project.warning("zones table already exists. Nothing was done", Warning)
def coverage(self) -> Polygon:
"""Returns a single polygon for the entire zoning coverage
:Returns:
**model coverage** (:obj:`Polygon`): Shapely (Multi)polygon of the zoning system.
"""
self._curr.execute('Select ST_asBinary("geometry") from zones;')
polygons = [shapely.wkb.loads(x[0]) for x in self._curr.fetchall()]
return unary_union(polygons)
def get(self, zone_id: str) -> Zone:
"""Get a zone from the model by its **zone_id**"""
if zone_id not in self.__items:
raise ValueError(f"Zone {zone_id} does not exist in the model")
return self.__items[zone_id]
def all_zones(self) -> dict:
"""Returns a dictionary with all Zone objects available in the model. zone_id as key"""
return self.__items
def METHOD_NAME(self):
for item in self.__items.values():
item.METHOD_NAME()
def get_closest_zone(self, geometry: Union[Point, LineString, MultiLineString]) -> int:
"""Returns the zone in which the given geometry is located.
If the geometry is not fully enclosed by any zone, the zone closest to
the geometry is returned
:Arguments:
**geometry** (:obj:`Point` or :obj:`LineString`): A Shapely geometry object
:Return:
**zone_id** (:obj:`int`): ID of the zone applicable to the point provided
"""
nearest = self.__geo_index.nearest(geometry, 10)
dists = {}
for zone_id in nearest:
geo = self.__items[zone_id].geometry
if geo.contains(geometry):
return zone_id
dists[geo.distance(geometry)] = zone_id
return dists[min(dists.keys())]
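# Minimal sketch of intended use (added for clarity, not original code),
# assuming ``zoning`` is the project's Zoning instance as in the class
# docstring:
#
#     from shapely.geometry import Point
#     zone_id = zoning.get_closest_zone(Point(-43.2, -22.9))
#
# The coordinates are placeholders; any Shapely geometry in the model's CRS
# works.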
def refresh_geo_index(self):
self.__geo_index.reset()
for zone_id, zone in self.__items.items():
self.__geo_index.insert(feature_id=zone_id, geometry=zone.geometry)
def __has_zoning(self):
curr = self.conn.cursor()
curr.execute("SELECT name FROM sqlite_master WHERE type='table';")
return any(["zone" in x[0].lower() for x in curr.fetchall()])
def __load(self):
tl = TableLoader()
zones_list = tl.load_table(self._curr, "zones")
self.__fields = deepcopy(tl.fields)
existing_list = [zn["zone_id"] for zn in zones_list]
if zones_list:
self.__properties = list(zones_list[0].keys())
for zn in zones_list:
if zn["zone_id"] not in self.__items:
self.__items[zn["zone_id"]] = Zone(zn, self)
to_del = [key for key in self.__items.keys() if key not in existing_list]
for key in to_del:
del self.__items[key]
self.refresh_geo_index()
def _remove_zone(self, zone_id: int):
del self.__items[zone_id]
def __create_return_zone(self, data):
zone = Zone(data, self)
self.__items[zone.zone_id] = zone
return zone |
663 | gen random input fn | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import os
import sys
import types
import numpy as np
import tensorflow as tf
def check_images(image_dir, image_list, iterations, batch_size):
"""Check images validation"""
if not tf.gfile.Exists(image_list):
raise ValueError("Cannot find image_list file {}.".format(image_list))
text = open(image_list).readlines()
print(
"Total images for calibration: {}\ncalib_iter: {}\nbatch_size: {}".format(
len(text), iterations, batch_size))
if (len(text) < iterations * batch_size):
raise RuntimeError(
"calib_iter * batch_size > number of images, please decrease calib_iter or batch_size"
)
def convert_bgr_to_rgb(image):
"""Convert BGR to RGB"""
B, G, R = cv2.split(image)
return cv2.merge([R, G, B])
def means_subtraction(image, means):
"""Subtract image means for RGB channels"""
B, G, R = cv2.split(image)
R = R - means[0]
G = G - means[1]
B = B - means[2]
return cv2.merge([B, G, R])
def scale_image(image, scales):
"""scale image, often used to normalize image"""
B, G, R = cv2.split(image)
B = B * scales[0]
G = G * scales[1]
R = R * scales[2]
return cv2.merge([B, G, R])
def central_crop(image, crop_height, crop_width):
"""Central crop image"""
image_height = image.shape[0]
image_width = image.shape[1]
offset_height = (image_height - crop_height) // 2
offset_width = (image_width - crop_width) // 2
return image[offset_height:offset_height +
crop_height, offset_width:offset_width + crop_width]
def resize(image, image_height, image_width):
"""Resize image"""
return cv2.resize(
image, (image_height, image_width), interpolation=cv2.INTER_NEAREST)
def normalize_image(image):
"""Normalize image from [0, 255] to [-1, 1]"""
image = image / 255.0
image = 2 * (image - 0.5)
return image
def gen_imagenet_input_fn(input_nodes, image_dir, image_list, calib_iter,
batch_size, image_height, image_width, size_type,
means, scales, normalize):
"""Generate imagenet input_fn"""
check_images(image_dir, image_list, calib_iter, batch_size)
def imagenet_input_fn(iter):
"""imagenet input function to load image and do preprocessing for quantize calibraton,
as the calibration process do not need labels, the return value only contain
images without labels"""
if len(input_nodes) != 1:
raise ValueError(
"Default input_fn only support single input network, but {} found.".
format(len(input_nodes)))
text = open(image_list).readlines()
images = []
for i in range(0, batch_size):
image_name = text[iter + i].split(' ')[0]
image_file = os.path.join(image_dir, image_name.strip())
if not tf.gfile.Exists(image_file):
raise ValueError("Cannot find image file {}.".format(image_file))
image = cv2.imread(image_file)
if size_type == 0:
image = central_crop(image, image_height, image_width)
elif size_type == 1:
image = resize(image, image_height, image_width)
else:
raise ValueError("Invalid size_type")
image = means_subtraction(image, means)
if scales != 1:
image = scale_image(image, scales)
if normalize:
image = normalize_image(image)
image = convert_bgr_to_rgb(image)
images.append(image)
return {input_nodes[0]: images}
return imagenet_input_fn
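# Reader note (added, not part of the original source): an input_fn built by
# the generators in this file returns a feed_dict mapping input node names to
# a batch of preprocessed arrays, so a calibration driver might do roughly:
#
#     input_fn = gen_imagenet_input_fn(["input"], image_dir, image_list, 10,
#                                      32, 224, 224, 1, [104, 117, 123], 1, False)
#     feed_dict = input_fn(0)   # {"input": [32 preprocessed images]}
#
# The node name, sizes, means and other concrete values above are illustrative
# assumptions only.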
def METHOD_NAME(input_nodes, input_shapes, input_dtypes):
"""Generate random input_fn"""
def random_input_fn(iter):
feed_dict = dict()
for input_node, input_shape, input_dtype in zip(input_nodes, input_shapes,
input_dtypes):
in_shape = input_shape.copy()
if input_shape[0] is None or input_shape[0] == -1:
in_shape[0] = 1
dtype_range = {
np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.int64: (-2**63, 2**63 - 1),
np.uint64: (0, 2**64 - 1),
np.int32: (-2**31, 2**31 - 1),
np.uint32: (0, 2**32 - 1),
np.float32: (-1, 1),
np.float64: (-1, 1)
}
min_value = dtype_range[np.dtype(input_dtype).type][0]
max_value = dtype_range[np.dtype(input_dtype).type][1]
feed_dict[input_node] = np.random.random(in_shape) * (
max_value - min_value) + min_value
return feed_dict
return random_input_fn |
664 | constant shift log scale fn | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MaskedAutoregressiveFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import real_nvp_default_template
from tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import RealNVP
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
class RealNVPTest(test_util.VectorDistributionTestHelpers, test.TestCase):
@property
def _real_nvp_kwargs(self):
return {
"shift_and_log_scale_fn": real_nvp_default_template(
hidden_layers=[3], shift_only=False),
"is_constant_jacobian": False,
}
def testBijector(self):
x_ = np.arange(3 * 4 * 2).astype(np.float32).reshape(3, 4 * 2)
with self.cached_session() as sess:
nvp = RealNVP(
num_masked=4,
validate_args=True,
**self._real_nvp_kwargs)
x = constant_op.constant(x_)
forward_x = nvp.forward(x)
# Use identity to invalidate cache.
inverse_y = nvp.inverse(array_ops.identity(forward_x))
forward_inverse_y = nvp.forward(inverse_y)
fldj = nvp.forward_log_det_jacobian(x, event_ndims=1)
# Use identity to invalidate cache.
ildj = nvp.inverse_log_det_jacobian(
array_ops.identity(forward_x), event_ndims=1)
variables.global_variables_initializer().run()
[
forward_x_,
inverse_y_,
forward_inverse_y_,
ildj_,
fldj_,
] = sess.run([
forward_x,
inverse_y,
forward_inverse_y,
ildj,
fldj,
])
self.assertEqual("real_nvp", nvp.name)
self.assertAllClose(forward_x_, forward_inverse_y_, rtol=1e-1, atol=0.)
self.assertAllClose(x_, inverse_y_, rtol=1e-1, atol=0.)
self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)
def testMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
nvp = RealNVP(
num_masked=3,
validate_args=True,
**self._real_nvp_kwargs)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=nvp,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
nvp = Invert(RealNVP(
num_masked=3,
validate_args=True,
**self._real_nvp_kwargs))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=nvp,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
class NICETest(RealNVPTest):
@property
def _real_nvp_kwargs(self):
return {
"shift_and_log_scale_fn": real_nvp_default_template(
hidden_layers=[2], shift_only=True),
"is_constant_jacobian": True,
}
class RealNVPConstantShiftScaleTest(RealNVPTest):
@property
def _real_nvp_kwargs(self):
def METHOD_NAME(x0, output_units):
del x0, output_units
shift = constant_op.constant([0.1])
log_scale = constant_op.constant([0.5])
return shift, log_scale
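# Reader note (added, not original): with a constant shift of 0.1 and constant
# log-scale of 0.5, the unmasked coordinates transform roughly as
# y = x * exp(0.5) + 0.1 under the usual RealNVP coupling, so the Jacobian is
# constant -- which is what is_constant_jacobian=True below declares. This is
# a reading of standard RealNVP behaviour, not something verified here.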
return {
"shift_and_log_scale_fn": METHOD_NAME,
"is_constant_jacobian": True,
}
if __name__ == "__main__":
test.main() |
665 | test deactivate preorder for variant | from django.db.models.aggregates import Sum
from ...product.models import ProductVariantChannelListing
from ..management import deactivate_preorder_for_variant
from ..models import Allocation, PreorderAllocation, Stock
def METHOD_NAME(
preorder_variant_global_and_channel_threshold,
preorder_allocation,
shipping_method_channel_PLN,
):
variant = preorder_variant_global_and_channel_threshold
order = preorder_allocation.order_line.order
order.shipping_method = shipping_method_channel_PLN
order.save(update_fields=["shipping_method"])
channel_listings_id = ProductVariantChannelListing.objects.filter(
variant_id=variant.pk
).values_list("id", flat=True)
preorder_allocations_before = PreorderAllocation.objects.filter(
product_variant_channel_listing_id__in=channel_listings_id
).count()
assert preorder_allocations_before > 0
allocations_before = Allocation.objects.filter(
stock__product_variant_id=variant.pk
).count()
# Allocations and stocks will be created
assert variant.stocks.count() == 0
deactivate_preorder_for_variant(variant)
assert (
PreorderAllocation.objects.filter(
product_variant_channel_listing_id__in=channel_listings_id
).count()
== 0
)
assert (
Allocation.objects.filter(stock__product_variant_id=variant.pk).count()
== allocations_before + preorder_allocations_before
)
variant.refresh_from_db()
stock = variant.stocks.first()
assert (
stock.quantity_allocated
== stock.allocations.aggregate(Sum("quantity_allocated"))[
"quantity_allocated__sum"
]
)
assert variant.is_preorder is False
assert variant.preorder_global_threshold is None
assert variant.preorder_end_date is None
channel_listings = ProductVariantChannelListing.objects.filter(
variant_id=variant.pk
)
for channel_listing in channel_listings:
assert channel_listing.preorder_quantity_threshold is None
def test_deactivate_preorder_for_variant_order_without_shipping_method(
preorder_variant_global_and_channel_threshold,
preorder_allocation,
):
"""When order has no shiping method set,
use warehouse based on country from address."""
variant = preorder_variant_global_and_channel_threshold
order = preorder_allocation.order_line.order
assert order.shipping_method is None
channel_listings_id = ProductVariantChannelListing.objects.filter(
variant_id=variant.pk
).values_list("id", flat=True)
preorder_allocations_before = PreorderAllocation.objects.filter(
product_variant_channel_listing_id__in=channel_listings_id
).count()
assert preorder_allocations_before > 0
allocations_before = Allocation.objects.filter(
stock__product_variant_id=variant.pk
).count()
deactivate_preorder_for_variant(variant)
assert (
PreorderAllocation.objects.filter(
product_variant_channel_listing_id__in=channel_listings_id
).count()
== 0
)
assert (
Allocation.objects.filter(stock__product_variant_id=variant.pk).count()
== allocations_before + preorder_allocations_before
)
def test_deactivate_preorder_for_variant_existing_stock(
preorder_variant_global_and_channel_threshold,
preorder_allocation,
shipping_method_channel_PLN,
warehouse,
):
variant = preorder_variant_global_and_channel_threshold
order = preorder_allocation.order_line.order
order.shipping_method = shipping_method_channel_PLN
order.save(update_fields=["shipping_method"])
stock = Stock.objects.create(
warehouse=warehouse, product_variant=variant, quantity=0
)
channel_listings_id = ProductVariantChannelListing.objects.filter(
variant_id=variant.pk
).values_list("id", flat=True)
preorder_allocations_before = PreorderAllocation.objects.filter(
product_variant_channel_listing_id__in=channel_listings_id
).count()
assert preorder_allocations_before > 0
allocations_before = Allocation.objects.filter(
stock__product_variant_id=variant.pk
).count()
# Existing stock will be used
assert variant.stocks.count() > 0
deactivate_preorder_for_variant(variant)
stock.refresh_from_db()
assert (
PreorderAllocation.objects.filter(
product_variant_channel_listing_id__in=channel_listings_id
).count()
== 0
)
assert (
Allocation.objects.filter(stock__product_variant_id=variant.pk).count()
== allocations_before + preorder_allocations_before
)
assert (
stock.quantity_allocated
== stock.allocations.aggregate(Sum("quantity_allocated"))[
"quantity_allocated__sum"
]
) |
666 | pos tag | # Natural Language Toolkit: Taggers
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (minor additions)
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
NLTK Taggers
This package contains classes and interfaces for part-of-speech
tagging, or simply "tagging".
A "tag" is a case-sensitive string that specifies some property of a token,
such as its part of speech. Tagged tokens are encoded as tuples
``(tag, token)``. For example, the following tagged token combines
the word ``'fly'`` with a noun part of speech tag (``'NN'``):
>>> tagged_tok = ('fly', 'NN')
An off-the-shelf tagger is available for English. It uses the Penn Treebank tagset:
>>> from nltk import pos_tag, word_tokenize
>>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE
[('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
A Russian tagger is also available if you specify lang="rus". It uses
the Russian National Corpus tagset:
>>> pos_tag(word_tokenize("Илья оторопел и дважды перечитал бумажку."), lang='rus') # doctest: +SKIP
[('Илья', 'S'), ('оторопел', 'V'), ('и', 'CONJ'), ('дважды', 'ADV'), ('перечитал', 'V'),
('бумажку', 'S'), ('.', 'NONLEX')]
This package defines several taggers, which take a list of tokens,
assign a tag to each one, and return the resulting list of tagged tokens.
Most of the taggers are built automatically based on a training corpus.
For example, the unigram tagger tags each word *w* by checking what
the most frequent tag for *w* was in a training corpus:
>>> from nltk.corpus import brown
>>> from nltk.tag import UnigramTagger
>>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
>>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment']
>>> for word, tag in tagger.tag(sent):
... print(word, '->', tag)
Mitchell -> NP
decried -> None
the -> AT
high -> JJ
rate -> NN
of -> IN
unemployment -> None
Note that words that the tagger has not seen during training receive a tag
of ``None``.
We evaluate a tagger on data that was not seen during training:
>>> round(tagger.accuracy(brown.tagged_sents(categories='news')[500:600]), 3)
0.735
For more information, please consult chapter 5 of the NLTK Book.
isort:skip_file
"""
from nltk.tag.api import TaggerI
from nltk.tag.util import str2tuple, tuple2str, untag
from nltk.tag.sequential import (
SequentialBackoffTagger,
ContextTagger,
DefaultTagger,
NgramTagger,
UnigramTagger,
BigramTagger,
TrigramTagger,
AffixTagger,
RegexpTagger,
ClassifierBasedTagger,
ClassifierBasedPOSTagger,
)
from nltk.tag.brill import BrillTagger
from nltk.tag.brill_trainer import BrillTaggerTrainer
from nltk.tag.tnt import TnT
from nltk.tag.hunpos import HunposTagger
from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger
from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer
from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger
from nltk.tag.mapping import tagset_mapping, map_tag
from nltk.tag.crf import CRFTagger
from nltk.tag.perceptron import PerceptronTagger
from nltk.data import load, find
RUS_PICKLE = (
"taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle"
)
def _get_tagger(lang=None):
if lang == "rus":
tagger = PerceptronTagger(False)
ap_russian_model_loc = "file:" + str(find(RUS_PICKLE))
tagger.load(ap_russian_model_loc)
else:
tagger = PerceptronTagger()
return tagger
def _pos_tag(tokens, tagset=None, tagger=None, lang=None):
# Currently only supports English and Russian.
if lang not in ["eng", "rus"]:
raise NotImplementedError(
"Currently, NLTK pos_tag only supports English and Russian "
"(i.e. lang='eng' or lang='rus')"
)
# Throws Error if tokens is of string type
elif isinstance(tokens, str):
raise TypeError("tokens: expected a list of strings, got a string")
else:
tagged_tokens = tagger.tag(tokens)
if tagset: # Maps to the specified tagset.
if lang == "eng":
tagged_tokens = [
(token, map_tag("en-ptb", tagset, tag))
for (token, tag) in tagged_tokens
]
elif lang == "rus":
# Note that the new Russian pos tags from the model contains suffixes,
# see https://github.com/nltk/nltk/issues/2151#issuecomment-430709018
tagged_tokens = [
(token, map_tag("ru-rnc-new", tagset, tag.partition("=")[0]))
for (token, tag) in tagged_tokens
]
return tagged_tokens
def METHOD_NAME(tokens, tagset=None, lang="eng"):
"""
Use NLTK's currently recommended part of speech tagger to
tag the given list of tokens.
>>> from nltk.tag import pos_tag
>>> from nltk.tokenize import word_tokenize
>>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE
[('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
>>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal') # doctest: +NORMALIZE_WHITESPACE
[('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'),
("n't", 'ADV'), ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')]
NB. Use `pos_tag_sents()` for efficient tagging of more than one sentence.
:param tokens: Sequence of tokens to be tagged
:type tokens: list(str)
:param tagset: the tagset to be used, e.g. universal, wsj, brown
:type tagset: str
:param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
:type lang: str
:return: The tagged tokens
:rtype: list(tuple(str, str))
"""
tagger = _get_tagger(lang)
return _pos_tag(tokens, tagset, tagger, lang)
def pos_tag_sents(sentences, tagset=None, lang="eng"):
"""
Use NLTK's currently recommended part of speech tagger to tag the
given list of sentences, each consisting of a list of tokens.
:param sentences: List of sentences to be tagged
:type sentences: list(list(str))
:param tagset: the tagset to be used, e.g. universal, wsj, brown
:type tagset: str
:param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
:type lang: str
:return: The list of tagged sentences
:rtype: list(list(tuple(str, str)))
"""
tagger = _get_tagger(lang)
return [_pos_tag(sent, tagset, tagger, lang) for sent in sentences] |
667 | delete | from _collections_abc import Generator, dict_keys
from _typeshed import Incomplete, ReadableBuffer
from types import TracebackType
from typing_extensions import Literal, Self, TypeAlias
from pyasn1.type.base import Asn1Item
from .pooling import ServerPool
from .server import Server
SASL_AVAILABLE_MECHANISMS: Incomplete
CLIENT_STRATEGIES: Incomplete
_ServerSequence: TypeAlias = (
set[Server] | list[Server] | tuple[Server, ...] | Generator[Server, None, None] | dict_keys[Server, Incomplete]
)
class Connection:
connection_lock: Incomplete
last_error: str
strategy_type: Incomplete
user: Incomplete
password: Incomplete
authentication: Incomplete
version: Incomplete
auto_referrals: Incomplete
request: Incomplete
response: Incomplete | None
result: Incomplete
bound: bool
listening: bool
closed: bool
auto_bind: Incomplete
sasl_mechanism: Incomplete
sasl_credentials: Incomplete
socket: Incomplete
tls_started: bool
sasl_in_progress: bool
read_only: Incomplete
lazy: Incomplete
pool_name: Incomplete
pool_size: int | None
cred_store: Incomplete
pool_lifetime: Incomplete
pool_keepalive: Incomplete
starting_tls: bool
check_names: Incomplete
raise_exceptions: Incomplete
auto_range: Incomplete
extend: Incomplete
fast_decoder: Incomplete
receive_timeout: Incomplete
empty_attributes: Incomplete
use_referral_cache: Incomplete
auto_escape: Incomplete
auto_encode: Incomplete
source_address: Incomplete
source_port_list: Incomplete
server_pool: Incomplete | None
server: Incomplete
strategy: Incomplete
send: Incomplete
open: Incomplete
get_response: Incomplete
post_send_single_response: Incomplete
post_send_search: Incomplete
def __init__(
self,
server: Server | str | _ServerSequence | ServerPool,
user: str | None = None,
password: str | None = None,
auto_bind: Literal["DEFAULT", "NONE", "NO_TLS", "TLS_BEFORE_BIND", "TLS_AFTER_BIND"] = "DEFAULT",
version: int = 3,
authentication: Literal["ANONYMOUS", "SIMPLE", "SASL", "NTLM"] | None = None,
client_strategy: Literal[
"SYNC",
"SAFE_RESTARTABLE",
"SAFE_SYNC",
"ASYNC",
"LDIF",
"RESTARTABLE",
"REUSABLE",
"MOCK_SYNC",
"MOCK_ASYNC",
"ASYNC_STREAM",
] = "SYNC",
auto_referrals: bool = True,
auto_range: bool = True,
sasl_mechanism: str | None = None,
sasl_credentials: Incomplete | None = None,
check_names: bool = True,
collect_usage: bool = False,
read_only: bool = False,
lazy: bool = False,
raise_exceptions: bool = False,
pool_name: str | None = None,
pool_size: int | None = None,
pool_lifetime: int | None = None,
cred_store: Incomplete | None = None,
fast_decoder: bool = True,
receive_timeout: Incomplete | None = None,
return_empty_attributes: bool = True,
use_referral_cache: bool = False,
auto_escape: bool = True,
auto_encode: bool = True,
pool_keepalive: Incomplete | None = None,
source_address: str | None = None,
source_port: int | None = None,
source_port_list: Incomplete | None = None,
) -> None: ...
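# Typical client-side usage of the class stubbed here (illustrative only, not
# part of the stub):
#
#     from ldap3 import Server, Connection
#     server = Server("ldaps://ldap.example.com")
#     conn = Connection(server, user="cn=admin,dc=example,dc=com",
#                       password="secret", auto_bind=True)
#     conn.search("dc=example,dc=com", "(objectClass=person)")
#
# Hostnames, DNs and credentials above are placeholders.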
def repr_with_sensitive_data_stripped(self): ...
@property
def stream(self): ...
@stream.setter
def stream(self, value) -> None: ...
@property
def usage(self): ...
def __enter__(self) -> Self: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> Literal[False] | None: ...
def bind(self, read_server_info: bool = True, controls: Incomplete | None = None): ...
def rebind(
self,
user: Incomplete | None = None,
password: Incomplete | None = None,
authentication: Incomplete | None = None,
sasl_mechanism: Incomplete | None = None,
sasl_credentials: Incomplete | None = None,
read_server_info: bool = True,
controls: Incomplete | None = None,
): ...
def unbind(self, controls: Incomplete | None = None): ...
def search(
self,
search_base: str,
search_filter: str,
search_scope: Literal["BASE", "LEVEL", "SUBTREE"] = "SUBTREE",
dereference_aliases: Literal["NEVER", "SEARCH", "FINDING_BASE", "ALWAYS"] = "ALWAYS",
attributes: Incomplete | None = None,
size_limit: int = 0,
time_limit: int = 0,
types_only: bool = False,
get_operational_attributes: bool = False,
controls: Incomplete | None = None,
paged_size: int | None = None,
paged_criticality: bool = False,
paged_cookie: str | bytes | None = None,
auto_escape: bool | None = None,
): ...
def compare(self, dn, attribute, value, controls: Incomplete | None = None): ...
def add(
self, dn, object_class: Incomplete | None = None, attributes: Incomplete | None = None, controls: Incomplete | None = None
): ...
def METHOD_NAME(self, dn, controls: Incomplete | None = None): ...
def modify(self, dn, changes, controls: Incomplete | None = None): ...
def modify_dn(
self,
dn,
relative_dn,
delete_old_dn: bool = True,
new_superior: Incomplete | None = None,
controls: Incomplete | None = None,
): ...
def abandon(self, message_id, controls: Incomplete | None = None): ...
def extended(
self,
request_name,
request_value: Asn1Item | ReadableBuffer | None = None,
controls: Incomplete | None = None,
no_encode: bool | None = None,
): ...
def start_tls(self, read_server_info: bool = True): ...
def do_sasl_bind(self, controls): ...
def do_ntlm_bind(self, controls): ...
def refresh_server_info(self) -> None: ...
def response_to_ldif(
self,
search_result: Incomplete | None = None,
all_base64: bool = False,
line_separator: Incomplete | None = None,
sort_order: Incomplete | None = None,
stream: Incomplete | None = None,
): ...
def response_to_json(
self,
raw: bool = False,
search_result: Incomplete | None = None,
indent: int = 4,
sort: bool = True,
stream: Incomplete | None = None,
checked_attributes: bool = True,
include_empty: bool = True,
): ...
def response_to_file(self, target, raw: bool = False, indent: int = 4, sort: bool = True) -> None: ...
@property
def entries(self): ... |
668 | do set parent | from panda3d.core import NodePath
from . import DistributedObjectAI
from . import GridParent
class DistributedNodeAI(DistributedObjectAI.DistributedObjectAI, NodePath):
def __init__(self, air, name=None):
# Be careful not to create multiple NodePath objects
if not hasattr(self, 'DistributedNodeAI_initialized'):
self.DistributedNodeAI_initialized = 1
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
if name is None:
name = self.__class__.__name__
NodePath.__init__(self, name)
self.gridParent = None
def delete(self):
if self.gridParent:
self.gridParent.delete()
self.gridParent = None
if not self.isEmpty():
self.removeNode()
DistributedObjectAI.DistributedObjectAI.delete(self)
def setLocation(self, parentId, zoneId, teleport=0):
# Redefine DistributedObject setLocation, so that when
# location is set to the ocean grid, we can update our parenting
# under gridParent
DistributedObjectAI.DistributedObjectAI.setLocation(self, parentId, zoneId)
parentObj = self.air.doId2do.get(parentId)
if parentObj:
if parentObj.isGridParent():
if not self.gridParent:
self.gridParent = GridParent.GridParent(self)
self.gridParent.setGridParent(parentObj, zoneId)
else:
if self.gridParent:
self.gridParent.delete()
self.gridParent = None
# NOTE: at this point the avatar has been detached from the scene
# graph. Someone else needs to reparent him to something in the scene graph
# TODO: handle DistributedNode parenting
### setParent ###
def b_setParent(self, parentToken):
if isinstance(parentToken, str):
self.setParentStr(parentToken)
else:
self.setParent(parentToken)
self.d_setParent(parentToken)
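# Note added for readability (describes the convention visible above, no new
# behaviour): "d_" setters only broadcast the update via sendUpdate, while
# "b_" setters apply the change locally and then call the matching "d_" setter.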
def d_setParent(self, parentToken):
if isinstance(parentToken, str):
self.sendUpdate("setParentStr", [parentToken])
else:
self.sendUpdate("setParent", [parentToken])
def setParentStr(self, parentToken):
self.notify.debug('setParentStr(%s): %s' % (self.doId, parentToken))
if len(parentToken) > 0:
self.METHOD_NAME(parentToken)
def setParent(self, parentToken):
self.notify.debug('setParent(%s): %s' % (self.doId, parentToken))
if parentToken == 0:
senderId = self.air.getAvatarIdFromSender()
self.air.writeServerEvent('suspicious', senderId, 'setParent(0)')
else:
self.METHOD_NAME(parentToken)
def METHOD_NAME(self, parentToken):
self.getParentMgr().requestReparent(self, parentToken)
###### set pos and hpr functions #######
# setX provided by NodePath
def d_setX(self, x):
self.sendUpdate("setX", [x])
# setY provided by NodePath
def d_setY(self, y):
self.sendUpdate("setY", [y])
# setZ provided by NodePath
def d_setZ(self, z):
self.sendUpdate("setZ", [z])
# setH provided by NodePath
def d_setH(self, h):
self.sendUpdate("setH", [h])
# setP provided by NodePath
def d_setP(self, p):
self.sendUpdate("setP", [p])
# setR provided by NodePath
def d_setR(self, r):
self.sendUpdate("setR", [r])
def setXY(self, x, y):
self.setX(x)
self.setY(y)
def d_setXY(self, x, y):
self.sendUpdate("setXY", [x, y])
# setPos provided by NodePath
def d_setPos(self, x, y, z):
self.sendUpdate("setPos", [x, y, z])
# setHpr provided by NodePath
def d_setHpr(self, h, p, r):
self.sendUpdate("setHpr", [h, p, r])
def setXYH(self, x, y, h):
self.setX(x)
self.setY(y)
self.setH(h)
def d_setXYH(self, x, y, h):
self.sendUpdate("setXYH", [x, y, h])
def b_setXYZH(self, x, y, z, h):
self.setXYZH(x, y, z, h)
self.d_setXYZH(x, y, z, h)
def setXYZH(self, x, y, z, h):
self.setPos(x, y, z)
self.setH(h)
def getXYZH(self):
pos = self.getPos()
h = self.getH()
return pos[0], pos[1], pos[2], h
def d_setXYZH(self, x, y, z, h):
self.sendUpdate("setXYZH", [x, y, z, h])
# setPosHpr provided by NodePath
def b_setPosHpr(self, x, y, z, h, p, r):
self.setPosHpr(x, y, z, h, p, r)
self.d_setPosHpr(x, y, z, h, p, r)
def d_setPosHpr(self, x, y, z, h, p, r):
self.sendUpdate("setPosHpr", [x, y, z, h, p, r]) |
669 | get | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import annotations
import warnings
import json
from typing import Dict, Union, Optional
from collections import OrderedDict
from .meters import Meter
class MeterDict(object):
def __init__(
self,
meters: Optional[Dict[str, Meter]] = None,
meterdicts: Optional[Dict[str, MeterDict]] = None,
) -> None:
self._meters = OrderedDict()
self._meterdicts = OrderedDict()
if meters is not None:
for name, meter in meters.items():
self.register_meter(name, meter)
if meterdicts is not None:
for name, meterdict in meterdicts.items():
self.register_meterdict(name, meterdict)
def register_meter(self, name: str, meter: Meter) -> None:
if "_meters" not in self.__dict__:
raise AttributeError(
"cannot assign meter before MeterDict.__init__() call"
)
elif "/" in name:
raise KeyError("meter name cannot contain '/'")
elif name == "":
raise KeyError("meter name cannot be empty")
elif hasattr(self, name) and name not in self._meters:
raise KeyError(f"attribute {name} already exists")
self._meters[name] = meter
def register_meterdict(self, name: str, meterdict: MeterDict) -> None:
if "_meterdicts" not in self.__dict__:
raise AttributeError(
"cannot assign meterdict before MeterDict.__init__() call"
)
elif "/" in name:
raise KeyError("meterdict name cannot contain '/'")
elif name == "":
raise KeyError("meterdict name cannot be empty")
elif hasattr(self, name) and name not in self._meters:
raise KeyError(f"attribute {name} already exists")
self._meterdicts[name] = meterdict
def __setattr__(self, name: str, value):
if isinstance(value, Meter):
if name in self.__dict__:
del self.__dict__[name]
if name in self._meterdicts:
del self._meterdicts[name]
self.register_meter(name, value)
elif isinstance(value, MeterDict):
if name in self.__dict__:
del self.__dict__[name]
if name in self._meters:
del self._meters[name]
self.register_meterdict(name, value)
else:
super(MeterDict, self).__setattr__(name, value)
def __getattr__(self, name: str):
if "_meters" in self.__dict__:
_meters = self.__dict__["_meters"]
if name in _meters:
return _meters[name]
if "_meterdicts" in self.__dict__:
_meterdicts = self.__dict__["_meterdicts"]
if name in _meterdicts:
return _meterdicts[name]
raise AttributeError(f"{type(self).__name__} has no attribute {name}")
def __delattr__(self, name: str):
if name in self._meters:
del self._meters[name]
elif name in self._meterdicts:
del self._meterdicts[name]
else:
object.__delattr__(name)
def __setitem__(self, name: str, value):
names = name.split("/", 1)
if len(names) > 1:
member = getattr(self, names[0])
member[names[1]] = value
else:
setattr(self, names[0], value)
def __getitem__(self, name: str):
names = name.split("/", 1)
item = getattr(self, names[0])
if len(names) > 1:
return item[names[1]]
else:
return item
def __contains__(self, name: str):
names = name.split("/", 1)
if hasattr(self, names[0]):
if len(names) > 1:
return names[1] in getattr(self, names[0])
else:
return True
else:
return False
def METHOD_NAME(self, name: str, default=None):
if name in self:
return self[name]
else:
return default
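# Sketch of the "/"-path addressing implemented above (illustrative; Meter
# construction details are assumed since Meter is defined elsewhere):
#
#     metrics = MeterDict(meterdicts={"train": MeterDict(meters={"loss": some_meter})})
#     metrics["train/loss"]     # returns some_meter
#     "train/loss" in metrics   # True
#
# where ``some_meter`` is any Meter instance.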
def _named_dicts(self, memo: Optional[set] = None, prefix: str = ""):
if memo is None:
memo = set()
if self not in memo:
memo.add(self)
yield prefix, self
if len(prefix) > 0:
prefix += "/"
for name, meterdict in self._meterdicts.items():
for m in meterdict._named_dicts(memo, prefix + name):
yield m
def _named_meters(self, prefix: str = ""):
memo = set()
for name_prefix, meterdict in self._named_dicts(prefix=prefix):
for name, meter in meterdict._meters.items():
if meter in memo:
continue
memo.add(meter)
name = (
name_prefix + ("/" if len(name_prefix) > 0 else "") + name
)
yield name, meter
def restart(self):
for _, meter in self._named_meters():
meter.restart()
@property
def value(self) -> Dict:
return {name: meter.value for name, meter in self._named_meters()}
@property
def best(self) -> Dict:
return {name: meter.best for name, meter in self._named_meters()}
def state_dict(self) -> Dict:
return {
name: meter.state_dict() for name, meter in self._named_meters()
}
def load_state_dict(self, state_dict: Dict) -> None:
missing_keys = []
for name, meter in self._named_meters():
if name in state_dict:
meter_state = state_dict.pop(name)
meter.load_state_dict(meter_state)
else:
missing_keys.append(name)
unexpected_keys = list(state_dict.keys())
if len(missing_keys) > 0:
warnings.warn(f"missing keys in state_dict: {missing_keys}")
if len(unexpected_keys) > 0:
warnings.warn(f"unexpected keys in state_dict: {unexpected_keys}")
def __repr__(self) -> str:
def _add_spaces(str_, n_spaces=4):
return "\n".join(
[(" " * n_spaces) + line for line in str_.split("\n")]
)
main_str = "\n".join(
[f"{name}: {repr(meter)}" for name, meter in self._meters.items()]
)
child_str = "\n".join(
[
f"{name}:\n{_add_spaces(repr(meterdict))}"
for name, meterdict in self._meterdicts.items()
]
)
if child_str:
main_str = "\n".join([main_str, child_str])
return main_str |
670 | test verbosity special | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2021, Tomas Babej, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import re
import unittest
import operator
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
class TestVerbosity(TestCase):
def setUp(self):
self.t = Task()
self.t.config("print.empty.columns", "1")
self.t("add Sample")
# TODO Verbosity: 'edit'
def test_verbosity_new_id(self):
"""Verbosity new-id"""
code, out, err = self.t("rc.verbose:new-id add Sample1")
self.assertRegex(out, r"Created task \d")
code, out, err = self.t("rc.verbose:nothing add Sample2")
self.assertNotRegex(out, r"Created task \d")
def test_verbosity_new_uuid(self):
"""Verbosity new-uuid"""
code, out, err = self.t(("rc.verbose:new-uuid", "add", "Sample1"))
self.assertRegex(out, r"Created task [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}")
def test_verbosity_label(self):
"""Verbosity label"""
code, out, err = self.t("rc.verbose:label ls")
self.assertRegex(
out,
"ID.+A.+D.+Project.+Tags.+R.+Wait.+S.+Due.+Until.+Description"
)
def test_verbosity_affected(self):
"""Verbosity affected"""
code, out, err = self.t("rc.verbose:affected ls")
expected = re.compile(r"^\d+ tasks?$", re.MULTILINE)
self.assertRegex(out, expected)
def test_verbosity_off(self):
"""Verbosity off"""
code, out, err = self.t("rc.verbose:nothing ls")
expected = re.compile(r"^\d+ tasks?$", re.MULTILINE)
self.assertNotRegex(out, expected)
self.assertNotRegex(out, "ID.+Project.+Pri.+Description")
def METHOD_NAME(self):
"""Verbosity special"""
code, out, err = self.t("rc.verbose:special 1 mod +next")
self.assertIn("The 'next' special tag will boost the urgency of this "
"task so it appears on the 'next' report.", out)
def test_verbosity_blank(self):
"""Verbosity blank"""
def count_blank_lines(x):
return x.splitlines().count('')
code, out, err = self.t("rc.verbose:nothing ls")
self.assertEqual(count_blank_lines(out), 0)
code, out, err = self.t("rc.verbose:blank ls")
self.assertEqual(count_blank_lines(out), 2)
def test_verbosity_header(self):
"""Verbosity header"""
code, out, err = self.t("rc.verbose:override ls")
self.assertNotIn("TASKRC override:", err)
self.assertNotIn("TASKDATA override:", err)
code, out, err = self.t("rc.verbose:header,override ls")
self.assertIn("TASKRC override:", err)
self.assertIn("TASKDATA override:", err)
def test_verbosity_project(self):
"""Verbosity project"""
code, out, err = self.t("rc.verbose:nothing add proj:T one")
self.assertNotIn("The project 'T' has changed.", err)
code, out, err = self.t("rc.verbose:project add proj:T two")
self.assertIn("The project 'T' has changed.", err)
def test_bug_2247(self):
"""
Verbosity override is applied regardless of the order of the arguments.
"""
code, out, err = self.t("rc.color:0 add test")
self.assertIn("Configuration override", err)
# Once rc.verbose:nothing is set, no output about configuration overrides should appear
code, out, err = self.t("rc.verbose:nothing add test")
self.assertNotIn("Configuration override", err)
code, out, err = self.t("rc.color:0 rc.verbose:nothing add test")
self.assertNotIn("Configuration override", err)
code, out, err = self.t("rc.verbose:nothing rc.color:0 add test")
self.assertNotIn("Configuration override", err)
if __name__ == "__main__":
from simpletap import TAPTestRunner
unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python |
671 | has enough cpus | import ctypes
import logging
import os
import psutil
import subprocess
from abc import ABC
from os.path import realpath
from shutil import disk_usage
from .file_utils import get_kubeconfig_path, get_kubectl_directory
from . import definitions
logger = logging.getLogger(__name__)
class Auxiliary(ABC):
"""
Base OS auxiliary class.
"""
def __init__(self, args) -> None:
"""
:param args: ArgumentParser
:return: None
"""
self._args = args
try:
self.requested_disk = self._args.disk * 1024 * 1024 * 1024
self.requested_memory = self._args.mem * 1024 * 1024 * 1024
self.requested_cores = self._args.cpu
except AttributeError:
self.requested_disk = definitions.DEFAULT_DISK_GB
self.requested_memory = definitions.DEFAULT_MEMORY_GB
self.requested_cores = definitions.DEFAULT_CORES
@staticmethod
def _free_disk_space() -> int:
"""
Get space free of disk that this script is installed to.
:return: Integer free space
"""
return disk_usage(realpath("/")).free
@staticmethod
def _total_memory() -> int:
"""
Get available memory in machine this script is installed to.
:return: Available memory in bytes
"""
return psutil.virtual_memory().total
@staticmethod
def _cpu_count() -> int:
"""
Get the number of cpus on the machine this script is installed to.
:return: Number of cpus
"""
return psutil.cpu_count(logical=False)
def has_enough_disk_space(self) -> bool:
"""
Compare free space with minimum.
:return: Boolean
"""
return self._free_disk_space() > self.requested_disk
def has_enough_memory(self) -> bool:
"""
Compare requested memory against available
:return: Boolean
"""
return self._total_memory() > self.requested_memory
def METHOD_NAME(self) -> bool:
"""
Compare requested cpus against available cores.
:return: Boolean
"""
return self._cpu_count() >= self.requested_cores
def get_kubectl_directory(self) -> str:
"""
Get the correct directory to install kubectl into,
we can then call this when running `microk8s kubectl`
without interfering with any systemwide install.
:return: String
"""
return get_kubectl_directory()
def get_kubeconfig_path(self) -> str:
"""
Get the correct path to write the kubeconfig
file to. This is then read by the installed
kubectl and won't interfere with one in the user's
home.
:return: String
"""
return get_kubeconfig_path()
def kubectl(self) -> int:
"""
Run kubectl on the host, with the generated kubeconf.
        :return: Exit code returned by the kubectl subprocess
"""
kctl_dir = self.get_kubectl_directory()
try:
exit_code = subprocess.check_call(
[
os.path.join(kctl_dir, "kubectl"),
"--kubeconfig={}".format(self.get_kubeconfig_path()),
]
+ self._args,
)
except subprocess.CalledProcessError as e:
return e.returncode
else:
return exit_code
class Windows(Auxiliary):
"""
Windows auxiliary methods.
"""
def __init__(self, args) -> None:
"""
:param args: ArgumentParser
:return: None
"""
super(Windows, self).__init__(args)
@staticmethod
def check_admin() -> bool:
"""
Check if running as admin.
:return: Boolean
"""
return ctypes.windll.shell32.IsUserAnAdmin() == 1
@staticmethod
def check_hyperv() -> bool:
"""
Check if Hyper V is already enabled.
:return: Boolean
"""
try:
out = subprocess.check_output(
["DISM", "/Online", "/Get-FeatureInfo", "/FeatureName:Microsoft-Hyper-V"]
)
except subprocess.CalledProcessError:
return False
if "State : Disabled" in out.decode():
return False
return True
@staticmethod
def enable_hyperv() -> None:
"""
Enable Hyper V feature.
:return: None
"""
try:
subprocess.check_call(
[
"DISM",
"/Online",
"/Enable-Feature",
"/All",
"/NoRestart",
"/FeatureName:Microsoft-Hyper-V",
]
)
except subprocess.CalledProcessError as e:
if e.returncode == 3010:
                pass  # exit code 3010 (ERROR_SUCCESS_REBOOT_REQUIRED): feature enabled, reboot pending
else:
raise
class Linux(Auxiliary):
"""
    Linux auxiliary methods.
"""
def __init__(self, args) -> None:
"""
:param args: ArgumentParser
:return: None
"""
super(Linux, self).__init__(args)
class MacOS(Linux):
"""
MacOS auxiliary methods.
"""
def __init__(self, args) -> None:
"""
:param args: ArgumentParser
:return: None
"""
super(MacOS, self).__init__(args) |
672 | get criterion | import numpy as np
import torch
import torchaudio
from coqpit import Coqpit
from torch import nn
from TTS.encoder.losses import AngleProtoLoss, GE2ELoss, SoftmaxAngleProtoLoss
from TTS.utils.generic_utils import set_init_dict
from TTS.utils.io import load_fsspec
class PreEmphasis(nn.Module):
def __init__(self, coefficient=0.97):
super().__init__()
self.coefficient = coefficient
self.register_buffer("filter", torch.FloatTensor([-self.coefficient, 1.0]).unsqueeze(0).unsqueeze(0))
def forward(self, x):
assert len(x.size()) == 2
x = torch.nn.functional.pad(x.unsqueeze(1), (1, 0), "reflect")
return torch.nn.functional.conv1d(x, self.filter).squeeze(1)
class BaseEncoder(nn.Module):
"""Base `encoder` class. Every new `encoder` model must inherit this.
It defines common `encoder` specific functions.
"""
# pylint: disable=W0102
def __init__(self):
super(BaseEncoder, self).__init__()
def get_torch_mel_spectrogram_class(self, audio_config):
return torch.nn.Sequential(
PreEmphasis(audio_config["preemphasis"]),
# TorchSTFT(
# n_fft=audio_config["fft_size"],
# hop_length=audio_config["hop_length"],
# win_length=audio_config["win_length"],
# sample_rate=audio_config["sample_rate"],
# window="hamming_window",
# mel_fmin=0.0,
# mel_fmax=None,
# use_htk=True,
# do_amp_to_db=False,
# n_mels=audio_config["num_mels"],
# power=2.0,
# use_mel=True,
# mel_norm=None,
# )
torchaudio.transforms.MelSpectrogram(
sample_rate=audio_config["sample_rate"],
n_fft=audio_config["fft_size"],
win_length=audio_config["win_length"],
hop_length=audio_config["hop_length"],
window_fn=torch.hamming_window,
n_mels=audio_config["num_mels"],
),
)
@torch.no_grad()
def inference(self, x, l2_norm=True):
return self.forward(x, l2_norm)
@torch.no_grad()
def compute_embedding(self, x, num_frames=250, num_eval=10, return_mean=True, l2_norm=True):
"""
Generate embeddings for a batch of utterances
x: 1xTxD
"""
# map to the waveform size
if self.use_torch_spec:
num_frames = num_frames * self.audio_config["hop_length"]
max_len = x.shape[1]
if max_len < num_frames:
num_frames = max_len
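        # Take num_eval evenly spaced windows of num_frames samples each; their
        # embeddings are averaged into one utterance embedding when return_mean is True.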
offsets = np.linspace(0, max_len - num_frames, num=num_eval)
frames_batch = []
for offset in offsets:
offset = int(offset)
end_offset = int(offset + num_frames)
frames = x[:, offset:end_offset]
frames_batch.append(frames)
frames_batch = torch.cat(frames_batch, dim=0)
embeddings = self.inference(frames_batch, l2_norm=l2_norm)
if return_mean:
embeddings = torch.mean(embeddings, dim=0, keepdim=True)
return embeddings
def METHOD_NAME(self, c: Coqpit, num_classes=None):
if c.loss == "ge2e":
criterion = GE2ELoss(loss_method="softmax")
elif c.loss == "angleproto":
criterion = AngleProtoLoss()
elif c.loss == "softmaxproto":
criterion = SoftmaxAngleProtoLoss(c.model_params["proj_dim"], num_classes)
else:
raise Exception("The %s not is a loss supported" % c.loss)
return criterion
def load_checkpoint(
self,
config: Coqpit,
checkpoint_path: str,
eval: bool = False,
use_cuda: bool = False,
criterion=None,
cache=False,
):
state = load_fsspec(checkpoint_path, map_location=torch.device("cpu"), cache=cache)
try:
self.load_state_dict(state["model"])
print(" > Model fully restored. ")
except (KeyError, RuntimeError) as error:
# If eval raise the error
if eval:
raise error
print(" > Partial model initialization.")
model_dict = self.state_dict()
            model_dict = set_init_dict(model_dict, state["model"], config)
self.load_state_dict(model_dict)
del model_dict
# load the criterion for restore_path
if criterion is not None and "criterion" in state:
try:
criterion.load_state_dict(state["criterion"])
except (KeyError, RuntimeError) as error:
print(" > Criterion load ignored because of:", error)
# instance and load the criterion for the encoder classifier in inference time
if (
eval
and criterion is None
and "criterion" in state
and getattr(config, "map_classid_to_classname", None) is not None
):
criterion = self.METHOD_NAME(config, len(config.map_classid_to_classname))
criterion.load_state_dict(state["criterion"])
if use_cuda:
self.cuda()
if criterion is not None:
criterion = criterion.cuda()
if eval:
self.eval()
assert not self.training
if not eval:
return criterion, state["step"]
return criterion |
673 | merge | """Lazy ZIP over HTTP"""
__all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"]
from bisect import bisect_left, bisect_right
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Generator, List, Optional, Tuple
from zipfile import BadZipfile, ZipFile
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._internal.metadata import BaseDistribution, MemoryWheel, get_wheel_distribution
from pip._internal.network.session import PipSession
from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks
class HTTPRangeRequestUnsupported(Exception):
pass
def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution:
"""Return a distribution object from the given wheel URL.
This uses HTTP range requests to only fetch the portion of the wheel
containing metadata, just enough for the object to be constructed.
If such requests are not supported, HTTPRangeRequestUnsupported
is raised.
"""
with LazyZipOverHTTP(url, session) as zf:
# For read-only ZIP files, ZipFile only needs methods read,
# seek, seekable and tell, not the whole IO protocol.
wheel = MemoryWheel(zf.name, zf) # type: ignore
# After context manager exit, wheel.name
# is an invalid file by intention.
return get_wheel_distribution(wheel, canonicalize_name(name))
class LazyZipOverHTTP:
"""File-like object mapped to a ZIP file over HTTP.
This uses HTTP range requests to lazily fetch the file's content,
which is supposed to be fed to ZipFile. If such requests are not
supported by the server, raise HTTPRangeRequestUnsupported
during initialization.
"""
def __init__(
self, url: str, session: PipSession, chunk_size: int = CONTENT_CHUNK_SIZE
) -> None:
head = session.head(url, headers=HEADERS)
raise_for_status(head)
assert head.status_code == 200
self._session, self._url, self._chunk_size = session, url, chunk_size
self._length = int(head.headers["Content-Length"])
self._file = NamedTemporaryFile()
self.truncate(self._length)
self._left: List[int] = []
self._right: List[int] = []
if "bytes" not in head.headers.get("Accept-Ranges", "none"):
raise HTTPRangeRequestUnsupported("range request is not supported")
self._check_zip()
@property
def mode(self) -> str:
"""Opening mode, which is always rb."""
return "rb"
@property
def name(self) -> str:
"""Path to the underlying file."""
return self._file.name
def seekable(self) -> bool:
"""Return whether random access is supported, which is True."""
return True
def close(self) -> None:
"""Close the file."""
self._file.close()
@property
def closed(self) -> bool:
"""Whether the file is closed."""
return self._file.closed
def read(self, size: int = -1) -> bytes:
"""Read up to size bytes from the object and return them.
As a convenience, if size is unspecified or -1,
all bytes until EOF are returned. Fewer than
size bytes may be returned if EOF is reached.
"""
download_size = max(size, self._chunk_size)
start, length = self.tell(), self._length
stop = length if size < 0 else min(start + download_size, length)
start = max(0, stop - download_size)
self._download(start, stop - 1)
return self._file.read(size)
def readable(self) -> bool:
"""Return whether the file is readable, which is True."""
return True
def seek(self, offset: int, whence: int = 0) -> int:
"""Change stream position and return the new absolute position.
Seek to offset relative position indicated by whence:
* 0: Start of stream (the default). pos should be >= 0;
* 1: Current position - pos may be negative;
* 2: End of stream - pos usually negative.
"""
return self._file.seek(offset, whence)
def tell(self) -> int:
"""Return the current position."""
return self._file.tell()
def truncate(self, size: Optional[int] = None) -> int:
"""Resize the stream to the given size in bytes.
If size is unspecified resize to the current position.
The current stream position isn't changed.
Return the new file size.
"""
return self._file.truncate(size)
def writable(self) -> bool:
"""Return False."""
return False
def __enter__(self) -> "LazyZipOverHTTP":
self._file.__enter__()
return self
def __exit__(self, *exc: Any) -> None:
self._file.__exit__(*exc)
@contextmanager
def _stay(self) -> Generator[None, None, None]:
"""Return a context manager keeping the position.
At the end of the block, seek back to original position.
"""
pos = self.tell()
try:
yield
finally:
self.seek(pos)
def _check_zip(self) -> None:
"""Check and download until the file is a valid ZIP."""
end = self._length - 1
for start in reversed(range(0, end, self._chunk_size)):
self._download(start, end)
with self._stay():
try:
# For read-only ZIP files, ZipFile only needs
# methods read, seek, seekable and tell.
ZipFile(self) # type: ignore
except BadZipfile:
pass
else:
break
def _stream_response(
self, start: int, end: int, base_headers: Dict[str, str] = HEADERS
) -> Response:
"""Return HTTP response to a range request from start to end."""
headers = base_headers.copy()
headers["Range"] = f"bytes={start}-{end}"
# TODO: Get range requests to be correctly cached
headers["Cache-Control"] = "no-cache"
return self._session.get(self._url, headers=headers, stream=True)
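    # self._left/self._right hold the sorted start/end offsets of byte ranges already
    # written to the temporary file; the generator below merges them with the requested
    # range and yields only the still-missing sub-ranges for _download to fetch.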
def METHOD_NAME(
self, start: int, end: int, left: int, right: int
) -> Generator[Tuple[int, int], None, None]:
"""Return a generator of intervals to be fetched.
Args:
start (int): Start of needed interval
end (int): End of needed interval
left (int): Index of first overlapping downloaded data
right (int): Index after last overlapping downloaded data
"""
lslice, rslice = self._left[left:right], self._right[left:right]
i = start = min([start] + lslice[:1])
end = max([end] + rslice[-1:])
for j, k in zip(lslice, rslice):
if j > i:
yield i, j - 1
i = k + 1
if i <= end:
yield i, end
self._left[left:right], self._right[left:right] = [start], [end]
def _download(self, start: int, end: int) -> None:
"""Download bytes from start to end inclusively."""
with self._stay():
left = bisect_left(self._right, start)
right = bisect_right(self._left, end)
for start, end in self.METHOD_NAME(start, end, left, right):
response = self._stream_response(start, end)
response.raise_for_status()
self.seek(start)
for chunk in response_chunks(response, self._chunk_size):
self._file.write(chunk) |
674 | test snake case | # Import AWS utils
from ScoutSuite.providers.aws.utils import (
get_keys,
no_camel,
get_name,
is_throttled,
get_aws_account_id,
get_partition_name,
snake_keys,
)
from ScoutSuite.utils import *
import collections
import unittest
from unittest import mock
import datetime
#
# Test methods for ScoutSuite/utils.py
#
class TestScoutUtilsClass(unittest.TestCase):
def test_format_service_name(self):
assert format_service_name("iAm") == "IAM"
assert format_service_name("cloudformation") == "CloudFormation"
def test_get_keys(self):
test1 = {"a": "b", "c": "d"}
test2 = {"a": "", "e": "f"}
get_keys(test1, test2, "a")
assert test2["a"] == "b"
assert "c" not in test2
get_keys(test1, test2, "c")
assert test2["c"] == "d"
def test_no_camel(self):
assert no_camel("TestTest") == "test_test"
def test_is_throttled(self):
CustomException = collections.namedtuple("CustomException", "response")
# test the throttling cases
for t in ["Throttling", "RequestLimitExceeded", "ThrottlingException"]:
e = CustomException(response={"Error": {"Code": t}})
assert is_throttled(e)
# test the non-throttling exception
e = CustomException(response={"Error": {"Code": "Not Thro_ttling"}})
assert not is_throttled(e)
# test the except block
e = CustomException(response={"Error": ""})
assert not is_throttled(e)
def test_get_name(self):
src = {
"Tags": [
{"Key": "Not Name", "Value": "xyz"},
{"Key": "Name", "Value": "abc"},
],
"default_attribute": "default_value",
}
dst = {}
default_attribute = "default_attribute"
assert get_name(src, dst, default_attribute) == "abc"
assert dst["name"] == "abc"
src = {
"Tags": [{"Key": "Not Name", "Value": "xyz"}],
"default_attribute": "default_value",
}
dst = {}
default_attribute = "default_attribute"
assert get_name(src, dst, default_attribute) == "default_value"
assert dst["name"] == "default_value"
def test_get_identity(self):
with mock.patch(
"ScoutSuite.providers.aws.utils.get_caller_identity",
return_value={"Arn": "a:b:c:d:e:f:"},
):
assert get_aws_account_id("") == "e"
def test_get_partition_name(self):
with mock.patch(
"ScoutSuite.providers.aws.utils.get_caller_identity",
return_value={"Arn": "a:b:c:d:e:f:"},
):
assert get_partition_name("") == "b"
def METHOD_NAME(self):
src = {
"AttributeDefinitions": [
{"AttributeName": "string", "AttributeType": "S"},
],
"TableName": "string",
"KeySchema": [{"AttributeName": "string", "KeyType": "HASH"},],
"TableStatus": "CREATING",
"CreationDateTime": datetime.datetime(2015, 1, 1, 1, 1, 1, 1, None),
"ProvisionedThroughput": {
"LastIncreaseDateTime": datetime.datetime(2015, 1, 1, 1, 1, 1, 1, None),
"LastDecreaseDateTime": datetime.datetime(2015, 1, 1, 1, 1, 1, 1, None),
"NumberOfDecreasesToday": 123,
"ReadCapacityUnits": 123,
"WriteCapacityUnits": 123,
},
"TableSizeBytes": 123,
"AnotherArray": [
"One",
"Two",
"AnotherThing",
]
}
dest = {
"attribute_definitions": [
{"attribute_name": "string", "attribute_type": "S"},
],
"table_name": "string",
"key_schema": [{"attribute_name": "string", "key_type": "HASH"}],
"table_status": "CREATING",
"creation_date_time": datetime.datetime(2015, 1, 1, 1, 1, 1, 1, None),
"provisioned_throughput": {
"last_increase_date_time": datetime.datetime(
2015, 1, 1, 1, 1, 1, 1, None
),
"last_decrease_date_time": datetime.datetime(
2015, 1, 1, 1, 1, 1, 1, None
),
"number_of_decreases_today": 123,
"read_capacity_units": 123,
"write_capacity_units": 123,
},
"table_size_bytes": 123,
"another_array": ["One", "Two", "AnotherThing"]
}
d = snake_keys(src)
self.maxDiff = None
        self.assertEqual(d, dest)
675 | run | """
An example that uses a function from external C library (OpenCV in this case).
Works for all C-based code generation targets (i.e. for cython and cpp_standalone
device) and for numpy (using the Python bindings).
This example needs a working installation of OpenCV 3.x and its Python bindings.
It has been tested on 64 bit Linux in a conda environment with packages from the
``conda-forge`` channels (opencv 3.4.4, x264 1!152.20180717, ffmpeg 4.1).
"""
import os
import urllib.request, urllib.error, urllib.parse
import cv2 # Import OpenCV2
from brian2 import *
defaultclock.dt = 1*ms
prefs.codegen.target = 'cython'
prefs.logging.std_redirection = False
set_device('cpp_standalone', clean=True)
filename = os.path.abspath('Megamind.avi')
if not os.path.exists(filename):
print('Downloading the example video file')
response = urllib.request.urlopen('http://docs.opencv.org/2.4/_downloads/Megamind.avi')
data = response.read()
with open(filename, 'wb') as f:
f.write(data)
video = cv2.VideoCapture(filename)
width, height, frame_count = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)),
int(video.get(cv2.CAP_PROP_FRAME_COUNT)))
fps = 24
time_between_frames = 1*second/fps
@implementation('cpp', '''
double* get_frame(bool new_frame)
{
// The following initializations will only be executed once
static cv::VideoCapture source("VIDEO_FILENAME");
static cv::Mat frame;
static double* grayscale_frame = (double*)malloc(VIDEO_WIDTH*VIDEO_HEIGHT*sizeof(double));
if (new_frame)
{
source >> frame;
double mean_value = 0;
for (int row=0; row<VIDEO_HEIGHT; row++)
for (int col=0; col<VIDEO_WIDTH; col++)
{
const double grayscale_value = (frame.at<cv::Vec3b>(row, col)[0] +
frame.at<cv::Vec3b>(row, col)[1] +
frame.at<cv::Vec3b>(row, col)[2])/(3.0*128);
mean_value += grayscale_value / (VIDEO_WIDTH * VIDEO_HEIGHT);
grayscale_frame[row*VIDEO_WIDTH + col] = grayscale_value;
}
// subtract the mean
for (int i=0; i<VIDEO_HEIGHT*VIDEO_WIDTH; i++)
grayscale_frame[i] -= mean_value;
}
return grayscale_frame;
}
double video_input(const int x, const int y)
{
// Get the current frame (or a new frame in case we are asked for the first
// element
double *frame = get_frame(x==0 && y==0);
return frame[y*VIDEO_WIDTH + x];
}
'''.replace('VIDEO_FILENAME', filename),
libraries=['opencv_core',
'opencv_highgui',
'opencv_videoio'],
headers=['<opencv2/core/core.hpp>',
'<opencv2/highgui/highgui.hpp>'],
define_macros=[('VIDEO_WIDTH', width),
('VIDEO_HEIGHT', height)])
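# As described in the module docstring, the C-based code generation targets use the
# C++ implementation registered above, while the numpy target falls back to the
# Python function below (which relies on OpenCV's Python bindings).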
@check_units(x=1, y=1, result=1)
def video_input(x, y):
# we assume this will only be called in the custom operation (and not for
# example in a reset or synaptic statement), so we don't need to do indexing
# but we can directly return the full result
_, frame = video.read()
grayscale = frame.mean(axis=2)
grayscale /= 128. # scale everything between 0 and 2
return grayscale.ravel() - grayscale.ravel().mean()
N = width * height
tau, tau_th = 10*ms, time_between_frames
G = NeuronGroup(N, '''dv/dt = (-v + I)/tau : 1
dv_th/dt = -v_th/tau_th : 1
row : integer (constant)
column : integer (constant)
I : 1 # input current''',
threshold='v>v_th', reset='v=0; v_th = 3*v_th + 1.0',
method='exact')
G.v_th = 1
G.row = 'i//width'
G.column = 'i%width'
G.run_regularly('I = video_input(column, row)',
dt=time_between_frames)
mon = SpikeMonitor(G)
runtime = frame_count*time_between_frames
METHOD_NAME(runtime, report='text')
# Avoid going through the whole Brian2 indexing machinery too much
i, t, row, column = mon.i[:], mon.t[:], G.row[:], G.column[:]
import matplotlib.animation as animation
# TODO: Use overlapping windows
stepsize = 100*ms
def next_spikes():
step = next_spikes.step
if step*stepsize > runtime:
        next_spikes.step = 0
        return  # end the generator; raising StopIteration here is an error under PEP 479
spikes = i[(t>=step*stepsize) & (t<(step+1)*stepsize)]
next_spikes.step += 1
yield column[spikes], row[spikes]
next_spikes.step = 0
fig, ax = plt.subplots()
dots, = ax.plot([], [], 'k.', markersize=2, alpha=.25)
ax.set_xlim(0, width)
ax.set_ylim(0, height)
ax.invert_yaxis()
def METHOD_NAME(data):
x, y = data
dots.set_data(x, y)
ani = animation.FuncAnimation(fig, METHOD_NAME, next_spikes, blit=False, repeat=True,
repeat_delay=1000)
plt.show() |
676 | forward |
import numpy as np
from . import sphere128 as sph
from dedalus.tools.cache import CachedMethod
class Sphere:
def __init__(self,L_max,S_max=0,N_theta=None,m_min=None,m_max=None):
self.L_max, self.S_max = L_max, S_max
        if N_theta is None: N_theta = L_max+1
        self.N_theta = N_theta
        if m_min is None: m_min = -L_max
        if m_max is None: m_max = L_max
# grid and weights for the all transforms
self.cos_grid,self.weights = sph.quadrature(self.N_theta-1,niter=3,report_error=False)
self.grid = np.arccos(self.cos_grid)
self.sin_grid = np.sqrt(1-self.cos_grid**2)
self.pushY, self.pullY = {}, {}
for s in range(-S_max,S_max+1):
for m in range(m_min,m_max+1):
Y = sph.Y(self.L_max,m,s,self.cos_grid)
self.pushY[(m,s)] = (self.weights*Y).astype(np.float64)
self.pullY[(m,s)] = (Y.T).astype(np.float64)
# downcast to double precision
self.grid = self.grid.astype(np.float64)
self.weights = self.weights.astype(np.float64)
self.sin_grid = self.sin_grid.astype(np.float64)
self.cos_grid = self.cos_grid.astype(np.float64)
@CachedMethod
def op(self,op_name,m,s):
return sph.operator(op_name,self.L_max,m,s).astype(np.float64)
@CachedMethod
def L_min(self,m,s):
return sph.L_min(m,s)
def zeros(self,m,s_out,s_in):
return sph.zeros(self.L_max,m,s_out,s_in)
def forward_spin(self,m,s,data):
# grid --> coefficients
return self.pushY[(m,s)].dot(data)
def backward_spin(self,m,s,data):
# coefficients --> grid
return self.pullY[(m,s)].dot(data)
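    # A rank-r tensor field has 2**rank spin components; for azimuthal order m,
    # tensor_index returns each component's start/end slice in the stacked coefficient
    # vector along with its spin weight (slice lengths depend on L_min(m, spin)).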
@CachedMethod
def tensor_index(self,m,rank):
num = np.arange(2**rank)
spin = (-1)**num
for k in range(2,rank+1):
spin += ((-1)**(num//2**(k-1))).astype(np.int64)
if rank == 0: spin = [0]
start_index = [0]
end_index = []
for k in range(2**rank):
end_index.append(start_index[k]+self.L_max-sph.L_min(m,spin[k])+1)
if k < 2**rank-1:
start_index.append(end_index[k])
return (start_index,end_index,spin)
@CachedMethod
def unitary(self,rank=1,adjoint=False):
return sph.unitary(rank=rank,adjoint=adjoint)
def METHOD_NAME(self,m,rank,data,unitary=None):
if rank == 0:
return self.forward_spin(m,0,data)
(start_index,end_index,spin) = self.tensor_index(m,rank)
        if unitary is None:
unitary = self.unitary(rank=rank,adjoint=True)
data = np.einsum("ij,j...->i...",unitary,data)
shape = np.array(np.array(data).shape[1:])
shape[0] = end_index[-1]
data_c = np.zeros(shape,dtype=np.complex128)
for i in range(2**rank):
data_c[start_index[i]:end_index[i]] = self.forward_spin(m,spin[i],data[i])
return data_c
def backward(self,m,rank,data,unitary=None):
if rank == 0:
return self.backward_spin(m,0,data)
(start_index,end_index,spin) = self.tensor_index(m,rank)
        if unitary is None:
unitary = self.unitary(rank=rank,adjoint=False)
shape = np.array(np.array(data).shape)
shape = np.concatenate(([2**rank],shape))
shape[1] = self.N_theta
data_g = np.zeros(shape,dtype=np.complex128)
for i in range(2**rank):
data_g[i] = self.backward_spin(m,spin[i],data[start_index[i]:end_index[i]])
return np.einsum("ij,j...->i...",unitary,data_g)
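    # grad maps a rank-r input to a rank-(r+1) output: the first half of the output
    # components applies the 'k+' operator, the second half applies 'k-'.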
def grad(self,m,rank_in,data,data_out):
# data and data_out are in coefficient space
(start_index_in,end_index_in,spin_in) = self.tensor_index(m,rank_in)
rank_out = rank_in+1
(start_index_out,end_index_out,spin_out) = self.tensor_index(m,rank_out)
half = 2**(rank_out-1)
for i in range(2**(rank_out)):
if i//half == 0:
operator = self.op('k+',m,spin_in[i%half])
else:
operator = self.op('k-',m,spin_in[i%half])
np.copyto( data_out[start_index_out[i]:end_index_out[i]],
operator.dot(data[start_index_in[i%half]:end_index_in[i%half]]) )
|
677 | test sitemap generators | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test API for Zenodo and GitHub integration."""
from __future__ import absolute_import, print_function
import datetime
import re
from flask import current_app, render_template
from zenodo.modules.sitemap.generators import _sitemapdtformat
from zenodo.modules.sitemap.tasks import update_sitemap_cache
def test_sitemap_cache_update_simple(mocker, app):
"""Test Sitemap cache updating with fixed parameters."""
def make_url(loc):
return {'loc': 'https://localhost' + loc}
urls = [make_url('/record/' + str(i)) for i in range(5)]
cache_mock = mocker.patch('zenodo.modules.sitemap.ext.current_cache')
update_sitemap_cache(urls=urls, max_url_count=2)
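    # With max_url_count=2 the five URLs are split over three sitemap pages cached
    # under 'sitemap:1'..'sitemap:3', plus the sitemap index cached under 'sitemap:0'.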
sitemap1 = render_template('zenodo_sitemap/sitemap.xml',
urlset=urls[:2])
cache_mock.set.assert_any_call('sitemap:1', sitemap1, timeout=-1)
sitemap2 = render_template('zenodo_sitemap/sitemap.xml',
urlset=urls[2:4])
cache_mock.set.assert_any_call('sitemap:2', sitemap2, timeout=-1)
sitemap3 = render_template('zenodo_sitemap/sitemap.xml',
urlset=urls[4:])
cache_mock.set.assert_any_call('sitemap:3', sitemap3, timeout=-1)
sitemapindex = [make_url('/sitemap{}.xml'.format(i)) for i in range(1, 4)]
sitemap0 = render_template('zenodo_sitemap/sitemapindex.xml',
urlset=sitemapindex, url_scheme='https')
cache_mock.set.assert_any_call('sitemap:0', sitemap0, timeout=-1)
def METHOD_NAME(app, record_with_bucket, communities):
"""Test Sitemap generators."""
with app.test_request_context():
sitemap = current_app.extensions['zenodo-sitemap']
urls = list(sitemap._generate_all_urls())
        # Make sure the lastmod timestamps are present and in the proper UTC sitemap
        # format 'YYYY-MM-DDTHH:MM:SSZ', then remove them from the result for easier
        # comparison of the URL sets.
        sitemap_dt_re = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$')
assert all('lastmod' in url and sitemap_dt_re.match(url['lastmod'])
for url in urls)
for url in urls:
del url['lastmod']
expected = [
{'loc': 'https://localhost/record/12345'},
{'loc': 'https://localhost/communities/c1/'},
{'loc': 'https://localhost/communities/c1/search'},
{'loc': 'https://localhost/communities/c1/about/'},
{'loc': 'https://localhost/communities/c2/'},
{'loc': 'https://localhost/communities/c2/search'},
{'loc': 'https://localhost/communities/c2/about/'},
{'loc': 'https://localhost/communities/c3/'},
{'loc': 'https://localhost/communities/c3/search'},
{'loc': 'https://localhost/communities/c3/about/'},
{'loc': 'https://localhost/communities/c4/'},
{'loc': 'https://localhost/communities/c4/search'},
{'loc': 'https://localhost/communities/c4/about/'},
{'loc': 'https://localhost/communities/c5/'},
{'loc': 'https://localhost/communities/c5/search'},
{'loc': 'https://localhost/communities/c5/about/'},
{'loc': 'https://localhost/communities/zenodo/'},
{'loc': 'https://localhost/communities/zenodo/search'},
{'loc': 'https://localhost/communities/zenodo/about/'},
{'loc': 'https://localhost/communities/ecfunded/'},
{'loc': 'https://localhost/communities/ecfunded/search'},
{'loc': 'https://localhost/communities/ecfunded/about/'},
{'loc': 'https://localhost/communities/grants_comm/'},
{'loc': 'https://localhost/communities/grants_comm/search'},
{'loc': 'https://localhost/communities/grants_comm/about/'},
]
assert urls == expected
def test_sitemap_date_generator():
"""Test the sitemap timestamp generation."""
dt = datetime.datetime(2018, 1, 2, 3, 4, 5)
assert _sitemapdtformat(dt) == '2018-01-02T03:04:05Z'
dt = datetime.datetime(2018, 11, 12, 13, 14, 15)
assert _sitemapdtformat(dt) == '2018-11-12T13:14:15Z' |
678 | test make hovertext | import datetime
import logging
from textwrap import dedent
from typing import cast
import numpy as np
import pytest
from pytest import LogCaptureFixture
import optuna
from optuna.distributions import FloatDistribution
from optuna.study import create_study
from optuna.testing.visualization import prepare_study_with_trials
from optuna.trial import create_trial
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.visualization import is_available
from optuna.visualization._utils import _check_plot_args
from optuna.visualization._utils import _filter_nonfinite
from optuna.visualization._utils import _is_log_scale
from optuna.visualization._utils import _make_hovertext
def test_is_log_scale() -> None:
study = create_study()
study.add_trial(
create_trial(
value=0.0,
params={"param_linear": 1.0},
distributions={"param_linear": FloatDistribution(0.0, 3.0)},
)
)
study.add_trial(
create_trial(
value=2.0,
params={"param_linear": 2.0, "param_log": 1e-3},
distributions={
"param_linear": FloatDistribution(0.0, 3.0),
"param_log": FloatDistribution(1e-5, 1.0, log=True),
},
)
)
assert _is_log_scale(study.trials, "param_log")
assert not _is_log_scale(study.trials, "param_linear")
def _is_plotly_available() -> bool:
try:
import plotly # NOQA
available = True
except Exception:
available = False
return available
def test_visualization_is_available() -> None:
assert is_available() == _is_plotly_available()
def test_check_plot_args() -> None:
study = create_study(directions=["minimize", "minimize"])
with pytest.raises(ValueError):
_check_plot_args(study, None, "Objective Value")
with pytest.warns(UserWarning):
_check_plot_args(study, lambda t: cast(float, t.value), "Objective Value")
@pytest.mark.parametrize(
"value, expected", [(float("inf"), 1), (-float("inf"), 1), (float("nan"), 1), (0.0, 2)]
)
def test_filter_inf_trials(value: float, expected: int) -> None:
study = create_study()
study.add_trial(
create_trial(
value=0.0,
params={"x": 1.0},
distributions={"x": FloatDistribution(0.0, 1.0)},
)
)
study.add_trial(
create_trial(
value=value,
params={"x": 0.0},
distributions={"x": FloatDistribution(0.0, 1.0)},
)
)
trials = _filter_nonfinite(study.get_trials(states=(TrialState.COMPLETE,)))
assert len(trials) == expected
assert all([t.number == num for t, num in zip(trials, range(expected))])
@pytest.mark.parametrize(
"value,objective_selected,expected",
[
(float("inf"), 0, 2),
(-float("inf"), 0, 2),
(float("nan"), 0, 2),
(0.0, 0, 3),
(float("inf"), 1, 1),
(-float("inf"), 1, 1),
(float("nan"), 1, 1),
(0.0, 1, 3),
],
)
def test_filter_inf_trials_multiobjective(
value: float, objective_selected: int, expected: int
) -> None:
study = create_study(directions=["minimize", "maximize"])
study.add_trial(
create_trial(
values=[0.0, 1.0],
params={"x": 1.0},
distributions={"x": FloatDistribution(0.0, 1.0)},
)
)
study.add_trial(
create_trial(
values=[0.0, value],
params={"x": 0.0},
distributions={"x": FloatDistribution(0.0, 1.0)},
)
)
study.add_trial(
create_trial(
values=[value, value],
params={"x": 0.0},
distributions={"x": FloatDistribution(0.0, 1.0)},
)
)
def _target(t: FrozenTrial) -> float:
return t.values[objective_selected]
trials = _filter_nonfinite(study.get_trials(states=(TrialState.COMPLETE,)), target=_target)
assert len(trials) == expected
assert all([t.number == num for t, num in zip(trials, range(expected))])
@pytest.mark.parametrize("with_message", [True, False])
def test_filter_inf_trials_message(caplog: LogCaptureFixture, with_message: bool) -> None:
study = create_study()
study.add_trial(
create_trial(
value=0.0,
params={"x": 1.0},
distributions={"x": FloatDistribution(0.0, 1.0)},
)
)
study.add_trial(
create_trial(
value=float("inf"),
params={"x": 0.0},
distributions={"x": FloatDistribution(0.0, 1.0)},
)
)
optuna.logging.enable_propagation()
_filter_nonfinite(study.get_trials(states=(TrialState.COMPLETE,)), with_message=with_message)
msg = "Trial 1 is omitted in visualization because its objective value is inf or nan."
if with_message:
assert msg in caplog.text
n_filtered_as_inf = 0
for record in caplog.records:
if record.msg == msg:
assert record.levelno == logging.WARNING
n_filtered_as_inf += 1
assert n_filtered_as_inf == 1
else:
assert msg not in caplog.text
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_filter_nonfinite_with_invalid_target() -> None:
study = prepare_study_with_trials()
trials = study.get_trials(states=(TrialState.COMPLETE,))
with pytest.raises(ValueError):
_filter_nonfinite(trials, target=lambda t: "invalid target") # type: ignore
def METHOD_NAME() -> None:
trial_no_user_attrs = FrozenTrial(
number=0,
trial_id=0,
state=TrialState.COMPLETE,
value=0.2,
datetime_start=datetime.datetime.now(),
datetime_complete=datetime.datetime.now(),
params={"x": 10},
distributions={"x": FloatDistribution(5, 12)},
user_attrs={},
system_attrs={},
intermediate_values={},
)
assert (
_make_hovertext(trial_no_user_attrs)
== dedent(
"""
{
"number": 0,
"values": [
0.2
],
"params": {
"x": 10
}
}
"""
)
.strip()
.replace("\n", "<br>")
)
trial_user_attrs_valid_json = FrozenTrial(
number=0,
trial_id=0,
state=TrialState.COMPLETE,
value=0.2,
datetime_start=datetime.datetime.now(),
datetime_complete=datetime.datetime.now(),
params={"x": 10},
distributions={"x": FloatDistribution(5, 12)},
user_attrs={"a": 42, "b": 3.14},
system_attrs={},
intermediate_values={},
)
assert (
_make_hovertext(trial_user_attrs_valid_json)
== dedent(
"""
{
"number": 0,
"values": [
0.2
],
"params": {
"x": 10
},
"user_attrs": {
"a": 42,
"b": 3.14
}
}
"""
)
.strip()
.replace("\n", "<br>")
)
trial_user_attrs_invalid_json = FrozenTrial(
number=0,
trial_id=0,
state=TrialState.COMPLETE,
value=0.2,
datetime_start=datetime.datetime.now(),
datetime_complete=datetime.datetime.now(),
params={"x": 10},
distributions={"x": FloatDistribution(5, 12)},
user_attrs={"a": 42, "b": 3.14, "c": np.zeros(1), "d": np.nan},
system_attrs={},
intermediate_values={},
)
assert (
_make_hovertext(trial_user_attrs_invalid_json)
== dedent(
"""
{
"number": 0,
"values": [
0.2
],
"params": {
"x": 10
},
"user_attrs": {
"a": 42,
"b": 3.14,
"c": "[0.]",
"d": NaN
}
}
"""
)
.strip()
.replace("\n", "<br>")
) |
679 | create vm | #!/usr/bin/env python3
# group: rw backing
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Creator/Owner: Kevin Wolf <[email protected]>
#
# Some tests for short backing files and short overlays
import iotests
iotests.script_initialize(supported_fmts=['qcow2'],
supported_platforms=['linux'],
unsupported_imgopts=['refcount_bits', 'compat'])
size_short = 1 * 1024 * 1024
size_long = 2 * 1024 * 1024
size_diff = size_long - size_short
def create_chain() -> None:
iotests.qemu_img_create('-f', iotests.imgfmt, base, str(size_long))
iotests.qemu_img_create('-f', iotests.imgfmt, '-b', base,
'-F', iotests.imgfmt, mid, str(size_short))
iotests.qemu_img_create('-f', iotests.imgfmt, '-b', mid,
'-F', iotests.imgfmt, top, str(size_long))
iotests.qemu_io_log('-c', 'write -P 1 0 %d' % size_long, base)
def METHOD_NAME() -> iotests.VM:
vm = iotests.VM()
vm.add_blockdev('file,filename=%s,node-name=base-file' % base)
vm.add_blockdev('%s,file=base-file,node-name=base' % iotests.imgfmt)
vm.add_blockdev('file,filename=%s,node-name=mid-file' % mid)
vm.add_blockdev('%s,file=mid-file,node-name=mid,backing=base'
% iotests.imgfmt)
vm.add_drive(top, 'backing=mid,node-name=top')
return vm
with iotests.FilePath('base') as base, \
iotests.FilePath('mid') as mid, \
iotests.FilePath('top') as top:
iotests.log('== Commit tests ==')
create_chain()
iotests.log('=== Check visible data ===')
iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, top)
iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), top)
iotests.log('=== Checking allocation status ===')
iotests.qemu_io_log('-c', 'alloc 0 %d' % size_short,
'-c', 'alloc %d %d' % (size_short, size_diff),
base)
iotests.qemu_io_log('-c', 'alloc 0 %d' % size_short,
'-c', 'alloc %d %d' % (size_short, size_diff),
mid)
iotests.qemu_io_log('-c', 'alloc 0 %d' % size_short,
'-c', 'alloc %d %d' % (size_short, size_diff),
top)
iotests.log('=== Checking map ===')
iotests.qemu_img_log('map', '--output=json', base)
iotests.qemu_img_log('map', '--output=human', base)
iotests.qemu_img_log('map', '--output=json', mid)
iotests.qemu_img_log('map', '--output=human', mid)
iotests.qemu_img_log('map', '--output=json', top)
iotests.qemu_img_log('map', '--output=human', top)
iotests.log('=== Testing qemu-img commit (top -> mid) ===')
iotests.qemu_img_log('commit', top)
iotests.img_info_log(mid)
iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, mid)
iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), mid)
iotests.log('=== Testing HMP commit (top -> mid) ===')
create_chain()
with METHOD_NAME() as vm:
vm.launch()
vm.qmp_log('human-monitor-command', command_line='commit drive0')
iotests.img_info_log(mid)
iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, mid)
iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), mid)
iotests.log('=== Testing QMP active commit (top -> mid) ===')
create_chain()
with METHOD_NAME() as vm:
vm.launch()
vm.qmp_log('block-commit', device='top', base_node='mid',
job_id='job0', auto_dismiss=False)
vm.run_job('job0', wait=5)
iotests.img_info_log(mid)
iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, mid)
iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), mid)
iotests.log('=== Testing qemu-img commit (top -> base) ===')
create_chain()
iotests.qemu_img_log('commit', '-b', base, top)
iotests.img_info_log(base)
iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, base)
iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), base)
iotests.log('=== Testing QMP active commit (top -> base) ===')
create_chain()
with METHOD_NAME() as vm:
vm.launch()
vm.qmp_log('block-commit', device='top', base_node='base',
job_id='job0', auto_dismiss=False)
vm.run_job('job0', wait=5)
iotests.img_info_log(mid)
iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, base)
iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), base)
iotests.log('== Resize tests ==')
# Use different sizes for different allocation modes:
#
# We want to have at least one test where 32 bit truncation in the size of
# the overlapping area becomes visible. This is covered by the
# prealloc='off' case (1G to 6G is an overlap of 5G).
#
# However, we can only do this for modes that don't preallocate data
# because otherwise we might run out of space on the test host.
#
# We also want to test some unaligned combinations.
for (prealloc, base_size, top_size_old, top_size_new, off) in [
('off', '6G', '1G', '8G', '5G'),
('metadata', '32G', '30G', '33G', '31G'),
('falloc', '10M', '5M', '15M', '9M'),
('full', '16M', '8M', '12M', '11M'),
('off', '384k', '253k', '512k', '253k'),
('off', '400k', '256k', '512k', '336k'),
('off', '512k', '256k', '500k', '436k')]:
iotests.log('=== preallocation=%s ===' % prealloc)
iotests.qemu_img_create('-f', iotests.imgfmt, base, base_size)
iotests.qemu_img_create('-f', iotests.imgfmt, '-b', base,
'-F', iotests.imgfmt, top, top_size_old)
iotests.qemu_io_log('-c', 'write -P 1 %s 64k' % off, base)
# After this, top_size_old to base_size should be allocated/zeroed.
#
# In theory, leaving base_size to top_size_new unallocated would be
# correct, but in practice, if we zero out anything, we zero out
# everything up to top_size_new.
iotests.qemu_img_log('resize', '-f', iotests.imgfmt,
'--preallocation', prealloc, top, top_size_new)
iotests.qemu_io_log('-c', 'read -P 0 %s 64k' % off, top)
iotests.qemu_io_log('-c', 'map', top)
iotests.qemu_img_log('map', '--output=json', top) |
680 | main | #!/usr/bin/env python3
############################################################
# Program is part of MintPy #
# Copyright (c) 2013, Zhang Yunjun, Heresh Fattahi #
# Author: Antonio Valentino, Joshua Zahner, Aug 2022 #
############################################################
import os
import sys
from mintpy.utils.arg_utils import create_argument_parser
############################################################
EXAMPLE = """example:
cd $PROJECT_NAME/mintpy/geo
save_kmz_timeseries.py geo_timeseries_ERA5_ramp_demErr.h5
save_kmz_timeseries.py geo_timeseries_ERA5_ramp_demErr.h5 -v -5 5 --wrap
save_kmz_timeseries.py timeseries_ERA5_demErr.h5 --vel velocity.h5 --tcoh temporalCoherence.h5 --mask maskTempCoh.h5
"""
def create_parser(subparsers=None):
    synopsis = 'Generate Google Earth KMZ file for time-series file.'
epilog = EXAMPLE
name = __name__.split('.')[-1]
parser = create_argument_parser(
name, synopsis=synopsis, description=synopsis, epilog=epilog, subparsers=subparsers)
args = parser.add_argument_group('Input files', 'File/Dataset to display')
args.add_argument('ts_file', metavar='timeseries_file', help='Timeseries file to generate KML for')
args.add_argument('--vel', dest='vel_file', metavar='FILE',
help='Velocity file, used for the color of dot')
args.add_argument('--tcoh', dest='tcoh_file', metavar='FILE',
help='temporal coherence file, used for stat info')
args.add_argument('--mask', dest='mask_file', metavar='FILE',
help='Mask file')
args.add_argument('-o','--output', dest='outfile', help='Output KMZ file name.')
opts = parser.add_argument_group('Display options', 'configurations for the display')
opts.add_argument('--steps', type=int, nargs=3, default=[20, 5, 2],
help='list of steps for output pixel (default: %(default)s).\n'
'Set to [20, 5, 0] to skip the 3rd high-resolution level to reduce file size.')
opts.add_argument('--level-of-details','--lods', dest='lods', type=int, nargs=4, default=[0, 1500, 4000, -1],
                      help='list of level-of-detail values that determine the visible range while browsing. '
'Default: 0, 1500, 4000, -1.\n'
'Ref: https://developers.google.com/kml/documentation/kml_21tutorial')
opts.add_argument('--vlim','-v', dest='vlim', nargs=2, metavar=('VMIN', 'VMAX'), type=float,
help='min/max range in cm/yr for color coding.')
opts.add_argument('--wrap', dest='wrap', action='store_true',
help='re-wrap data to [VMIN, VMAX) for color coding.')
opts.add_argument('--colormap','-c', dest='cmap_name', default='jet',
                      help='colormap used for display, e.g. jet, RdBu, hsv, jet_r, temperature, viridis, etc.\n'
'More details at https://mintpy.readthedocs.io/en/latest/api/colormaps/')
defo = parser.add_argument_group('HD for deforming areas', 'High resolution output for deforming areas')
defo.add_argument('--cutoff', dest='cutoff', type=int, default=3,
help='choose points with velocity >= cutoff * MAD. Default: 3.')
defo.add_argument('--min-percentage','--min-perc', dest='min_percentage', type=float, default=0.2,
help='choose boxes with >= min percentage of pixels are deforming. Default: 0.2.')
parser.add_argument('--kk','--keep-kml','--keep-kml-file', dest='keep_kml_file', action='store_true',
help='Do not remove KML and data/resource files after compressing into KMZ file.')
return parser
def cmd_line_parse(iargs=None):
# parse
parser = create_parser()
inps = parser.parse_args(args=iargs)
# import
from mintpy.utils import readfile
# check: input file coordinates system (required in geo)
atr = readfile.read_attribute(inps.ts_file)
if "Y_FIRST" not in atr.keys():
raise ValueError(f"input file {inps.ts_file} is NOT geocoded")
    # default: auxiliary files
    inps = get_aux_filename(inps)
    # check: existence of auxiliary files
    for fname in [inps.vel_file, inps.tcoh_file, inps.mask_file]:
        if not os.path.isfile(fname):
            raise FileNotFoundError(f'required auxiliary file {fname} NOT found!')
return inps
def get_aux_filename(inps):
"""Get auxliary files' default filename."""
ts_dir = os.path.dirname(inps.ts_file)
ts_prefix = os.path.basename(inps.ts_file).split('timeseries')[0]
fbase = os.path.join(ts_dir, ts_prefix)
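    # e.g. for geo_timeseries_ERA5_ramp_demErr.h5 the prefix is 'geo_', so the defaults
    # become geo_velocity.h5, geo_temporalCoherence.h5 and geo_maskTempCoh.h5 in ts_dir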
inps.vel_file = inps.vel_file if inps.vel_file else f'{fbase}velocity.h5'
inps.tcoh_file = inps.tcoh_file if inps.tcoh_file else f'{fbase}temporalCoherence.h5'
inps.mask_file = inps.mask_file if inps.mask_file else f'{fbase}maskTempCoh.h5'
return inps
######################################################################################
def METHOD_NAME(iargs=None):
# parse
inps = cmd_line_parse(iargs)
# import
from mintpy.save_kmz_timeseries import save_kmz_timeseries
# run
save_kmz_timeseries(inps)
######################################################################################
if __name__ == '__main__':
METHOD_NAME(sys.argv[1:]) |
681 | get unit spike train | from pathlib import Path
import numpy as np
from spikeinterface.core import BaseSorting, BaseSortingSegment
from spikeinterface.core.core_tools import define_function_from_class
try:
import h5py
HAVE_HS2SX = True
except ImportError:
HAVE_HS2SX = False
class HerdingspikesSortingExtractor(BaseSorting):
"""Load HerdingSpikes format data as a sorting extractor.
Parameters
----------
    file_path : str or Path
        Path to the HerdingSpikes HDF5 file.
load_unit_info : bool, default: True
Whether to load the unit info from the file.
Returns
-------
extractor : HerdingSpikesSortingExtractor
The loaded data.
"""
extractor_name = "HS2Sorting"
installed = HAVE_HS2SX # check at class level if installed or not
mode = "file"
installation_mesg = (
"To use the HS2SortingExtractor install h5py: \n\n pip install h5py\n\n" # error message when not installed
)
name = "herdingspikes"
def __init__(self, file_path, load_unit_info=True):
assert self.installed, self.installation_mesg
self._recording_file = file_path
self._rf = h5py.File(self._recording_file, mode="r")
if "Sampling" in self._rf:
if self._rf["Sampling"][()] == 0:
sampling_frequency = None
else:
sampling_frequency = self._rf["Sampling"][()]
spike_ids = self._rf["cluster_id"][()]
unit_ids = np.unique(spike_ids)
spike_times = self._rf["times"][()]
if load_unit_info:
self.load_unit_info()
BaseSorting.__init__(self, sampling_frequency, unit_ids)
self.add_sorting_segment(HerdingspikesSortingSegment(unit_ids, spike_times, spike_ids))
self._kwargs = {"file_path": str(Path(file_path).absolute()), "load_unit_info": load_unit_info}
self.extra_requirements.append("h5py")
def load_unit_info(self):
# TODO
"""
if 'centres' in self._rf.keys() and len(self._spike_times) > 0:
self._unit_locs = self._rf['centres'][()] # cache for faster access
for u_i, unit_id in enumerate(self._unit_ids):
self.set_unit_property(unit_id, property_name='unit_location', value=self._unit_locs[u_i])
inds = [] # get these only once
for unit_id in self._unit_ids:
inds.append(np.where(self._cluster_id == unit_id)[0])
if 'data' in self._rf.keys() and len(self._spike_times) > 0:
d = self._rf['data'][()]
for i, unit_id in enumerate(self._unit_ids):
self.set_unit_spike_features(unit_id, 'spike_location', d[:, inds[i]].T)
if 'ch' in self._rf.keys() and len(self._spike_times) > 0:
d = self._rf['ch'][()]
for i, unit_id in enumerate(self._unit_ids):
self.set_unit_spike_features(unit_id, 'max_channel', d[inds[i]])
"""
# alias for backward compatiblity
HS2SortingExtractor = HerdingspikesSortingExtractor
class HerdingspikesSortingSegment(BaseSortingSegment):
def __init__(self, unit_ids, spike_times, spike_ids):
BaseSortingSegment.__init__(self)
# spike_times is a dict
self._unit_ids = list(unit_ids)
self._spike_times = spike_times
self._spike_ids = spike_ids
def METHOD_NAME(self, unit_id, start_frame, end_frame):
mask = self._spike_ids == unit_id
times = self._spike_times[mask]
if start_frame is not None:
times = times[times >= start_frame]
if end_frame is not None:
times = times[times < end_frame]
return times
"""
@staticmethod
def write_sorting(sorting, save_path):
assert HAVE_HS2SX, HS2SortingExtractor.installation_mesg
unit_ids = sorting.get_unit_ids()
times_list = []
labels_list = []
for i in range(len(unit_ids)):
unit = unit_ids[i]
times = sorting.get_unit_spike_train(unit_id=unit)
times_list.append(times)
labels_list.append(np.ones(times.shape, dtype=int) * unit)
all_times = np.concatenate(times_list)
all_labels = np.concatenate(labels_list)
rf = h5py.File(save_path, mode='w')
if sorting.get_sampling_frequency() is not None:
rf.create_dataset("Sampling", data=sorting.get_sampling_frequency())
else:
rf.create_dataset("Sampling", data=0)
if 'unit_location' in sorting.get_shared_unit_property_names():
spike_centres = [sorting.get_unit_property(u, 'unit_location') for u in sorting.get_unit_ids()]
spike_centres = np.array(spike_centres)
rf.create_dataset("centres", data=spike_centres)
if 'spike_location' in sorting.get_shared_unit_spike_feature_names():
spike_loc_x = []
spike_loc_y = []
for u in sorting.get_unit_ids():
l = sorting.get_unit_spike_features(u, 'spike_location')
spike_loc_x.append(l[:, 0])
spike_loc_y.append(l[:, 1])
spike_loc = np.vstack((np.concatenate(spike_loc_x), np.concatenate(spike_loc_y)))
rf.create_dataset("data", data=spike_loc)
if 'max_channel' in sorting.get_shared_unit_spike_feature_names():
spike_max_channel = np.concatenate(
[sorting.get_unit_spike_features(u, 'max_channel') for u in sorting.get_unit_ids()])
rf.create_dataset("ch", data=spike_max_channel)
rf.create_dataset("times", data=all_times)
rf.create_dataset("cluster_id", data=all_labels)
rf.close()
"""
read_herdingspikes = define_function_from_class(source_class=HerdingspikesSortingExtractor, name="read_herdingspikes") |
682 | test issue as dict | #
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
from unittest import mock
import testtools
import bandit
from bandit.core import constants
from bandit.core import issue
class IssueTests(testtools.TestCase):
def test_issue_create(self):
new_issue = _get_issue_instance()
self.assertIsInstance(new_issue, issue.Issue)
def test_issue_str(self):
test_issue = _get_issue_instance()
expect = (
"Issue: 'Test issue' from B999:bandit_plugin:"
" CWE: %s,"
" Severity: MEDIUM "
"Confidence: MEDIUM at code.py:1:8"
)
self.assertEqual(
expect % str(issue.Cwe(issue.Cwe.MULTIPLE_BINDS)), str(test_issue)
)
def METHOD_NAME(self):
test_issue = _get_issue_instance()
test_issue_dict = test_issue.as_dict(with_code=False)
self.assertIsInstance(test_issue_dict, dict)
self.assertEqual("code.py", test_issue_dict["filename"])
self.assertEqual("bandit_plugin", test_issue_dict["test_name"])
self.assertEqual("B999", test_issue_dict["test_id"])
self.assertEqual("MEDIUM", test_issue_dict["issue_severity"])
self.assertEqual(
{
"id": 605,
"link": "https://cwe.mitre.org/data/definitions/605.html",
},
test_issue_dict["issue_cwe"],
)
self.assertEqual("MEDIUM", test_issue_dict["issue_confidence"])
self.assertEqual("Test issue", test_issue_dict["issue_text"])
self.assertEqual(1, test_issue_dict["line_number"])
self.assertEqual([], test_issue_dict["line_range"])
self.assertEqual(8, test_issue_dict["col_offset"])
self.assertEqual(16, test_issue_dict["end_col_offset"])
def test_issue_filter_severity(self):
levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
issues = [_get_issue_instance(level, bandit.HIGH) for level in levels]
for level in levels:
rank = constants.RANKING.index(level)
for i in issues:
test = constants.RANKING.index(i.severity)
result = i.filter(level, bandit.UNDEFINED)
self.assertTrue((test >= rank) == result)
def test_issue_filter_confidence(self):
levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
issues = [_get_issue_instance(bandit.HIGH, level) for level in levels]
for level in levels:
rank = constants.RANKING.index(level)
for i in issues:
test = constants.RANKING.index(i.confidence)
result = i.filter(bandit.UNDEFINED, level)
self.assertTrue((test >= rank) == result)
def test_matches_issue(self):
issue_a = _get_issue_instance()
issue_b = _get_issue_instance(severity=bandit.HIGH)
issue_c = _get_issue_instance(confidence=bandit.LOW)
issue_d = _get_issue_instance()
issue_d.text = "ABCD"
issue_e = _get_issue_instance()
issue_e.fname = "file1.py"
issue_f = issue_a
issue_g = _get_issue_instance()
issue_g.test = "ZZZZ"
issue_h = issue_a
issue_h.lineno = 12345
# positive tests
self.assertEqual(issue_a, issue_a)
self.assertEqual(issue_a, issue_f)
self.assertEqual(issue_f, issue_a)
# severity doesn't match
self.assertNotEqual(issue_a, issue_b)
# confidence doesn't match
self.assertNotEqual(issue_a, issue_c)
# text doesn't match
self.assertNotEqual(issue_a, issue_d)
# filename doesn't match
self.assertNotEqual(issue_a, issue_e)
# plugin name doesn't match
self.assertNotEqual(issue_a, issue_g)
# line number doesn't match but should pass because we don't test that
self.assertEqual(issue_a, issue_h)
@mock.patch("linecache.getline")
def test_get_code(self, getline):
getline.return_value = b"\x08\x30"
new_issue = issue.Issue(
bandit.MEDIUM, cwe=issue.Cwe.MULTIPLE_BINDS, lineno=1
)
try:
new_issue.get_code()
except UnicodeDecodeError:
self.fail("Bytes not properly decoded in issue.get_code()")
def _get_issue_instance(
severity=bandit.MEDIUM,
cwe=issue.Cwe.MULTIPLE_BINDS,
confidence=bandit.MEDIUM,
):
new_issue = issue.Issue(severity, cwe, confidence, "Test issue")
new_issue.fname = "code.py"
new_issue.test = "bandit_plugin"
new_issue.test_id = "B999"
new_issue.lineno = 1
new_issue.col_offset = 8
new_issue.end_col_offset = 16
return new_issue |
683 | check storage needs cleanup | """Handle storage retention and usage."""
import logging
import shutil
import threading
from pathlib import Path
from peewee import fn
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings
logger = logging.getLogger(__name__)
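# peewee expression for per-segment bandwidth: segment size divided by segment duration in seconds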
bandwidth_equation = Recordings.segment_size / (
Recordings.end_time - Recordings.start_time
)
class StorageMaintainer(threading.Thread):
"""Maintain frigates recording storage."""
def __init__(self, config: FrigateConfig, stop_event) -> None:
threading.Thread.__init__(self)
self.name = "storage_maintainer"
self.config = config
self.stop_event = stop_event
self.camera_storage_stats: dict[str, dict] = {}
def calculate_camera_bandwidth(self) -> None:
"""Calculate an average MB/hr for each camera."""
for camera in self.config.cameras.keys():
# cameras with < 50 segments should be refreshed to keep size accurate
# when few segments are available
if self.camera_storage_stats.get(camera, {}).get("needs_refresh", True):
self.camera_storage_stats[camera] = {
"needs_refresh": (
Recordings.select(fn.COUNT(Recordings.id))
.where(Recordings.camera == camera, Recordings.segment_size > 0)
.scalar()
< 50
)
}
# calculate MB/hr
try:
bandwidth = round(
Recordings.select(fn.AVG(bandwidth_equation))
.where(Recordings.camera == camera, Recordings.segment_size > 0)
.limit(100)
.scalar()
* 3600,
2,
)
except TypeError:
bandwidth = 0
self.camera_storage_stats[camera]["bandwidth"] = bandwidth
logger.debug(f"{camera} has a bandwidth of {bandwidth} MiB/hr.")
def calculate_camera_usages(self) -> dict[str, dict]:
"""Calculate the storage usage of each camera."""
usages: dict[str, dict] = {}
for camera in self.config.cameras.keys():
camera_storage = (
Recordings.select(fn.SUM(Recordings.segment_size))
.where(Recordings.camera == camera, Recordings.segment_size != 0)
.scalar()
)
usages[camera] = {
"usage": camera_storage,
"bandwidth": self.camera_storage_stats.get(camera, {}).get(
"bandwidth", 0
),
}
return usages
def METHOD_NAME(self) -> bool:
"""Return if storage needs cleanup."""
# currently runs cleanup if less than 1 hour of space is left
# disk_usage should not spin up disks
hourly_bandwidth = sum(
[b["bandwidth"] for b in self.camera_storage_stats.values()]
)
remaining_storage = round(shutil.disk_usage(RECORD_DIR).free / pow(2, 20), 1)
logger.debug(
f"Storage cleanup check: {hourly_bandwidth} hourly with remaining storage: {remaining_storage}."
)
return remaining_storage < hourly_bandwidth
def reduce_storage_consumption(self) -> None:
"""Remove oldest hour of recordings."""
logger.debug("Starting storage cleanup.")
deleted_segments_size = 0
hourly_bandwidth = sum(
[b["bandwidth"] for b in self.camera_storage_stats.values()]
)
recordings: Recordings = Recordings.select().order_by(
Recordings.start_time.asc()
)
retained_events: Event = (
Event.select()
.where(
Event.retain_indefinitely == True,
Event.has_clip,
)
.order_by(Event.start_time.asc())
.objects()
)
event_start = 0
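        # index of the first retained event that could still overlap the current recording segment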
deleted_recordings = set()
for recording in recordings.objects().iterator():
# check if 1 hour of storage has been reclaimed
if deleted_segments_size > hourly_bandwidth:
break
keep = False
# Now look for a reason to keep this recording segment
for idx in range(event_start, len(retained_events)):
event = retained_events[idx]
# if the event starts in the future, stop checking events
# and let this recording segment expire
if event.start_time > recording.end_time:
keep = False
break
# if the event is in progress or ends after the recording starts, keep it
# and stop looking at events
if event.end_time is None or event.end_time >= recording.start_time:
keep = True
break
# if the event ends before this recording segment starts, skip
# this event and check the next event for an overlap.
# since the events and recordings are sorted, we can skip events
# that end before the previous recording segment started on future segments
if event.end_time < recording.start_time:
event_start = idx
# Delete recordings not retained indefinitely
if not keep:
deleted_segments_size += recording.segment_size
Path(recording.path).unlink(missing_ok=True)
deleted_recordings.add(recording.id)
# check if need to delete retained segments
if deleted_segments_size < hourly_bandwidth:
logger.error(
f"Could not clear {hourly_bandwidth} MB, currently {deleted_segments_size} MB have been cleared. Retained recordings must be deleted."
)
recordings = Recordings.select().order_by(Recordings.start_time.asc())
for recording in recordings.objects().iterator():
if deleted_segments_size > hourly_bandwidth:
break
deleted_segments_size += recording.segment_size
Path(recording.path).unlink(missing_ok=True)
deleted_recordings.add(recording.id)
logger.debug(f"Expiring {len(deleted_recordings)} recordings")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_recordings_list = list(deleted_recordings)
for i in range(0, len(deleted_recordings_list), max_deletes):
Recordings.delete().where(
Recordings.id << deleted_recordings_list[i : i + max_deletes]
).execute()
def run(self):
"""Check every 5 minutes if storage needs to be cleaned up."""
self.calculate_camera_bandwidth()
while not self.stop_event.wait(300):
            if not self.camera_storage_stats or any(
                r["needs_refresh"] for r in self.camera_storage_stats.values()
            ):
self.calculate_camera_bandwidth()
logger.debug(f"Default camera bandwidths: {self.camera_storage_stats}.")
if self.METHOD_NAME():
logger.info(
"Less than 1 hour of recording space left, running storage maintenance..."
)
self.reduce_storage_consumption()
logger.info("Exiting storage maintainer...") |
684 | forward | import warnings
from collections import defaultdict
from typing import Dict, List, Optional
import torch
from torch import Tensor
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.module_dict import ModuleDict
from torch_geometric.typing import Adj, EdgeType, NodeType
from torch_geometric.utils.hetero import check_add_self_loops
def group(xs: List[Tensor], aggr: Optional[str]) -> Optional[Tensor]:
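    # aggr=None stacks the per-relation outputs, "cat" concatenates along the feature dim,
    # otherwise reduce with the matching torch op (sum/mean/min/max)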
if len(xs) == 0:
return None
elif aggr is None:
return torch.stack(xs, dim=1)
elif len(xs) == 1:
return xs[0]
elif aggr == "cat":
return torch.cat(xs, dim=-1)
else:
out = torch.stack(xs, dim=0)
out = getattr(torch, aggr)(out, dim=0)
out = out[0] if isinstance(out, tuple) else out
return out
class HeteroConv(torch.nn.Module):
r"""A generic wrapper for computing graph convolution on heterogeneous
graphs.
This layer will pass messages from source nodes to target nodes based on
the bipartite GNN layer given for a specific edge type.
If multiple relations point to the same destination, their results will be
aggregated according to :attr:`aggr`.
In comparison to :meth:`torch_geometric.nn.to_hetero`, this layer is
especially useful if you want to apply different message passing modules
for different edge types.
.. code-block:: python
hetero_conv = HeteroConv({
('paper', 'cites', 'paper'): GCNConv(-1, 64),
('author', 'writes', 'paper'): SAGEConv((-1, -1), 64),
('paper', 'written_by', 'author'): GATConv((-1, -1), 64),
}, aggr='sum')
out_dict = hetero_conv(x_dict, edge_index_dict)
print(list(out_dict.keys()))
>>> ['paper', 'author']
Args:
convs (Dict[Tuple[str, str, str], MessagePassing]): A dictionary
holding a bipartite
:class:`~torch_geometric.nn.conv.MessagePassing` layer for each
individual edge type.
aggr (str, optional): The aggregation scheme to use for grouping node
embeddings generated by different relations
(:obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
:obj:`"cat"`, :obj:`None`). (default: :obj:`"sum"`)
"""
def __init__(
self,
convs: Dict[EdgeType, MessagePassing],
aggr: Optional[str] = "sum",
):
super().__init__()
for edge_type, module in convs.items():
check_add_self_loops(module, [edge_type])
src_node_types = set([key[0] for key in convs.keys()])
dst_node_types = set([key[-1] for key in convs.keys()])
if len(src_node_types - dst_node_types) > 0:
warnings.warn(
f"There exist node types ({src_node_types - dst_node_types}) "
f"whose representations do not get updated during message "
f"passing as they do not occur as destination type in any "
f"edge type. This may lead to unexpected behavior.")
self.convs = ModuleDict({'__'.join(k): v for k, v in convs.items()})
self.aggr = aggr
def reset_parameters(self):
r"""Resets all learnable parameters of the module."""
for conv in self.convs.values():
conv.reset_parameters()
def METHOD_NAME(
self,
x_dict: Dict[NodeType, Tensor],
edge_index_dict: Dict[EdgeType, Adj],
*args_dict,
**kwargs_dict,
) -> Dict[NodeType, Tensor]:
r"""Runs the forward pass of the module.
Args:
x_dict (Dict[str, torch.Tensor]): A dictionary holding node feature
information for each individual node type.
edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A
dictionary holding graph connectivity information for each
individual edge type, either as a :class:`torch.Tensor` of
shape :obj:`[2, num_edges]` or a
:class:`torch_sparse.SparseTensor`.
            *args_dict (optional): Additional forward arguments of individual
:class:`torch_geometric.nn.conv.MessagePassing` layers.
**kwargs_dict (optional): Additional forward arguments of
individual :class:`torch_geometric.nn.conv.MessagePassing`
layers.
For example, if a specific GNN layer at edge type
:obj:`edge_type` expects edge attributes :obj:`edge_attr` as a
forward argument, then you can pass them to
:meth:`~torch_geometric.nn.conv.HeteroConv.forward` via
:obj:`edge_attr_dict = { edge_type: edge_attr }`.
"""
out_dict = defaultdict(list)
for edge_type, edge_index in edge_index_dict.items():
src, rel, dst = edge_type
str_edge_type = '__'.join(edge_type)
if str_edge_type not in self.convs:
continue
args = []
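            # gather positional args for this edge type: exact edge-type key, shared node-type key, or a (src, dst) pair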
for value_dict in args_dict:
if edge_type in value_dict:
args.append(value_dict[edge_type])
elif src == dst and src in value_dict:
args.append(value_dict[src])
elif src in value_dict or dst in value_dict:
args.append(
(value_dict.get(src, None), value_dict.get(dst, None)))
kwargs = {}
for arg, value_dict in kwargs_dict.items():
arg = arg[:-5] # `{*}_dict`
if edge_type in value_dict:
kwargs[arg] = value_dict[edge_type]
elif src == dst and src in value_dict:
kwargs[arg] = value_dict[src]
elif src in value_dict or dst in value_dict:
kwargs[arg] = (value_dict.get(src, None),
value_dict.get(dst, None))
conv = self.convs[str_edge_type]
if src == dst:
out = conv(x_dict[src], edge_index, *args, **kwargs)
else:
out = conv((x_dict[src], x_dict[dst]), edge_index, *args,
**kwargs)
out_dict[dst].append(out)
for key, value in out_dict.items():
out_dict[key] = group(value, self.aggr)
return out_dict
def __repr__(self) -> str:
return f'{self.__class__.__name__}(num_relations={len(self.convs)})' |
685 | write ctl | """
Solaris Platform Module (Incomplete)
"""
# Copyright (C) 2007 Invisigoth - See LICENSE file for details
import os
import struct
import array
# Control codes (long values) for messages written to ctl and lwpctl files.
PCNULL = 0 # null request, advance to next message */
PCSTOP = 1 # direct process or lwp to stop and wait for stop */
PCDSTOP = 2 # direct process or lwp to stop */
PCWSTOP = 3 # wait for process or lwp to stop, no timeout */
PCTWSTOP = 4 # wait for stop, with long millisecond timeout arg */
PCRUN = 5 # make process/lwp runnable, w/ long flags argument */
PCCSIG = 6 # clear current signal from lwp */
PCCFAULT = 7 # clear current fault from lwp */
PCSSIG = 8 # set current signal from siginfo_t argument */
PCKILL = 9 # post a signal to process/lwp, long argument */
PCUNKILL = 10 # delete a pending signal from process/lwp, long arg */
PCSHOLD = 11 # set lwp signal mask from sigset_t argument */
PCSTRACE = 12 # set traced signal set from sigset_t argument */
PCSFAULT = 13 # set traced fault set from fltset_t argument */
PCSENTRY = 14 # set traced syscall entry set from sysset_t arg */
PCSEXIT = 15 # set traced syscall exit set from sysset_t arg */
PCSET = 16 # set modes from long argument */
PCUNSET = 17 # unset modes from long argument */
PCSREG = 18 # set lwp general registers from prgregset_t arg */
PCSFPREG = 19 # set lwp floating-point registers from prfpregset_t */
PCSXREG = 20 # set lwp extra registers from prxregset_t arg */
PCNICE = 21 # set nice priority from long argument */
PCSVADDR = 22 # set %pc virtual address from long argument */
PCWATCH = 23 # set/unset watched memory area from prwatch_t arg */
PCAGENT = 24 # create agent lwp with regs from prgregset_t arg */
PCREAD = 25 # read from the address space via priovec_t arg */
PCWRITE = 26 # write to the address space via priovec_t arg */
PCSCRED = 27 # set process credentials from prcred_t argument */
PCSASRS = 28 # set ancillary state registers from asrset_t arg */
PCSPRIV = 29 # set process privileges from prpriv_t argument */
PCSZONE = 30 # set zoneid from zoneid_t argument */
PCSCREDX = 31 # as PCSCRED but with supplemental groups */
# PCRUN long operand flags.
PRCSIG = 0x01# clear current signal, if any */
PRCFAULT = 0x02# clear current fault, if any */
PRSTEP = 0x04# direct the lwp to single-step */
PRSABORT = 0x08# abort syscall, if in syscall */
PRSTOP = 0x10# set directed stop request */
# Status flags
PR_STOPPED = 0x00000001# lwp is stopped */
PR_ISTOP = 0x00000002# lwp is stopped on an event of interest */
PR_DSTOP = 0x00000004# lwp has a stop directive in effect */
PR_STEP = 0x00000008# lwp has a single-step directive in effect */
PR_ASLEEP = 0x00000010# lwp is sleeping in a system call */
PR_PCINVAL = 0x00000020# contents of pr_instr undefined */
PR_ASLWP = 0x00000040# obsolete flag; never set */
PR_AGENT = 0x00000080# this lwp is the /proc agent lwp */
PR_DETACH = 0x00000100# this is a detached lwp */
PR_DAEMON = 0x00000200# this is a daemon lwp */
# The following flags apply to the process, not to an individual lwp */
PR_ISSYS = 0x00001000# this is a system process */
PR_VFORKP = 0x00002000# process is the parent of a vfork()d child */
PR_ORPHAN = 0x00004000# process's process group is orphaned */
# The following process flags are modes settable by PCSET/PCUNSET */
PR_FORK = 0x00100000# inherit-on-fork is in effect */
PR_RLC = 0x00200000# run-on-last-close is in effect */
PR_KLC = 0x00400000# kill-on-last-close is in effect */
PR_ASYNC = 0x00800000# asynchronous-stop is in effect */
PR_MSACCT = 0x01000000# micro-state usage accounting is in effect */
PR_BPTADJ = 0x02000000# breakpoint trap pc adjustment is in effect */
PR_PTRACE = 0x04000000# ptrace-compatibility mode is in effect */
PR_MSFORK = 0x08000000# micro-state accounting inherited on fork */
PR_IDLE = 0x10000000# lwp is a cpu's idle thread */
# Permissions...
MA_READ = 0x04# readable by the traced process */
MA_WRITE = 0x02# writable by the traced process */
MA_EXEC = 0x01# executable by the traced process */
MA_SHARED = 0x08# changes are shared by mapped object */
MA_ANON = 0x40# anonymous memory (e.g. /dev/zero) */
MA_ISM = 0x80# intimate shared mem (shared MMU resources) */
MA_NORESERVE = 0x100# mapped with MAP_NORESERVE */
MA_SHM = 0x200# System V shared memory */
MA_RESERVED1 = 0x400# reserved for future use */
class SolarisMixin:
def initMixin(self):
#import sunprocfs
self.ctl = None
def platformGetRegs(self):
pid = self.getPid()
#def platformGetThreads(self):
#ret = []
#for name in os.listdir("/proc/%d/lwp" % self.pid):
#ret.append(int(name))
#return ret
def platformAttach(self, pid):
# TODO: uck. make a context handler pls
self.ctl = open("/proc/%d/ctl" % pid, "ab")
self.ctl.write(struct.pack("<L", PRSTOP))
def platformContinue(self):
"""
Tell the process to continue running
"""
self.METHOD_NAME(struct.pack("<LL", PCRUN, 0))
def platformWait(self):
"""
wait for the process to do someting "interesting"
"""
self.METHOD_NAME(struct.pack("<L", PCWSTOP))
with open("/proc/%d/psinfo" % self.pid, "rb") as f:
return f.read()
def METHOD_NAME(self, bytes):
os.write(self.ctl.fileno(), bytes)
def platformDetach(self):
self.ctl.close()
self.ctl = None
class SolarisIntelMixin:
"""
Handle register formats for the intel solaris stuff
"""
def getRegisterFormat(self):
return ""
def getRegisterNames(self):
return []
def platformReadMemory(self, addr, size):
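        # build a priovec: PCREAD request, local buffer address and length, then the remote address to read from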
a = array.array('c',"\x00" * size)
baddr, blen = a.buffer_info()
priovec = struct.pack("<4L",PCREAD, baddr, blen, addr)
self.METHOD_NAME(priovec)
return a.tostring()
def platformWriteMemory(self, addr, bytes):
a = array.array('c',bytes)
baddr,blen = a.buffer_info()
priovec = struct.pack("<LLLL", PCWRITE, baddr, blen, addr)
self.METHOD_NAME(priovec)
def platformGetMaps(self):
ret = []
pid = self.getPid()
with open("/proc/%d/map" % pid, "rb") as f:
mapdata = f.read()
while mapdata:
addr,size = struct.unpack("<LL", mapdata[:8])
perms, = struct.unpack("<L", mapdata[80:84])
perms = perms & 0x7
ret.append((addr,size, perms, ""))
mapdata = mapdata[96:]
return ret
|
686 | get debuglevel | """
Copyright (c) 2016 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
class NetwConfObject(object):
def __init__(self):
self._payload = dict()
self._payload['hostname'] = None
self._payload['network'] = None
self._payload['address'] = ''
self._payload['port'] = None
def is_valid(self):
for key in self._payload:
if self._payload[key] is None:
return False
return True
def set(self, prop, value):
if ('set_%s' % prop) in dir(self):
getattr(self,'set_%s' % prop)(value)
elif value.text is not None:
            # default: copy the property value into _payload
self._payload[prop] = value.text
def get_hostname(self):
return self._payload['hostname']
def get_network(self):
return self._payload['network']
def get_basepath(self):
return '/usr/local/etc/tinc/%(network)s' % self._payload
def get_addresses(self):
if not self._payload['address']:
return
yield from self._payload['address'].split(',')
class Network(NetwConfObject):
def __init__(self):
super(Network, self).__init__()
self._payload['id'] = None
self._payload['privkey'] = None
self._payload['intaddress'] = None
self._payload['debuglevel'] = 'd0'
self._payload['mode'] = 'switch'
self._payload['PMTUDiscovery'] = 'yes'
self._payload['StrictSubnets'] = 'no'
self._hosts = list()
def get_id(self):
return self._payload['id']
def get_local_address(self):
return self._payload['intaddress']
def get_mode(self):
return self._payload['mode']
def METHOD_NAME(self):
return self._payload['debuglevel'][1] if len(self._payload['debuglevel']) > 1 else '0'
def set_hosts(self, hosts):
for host in hosts:
hostObj = Host()
for host_prop in host:
hostObj.set(host_prop.tag, host_prop)
self._hosts.append(hostObj)
def set_PMTUDiscovery(self, value):
self._payload['PMTUDiscovery'] = 'no' if value.text != '1' else 'yes'
def set_StrictSubnets(self, value):
self._payload['StrictSubnets'] = 'no' if value.text != '1' else 'yes'
def config_text(self):
result = list()
result.append('AddressFamily=any')
result.append('Mode=%(mode)s' % self._payload)
result.append('PMTUDiscovery=%(PMTUDiscovery)s' % self._payload)
result.append('Port=%(port)s' % self._payload)
result.append('PingTimeout=%(pingtimeout)s' % self._payload)
result.append('StrictSubnets=%(StrictSubnets)s' % self._payload)
for host in self._hosts:
if host.connect_to_this_host():
result.append('ConnectTo = %s' % (host.get_hostname(),))
result.append('Device=/dev/tinc%(id)s' % self._payload)
result.append('Name=%(hostname)s' % self._payload)
return '\n'.join(result) + '\n'
def filename(self):
return self.get_basepath() + '/tinc.conf'
def privkey(self):
return {'filename': self.get_basepath() + '/rsa_key.priv', 'content': self._payload['privkey']}
def all(self):
yield self
for host in self._hosts:
yield host
class Host(NetwConfObject):
def __init__(self):
super(Host, self).__init__()
self._connectTo = "0"
self._payload['pubkey'] = None
self._payload['cipher'] = None
def connect_to_this_host(self):
if self.is_valid() and self._payload['address'] and self._connectTo == "1":
return True
else:
return False
def set_connectto(self, value):
self._connectTo = value.text
def get_subnets(self):
        if 'subnet' not in self._payload:
return
yield from self._payload['subnet'].split(',')
def config_text(self):
result = list()
for address in self.get_addresses():
result.append('Address=%s %s' % (address, self._payload['port']))
for network in self.get_subnets():
result.append('Subnet=%s' % network)
result.append('Cipher=%(cipher)s'%self._payload)
result.append('Digest=sha256')
result.append(self._payload['pubkey'])
return '\n'.join(result) + '\n'
def filename(self):
return '%s/hosts/%s' % (self.get_basepath(), self._payload['hostname']) |
687 | handle | """Windows service. Requires pywin32."""
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=20)
return
result = win32api.SetConsoleCtrlHandler(self.METHOD_NAME, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=20)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=20)
return
try:
result = win32api.SetConsoleCtrlHandler(self.METHOD_NAME, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=20)
self.is_set = False
def METHOD_NAME(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
'WSPBus %s Event (pid=%r)' %
(state.name, os.getpid()))
self.events[state] = event
return event
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
event = self._get_state_event(value)
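        # PulseEvent releases any threads currently blocked in wait() on this state's event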
win32event.PulseEvent(event)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(
events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError('The given object could not be found: %r' % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = 'Python Web Service'
_svc_display_name_ = 'Python Web Service'
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = 'pywebsvc'
_exe_args_ = None # Default to no arguments
# Only exists on Windows 2000 or later, ignored on windows NT
_svc_description_ = 'Python Web Service'
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
def SvcOther(self, control):
from cherrypy import process
process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService) |
688 | test symlink exists different atomic | """
Tests for file.symlink function
"""
import os
import pytest
import salt.utils.path
from salt.exceptions import CommandExecutionError, SaltInvocationError
pytestmark = [
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="module")
def file(modules):
return modules.file
@pytest.fixture(scope="function")
def source():
with pytest.helpers.temp_file(contents="Source content") as source:
yield source
def test_symlink(file, source):
"""
Test symlink with defaults
"""
target = source.parent / "symlink.lnk"
try:
file.symlink(str(source), str(target))
assert salt.utils.path.islink(str(target))
finally:
target.unlink()
def test_symlink_missing_src(file, source):
"""
Test symlink when src is missing should still create the link
"""
target = source.parent / "symlink.lnk"
missing_source = source.parent / "missing.txt"
try:
file.symlink(str(missing_source), str(target))
assert salt.utils.path.islink(str(target))
finally:
target.unlink()
def test_symlink_exists_same(file, source):
"""
Test symlink with an existing symlink to the correct file
Timestamps should not change
"""
target = source.parent / "symlink.lnk"
target.symlink_to(source)
try:
before_time = os.stat(str(target)).st_mtime
ret = file.symlink(str(source), str(target))
after_time = os.stat(str(target)).st_mtime
assert before_time == after_time
assert ret is True
finally:
target.unlink()
def test_symlink_exists_different(file, source):
"""
Test symlink with an existing symlink to a different file
Should throw a CommandExecutionError
"""
dif_source = source.parent / "dif_source.txt"
target = source.parent / "symlink.lnk"
target.symlink_to(dif_source)
try:
with pytest.raises(CommandExecutionError) as exc:
file.symlink(str(source), str(target))
assert "Found existing symlink:" in exc.value.message
finally:
target.unlink()
def test_symlink_exists_file(file, source):
"""
Test symlink when the existing file is not a link
We don't do anything because we do not want to destroy any data
Should throw a CommandExecutionError
"""
with pytest.helpers.temp_file("symlink.txt", contents="Source content") as target:
with pytest.raises(CommandExecutionError) as exc:
file.symlink(str(source), str(target))
assert "Existing path is not a symlink:" in exc.value.message
def test_symlink_exists_different_force(file, source):
"""
Test symlink with an existing symlink to a different file with force=True
Should destroy the existing symlink and generate a new one to the correct
location
"""
dif_source = source.parent / "dif_source.txt"
target = source.parent / "symlink.lnk"
target.symlink_to(dif_source)
try:
file.symlink(str(source), str(target), force=True)
assert salt.utils.path.readlink(str(target)) == str(source)
finally:
target.unlink()
def test_symlink_target_relative_path(file, source):
"""
Test symlink when the target file is a relative path
Should throw a SaltInvocationError
"""
target = "..{}symlink.lnk".format(os.path.sep)
with pytest.raises(SaltInvocationError) as exc:
file.symlink(str(source), str(target))
assert "Link path must be absolute" in exc.value.message
def METHOD_NAME(file, source):
"""
Test symlink with an existing symlink to a different file with atomic=True
Should replace the existing symlink with a new one to the correct location
"""
dif_source = source.parent / "dif_source.txt"
target = source.parent / "symlink.lnk"
target.symlink_to(dif_source)
try:
file.symlink(str(source), str(target), atomic=True)
assert salt.utils.path.readlink(str(target)) == str(source)
finally:
target.unlink() |
689 | identifier | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import logging
from collections import OrderedDict
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from pretix.base.signals import register_sales_channels
logger = logging.getLogger(__name__)
_ALL_CHANNELS = None
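# module-level cache; populated on the first call to get_all_sales_channels()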
class SalesChannel:
def __repr__(self):
return '<SalesChannel: {}>'.format(self.METHOD_NAME)
@property
def METHOD_NAME(self) -> str:
"""
The internal identifier of this sales channel.
"""
raise NotImplementedError() # NOQA
@property
def verbose_name(self) -> str:
"""
A human-readable name of this sales channel.
"""
raise NotImplementedError() # NOQA
@property
def icon(self) -> str:
"""
The name of a Font Awesome icon to represent this channel
"""
return "circle"
@property
def testmode_supported(self) -> bool:
"""
        Indicates whether a sales channel supports test mode orders.
"""
return True
@property
def payment_restrictions_supported(self) -> bool:
"""
If this property is ``True``, organizers can restrict the usage of payment providers to this sales channel.
Example: pretixPOS provides its own sales channel, ignores the configured payment providers completely and
        handles payments locally. Therefore, this property should be set to ``False`` for the pretixPOS sales channel, as
the event organizer cannot restrict the usage of any payment provider through the backend.
"""
return True
@property
def unlimited_items_per_order(self) -> bool:
"""
If this property is ``True``, purchases made using this sales channel are not limited to the maximum amount of
items defined in the event settings.
"""
return False
@property
def customer_accounts_supported(self) -> bool:
"""
If this property is ``True``, checkout will show the customer login step.
"""
return True
@property
def discounts_supported(self) -> bool:
"""
If this property is ``True``, this sales channel can be selected for automatic discounts.
"""
return True
def get_all_sales_channels():
global _ALL_CHANNELS
if _ALL_CHANNELS:
return _ALL_CHANNELS
types = OrderedDict()
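    # receivers of register_sales_channels may return a single channel or an iterable of channels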
for recv, ret in register_sales_channels.send(None):
if isinstance(ret, (list, tuple)):
for r in ret:
types[r.METHOD_NAME] = r
else:
types[ret.METHOD_NAME] = ret
_ALL_CHANNELS = types
return types
class WebshopSalesChannel(SalesChannel):
METHOD_NAME = "web"
verbose_name = _('Online shop')
icon = "globe"
@receiver(register_sales_channels, dispatch_uid="base_register_default_sales_channels")
def base_sales_channels(sender, **kwargs):
return (
WebshopSalesChannel(),
) |
690 | test locate async | from XRootD import client
from XRootD.client.utils import AsyncResponseHandler
from XRootD.client.flags import OpenFlags, QueryCode, MkDirFlags, AccessMode, \
DirListFlags, PrepareFlags
from env import *
import pytest
import sys
import os
import inspect
def test_filesystem():
c = client.FileSystem(SERVER_URL)
funcspecs = [(c.locate, ('/tmp', OpenFlags.REFRESH), True),
(c.deeplocate, ('/tmp', OpenFlags.REFRESH), True),
(c.query, (QueryCode.SPACE, '/tmp'), True),
(c.truncate, ('/tmp/spam', 1000), False),
(c.mv, ('/tmp/spam', '/tmp/ham'), False),
(c.chmod, ('/tmp/ham', AccessMode.UR | AccessMode.UW), False),
(c.rm, ('/tmp/ham',), False),
(c.mkdir, ('/tmp/somedir', MkDirFlags.MAKEPATH), False),
(c.rmdir, ('/tmp/somedir',), False),
(c.ping, (), False),
(c.stat, ('/tmp',), True),
(c.statvfs, ('/tmp',), True),
(c.protocol, (), True),
(c.dirlist, ('/tmp', DirListFlags.STAT), True),
(c.sendinfo, ('important info',), False),
(c.prepare, (['/tmp/foo'], PrepareFlags.STAGE), True),
]
for func, args, hasReturnObject in funcspecs:
sync (func, args, hasReturnObject)
# Create new temp file
f = client.File()
status, response = f.open(smallfile, OpenFlags.NEW)
for func, args, hasReturnObject in funcspecs:
async(func, args, hasReturnObject)
def sync(func, args, hasReturnObject):
status, response = func(*args)
print status
assert status.ok
if hasReturnObject:
print response
assert response
def async(func, args, hasReturnObject):
handler = AsyncResponseHandler()
status = func(callback=handler, *args)
print status
assert status.ok
status, response, hostlist = handler.wait()
assert status.ok
if response:
assert response
for host in hostlist:
assert host.url
print host.url
if hasReturnObject:
assert response
def test_copy_sync():
c = client.FileSystem(SERVER_URL)
f = client.File()
status, response = f.open(smallfile, OpenFlags.DELETE)
assert status.ok
status, response = c.copy(smallfile, '/tmp/eggs', force=True)
assert status.ok
status, response = c.copy('/tmp/nonexistent', '/tmp/eggs')
assert not status.ok
try:
os.remove('/tmp/eggs')
except OSError, __:
pass
def test_locate_sync():
c = client.FileSystem(SERVER_URL)
status, response = c.locate('/tmp', OpenFlags.REFRESH)
assert status.ok
for item in response:
assert item
def METHOD_NAME():
c = client.FileSystem(SERVER_URL)
handler = AsyncResponseHandler()
response = c.locate('/tmp', OpenFlags.REFRESH, callback=handler)
status, response, hostlist = handler.wait()
assert status.ok
for item in response:
assert item
def test_deeplocate_sync():
c = client.FileSystem(SERVER_URL)
status, response = c.deeplocate('/tmp', OpenFlags.REFRESH)
assert status.ok
for item in response:
assert item
def test_deeplocate_async():
c = client.FileSystem(SERVER_URL)
handler = AsyncResponseHandler()
response = c.deeplocate('/tmp', OpenFlags.REFRESH, callback=handler)
status, response, hostlist = handler.wait()
assert status.ok
for item in response:
assert item
def test_dirlist_sync():
c = client.FileSystem(SERVER_URL)
status, response = c.dirlist('/tmp', DirListFlags.STAT)
assert status.ok
for item in response:
assert item.name
print item.statinfo
assert item.statinfo
assert item.hostaddr
status, response = c.dirlist('invalid', DirListFlags.STAT)
assert not status.ok
def test_dirlist_async():
c = client.FileSystem(SERVER_URL)
handler = AsyncResponseHandler()
status = c.dirlist('/tmp', DirListFlags.STAT, callback=handler)
assert status.ok
status, response, hostlist = handler.wait()
assert status.ok
for h in hostlist:
print h.url
for item in response:
assert item.name
print item.statinfo
assert item.statinfo
assert item.hostaddr
assert hostlist
def test_query_sync():
c = client.FileSystem(SERVER_URL)
status, response = c.query(QueryCode.STATS, 'a')
assert status.ok
assert response
print response
def test_query_async():
c = client.FileSystem(SERVER_URL)
handler = AsyncResponseHandler()
status = c.query(QueryCode.STATS, 'a', callback=handler)
assert status.ok
status, response, hostlist = handler.wait()
assert status.ok
assert response
print response
def test_mkdir_flags():
c = client.FileSystem(SERVER_URL)
status, response = c.mkdir('/tmp/dir1/dir2', MkDirFlags.MAKEPATH)
assert status.ok
c.rm('/tmp/dir1/dir2')
c.rm('/tmp/dir1')
def test_args():
c = client.FileSystem(url=SERVER_URL)
assert c
pytest.raises(TypeError, "c = client.FileSystem(foo='root://localhost')")
pytest.raises(TypeError, "c = client.FileSystem(path='root://localhost', foo='bar')")
def test_creation():
c = client.FileSystem(SERVER_URL)
assert c.url is not None
def test_deletion():
c = client.FileSystem(SERVER_URL)
del c
if sys.hexversion > 0x03000000:
pytest.raises(UnboundLocalError, 'assert c')
else:
pytest.raises(NameError, 'assert c')
|
691 | test table metadata | from unittest.mock import ANY, Mock
import pytest
from yarl import URL
from faust.app.router import Router
from faust.exceptions import SameNode
from faust.web.exceptions import ServiceUnavailable
class Test_Router:
@pytest.fixture()
def assignor(self, *, app):
assignor = app.assignor = Mock(name="assignor")
return assignor
@pytest.fixture()
def router(self, *, app, assignor):
return Router(app)
def test_constructor(self, *, router, app, assignor):
assert router.app is app
assert router._assignor is assignor
def test__get_serialized_key(self, *, router):
table = Mock(name="table")
key = Mock(name="key")
prepare_key = table.changelog_topic.prepare_key
prepare_key.return_value = [Mock(name="v1"), Mock(name="v2")]
ret = router._get_serialized_key(table, key)
assert ret is prepare_key.return_value[0]
table.changelog_topic.prepare_key.assert_called_once_with(key, None)
def test_key_store(self, *, router, app, assignor):
table = app.tables["foo"] = Mock(name="table")
router._get_serialized_key = Mock(
return_value=table.changelog_topic.prepare_key.return_value
)
assert router.key_store("foo", "k") is assignor.key_store.return_value
assignor.key_store.assert_called_once_with(
table.changelog_topic.get_topic_name(),
table.changelog_topic.prepare_key.return_value,
)
def test_external_topic_key_store(self, *, router, app, assignor):
topic = Mock()
prepare_key = topic.prepare_key
prepare_key.return_value = [Mock(name="v1"), Mock(name="v2")]
assert (
router.external_topic_key_store(topic, "k")
is assignor.external_key_store.return_value
)
assignor.external_key_store.assert_called_once_with(
topic.get_topic_name(),
topic.prepare_key.return_value[0],
)
def METHOD_NAME(self, *, router, app, assignor):
table = app.tables["foo"] = Mock(name="table")
ret = router.table_metadata("foo")
assert ret is assignor.table_metadata.return_value
assignor.table_metadata.assert_called_once_with(
table.changelog_topic.get_topic_name(),
)
def test_tables_metadata(self, *, router, assignor):
res = router.tables_metadata()
assert res is assignor.tables_metadata.return_value
assignor.tables_metadata.assert_called_once_with()
def test_external_topics_metadata(self, *, router, assignor):
res = router.external_topics_metadata()
assert res is assignor.external_topics_metadata.return_value
assignor.external_topics_metadata.assert_called_once_with()
@pytest.mark.asyncio
async def test_route_req__unavail(self, *, router, app):
web = Mock(name="web")
request = Mock(name="request")
app.router.key_store = Mock()
app.router.key_store.side_effect = KeyError()
with pytest.raises(ServiceUnavailable):
await router.route_req("foo", "k", web, request)
@pytest.mark.asyncio
async def test_route_req__same_node(self, *, router, app):
app.conf.canonical_url = URL("http://example.com:8181")
web = Mock(name="web")
request = Mock(name="request")
app.router.key_store = Mock()
app.router.key_store.return_value = URL("http://example.com:8181")
with pytest.raises(SameNode):
await router.route_req("foo", "k", web, request)
@pytest.mark.asyncio
@pytest.mark.http_session(text=b"foobar")
async def test_route_req(self, *, router, app, mock_http_client):
app.conf.canonical_url = URL("http://ge.example.com:8181")
web = Mock(name="web")
request = Mock(name="request")
app.router.key_store = Mock()
app.router.key_store.return_value = URL("http://el.example.com:8181")
response = await router.route_req("foo", "k", web, request)
assert response is web.text.return_value
web.text.assert_called_once_with(b"foobar", content_type=ANY, status=ANY)
@pytest.mark.asyncio
@pytest.mark.http_session(text=b"foobar")
async def test_route_req_method(self, *, router, app, mock_http_client):
app.conf.canonical_url = URL("http://ge.example.com:8181")
web = Mock(name="web")
request = Mock(name="request")
request_method = "POST"
routed_url = "http://el.example.com"
routed_port = 8181
request.method = request_method
app.router.key_store = Mock()
app.router.key_store.return_value = URL(f"{routed_url}:{routed_port}")
await router.route_req("foo", "k", web, request)
mock_http_client.request.assert_called_once_with(
method=request_method,
headers=request.headers,
url=request.url.with_host(routed_url).with_port(routed_port),
)
@pytest.mark.asyncio
@pytest.mark.http_session(text=b"foobar")
async def test_topic_route_req(self, *, router, app, mock_http_client):
app.conf.canonical_url = URL("http://ge.example.com:8181")
web = Mock(name="web")
request = Mock(name="request")
app.router.external_topic_key_store = Mock()
app.router.external_topic_key_store.return_value = URL(
"http://el.example.com:8181"
)
response = await router.route_topic_req("foo", "k", web, request)
assert response is web.text.return_value
web.text.assert_called_once_with(b"foobar", content_type=ANY, status=ANY) |
692 | tear down | from unittest import skipUnless, SkipTest
import uuid
from datetime import datetime
from django.conf import settings
from django.test import TestCase
from corehq.form_processor.backends.sql.dbaccessors import ShardAccessor
from corehq.form_processor.models import XFormInstance
from corehq.form_processor.tests.utils import sharded
from corehq.messaging.scheduling.scheduling_partitioned.dbaccessors import save_alert_schedule_instance
from corehq.messaging.scheduling.scheduling_partitioned.models import AlertScheduleInstance
from corehq.messaging.scheduling.scheduling_partitioned.tests.test_dbaccessors_partitioned import \
BaseSchedulingPartitionedDBAccessorsTest
from corehq.sql_db.config import plproxy_config
from corehq.sql_db.models import PartitionedModel
from corehq.sql_db.shard_data_management import get_count_of_unmatched_models_by_shard
from corehq.sql_db.tests.utils import DefaultShardingTestConfigMixIn
@sharded
@skipUnless(settings.USE_PARTITIONED_DATABASE, 'Only applicable if sharding is setup')
class ShardManagementTest(DefaultShardingTestConfigMixIn, TestCase):
domain = 'shard-management-test'
@classmethod
def setUpClass(cls):
if not settings.USE_PARTITIONED_DATABASE:
# https://github.com/nose-devs/nose/issues/946
raise SkipTest('Only applicable if sharding is setup')
super(ShardManagementTest, cls).setUpClass()
cls.p1_uuid = uuid.UUID('9d3a283a-25b6-4116-8846-d0fc8f04f50f')
cls.p2_uuid = uuid.UUID('8440dbd6-61b1-4b2f-a310-7e1768902d04')
def METHOD_NAME(self):
for db in plproxy_config.form_processing_dbs:
AlertScheduleInstance.objects.using(db).filter(domain=self.domain).delete()
XFormInstance.objects.using(db).filter(domain=self.domain).delete()
def test_uuids_used(self):
self.assertEqual(ShardAccessor.get_database_for_doc(self.p1_uuid), self.db1)
self.assertEqual(ShardAccessor.get_database_for_doc(self.p2_uuid), self.db2)
def test_uuid_partitioning_correct(self):
from corehq.sql_db.shard_data_management import get_count_of_models_by_shard_for_testing
instance = BaseSchedulingPartitionedDBAccessorsTest.make_alert_schedule_instance(
self.p1_uuid, domain=self.domain
)
save_alert_schedule_instance(instance)
self.assertEqual(AlertScheduleInstance.objects.using(self.db1).count(), 1)
matches = get_count_of_unmatched_models_by_shard(self.db1, AlertScheduleInstance)
self.assertEqual(0, len(matches))
all_data = get_count_of_models_by_shard_for_testing(self.db1, AlertScheduleInstance)
self.assertEqual(1, len(all_data))
self.assertEqual((0, 1), all_data[0])
def test_uuid_partitioning_incorrect(self):
instance = BaseSchedulingPartitionedDBAccessorsTest.make_alert_schedule_instance(
self.p1_uuid, domain=self.domain
)
super(PartitionedModel, instance).save(using=self.db2)
self.assertEqual(AlertScheduleInstance.objects.using(self.db2).count(), 1)
matches = get_count_of_unmatched_models_by_shard(self.db2, AlertScheduleInstance)
self.assertEqual(1, len(matches))
self.assertEqual((0, 1), matches[0])
def test_text_partitioning_correct(self):
from corehq.sql_db.shard_data_management import get_count_of_models_by_shard_for_testing
form = self._make_form_instance(str(self.p2_uuid))
form.save()
self.assertEqual(XFormInstance.objects.using(self.db2).count(), 1)
matches = get_count_of_unmatched_models_by_shard(self.db2, XFormInstance)
self.assertEqual(0, len(matches))
all_data = get_count_of_models_by_shard_for_testing(self.db2, XFormInstance)
self.assertEqual(1, len(all_data))
self.assertEqual((2, 1), all_data[0])
def test_text_partitioning_incorrect(self):
form = self._make_form_instance(str(self.p2_uuid))
super(PartitionedModel, form).save(using=self.db1)
self.assertEqual(XFormInstance.objects.using(self.db1).count(), 1)
matches = get_count_of_unmatched_models_by_shard(self.db1, XFormInstance)
self.assertEqual(1, len(matches))
self.assertEqual((2, 1), matches[0])
@classmethod
def _make_form_instance(cls, form_id):
return XFormInstance(
form_id=form_id,
xmlns='http://openrosa.org/formdesigner/form-processor',
received_on=datetime.utcnow(),
user_id='a-user',
domain=cls.domain,
state=XFormInstance.NORMAL,
) |
693 | test notify | # Author: Marvin Pinto <[email protected]>
# Author: Dennis Lutter <[email protected]>
from sickchill import logger, settings
from sickchill.oldbeard.common import (
NOTIFY_DOWNLOAD,
NOTIFY_LOGIN,
NOTIFY_LOGIN_TEXT,
NOTIFY_SNATCH,
NOTIFY_SUBTITLE_DOWNLOAD,
NOTIFY_UPDATE,
NOTIFY_UPDATE_TEXT,
notifyStrings,
)
from sickchill.oldbeard.helpers import getURL, make_session
class Notifier(object):
"""
Use Telegram to send notifications
https://telegram.org/
"""
def __init__(self):
self.session = make_session()
def METHOD_NAME(self, id=None, api_key=None):
"""
Send a test notification
:param id: The Telegram user/group id to send the message to
:param api_key: Your Telegram bot API token
:returns: the notification
"""
return self._notify_telegram("Test", "This is a test notification from SickChill", id, api_key, force=True)
def _send_telegram_msg(self, title, msg, id=None, api_key=None):
"""
Sends a Telegram notification
:param title: The title of the notification to send
:param msg: The message string to send
:param id: The Telegram user/group id to send the message to
:param api_key: Your Telegram bot API token
:returns: True if the message succeeded, False otherwise
"""
logger.debug("Telegram in use with API KEY: {0}".format(api_key))
params = {"chat_id": id or settings.TELEGRAM_ID, "text": f"{title} : {msg}"}
response = getURL(f"https://api.telegram.org/bot{api_key or settings.TELEGRAM_APIKEY}/sendMessage", params=params, session=self.session, returns="json")
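        # getURL returns None on failure, so the boolean index below selects the matching status message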
message = ("Telegram message sent successfully.", "Sending Telegram message failed, check the log")[response is None]
logger.info(message)
return response is not None, message
def notify_snatch(self, ep_name, title=notifyStrings[NOTIFY_SNATCH]):
"""
Sends a Telegram notification when an episode is snatched
:param ep_name: The name of the episode snatched
:param title: The title of the notification to send
"""
if settings.TELEGRAM_NOTIFY_ONSNATCH:
self._notify_telegram(title, ep_name)
def notify_download(self, ep_name, title=notifyStrings[NOTIFY_DOWNLOAD]):
"""
Sends a Telegram notification when an episode is downloaded
:param ep_name: The name of the episode downloaded
:param title: The title of the notification to send
"""
if settings.TELEGRAM_NOTIFY_ONDOWNLOAD:
self._notify_telegram(title, ep_name)
def notify_subtitle_download(self, ep_name, lang, title=notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]):
"""
Sends a Telegram notification when subtitles for an episode are downloaded
:param ep_name: The name of the episode subtitles were downloaded for
:param lang: The language of the downloaded subtitles
:param title: The title of the notification to send
"""
if settings.TELEGRAM_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_telegram(title, "{0}: {1}".format(ep_name, lang))
def notify_update(self, new_version="??"):
"""
Sends a Telegram notification for git updates
:param new_version: The new version available from git
"""
if settings.USE_TELEGRAM:
update_text = notifyStrings[NOTIFY_UPDATE_TEXT]
title = notifyStrings[NOTIFY_UPDATE]
self._notify_telegram(title, update_text + new_version)
def notify_login(self, ipaddress=""):
"""
Sends a Telegram notification on login
:param ipaddress: The ip address the login is originating from
"""
if settings.USE_TELEGRAM:
update_text = notifyStrings[NOTIFY_LOGIN_TEXT]
title = notifyStrings[NOTIFY_LOGIN]
self._notify_telegram(title, update_text.format(ipaddress))
def _notify_telegram(self, title, message, id=None, api_key=None, force=False):
"""
Sends a Telegram notification
:param title: The title of the notification to send
:param message: The message string to send
:param id: The Telegram user/group id to send the message to
:param api_key: Your Telegram bot API token
:param force: Enforce sending, for instance for testing
:returns: the message to send
"""
if not (force or settings.USE_TELEGRAM):
logger.debug("Notification for Telegram not enabled, skipping this notification")
return False, "Disabled"
logger.debug("Sending a Telegram message for {0}".format(message))
return self._send_telegram_msg(title, message, id, api_key) |
694 | repl mathics | # -*- coding: utf-8 -*-
import re
import unicodedata
from django.utils.html import linebreaks
from mathics.doc.common_doc import (
ALLOWED_TAGS,
ALLOWED_TAGS_RE,
CONSOLE_RE,
DL_ITEM_RE,
DL_RE,
HYPERTEXT_RE,
IMG_PNG_RE,
IMG_RE,
LATEX_RE,
LIST_ITEM_RE,
LIST_RE,
MATHICS_RE,
PYTHON_RE,
QUOTATIONS_RE,
REF_RE,
SPECIAL_COMMANDS,
SUBSECTION_END_RE,
SUBSECTION_RE,
post_sub,
pre_sub,
)
def slugify(value):
"""
Converts to lowercase, removes non-word characters apart from '$',
and converts spaces to hyphens. Also strips leading and trailing
whitespace.
Based on the Django version, but modified to preserve '$'.
"""
value = (
unicodedata.normalize("NFKD", value).encode("ascii", "ignore").decode("ascii")
)
value = re.sub("[^$`\w\s-]", "", value).strip().lower()
return re.sub("[-\s`]+", "-", value)
# FIXME: can we replace this with Python 3's html.escape ?
def escape_html(text, verbatim_mode=False, counters=None, single_line=False):
def repl_python(match):
return (
r"""<pre><![CDATA[
%s
]]></pre>"""
% match.group(1).strip()
)
text, post_substitutions = pre_sub(PYTHON_RE, text, repl_python)
text = text.replace("&", "&").replace("<", "<").replace(">", ">")
if not verbatim_mode:
def repl_quotation(match):
return r"“%s”" % match.group(1)
text = QUOTATIONS_RE.sub(repl_quotation, text)
if counters is None:
counters = {}
text = text.replace('"', """)
if not verbatim_mode:
def repl_latex(match):
return "%s<var>%s</var>%s" % (
match.group(1),
match.group(2),
match.group(3),
)
text = LATEX_RE.sub(repl_latex, text)
def METHOD_NAME(match):
text = match.group(1)
text = text.replace("\\'", "'")
            text = text.replace(" ", "&nbsp;")
if text:
return "<code>%s</code>" % text
else:
return "'"
def repl_allowed(match):
content = replace_all(
                match.group(1), [("&ldquo;", '"'), ("&rdquo;", '"'), ("&quot;", '"')]
)
return "<%s>" % content
text = MATHICS_RE.sub(METHOD_NAME, text)
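        # Restore allowed tags that were escaped above: e.g. a literal "<dl>"
        # in the source is "&lt;dl&gt;" at this point and becomes a real <dl>
        # tag again (illustrative; the exact tag list comes from ALLOWED_TAGS).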
for allowed in ALLOWED_TAGS:
text = ALLOWED_TAGS_RE[allowed].sub(repl_allowed, text)
            text = text.replace("&lt;/%s&gt;" % allowed, "</%s>" % allowed)
def repl_dl(match):
text = match.group(1)
text = DL_ITEM_RE.sub(
lambda m: "<%(tag)s>%(content)s</%(tag)s>\n" % m.groupdict(), text
)
return "<dl>%s</dl>" % text
text = DL_RE.sub(repl_dl, text)
def repl_list(match):
tag = match.group("tag")
content = match.group("content")
content = LIST_ITEM_RE.sub(lambda m: "<li>%s</li>" % m.group(1), content)
return "<%s>%s</%s>" % (tag, content, tag)
text = LIST_RE.sub(repl_list, text)
def repl_hypertext(match):
tag = match.group("tag")
content = match.group("content")
            #
            # Sometimes the URL does not fit in 80 characters. Then, to keep
            # flake8 happy and to have a nice, readable ASCII representation,
            # the URL may be split across several lines with indentation
            # spaces.
            #
            # The following line removes those extra characters, which would
            # otherwise spoil the URL, producing a single-line, space-free
            # string.
            #
content = content.replace(" ", "").replace("\n", "")
if tag == "em":
return r"<em>%s</em>" % content
elif tag == "url":
try:
text = match.group("text")
except IndexError:
text = None
if text is None:
text = content
return r'<a href="%s">%s</a>' % (content, text)
text = HYPERTEXT_RE.sub(repl_hypertext, text)
def repl_console(match):
tag = match.group("tag")
content = match.group("content")
tag = "div" if tag == "console" else "span"
content = content.strip()
pre = post = ""
# gets replaced for <br /> later by DocText.html()
content = content.replace("\n", "<br>")
return r'<%s class="console">%s%s%s</%s>' % (tag, pre, content, post, tag)
text = CONSOLE_RE.sub(repl_console, text)
def repl_img(match):
src = match.group("src")
title = match.group("title")
return (
r'<a href="/media/doc/%(src)s.pdf">'
r'<img src="/media/doc/%(src)s.png" title="%(title)s" />'
r"</a>"
) % {"src": src, "title": title}
text = IMG_RE.sub(repl_img, text)
def repl_imgpng(match):
src = match.group("src")
title = match.group("title")
return (r'<img src="/media/doc/%(src)s" title="%(title)s" />') % {
"src": src,
"title": title,
}
text = IMG_PNG_RE.sub(repl_imgpng, text)
def repl_ref(match):
# TODO: this is not an optimal solution - maybe we need figure
# numbers in the XML doc as well?
return r"the following figure"
text = REF_RE.sub(repl_ref, text)
def repl_subsection(match):
return '\n<h2 label="%s">%s</h2>\n' % (match.group(1), match.group(1))
text = SUBSECTION_RE.sub(repl_subsection, text)
text = SUBSECTION_END_RE.sub("", text)
text = text.replace("\\'", "'")
else:
        text = text.replace(" ", "&nbsp;")
text = "<code>%s</code>" % text
    text = text.replace("'", "&#39;")
    text = text.replace("---", "&mdash;")
for key, (xml, tex) in SPECIAL_COMMANDS.items():
text = text.replace("\\" + key, xml)
if not single_line:
text = linebreaks(text)
text = text.replace("<br />", "\n").replace("<br>", "<br />")
text = post_sub(text, post_substitutions)
text = text.replace("<p><pre>", "<pre>").replace("</pre></p>", "</pre>")
return text
def replace_all(text, pairs):
for i, j in pairs:
text = text.replace(i, j)
return text |
695 | read int | import json
from typing import IO, Any, Tuple, List
from .parser import Parser
from .symbols import (
RecordStart,
FieldStart,
Boolean,
Int,
Null,
String,
Long,
Float,
Double,
Bytes,
FieldEnd,
RecordEnd,
Union,
UnionEnd,
MapStart,
MapEnd,
MapKeyMarker,
Fixed,
ArrayStart,
ArrayEnd,
Enum,
ItemEnd,
)
class AvroJSONDecoder:
"""Decoder for the avro JSON format.
NOTE: All attributes and methods on this class should be considered
private.
Parameters
----------
fo
        File-like object to read from
"""
def __init__(self, fo: IO):
self._fo = fo
self._stack: List[Tuple[Any, str]] = []
self._json_data = [json.loads(line.strip()) for line in fo]
if self._json_data:
self._current = self._json_data.pop(0)
self.done = False
else:
self.done = True
self._key = None
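    # Illustrative usage sketch (hypothetical names; in practice this decoder
    # is driven by a schema-aware reader rather than called directly):
    #
    #     with open("records.json") as fo:
    #         decoder = AvroJSONDecoder(fo)
    #         decoder.configure(parsed_schema, named_schemas)
    #         # ...the reader then calls read_int(), read_utf8(), etc. as the
    #         # schema dictates.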
def read_value(self, symbol):
if isinstance(self._current, dict):
if self._key not in self._current:
# Use the default value
return symbol.get_default()
else:
return self._current[self._key]
else:
# If we aren't in a dict or a list then this must be a schema which
# just has a single basic type
return self._current
def _push(self):
self._stack.append((self._current, self._key))
def _push_and_adjust(self, symbol=None):
self._push()
if isinstance(self._current, dict) and self._key is not None:
if self._key not in self._current:
self._current = symbol.get_default()
else:
# self._current = self._current.pop(self._key)
self._current = self._current[self._key]
def _pop(self):
self._current, self._key = self._stack.pop()
def configure(self, schema, named_schemas):
self._parser = Parser(schema, named_schemas, self.do_action)
def do_action(self, action):
if isinstance(action, RecordStart):
self._push_and_adjust(action)
elif isinstance(action, RecordEnd):
self._pop()
elif isinstance(action, FieldStart):
self.read_object_key(action.field_name)
elif isinstance(action, FieldEnd) or isinstance(action, UnionEnd):
# TODO: Do we need a FieldEnd and UnionEnd symbol?
pass
else:
raise Exception(f"cannot handle: {action}")
def drain(self):
self._parser.drain_actions()
if self._json_data:
self._current = self._json_data.pop(0)
self._key = None
else:
self.done = True
def read_null(self):
symbol = self._parser.advance(Null())
return self.read_value(symbol)
def read_boolean(self):
symbol = self._parser.advance(Boolean())
return self.read_value(symbol)
def read_utf8(self, handle_unicode_errors="strict"):
symbol = self._parser.advance(String())
if self._parser.stack[-1] == MapKeyMarker():
self._parser.advance(MapKeyMarker())
for key in self._current:
self._key = key
break
return self._key
else:
return self.read_value(symbol)
def read_bytes(self):
symbol = self._parser.advance(Bytes())
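        # Avro's JSON encoding writes bytes as a string whose code points are
        # the byte values (0-255), so ISO-8859-1 maps it back to raw bytes.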
return self.read_value(symbol).encode("iso-8859-1")
def METHOD_NAME(self):
symbol = self._parser.advance(Int())
return self.read_value(symbol)
def read_long(self):
symbol = self._parser.advance(Long())
return self.read_value(symbol)
def read_float(self):
symbol = self._parser.advance(Float())
return self.read_value(symbol)
def read_double(self):
symbol = self._parser.advance(Double())
return self.read_value(symbol)
def read_enum(self):
symbol = self._parser.advance(Enum())
enum_labels = self._parser.pop_symbol() # pop the enumlabels
# TODO: Should we verify the value is one of the symbols?
label = self.read_value(symbol)
return enum_labels.labels.index(label)
def read_fixed(self, size):
symbol = self._parser.advance(Fixed())
return self.read_value(symbol).encode("iso-8859-1")
def read_map_start(self):
symbol = self._parser.advance(MapStart())
self._push_and_adjust(symbol)
def read_object_key(self, key):
self._key = key
def iter_map(self):
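        # Yields once per remaining map entry: the consuming reader picks the
        # next key (see read_utf8 / MapKeyMarker) and reads its value; the
        # entry is then deleted so the loop eventually terminates.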
while len(self._current) > 0:
self._push()
for key in self._current:
break
yield
self._pop()
del self._current[key]
def read_map_end(self):
self._parser.advance(MapEnd())
self._pop()
def read_array_start(self):
symbol = self._parser.advance(ArrayStart())
self._push_and_adjust(symbol)
self._key = None
def read_array_end(self):
self._parser.advance(ArrayEnd())
self._pop()
def iter_array(self):
while len(self._current) > 0:
self._push()
self._current = self._current.pop(0)
yield
self._pop()
self._parser.advance(ItemEnd())
def read_index(self):
self._parser.advance(Union())
alternative_symbol = self._parser.pop_symbol()
# TODO: Try to clean this up.
        # A JSON union is encoded like this: {"union_field": {"int": 32}} and
        # so what we are doing is trying to change that into
        # {"union_field": 32} before eventually reading the value of
        # "union_field"
if self._key is None:
# If self._key is None, self._current is an item in an array
if self._current is None:
label = "null"
else:
label, data = self._current.popitem()
self._current = data
# TODO: Do we need to do this?
self._parser.push_symbol(UnionEnd())
else:
# self._current is a JSON object and self._key should be the name
# of the union field
if self._key not in self._current:
self._current[self._key] = {
alternative_symbol.labels[0]: alternative_symbol.get_default()
}
if self._current[self._key] is None:
label = "null"
else:
label, data = self._current[self._key].popitem()
self._current[self._key] = data
# TODO: Do we need to do this?
self._parser.push_symbol(UnionEnd())
index = alternative_symbol.labels.index(label)
symbol = alternative_symbol.get_symbol(index)
self._parser.push_symbol(symbol)
return index |
696 | output | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network route-table route list",
)
class List(AAZCommand):
"""List routes in a route table.
:example: List routes in a route table.
az network route-table route list -g MyResourceGroup --route-table-name MyRouteTable
"""
_aaz_info = {
"version": "2018-11-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/routetables/{}/routes", "2018-11-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self.METHOD_NAME)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.route_table_name = AAZStrArg(
options=["--route-table-name"],
help="Route table name.",
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.RoutesList(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def METHOD_NAME(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class RoutesList(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"routeTableName", self.ctx.args.route_table_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2018-11-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.etag = AAZStrType()
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.value.Element.properties
properties.address_prefix = AAZStrType(
serialized_name="addressPrefix",
)
properties.next_hop_ip_address = AAZStrType(
serialized_name="nextHopIpAddress",
)
properties.next_hop_type = AAZStrType(
serialized_name="nextHopType",
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
)
return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
__all__ = ["List"] |
697 | remove temp results | """This module contains everything involved in parsing and evaluating the
results of test runs. This includes the base for the 'result parser' plugins
themselves, as well as functions for performing this parsing. Additionally,
it contains the functions used to get the base result values, as well as
resolving result evaluations."""
import json
from pathlib import Path
from typing import List
import pavilion.deferred
from pavilion import lockfile as _lockfile
from pavilion import utils
from ..result_parsers import base_classes
from .base import base_results, BASE_RESULTS, RESULT_ERRORS
from .evaluations import check_expression, evaluate_results
from ..errors import StringParserError, ResultError
from . import parse
from .parse import parse_results, DEFAULT_KEY
def check_config(parser_conf, evaluate_conf):
"""Make sure the result config is sensible, both for result parsers and
evaluations.
For result parsers we check for:
- Duplicated key names.
- Reserved key names.
- Bad parser plugin arguments.
For evaluations we check for:
- Reserved key names.
- Invalid expression syntax.
:raises ResultError: When a config breaks the rules.
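    For example (illustrative), giving two result parsers the same key name,
    or using a key that collides with one of the BASE_RESULTS keys, raises a
    ResultError here.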
"""
# Track the key_names seen, along with the 'per_file' setting for each.
# Keys still have to be unique, even if they won't collide due to
# 'per_file'.
key_names = set([])
errors = []
for rtype in parser_conf:
defaults = parser_conf[rtype].get('_defaults', {})
for key_str, rconf in parser_conf[rtype].items():
if ',' in key_str:
keys = [k.strip() for k in key_str.split(',') if k.strip()]
if parse.DEFAULT_KEY in keys:
raise ResultError(
"The default setting key '{}' can't be used in "
"a key list. Found in '{}' under parser '{}'"
.format(parse.DEFAULT_KEY, key_str, rtype))
else:
keys = [key_str]
for key in keys:
# Don't process this as a normal result parser
if key == parse.DEFAULT_KEY:
continue
if key in BASE_RESULTS.keys():
raise ResultError(
"Result parser key '{}' under parser '{}' is reserved."
.format(key, rtype)
)
if key in key_names and key != '_':
raise ResultError(
"Duplicate result parser key name '{}' under parser "
"'{}'".format(key, rtype))
key_names.add(key)
parser = base_classes.get_plugin(rtype)
rconf = parser.set_parser_defaults(rconf, defaults)
parser.check_config(rconf, keys)
for key, expr in evaluate_conf.items():
if key in BASE_RESULTS:
raise ResultError(
"Key '{}' in the result evaluate section is reserved."
.format(key)
)
# Don't check the expression if it is deferred.
if pavilion.deferred.DeferredVariable.was_deferred(expr):
continue
try:
check_expression(expr)
except StringParserError as err:
raise ResultError(
"Error parsing result evaluate expression for key '{}': {}\n"
"{}\n{}"
.format(key, expr, err.message, err.context)
)
return errors
def prune_result_log(log_path: Path, ids: List[str]) -> List[dict]:
"""Remove records corresponding to the given test ids. Ids can be either
    a test run id or a test run uuid.
:param log_path: The result log path.
:param ids: A list of test run ids and/or uuids.
:returns: A list of the pruned result dictionaries.
:raises ResultError: When we can't overwrite the log file.
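    Illustrative call (hypothetical values):
        prune_result_log(Path("results.log"), ["1234", "8f3c-uuid"])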
"""
pruned = []
rewrite_log_path = log_path.with_suffix('.rewrite')
lockfile_path = log_path.with_suffix(log_path.suffix + '.lock')
with _lockfile.LockFile(lockfile_path) as lock, \
log_path.open() as result_log, \
rewrite_log_path.open('w') as rewrite_log:
for line in result_log:
lock.renew(rate_limit=True)
try:
result = json.loads(line)
except json.JSONDecodeError:
# If we can't parse the line, just rewrite it as is.
rewrite_log.write(line)
continue
if not (str(result.get('id')) in ids
or result.get('uuid') in ids):
rewrite_log.write(line)
else:
pruned.append(result)
log_path.unlink()
rewrite_log_path.rename(log_path)
return pruned
def METHOD_NAME(results: dict, log: utils.IndentedLog) -> None:
"""Remove all result keys that start with an underscore."""
for key, value in list(results.items()):
if key.startswith('_'):
log("Removing temp key: '{}'".format(key))
del results[key]
if isinstance(value, dict):
METHOD_NAME(value, log) |
698 | save settings | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Configuration page for VPCS nodes
"""
import os
from gns3.qt import QtWidgets
from gns3.local_server import LocalServer
from gns3.node import Node
from gns3.controller import Controller
from ..ui.vpcs_node_configuration_page_ui import Ui_VPCSNodeConfigPageWidget
from gns3.dialogs.symbol_selection_dialog import SymbolSelectionDialog
class VPCSNodeConfigurationPage(QtWidgets.QWidget, Ui_VPCSNodeConfigPageWidget):
"""
QWidget configuration page for VPCS nodes.
"""
def __init__(self):
super().__init__()
self.setupUi(self)
self.uiSymbolToolButton.clicked.connect(self._symbolBrowserSlot)
self.uiScriptFileToolButton.clicked.connect(self._scriptFileBrowserSlot)
self._default_configs_dir = LocalServer.instance().localServerSettings()["configs_path"]
if Controller.instance().isRemote():
self.uiScriptFileToolButton.hide()
# add the categories
for name, category in Node.defaultCategories().items():
self.uiCategoryComboBox.addItem(name, category)
def _symbolBrowserSlot(self):
"""
Slot to open the symbol browser and select a new symbol.
"""
symbol_path = self.uiSymbolLineEdit.text()
dialog = SymbolSelectionDialog(self, symbol=symbol_path)
dialog.show()
if dialog.exec_():
new_symbol_path = dialog.getSymbol()
self.uiSymbolLineEdit.setText(new_symbol_path)
self.uiSymbolLineEdit.setToolTip('<img src="{}"/>'.format(new_symbol_path))
def _scriptFileBrowserSlot(self):
"""
Slot to open a file browser and select a base script file for VPCS
"""
path, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Select a script file", self._default_configs_dir)
if not path:
return
self._default_configs_dir = os.path.dirname(path)
if not os.access(path, os.R_OK):
QtWidgets.QMessageBox.critical(self, "Script file", "{} cannot be read".format(os.path.basename(path)))
return
self.uiScriptFileEdit.setText(os.path.normpath(path))
def loadSettings(self, settings, node=None, group=False):
"""
Loads the VPCS node settings.
:param settings: the settings (dictionary)
:param node: Node instance
:param group: indicates the settings apply to a group of routers
"""
if not group:
self.uiNameLineEdit.setText(settings["name"])
else:
self.uiNameLabel.hide()
self.uiNameLineEdit.hide()
if not node:
# these are template settings
self.uiNameLabel.setText("Template name:")
# load the default name format
self.uiDefaultNameFormatLineEdit.setText(settings["default_name_format"])
# load the symbol
self.uiSymbolLineEdit.setText(settings["symbol"])
self.uiSymbolLineEdit.setToolTip('<img src="{}"/>'.format(settings["symbol"]))
# load the category
index = self.uiCategoryComboBox.findData(settings["category"])
if index != -1:
self.uiCategoryComboBox.setCurrentIndex(index)
self.uiScriptFileEdit.setText(settings["base_script_file"])
else:
self.uiDefaultNameFormatLabel.hide()
self.uiDefaultNameFormatLineEdit.hide()
self.uiSymbolLabel.hide()
self.uiSymbolLineEdit.hide()
self.uiSymbolToolButton.hide()
            self.uiCategoryLabel.hide()
            self.uiCategoryComboBox.hide()
self.uiScriptFileLabel.hide()
self.uiScriptFileEdit.hide()
self.uiScriptFileToolButton.hide()
# load the console type
index = self.uiConsoleTypeComboBox.findText(settings["console_type"])
if index != -1:
self.uiConsoleTypeComboBox.setCurrentIndex(index)
self.uiConsoleAutoStartCheckBox.setChecked(settings["console_auto_start"])
def METHOD_NAME(self, settings, node=None, group=False):
"""
        Saves the VPCS node settings.
:param settings: the settings (dictionary)
:param node: Node instance
:param group: indicates the settings apply to a group of routers
"""
# these settings cannot be shared by nodes and updated
# in the node properties dialog.
if not group:
# set the node name
name = self.uiNameLineEdit.text()
if not name:
QtWidgets.QMessageBox.critical(self, "Name", "VPCS node name cannot be empty!")
else:
settings["name"] = name
if not node:
default_name_format = self.uiDefaultNameFormatLineEdit.text().strip()
if '{0}' not in default_name_format and '{id}' not in default_name_format:
QtWidgets.QMessageBox.critical(self, "Default name format", "The default name format must contain at least {0} or {id}")
else:
settings["default_name_format"] = default_name_format
symbol_path = self.uiSymbolLineEdit.text()
settings["symbol"] = symbol_path
settings["category"] = self.uiCategoryComboBox.itemData(self.uiCategoryComboBox.currentIndex())
base_script_file = self.uiScriptFileEdit.text().strip()
if not base_script_file:
settings["base_script_file"] = ""
elif base_script_file != settings["base_script_file"]:
if self._configFileValid(base_script_file):
settings["base_script_file"] = base_script_file
else:
QtWidgets.QMessageBox.critical(self, "Base script config file", "Cannot read the base script config file")
# save console type
settings["console_type"] = self.uiConsoleTypeComboBox.currentText().lower()
settings["console_auto_start"] = self.uiConsoleAutoStartCheckBox.isChecked()
return settings
def _configFileValid(self, path):
"""
Return true if it's a valid configuration file
"""
if not os.path.isabs(path):
path = os.path.join(LocalServer.instance().localServerSettings()["configs_path"], path)
return os.access(path, os.R_OK) |
699 | feature map | #!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
from danesfield import gdal_utils
import sys
import os
import argparse
import requests
import subprocess
import json
import re
import pyproj
import logging
def main(args):
    parser = argparse.ArgumentParser(description="Download OpenStreetMap data \
for a region and convert to GeoJSON")
parser.add_argument(
'--left',
type=str,
help='Longitude of left / westernmost side of bounding box')
parser.add_argument(
'--bottom',
type=str,
help='Latitude of bottom / southernmost side of bounding box')
parser.add_argument(
'--right',
type=str,
help='Longitude of right / easternmost side of bounding box')
parser.add_argument(
'--top',
type=str,
help='Latitude of top / northernmost side of bounding box')
parser.add_argument(
'--bounding-img',
type=str,
help='Get region of interest from image file instead of \
explicitly setting the bounds')
parser.add_argument(
'--api-endpoint',
type=str,
default="https://api.openstreetmap.org/api/0.6/map")
parser.add_argument(
'--output-dir',
type=str,
required=True)
parser.add_argument(
'--output-prefix',
type=str,
default="road_vector")
args = parser.parse_args(args)
if args.bounding_img is not None:
if os.path.isfile(args.bounding_img):
img = gdal_utils.gdal_open(args.bounding_img)
outProj = pyproj.Proj('+proj=longlat +datum=WGS84')
left, bottom, right, top = gdal_utils.gdal_bounding_box(img, outProj)
else:
logging.error("Couldn't find bounding_img file: [{}]. Aborting!".
format(args.bounding_img))
exit(1)
elif all((args.left,
args.bottom,
args.right,
args.top)):
left, bottom, right, top = (args.left,
args.bottom,
args.right,
args.top)
else:
logging.error("Must specify either '--bounding-img' or all of "
"'--left', '--bottom', '--right', '--top'. Aborting!")
exit(1)
request_params = {"bbox": ",".join(map(str, [left,
bottom,
right,
top]))}
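    # The OSM API's bbox parameter is ordered "left,bottom,right,top", i.e.
    # west and south bounds first, then east and north.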
response = requests.get(args.api_endpoint, request_params)
output_osm_filepath = os.path.join(args.output_dir, "{}.osm".format(args.output_prefix))
try:
os.mkdir(args.output_dir)
except FileExistsError as e:
pass
with open(output_osm_filepath, 'wb') as fd:
for chunk in response.iter_content(chunk_size=128):
fd.write(chunk)
# Make the initial conversion from OSM to GeoJSON
geojson_preform_output_filepath = os.path.join(
args.output_dir, "{}.preformat.geojson".format(args.output_prefix))
osm_sql_query = 'SELECT * FROM lines'
subprocess.run(['ogr2ogr',
'-f',
'GeoJSON',
geojson_preform_output_filepath,
output_osm_filepath,
'-sql',
osm_sql_query],
check=True)
geojson_output_filepath = os.path.join(
args.output_dir, "{}.geojson".format(args.output_prefix))
# Manually tweak GeoJSON to fit expected format
with open(geojson_preform_output_filepath, 'r', encoding='utf-8') as f:
json_data = json.load(f)
json_data["features"] = [METHOD_NAME(f) for f in json_data["features"]]
with open(geojson_output_filepath, 'w') as f:
f.write(json.dumps(json_data))
return 0
def properties_map(in_properties):
properties = in_properties.copy()
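    # other_tags arrives in an hstore-like encoding such as
    # '"bridge"=>"yes","railway"=>"rail"'; replacing "=>" with ":" below turns
    # it into valid JSON (example values are illustrative).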
other_tags = json.loads("{" +
re.sub("=>", ":", properties.get("other_tags", "")) +
"}")
if "railway" in other_tags:
properties["railway"] = other_tags["railway"]
# Forcing bridge in output tags
if other_tags.get("bridge") == "yes":
bridge_val = 1
else:
bridge_val = 0
properties["bridge"] = bridge_val
# Only interested in these classes for now; in priority order
class_level_properties = ["highway",
"railway"]
for k in class_level_properties:
if k in properties:
properties["class"] = k
properties["type"] = properties[k]
break
return properties
def METHOD_NAME(in_feature):
feature = in_feature.copy()
feature["properties"] = properties_map(feature["properties"])
return feature
if __name__ == '__main__':
main(sys.argv[1:]) |