Dataset columns: max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
tests/util_functions_tests/test_utils.py | michaelhall28/darwinian_shift | 0 | 6632551
from darwinian_shift.utils.util_functions import *
from tests.conftest import sort_dataframe, compare_sorted_files, MUTATION_DATA_FILE, TEST_DATA_DIR
from darwinian_shift.reference_data.reference_utils import get_source_genome_reference_file_paths
from pandas.testing import assert_frame_equal
import filecmp
import os
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
def test_reverse_complement():
seq = "ATGCCTATTGGATCCAAAGAGAGGCCAACATTTTTTGAAATTTTTAAGACACGCTGCAACAAAGCAGATT"
rev_comp_expected = "AATCTGCTTTGTTGCAGCGTGTCTTAAAAATTTCAAAAAATGTTGGCCTCTCTTTGGATCCAATAGGCAT"
rev_comp_calc = reverse_complement(seq)
assert rev_comp_calc == rev_comp_expected
def test_output_transcript_sequences_to_fasta_aa(project, tmpdir):
output_file = os.path.join(tmpdir, "output_transcript_aa.fa")
output_transcript_sequences_to_fasta(project, output_file, aa_sequence=True)
compare_sorted_files(output_file, os.path.join(FILE_DIR, "reference_transcript_aa.fa"))
def test_output_transcript_sequences_to_fasta_nuc(project, tmpdir):
output_file = os.path.join(tmpdir, "output_transcript_nuc.fa")
output_transcript_sequences_to_fasta(project, output_file, aa_sequence=False)
compare_sorted_files(output_file, os.path.join(FILE_DIR, "reference_transcript_nuc.fa"))
def test_sort_multiple_arrays_using_one():
arr1 = np.array([4, 3, 6, 1, 2.1])
arr2 = np.arange(5)
list3 = [4, 5, 6, 7, 8]
sorted_arrs = sort_multiple_arrays_using_one(arr1, arr2, list3)
expected = np.array([
[1, 2.1, 3, 4, 6],
[3, 4, 1, 0, 2],
[7, 8, 5, 4, 6]
])
assert np.array_equal(sorted_arrs, expected)
def _get_partial_file_path(full_path, num_dir=5):
return "/".join(full_path.split("/")[-num_dir:])
def test_reference_file_paths():
exon_file, reference_file = get_source_genome_reference_file_paths(source_genome='homo_sapiens')
assert _get_partial_file_path(exon_file) == "darwinian_shift/reference_data/homo_sapiens/ensembl-99/biomart_exons_homo_sapiens.txt"
assert _get_partial_file_path(
reference_file) == "darwinian_shift/reference_data/homo_sapiens/ensembl-99/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz"
exon_file, reference_file = get_source_genome_reference_file_paths(source_genome='GRch37')
assert _get_partial_file_path(
exon_file, 6) == "darwinian_shift/reference_data/homo_sapiens/ensembl-99/GRCh37/biomart_exons_homo_sapiens.txt"
assert _get_partial_file_path(
reference_file, 6) == "darwinian_shift/reference_data/homo_sapiens/ensembl-99/GRCh37/Homo_sapiens.GRCh37.dna.primary_assembly.fa.gz"
exon_file, reference_file = get_source_genome_reference_file_paths(source_genome='mus_musculus', ensembl_release=99)
assert _get_partial_file_path(
exon_file) == "darwinian_shift/reference_data/mus_musculus/ensembl-99/biomart_exons_mus_musculus.txt"
assert _get_partial_file_path(
reference_file) == "darwinian_shift/reference_data/mus_musculus/ensembl-99/Mus_musculus.GRCm38.dna.primary_assembly.fa.gz"
def _test_get_uniprot_acc_from_transcript_id():
assert get_uniprot_acc_from_transcript_id("ENST00000263388") == "Q9UM47"
def test_read_vcf():
tsv_df = pd.read_csv(MUTATION_DATA_FILE, sep="\t")
bases = ['A', 'C', 'G', 'T']
tsv_df = tsv_df[(tsv_df['ref'].isin(bases)) & (tsv_df['mut'].isin(bases))]
vcf_df = read_sbs_from_vcf(MUTATION_DATA_FILE[:-4] + '.vcf')
tsv_df['chr'] = tsv_df['chr'].astype(str)
assert_frame_equal(sort_dataframe(tsv_df[['chr', 'pos', 'ref', 'mut']]), sort_dataframe(vcf_df))
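# A minimal sketch of what a reverse_complement() helper like the one tested above
# commonly looks like; the real implementation in darwinian_shift.utils.util_functions
# may differ.
def _reverse_complement_sketch(seq):
    complement = str.maketrans("ACGTacgt", "TGCAtgca")
    return seq.translate(complement)[::-1]
# e.g. _reverse_complement_sketch("ATGC") == "GCAT"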
loop-operators.py | EnescanAkyuz/Python_Beginner | 4 | 6632552
# string = 'hello'
# list = []
# for result in string:
# list.append(result)
# print(list)
# # these two operations are the same
# mylist = [result for result in string]
# print(mylist)
# years = [2002,1976,1979,2007,1956]
# age = [2020-year for year in years]
# print(age)
numbers = []
for x in range(2):
for y in range(2):
numbers.append((x,y))
print(numbers)
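# The nested loops above can also be written as a single list comprehension,
# in the same spirit as the commented-out examples at the top of this file.
pairs = [(x, y) for x in range(2) for y in range(2)]
print(pairs)  # [(0, 0), (0, 1), (1, 0), (1, 1)]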
digger/database/__init__.py | fxxkrlab/Digger | 0 | 6632553
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from urllib import parse
from modules import load
db = load._cfg["database"]
db_string = db["connect"]
db_user = parse.quote_plus(f"{db['user']}")
db_passwd = parse.quote_plus(f"{db['passwd']}")
db_host = db["host"]
db_port = db["port"]
db_database = db["db"]
DB_URI = "{}://{}:{}@{}:{}/{}".format(
db_string, db_user, db_passwd, db_host, db_port, db_database
)
def start() -> scoped_session:
engine = create_engine(DB_URI, client_encoding="utf8")
BASE.metadata.bind = engine
BASE.metadata.create_all(engine)
return scoped_session(sessionmaker(bind=engine, autoflush=False))
BASE = declarative_base()
SESSION = start()
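# A hypothetical usage sketch, not part of the original module: the Item model,
# table name and column names are invented for illustration. Note that start()
# calls create_all() at import time, so tables for models defined afterwards
# must be created explicitly.
#
# from sqlalchemy import Column, Integer, String
# from digger.database import BASE, SESSION
#
# class Item(BASE):
#     __tablename__ = "items"
#     id = Column(Integer, primary_key=True)
#     name = Column(String)
#
# BASE.metadata.create_all()          # metadata is already bound to the engine
# SESSION.add(Item(name="example"))
# SESSION.commit()
# first_item = SESSION.query(Item).first()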
consumer/test.py | Kareem-Emad/switch | 2 | 6632554
import unittest
import responses
import json
from requests import ConnectionError
from utils import check_filter_expression_satisfied, parse_message, process_job
class TestUtils(unittest.TestCase):
def test_filters_should_include_body_data_in_expression_happy(self):
"""
filters should allow you to use the data from the body/headers/query_params
as variables in the boolean expression
happy scenario should equate to true
"""
data = {
'body': {
'test_true': True,
'test_false': False
},
'headers': {
'test_number': 1,
'test_string': 'yy'
}
}
test_truth = "data['body']['test_true'] and data['headers']['test_number'] == 1"
self.assertEqual(check_filter_expression_satisfied(test_truth, data),
True)
def test_filters_should_include_body_data_in_expression_fail(self):
"""
filters should allow you to use the data from the body/headers/query_params
as variables in the boolean expression
fail scenario should equate to false
"""
data = {
'body': {
'test_true': True,
'test_false': False
},
'headers': {
'test_number': 1,
'test_string': 'yy'
}
}
test_truth = "data['body']['test_false'] and data['headers']['test_string'] == 'yyyy'"
self.assertEqual(check_filter_expression_satisfied(test_truth, data),
False)
def test_filters_should_include_body_data_in_expression_raise_error(self):
"""
passing an invalid filter string should not break the function
it should only return False if the string cannot be evaluated or evaluates to false
"""
data = {}
test_truth = "data['body']['test_false'] and data['headers']['test_string'] == 'yyyy'"
self.assertEqual(check_filter_expression_satisfied(test_truth, data),
False)
def test_parsing_should_return_original_message_as_json_happy(self):
"""
a base64 string passed to this function should be transformed back to its
original json format
"""
encoded_data = "<KEY>
decoded_data = parse_message(encoded_data)
expected_data = {
'body': {
'message': 'The force is strong with this one...'
},
'headers': {
'alpha': 'beta',
'Content-Type': 'application/json'
},
'query_params': {
'mi': 'piacito'
},
'path_params': {
'domain': 'pipedream'
}
}
self.assertEqual(decoded_data, expected_data)
def test_parsing_should_return_original_message_as_json_fail(self):
"""
passing an invalid base64 string should not break the function
should only return false
"""
encoded_data = "fake_base64_string_will_not_parse_to_json_isa"
decoded_data = parse_message(encoded_data)
self.assertEqual(decoded_data, False)
@responses.activate
def test_process_job_happy(self):
"""
process job should pass filtering and data decoding phases
and successfully send the request to the mocked URL
"""
expected_payload = {'message': 'The force is strong with this one...'}
encoded_data = "<KEY>
filter_exp = "data['headers']['alpha'] == 'beta' and data['query_params']['mi'] == 'piacito'"
url = 'https://leadrole.cage'
responses.add(responses.POST,
f'{url}/?mi=piacito',
json={'sucess': 'thank you'},
status=200)
process_job(url=url, filter_exp=filter_exp, payload=encoded_data)
self.assertEqual(json.loads(responses.calls[0].request.body), expected_payload)
def test_process_job_fail(self):
"""
process job should pass filtering and data decoding phases
but fail to send the request because the URL cannot be reached (no response is mocked)
"""
encoded_data = "<KEY>
filter_exp = "data['headers']['alpha'] == 'beta' and data['query_params']['mi'] == 'piacito'"
url = 'https://leadrole.cage'
with self.assertRaises(ConnectionError):
process_job(url=url, filter_exp=filter_exp, payload=encoded_data)
def test_process_job_fail_decode(self):
"""
a failure in decoding should still pass successfully (the job is skipped, since there is no point in retrying it)
"""
encoded_data = "malformed_base64"
filter_exp = "data['headers']['alpha'] == 'beta' and data['query_params']['mi'] == 'piacito'"
url = 'https://leadrole.cage'
process_job(url=url, filter_exp=filter_exp, payload=encoded_data)
def test_process_job_fail_filter(self):
"""
a failure in filtering should still pass successfully (the job is skipped, since there is no point in retrying it)
"""
encoded_data = "<KEY>
filter_exp = "data['headers']['alpha'] == 'beta' and data['query_params']['mi'] == 'piacito' and unkown"
url = 'https://leadrole.cage'
process_job(url=url, filter_exp=filter_exp, payload=encoded_data)
if __name__ == '__main__':
unittest.main()
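# The `utils` module under test is not shown in this file. The sketch below is
# only a guess at what the tested helpers might look like, inferred from the
# assertions above; the real implementations may differ.
#
# import base64, json
#
# def parse_message(encoded):
#     try:
#         return json.loads(base64.b64decode(encoded))
#     except Exception:
#         return False
#
# def check_filter_expression_satisfied(expression, data):
#     try:
#         return bool(eval(expression, {"__builtins__": {}}, {"data": data}))
#     except Exception:
#         return False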
src/sage/parallel/use_fork.py | saraedum/sage-renamed | 3 | 6632555
"""
Parallel iterator built using the ``fork()`` system call
"""
#*****************************************************************************
# Copyright (C) 2010 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import, print_function
from shutil import rmtree
from cysignals.alarm import AlarmInterrupt, alarm, cancel_alarm
from sage.interfaces.process import ContainChildren
from sage.misc.misc import walltime
class WorkerData(object):
"""
Simple class which stores data about a running ``p_iter_fork``
worker.
This just stores three attributes:
- ``input``: the input value used by this worker
- ``starttime``: the walltime when this worker started
- ``failure``: an optional message indicating the kind of failure
EXAMPLES::
sage: from sage.parallel.use_fork import WorkerData
sage: W = WorkerData(42); W
<sage.parallel.use_fork.WorkerData object at ...>
sage: W.starttime # random
1499330252.463206
"""
def __init__(self, input, starttime=None, failure=""):
r"""
See the class documentation for description of the inputs.
EXAMPLES::
sage: from sage.parallel.use_fork import WorkerData
sage: W = WorkerData(42)
"""
self.input = input
self.starttime = starttime or walltime()
self.failure = failure
class p_iter_fork(object):
"""
A parallel iterator implemented using ``fork()``.
INPUT:
- ``ncpus`` -- the maximal number of simultaneous
subprocesses to spawn
- ``timeout`` -- (float, default: 0) wall time in seconds until
a subprocess is automatically killed
- ``verbose`` -- (default: False) whether to print
anything about what the iterator does (e.g., killing
subprocesses)
- ``reset_interfaces`` -- (default: True) whether to reset
all pexpect interfaces
EXAMPLES::
sage: X = sage.parallel.use_fork.p_iter_fork(2,3, False); X
<sage.parallel.use_fork.p_iter_fork object at ...>
sage: X.ncpus
2
sage: X.timeout
3.0
sage: X.verbose
False
"""
def __init__(self, ncpus, timeout=0, verbose=False, reset_interfaces=True):
"""
Create a ``fork()``-based parallel iterator.
See the class documentation for description of the inputs.
EXAMPLES::
sage: X = sage.parallel.use_fork.p_iter_fork(2,3, False); X
<sage.parallel.use_fork.p_iter_fork object at ...>
sage: X.ncpus
2
sage: X.timeout
3.0
sage: X.verbose
False
"""
self.ncpus = int(ncpus)
if self.ncpus != ncpus: # check that there wasn't a roundoff
raise TypeError("ncpus must be an integer")
self.timeout = float(timeout) # require a float
self.verbose = verbose
self.reset_interfaces = reset_interfaces
def __call__(self, f, inputs):
"""
Parallel iterator using ``fork()``.
INPUT:
- ``f`` -- a function (or more generally, any callable)
- ``inputs`` -- a list of pairs ``(args, kwds)`` to be used as
arguments to ``f``, where ``args`` is a tuple and ``kwds`` is
a dictionary.
OUTPUT:
An iterator over 2-tuples ``((args, kwds), value)``, where ``value`` is the
result of ``f(*args, **kwds)``, or an error string if the subprocess failed.
EXAMPLES::
sage: F = sage.parallel.use_fork.p_iter_fork(2,3)
sage: sorted(list( F( (lambda x: x^2), [([10],{}), ([20],{})])))
[(([10], {}), 100), (([20], {}), 400)]
sage: sorted(list( F( (lambda x, y: x^2+y), [([10],{'y':1}), ([20],{'y':2})])))
[(([10], {'y': 1}), 101), (([20], {'y': 2}), 402)]
TESTS:
The output of functions decorated with :func:`parallel` is read
as a pickle by the parent process. We intentionally break the
unpickling and demonstrate that this failure is handled
gracefully (the exception is put in the list instead of the
answer)::
sage: Polygen = parallel(polygen)
sage: list(Polygen([QQ]))
[(((Rational Field,), {}), x)]
sage: from sage.structure.sage_object import unpickle_override, register_unpickle_override
sage: register_unpickle_override('sage.rings.polynomial.polynomial_rational_flint', 'Polynomial_rational_flint', Integer)
sage: L = list(Polygen([QQ]))
sage: L
[(((Rational Field,), {}),
'INVALID DATA __init__() takes at most 2 positional arguments (4 given)')]
Fix the unpickling::
sage: del unpickle_override[('sage.rings.polynomial.polynomial_rational_flint', 'Polynomial_rational_flint')]
sage: list(Polygen([QQ,QQ]))
[(((Rational Field,), {}), x), (((Rational Field,), {}), x)]
"""
n = self.ncpus
v = list(inputs)
import os, sys, signal
from sage.structure.sage_object import loads
from sage.misc.temporary_file import tmp_dir
dir = tmp_dir()
timeout = self.timeout
workers = {}
try:
while len(v) > 0 or len(workers) > 0:
# Spawn up to n subprocesses
while len(v) > 0 and len(workers) < n:
v0 = v.pop(0) # Input value for the next subprocess
with ContainChildren():
pid = os.fork()
# The way fork works is that pid returns the
# nonzero pid of the subprocess for the master
# process and returns 0 for the subprocess.
if not pid:
# This is the subprocess.
self._subprocess(f, dir, *v0)
workers[pid] = WorkerData(v0)
if len(workers) > 0:
# Now wait for one subprocess to finish and report the result.
# However, wait at most the time since the oldest process started.
T = walltime()
if timeout:
oldest = min(W.starttime for W in workers.values())
alarm(max(timeout - (T - oldest), 0.1))
try:
pid = os.wait()[0]
cancel_alarm()
W = workers.pop(pid)
except AlarmInterrupt:
# Kill workers that are too old
for pid, W in workers.items():
if T - W.starttime > timeout:
if self.verbose:
print(
"Killing subprocess %s with input %s which took too long"
% (pid, W.input) )
os.kill(pid, signal.SIGKILL)
W.failure = " (timed out)"
except KeyError:
# Some other process exited, not our problem...
pass
else:
# collect data from process that successfully terminated
sobj = os.path.join(dir, '%s.sobj'%pid)
try:
with open(sobj) as file:
data = file.read()
except IOError:
answer = "NO DATA" + W.failure
else:
os.unlink(sobj)
try:
answer = loads(data, compress=False)
except Exception as E:
answer = "INVALID DATA {}".format(E)
out = os.path.join(dir, '%s.out'%pid)
try:
with open(out) as file:
sys.stdout.write(file.read())
os.unlink(out)
except IOError:
pass
yield (W.input, answer)
finally:
# Send SIGKILL signal to workers that are left.
if workers:
if self.verbose:
print("Killing any remaining workers...")
sys.stdout.flush()
for pid in workers:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
# If kill() failed, it is most likely because
# the process already exited.
pass
else:
try:
os.waitpid(pid, 0)
except OSError as msg:
if self.verbose:
print(msg)
# Clean up all temporary files.
rmtree(dir)
def _subprocess(self, f, dir, args, kwds={}):
"""
Setup and run evaluation of ``f(*args, **kwds)``, storing the
result in the given directory ``dir``.
This method is called by each forked subprocess.
INPUT:
- ``f`` -- a function
- ``dir`` -- name of a directory
- ``args`` -- a tuple with positional arguments for ``f``
- ``kwds`` -- (optional) a dict with keyword arguments for ``f``
TESTS:
The method ``_subprocess`` is really meant to be run only in a
subprocess. It doesn't print or return anything; the output is
saved in pickles. It redirects stdout, so we save and later
restore stdout in order not to break the doctester::
sage: saved_stdout = sys.stdout
sage: F = sage.parallel.use_fork.p_iter_fork(2,3)
sage: F._subprocess(operator.add, tmp_dir(), (1, 2))
sage: sys.stdout = saved_stdout
"""
import imp, os, sys
from sage.structure.sage_object import save
# Make it so all stdout is sent to a file so it can
# be displayed.
out = os.path.join(dir, '%s.out'%os.getpid())
sys.stdout = open(out, 'w')
# Run some commands to tell Sage that its
# pid has changed (forcing a reload of
# misc).
import sage.misc.misc
imp.reload(sage.misc.misc)
# The pexpect interfaces (and objects defined in them) are
# not valid.
if self.reset_interfaces:
sage.interfaces.quit.invalidate_all()
# Now evaluate the function f.
value = f(*args, **kwds)
# And save the result to disk.
sobj = os.path.join(dir, '%s.sobj'%os.getpid())
save(value, sobj, compress=False)
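# A stripped-down, standalone sketch (not Sage code) of the fork()/wait() pattern
# that p_iter_fork uses above, without the cysignals-based timeouts, stdout capture
# or pickling of results; for illustration only.
#
# import os
#
# def fork_map(f, inputs, ncpus=2):
#     pending, running, pipes, results = list(inputs), {}, {}, []
#     while pending or running:
#         while pending and len(running) < ncpus:   # spawn up to ncpus children
#             x = pending.pop(0)
#             r, w = os.pipe()
#             pid = os.fork()
#             if pid == 0:                          # child: compute, write, exit
#                 os.close(r)
#                 with os.fdopen(w, "w") as out:
#                     out.write(repr(f(x)))
#                 os._exit(0)
#             os.close(w)
#             running[pid], pipes[pid] = x, r
#         pid, _ = os.wait()                        # parent: reap one child
#         with os.fdopen(pipes.pop(pid)) as src:
#             results.append((running.pop(pid), src.read()))
#     return results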
lusidtools/lpt/map_instruments.py | entityoneuk/lusid-python-tools | 1 | 6632556
import os
import pandas as pd
from lusidtools.lpt import lpt
from lusidtools.lpt import lse
from lusidtools.lpt import stdargs
from lusidtools.lpt.either import Either
mapping_prefixes = {"Figi": "FIGI", "ClientInternal": "INT", "QuotePermId": "QPI"}
mapping_table = {}
def parse(extend=None, args=None):
return (
stdargs.Parser("Map Instruments", ["filename"])
.add("--folder", help="include all 'txn' files in the folder")
.add("input", nargs="*", metavar="file", help="file(s) containing instruments")
.add(
"--column",
metavar="input-column",
default="instrument_uid",
help="column name for instrument column",
)
.extend(extend)
.parse(args)
)
def process_args(api, args):
if args.folder:
args.input.extend(
[
os.path.join(args.folder, f)
for f in os.listdir(args.folder)
if "-txn-" in f and f.endswith(".csv")
]
)
df = (
pd.concat(
[lpt.read_csv(f)[[args.column]].drop_duplicates() for f in args.input],
ignore_index=True,
sort=True,
)
.drop_duplicates()
.reset_index(drop=True)
)
df.columns = ["FROM"]
df["TO"] = df["FROM"]
return map_instruments(api, df, "TO")
def main():
lpt.standard_flow(parse, lse.connect, process_args)
def map_instruments(api, df, column):
WORKING = "__:::__" # temporary column name
# Apply any known mappings to avoid unnecessary I/O
if len(mapping_table) > 0:
srs = df[column].map(mapping_table)
srs = srs[srs.notnull()]
if len(srs) > 0:
df.loc[srs.index, column] = srs
# updates the mappings table
def update_mappings(src, prefix):
mapping_table.update(
{prefix + k: v.lusid_instrument_id for k, v in src.items()}
)
def batch_query(instr_type, prefix, outstanding):
if len(outstanding) > 0:
batch = outstanding[:500] # records to process now
remainder = outstanding[500:] # remaining records
# called if the get_instruments() succeeds
def get_success(result):
get_found = result.content.values
get_failed = result.content.failed
# Update successfully found items
update_mappings(get_found, prefix)
if len(get_failed) > 0:
if instr_type == "ClientInternal":
# For un-mapped internal codes, we will try to add (upsert)
# called if the upsert_instruments() succeeds
def add_success(result):
add_worked = result.content.values
add_failed = result.content.failed
if len(add_failed) > 0:
return Either.Left("Failed to add internal instruments")
# Update successfully added items
update_mappings(add_worked, prefix)
# Kick off the next batch
return batch_query(instr_type, prefix, remainder)
# Create the upsert request from the failed items
request = {
k: api.models.InstrumentDefinition(
name=v.id, identifiers={"ClientInternal": v.id}
)
for k, v in get_failed.items()
}
return api.call.upsert_instruments(request).bind(add_success)
else:
# Instruments are not mapped. Nothing we can do.
return Either.Left(
"Failed to locate instruments of type {}".format(instr_type)
)
else:
# No failures, kick off the next batch
return batch_query(instr_type, prefix, remainder)
return api.call.get_instruments(instr_type, batch[WORKING].values).bind(
get_success
)
else:
# No records remaining. Return the now-enriched dataframe
return Either.Right(df)
def map_type(key, instr_type):
prefix = key + ":"
subset = df[df[column].str.startswith(prefix)]
# See if there are any entries of this type
if len(subset) > 0:
width = len(prefix)
uniques = subset[[column]].drop_duplicates(column)
uniques[WORKING] = uniques[column].str.slice(width)
def map_success(v):
df.loc[subset.index, column] = subset[column].map(mapping_table)
return Either.Right(df)
return batch_query(instr_type, prefix, uniques).bind(map_success)
else:
# Nothing to be done, pass the full result back
return Either.Right(df)
return (
map_type("FIGI", "Figi")
.bind(lambda r: map_type("INT", "ClientInternal"))
.bind(lambda r: map_type("QPI", "QuotePermId"))
)
def include_mappings(path):
if path:
df = lpt.read_csv(path)
mapping_table.update(df.set_index("FROM")["TO"].to_dict())
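# A hypothetical usage sketch, not part of the original module. File names and
# identifier values are invented; map_instruments expects a DataFrame whose
# identifier column holds prefixed IDs ("FIGI:...", "INT:..." or "QPI:...") and
# returns an Either wrapping the enriched DataFrame.
#
# import pandas as pd
# from lusidtools.lpt import lse
# from lusidtools.lpt.map_instruments import include_mappings, map_instruments
#
# api = lse.connect(...)                      # authenticated LUSID API wrapper
# include_mappings("known_mappings.csv")      # optional pre-seeded FROM/TO pairs
# df = pd.DataFrame({"instrument_uid": ["FIGI:BBG000BLNNH6", "INT:ABC123"]})
# result = map_instruments(api, df, "instrument_uid")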
covid19_id/update_covid19/update.py | hexatester/covid19-id | 0 | 6632557
import attr
from datetime import datetime
from typing import List, Optional
from . import Penambahan
from . import Harian
from . import Total
@attr.dataclass(slots=True)
class Update:
penambahan: Penambahan
harian: List[Harian]
total: Total
_today: Optional[Harian] = None
@property
def today(self) -> Optional[Harian]:
if self._today:
return self._today
now = datetime.now().date()
for harian in self.harian:
if harian.datetime.date() == now:
self._today = harian
break
return self._today
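# A small hypothetical usage sketch, not part of the original module. It assumes
# Penambahan, Harian and Total instances are built elsewhere from the parsed API
# payload; only the Update wrapper itself is shown.
#
# update = Update(penambahan=penambahan, harian=list_of_harian, total=total)
# today = update.today    # Harian entry whose date matches today, cached in _today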
oxasl/prequantify.py | ibme-qubic/oxasl | 1 | 6632558
#!/bin/env python
"""
OXASL - Pre-quantification module
This is run before model fitting and should generate effectively differenced ASL data
e.g. by multiphase decoding
Copyright (c) 2008-2020 University of Oxford
"""
try:
import oxasl_mp
except ImportError:
oxasl_mp = None
def run(wsp):
if wsp.asldata.iaf == "mp":
if oxasl_mp is None:
raise ValueError("Multiphase data supplied but oxasl_mp is not installed")
oxasl_mp.run(wsp)
deform/tests/test_api.py | sixfeetup/deform | 0 | 6632559 | import unittest
class TestAPI(unittest.TestCase):
def test_it(self):
# none of these imports should fail
from deform import Form
from deform import Button
from deform import Field
from deform import FileData
from deform import ValidationFailure
from deform import TemplateError
from deform import ZPTRendererFactory
from deform import default_renderer
from deform import widget
| import unittest
class TestAPI(unittest.TestCase):
def test_it(self):
# none of these imports should fail
from deform import Form
from deform import Button
from deform import Field
from deform import FileData
from deform import ValidationFailure
from deform import TemplateError
from deform import ZPTRendererFactory
from deform import default_renderer
from deform import widget
| en | 0.878856 | # none of these imports should fail | 1.890921 | 2 |
cutters.py | CockHeroJoe/CHAP | 0 | 6632560 | <reponame>CockHeroJoe/CHAP
from abc import ABCMeta, abstractmethod
import random
from contextlib import ExitStack
from tkinter import TclError
from moviepy.Clip import Clip
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.fx.resize import resize
from utils import draw_progress_bar, SourceFile
from parsing import BeatMeterConfig, OutputConfig, RoundConfig
from preview import PreviewGUI
def get_cutter(
stack: ExitStack,
output_config: OutputConfig,
round_config: RoundConfig
):
sources = [SourceFile(stack.enter_context(VideoFileClip(s)))
for s in round_config.sources]
bmcfg = (round_config.beatmeter_config
if round_config.bmcfg else None)
Cutter = {
"skip": Skipper,
"interleave": Interleaver,
"randomize": Randomizer,
"sequence": Sequencer
}[round_config.cut]
return Cutter(output_config.versions,
output_config.fps,
(output_config.xdim, output_config.ydim),
round_config.duration,
round_config.speed,
round_config.bpm,
bmcfg,
sources)
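# A schematic usage sketch, not part of the original module. Building the
# OutputConfig and RoundConfig objects is assumed to happen elsewhere (e.g. from
# the parsed CHAP configuration); only the call shape is illustrated here.
#
# from contextlib import ExitStack
#
# with ExitStack() as stack:
#     cutter = get_cutter(stack, output_config, round_config)
#     clips = cutter.get_compilation()   # one resized moviepy clip per beat-aligned cut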
class _AbstractCutter(metaclass=ABCMeta):
def __init__(
self,
versions: int,
fps: int,
dims: (int, int),
duration: float,
speed: int,
bpm: float,
beatmeter_config: BeatMeterConfig,
sources: [VideoFileClip]
) -> None:
self.versions = versions
self.fps = fps
self.dims = dims
self.duration = duration
self.speed = speed
self.bpm = bpm
self.sources = sources
self.bmcfg = beatmeter_config
self.all_sources_length = sum(map(lambda s: s.clip.duration, sources))
self._index = 0
@abstractmethod
def get_source_clip_index(self, length: float) -> int:
pass
def get_compilation(self):
duration = self.duration
clips = []
# Cut randomized clips from random videos in chronological order
section_index = 0
subsection_index = 0
sections = self.bmcfg.sections if self.bmcfg else []
current_time = 0.0
frame_time = 1.0 / self.fps
while duration - current_time > frame_time:
# Select random clip length that is a whole multiple of beats long
if self.bmcfg and len(sections) >= 1:
# Use beatmeter generator config: time cuts to beats perfectly
if subsection_index == 0:
# compute number of subsections in this pattern and length
section = sections[section_index]
next_section_start = (sections[section_index + 1].start
if section_index + 1 < len(sections)
else section.stop)
section_length = next_section_start - section.start
subsection_length = section.pattern_duration or (
4 * 60 / (section.bpm or self.bpm))
subsection_length *= 2 ** (3 - self.speed)
if subsection_length >= section_length:
subsection_length = section_length
num_subsections = round(section_length / subsection_length)
elif subsection_index == num_subsections:
# Go to next beat pattern section
subsection_index = 0
section_index += 1
continue
length = subsection_length
if section_index == 0 and subsection_index == 0:
# First cut in round is longer, since beat hasn't started
length += sections[0].start
elif (section_index == len(sections) - 1
and subsection_index == num_subsections - 1):
# Last cut in round extended to match Base track length
length = duration - current_time
elif subsection_index == num_subsections - 1:
# Last cut per section is adjusted to account for drift
# due to imperfect beat timings given in beatmeter config
length = sections[section_index + 1].start - current_time
subsection_index += 1
else:
# Simple accelerating cuts if beatmeter config is not provided
seconds_per_beat = 60 / self.bpm
current_multiple = 4 * 2 ** (5 - self.speed)
current_progress = current_time / duration
for step in (0.25, 0.5, 0.75):
if current_progress > step:
current_multiple /= 2
length = seconds_per_beat * max(1, current_multiple)
# Cut multiple clips from various sources
out_clips = []
for _ in range(self.versions):
# Advance all clips by a similar percentage of total duration
self.advance_sources(length, current_time)
# Get the next clip source
i = self.get_source_clip_index(length)
out_clip = self.sources[i].clip
start = self.sources[i].start
# Cut a subclip
out_clip = out_clip.subclip(start, start + length)
out_clip = resize(out_clip, self.dims)
out_clips.append(out_clip)
self.choose_version(out_clips)
clips.append(out_clips[self._chosen or 0])
current_time += length
# TODO: move progress into GUI
if self.versions > 1:
draw_progress_bar(min(1, current_time / duration), 80)
if self.versions > 1:
print("\nDone!")
return clips
@abstractmethod
def advance_sources(self, length: float, current_time: float):
pass
def choose_version(self, clips) -> int:
self._chosen = None
if self.versions > 1:
try:
PreviewGUI(clips, self._choose).run()
except TclError:
pass
            if self._chosen is None:
                print("\r{}".format("Preview disabled: choices randomized"))
                self.versions = 1
return self._chosen
def _choose(self, version: int):
self._chosen = version
def _set_start(self, source: SourceFile, start: float, length: float):
random_start = SourceFile.get_random_start()
if source.clip.duration > 3 * random_start:
min_start = random_start
else:
min_start = 0
source.start = max(min_start, start)
def _advance_source(
self,
source: SourceFile,
length: float,
current_time: float
):
current_progress = current_time / self.duration
time_in_source = current_progress * source.clip.duration
randomized_start = random.gauss(time_in_source, self.versions * length)
randomized_start = min(randomized_start, source.clip.duration - length)
self._set_start(source, randomized_start, length)
class _AbstractRandomSelector(_AbstractCutter):
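    # Picks a source index by rejection sampling: draw a random source and retry while
    # the footage remaining after its current start is shorter than the requested length;
    # after 1000 failed draws, halve every start position to free up material.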
def get_source_clip_index(self, length: float) -> int:
i = -1
counter = 0
while i == -1:
i = random.randrange(0, len(self.sources))
if self.sources[i].start + length > self.sources[i].clip.duration:
i = -1
counter += 1
if counter >= 1000:
print("Warning: not enough source material")
for src in self.sources:
src.start /= 2
return i
class Interleaver(_AbstractRandomSelector):
def advance_sources(self, length: float, current_time: float):
for source in self.sources:
self._advance_source(source, length, current_time)
class Randomizer(_AbstractRandomSelector):
def advance_sources(self, length: float, current_time: float):
for source in self.sources:
max_start = source.clip.duration - length
randomized_start = random.uniform(0, max_start)
self._set_start(source, randomized_start, length)
class Sequencer(_AbstractCutter):
def get_source_clip_index(self, length: float) -> int:
source = self.sources[self._index]
if source.start + length > source.clip.duration:
print("Warning: not enough source material")
source.start /= 2
return self.get_source_clip_index(length)
else:
index = self._index
self._index += 1
self._index %= len(self.sources)
return index
def advance_sources(self, length: float, current_time: float):
self._advance_source(self.sources[self._index], length, current_time)
class Skipper(_AbstractCutter):
def get_source_clip_index(self, length: float) -> int:
source = self.sources[self._index]
if source.start + length >= source.clip.duration:
self._index += 1
            if self._index >= len(self.sources):
                print("Warning: not enough source material")
                for source in self.sources:
                    source.start /= 2
                self._index = 0
            # Reset the new source's start only after the index has been wrapped,
            # otherwise self.sources[self._index] would raise an IndexError
            self._set_start(self.sources[self._index], 0, length)
return self._index
def advance_sources(self, length: float, current_time: float):
source = self.sources[self._index]
length_fraction = source.clip.duration / self.all_sources_length
completed_fraction = sum(map(
lambda s: s.clip.duration,
self.sources[:self._index]))
completed_fraction /= self.all_sources_length
current_progress = current_time / self.duration
current_progress_in_source = ((current_progress - completed_fraction)
/ length_fraction)
time_in_source = current_progress_in_source * source.clip.duration
randomized_start = random.gauss(time_in_source, self.versions * length)
self._set_start(source, randomized_start, length)
| from abc import ABCMeta, abstractmethod
import random
from contextlib import ExitStack
from tkinter import TclError
from moviepy.Clip import Clip
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.fx.resize import resize
from utils import draw_progress_bar, SourceFile
from parsing import BeatMeterConfig, OutputConfig, RoundConfig
from preview import PreviewGUI
def get_cutter(
stack: ExitStack,
output_config: OutputConfig,
round_config: RoundConfig
):
sources = [SourceFile(stack.enter_context(VideoFileClip(s)))
for s in round_config.sources]
bmcfg = (round_config.beatmeter_config
if round_config.bmcfg else None)
Cutter = {
"skip": Skipper,
"interleave": Interleaver,
"randomize": Randomizer,
"sequence": Sequencer
}[round_config.cut]
return Cutter(output_config.versions,
output_config.fps,
(output_config.xdim, output_config.ydim),
round_config.duration,
round_config.speed,
round_config.bpm,
bmcfg,
sources)
class _AbstractCutter(metaclass=ABCMeta):
def __init__(
self,
versions: int,
fps: int,
dims: (int, int),
duration: float,
speed: int,
bpm: float,
beatmeter_config: BeatMeterConfig,
sources: [VideoFileClip]
) -> [Clip]:
self.versions = versions
self.fps = fps
self.dims = dims
self.duration = duration
self.speed = speed
self.bpm = bpm
self.sources = sources
self.bmcfg = beatmeter_config
self.all_sources_length = sum(map(lambda s: s.clip.duration, sources))
self._index = 0
@abstractmethod
def get_source_clip_index(self, length: float) -> int:
pass
def get_compilation(self):
duration = self.duration
clips = []
# Cut randomized clips from random videos in chronological order
section_index = 0
subsection_index = 0
sections = self.bmcfg.sections if self.bmcfg else []
current_time = 0.0
frame_time = 1.0 / self.fps
while duration - current_time > frame_time:
# Select random clip length that is a whole multiple of beats long
if self.bmcfg and len(sections) >= 1:
# Use beatmeter generator config: time cuts to beats perfectly
if subsection_index == 0:
# compute number of subsections in this pattern and length
section = sections[section_index]
next_section_start = (sections[section_index + 1].start
if section_index + 1 < len(sections)
else section.stop)
section_length = next_section_start - section.start
subsection_length = section.pattern_duration or (
4 * 60 / (section.bpm or self.bpm))
subsection_length *= 2 ** (3 - self.speed)
if subsection_length >= section_length:
subsection_length = section_length
num_subsections = round(section_length / subsection_length)
elif subsection_index == num_subsections:
# Go to next beat pattern section
subsection_index = 0
section_index += 1
continue
length = subsection_length
if section_index == 0 and subsection_index == 0:
# First cut in round is longer, since beat hasn't started
length += sections[0].start
elif (section_index == len(sections) - 1
and subsection_index == num_subsections - 1):
# Last cut in round extended to match Base track length
length = duration - current_time
elif subsection_index == num_subsections - 1:
# Last cut per section is adjusted to account for drift
# due to imperfect beat timings given in beatmeter config
length = sections[section_index + 1].start - current_time
subsection_index += 1
else:
# Simple accelerating cuts if beatmeter config is not provided
seconds_per_beat = 60 / self.bpm
current_multiple = 4 * 2 ** (5 - self.speed)
current_progress = current_time / duration
for step in (0.25, 0.5, 0.75):
if current_progress > step:
current_multiple /= 2
length = seconds_per_beat * max(1, current_multiple)
# Cut multiple clips from various sources
out_clips = []
for _ in range(self.versions):
                # Advance all clips by a similar percentage of total duration
self.advance_sources(length, current_time)
# Get the next clip source
i = self.get_source_clip_index(length)
out_clip = self.sources[i].clip
start = self.sources[i].start
# Cut a subclip
out_clip = out_clip.subclip(start, start + length)
out_clip = resize(out_clip, self.dims)
out_clips.append(out_clip)
self.choose_version(out_clips)
clips.append(out_clips[self._chosen or 0])
current_time += length
# TODO: move progress into GUI
if self.versions > 1:
draw_progress_bar(min(1, current_time / duration), 80)
if self.versions > 1:
print("\nDone!")
return clips
@abstractmethod
def advance_sources(self, length: float, current_time: float):
pass
def choose_version(self, clips) -> int:
self._chosen = None
if self.versions > 1:
try:
PreviewGUI(clips, self._choose).run()
except TclError:
pass
            if self._chosen is None:
                print("\r{}".format("Preview disabled: choices randomized"))
                self.versions = 1
return self._chosen
def _choose(self, version: int):
self._chosen = version
def _set_start(self, source: SourceFile, start: float, length: float):
random_start = SourceFile.get_random_start()
if source.clip.duration > 3 * random_start:
min_start = random_start
else:
min_start = 0
source.start = max(min_start, start)
def _advance_source(
self,
source: SourceFile,
length: float,
current_time: float
):
current_progress = current_time / self.duration
time_in_source = current_progress * source.clip.duration
randomized_start = random.gauss(time_in_source, self.versions * length)
randomized_start = min(randomized_start, source.clip.duration - length)
self._set_start(source, randomized_start, length)
class _AbstractRandomSelector(_AbstractCutter):
def get_source_clip_index(self, length: float) -> int:
i = -1
counter = 0
while i == -1:
i = random.randrange(0, len(self.sources))
if self.sources[i].start + length > self.sources[i].clip.duration:
i = -1
counter += 1
if counter >= 1000:
print("Warning: not enough source material")
for src in self.sources:
src.start /= 2
return i
class Interleaver(_AbstractRandomSelector):
def advance_sources(self, length: float, current_time: float):
for source in self.sources:
self._advance_source(source, length, current_time)
class Randomizer(_AbstractRandomSelector):
def advance_sources(self, length: float, current_time: float):
for source in self.sources:
max_start = source.clip.duration - length
randomized_start = random.uniform(0, max_start)
self._set_start(source, randomized_start, length)
class Sequencer(_AbstractCutter):
def get_source_clip_index(self, length: float) -> int:
source = self.sources[self._index]
if source.start + length > source.clip.duration:
print("Warning: not enough source material")
source.start /= 2
return self.get_source_clip_index(length)
else:
index = self._index
self._index += 1
self._index %= len(self.sources)
return index
def advance_sources(self, length: float, current_time: float):
self._advance_source(self.sources[self._index], length, current_time)
class Skipper(_AbstractCutter):
def get_source_clip_index(self, length: float) -> int:
source = self.sources[self._index]
if source.start + length >= source.clip.duration:
self._index += 1
            if self._index >= len(self.sources):
                print("Warning: not enough source material")
                for source in self.sources:
                    source.start /= 2
                self._index = 0
            # Reset the new source's start only after the index has been wrapped,
            # otherwise self.sources[self._index] would raise an IndexError
            self._set_start(self.sources[self._index], 0, length)
return self._index
def advance_sources(self, length: float, current_time: float):
source = self.sources[self._index]
length_fraction = source.clip.duration / self.all_sources_length
completed_fraction = sum(map(
lambda s: s.clip.duration,
self.sources[:self._index]))
completed_fraction /= self.all_sources_length
current_progress = current_time / self.duration
current_progress_in_source = ((current_progress - completed_fraction)
/ length_fraction)
time_in_source = current_progress_in_source * source.clip.duration
randomized_start = random.gauss(time_in_source, self.versions * length)
self._set_start(source, randomized_start, length) | en | 0.822291 | # Cut randomized clips from random videos in chronological order # Select random clip length that is a whole multiple of beats long # Use beatmeter generator config: time cuts to beats perfectly # compute number of subsections in this pattern and length # Go to next beat pattern section # First cut in round is longer, since beat hasn't started # Last cut in round extended to match Base track length # Last cut per section is adjusted to account for drift # due to imperfect beat timings given in beatmeter config # Simple accelerating cuts if beatmeter config is not provided # Cut multiple clips from various sources # Advance all clips by simlar percentage of total duration # Get the next clip source # Cut a subclip # TODO: move progress into GUI | 2.000347 | 2 |
qf_lib/backtesting/events/time_event/periodic_event/periodic_event.py | webclinic017/qf-lib | 198 | 6632561 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod, ABCMeta
from datetime import datetime
from typing import Dict, List
from qf_lib.backtesting.events.time_event.regular_date_time_rule import RegularDateTimeRule
from qf_lib.backtesting.events.time_event.regular_time_event.regular_time_event import RegularTimeEvent
from qf_lib.backtesting.events.time_event.time_event import TimeEvent
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta
class PeriodicEvent(TimeEvent, metaclass=ABCMeta):
"""
TimeEvent which is triggered every certain amount of time (given a certain frequency) within a predefined time
range.
    As some of the available frequencies (e.g. Frequency.MIN_15) cannot be implemented using only one
RegularTimeEvent, the PeriodicEvent contains a list of multiple custom RegularTimeEvents (_events_list).
Each of these RegularTimeEvents contains its own _trigger_time_rule. These rules are used to define the desired
frequency of PeriodicEvent.
E.g. PeriodicEvent, which has the frequency parameter set to Frequency.MIN_15 is further translated to a list
of multiple RegularTimeEvents, which correspond to RegularDateTimeRules with following trigger time parameters:
{"minute": 0, "second": 0, "microsecond": 0} - event triggered at every full hour, eg. 12:00, 13:00 etc.
{"minute": 15, "second": 0, "microsecond": 0} - event triggered at 12:15, 13:15 etc.
{"minute": 30, "second": 0, "microsecond": 0} - event triggered at 12:30, 13:30 etc.
{"minute": 45, "second": 0, "microsecond": 0} - event triggered at 12:45, 13:45 etc.
All the above defined Events together, create an Event which is triggered every 15 minutes.
The PeriodicEvent is triggered only within the [start_time, end_time] time range with the given frequency.
    It is always triggered at the start_time, but not necessarily at the end_time.
E.g.
start_time = {"hour": 13, "minute": 20, "second": 0, "microsecond": 0},
end_time = {"hour": 16, "minute": 0, "second": 0, "microsecond": 0},
frequency = Frequency.MIN_30
This event will be triggered at 13:20, 13:50, 14:20, 14:50, 15:20, 15:50, but not at 16:00.
Both start_time and end_time dictionaries should contain "hour", "minute", "second" and "microsecond" fields.
"""
start_time = None # type: Dict[str, int] # {"hour": 13, "minute": 20, "second": 0, "microsecond": 0}
end_time = None # type: Dict[str, int] # {"hour": 20, "minute": 0, "second": 0, "microsecond": 0}
frequency = None # type: Frequency
_events_list = None # List[RegularTimeEvent]
def __init__(self):
if None in (self.start_time, self.end_time):
raise ValueError("set up the start and end time by calling set_start_and_end_time() before using the event")
if self.frequency is None:
raise ValueError("set up the frequency by calling set_frequency() before using the event")
self._init_events_list()
@classmethod
def set_start_and_end_time(cls, start_time: Dict[str, int], end_time: Dict[str, int]):
cls.start_time = start_time
cls.end_time = end_time
@classmethod
def set_frequency(cls, frequency: Frequency):
cls.frequency = frequency
def _init_events_list(self):
"""
Updates the _events_list - list consisting of custom RegularTimeEvents (PeriodicRegularEvent), which are used to
compute the next trigger time of PeriodicEvent. The notify function of PeriodicRegularEvent is empty, as it is
not currently used - the purpose of defining the PeriodicRegularEvents is to easily obtain the next trigger
time and then notify the listener attached to the PeriodicEvent.
"""
self._events_list = []
# Generate the list of time dictionaries
_trigger_time_list = self._frequency_to_trigger_time()
for _trigger_time in _trigger_time_list:
# Define a custom regular time event
self._events_list.append(self._PeriodicRegularEvent(_trigger_time, self.start_time, self.end_time))
class _PeriodicRegularEvent(RegularTimeEvent):
_trigger_time = None
def __init__(self, trigger_time, start_time, end_time):
self._trigger_time = trigger_time
self._start_time = start_time
self._end_time = end_time
self._trigger_time_rule = RegularDateTimeRule(**trigger_time)
@classmethod
def trigger_time(cls) -> RelativeDelta:
return RelativeDelta(**cls._trigger_time)
def next_trigger_time(self, now: datetime) -> datetime:
def _within_time_frame(_time):
return (_time + RelativeDelta(**self._end_time) >= _time) and \
(_time + RelativeDelta(**self._start_time) <= _time)
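            # Note: a RelativeDelta built from absolute fields ("hour", "minute", ...)
            # replaces those fields rather than adding an offset (as with dateutil's
            # relativedelta), so the helper above compares `now` against the start/end
            # times on the same calendar day.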
_start_time_rule = RegularDateTimeRule(**self._start_time)
# Before midnight and after end time or after midnight and before start time (e.g. after the market
# close and before the market open), the next trigger time should always point to the next time
# triggered by the _start_time_rule (e.g. to the market open time)
if not _within_time_frame(now):
_next_trigger_time = _start_time_rule.next_trigger_time(now)
else:
_next_trigger_time = self._trigger_time_rule.next_trigger_time(now)
# If the next trigger time is outside the desired time frame, find the next event time moving
# current time to the next start_time date.
if not _within_time_frame(_next_trigger_time):
_next_trigger_time = _start_time_rule.next_trigger_time(_next_trigger_time)
# Check if the next trigger time points to Saturday or Sunday and if so, shift it to Monday
if _next_trigger_time.weekday() in (5, 6):
_next_trigger_time_shifted = _next_trigger_time + RelativeDelta(weekday=0)
assert _next_trigger_time_shifted >= _next_trigger_time
_next_trigger_time = _next_trigger_time_shifted
return _next_trigger_time
def notify(self, listener) -> None:
pass
def next_trigger_time(self, now: datetime) -> datetime:
# List of times triggered by any of the constituent of the PeriodicEvent. The next trigger time of PeriodicEvent
# is the first of the times listed in next_trigger_times list.
next_trigger_times = [event.next_trigger_time(now) for event in self._events_list]
return min(next_trigger_times)
@abstractmethod
def notify(self, listener) -> None:
pass
def _frequency_to_trigger_time(self) -> List[Dict]:
"""
Helper function, which creates a list of regular time dictionaries, which are compliant with the certain
given self.frequency and shifted according to the self.start_time.
E.g. in case of frequency equal to 30 minutes and zeroed self.start_time = {"minute": 0, "second": 0}
the function will create the following list of dictionaries:
[{"minute": 0, "second": 0, "microsecond": 0},
{"minute": 30, "second": 0, "microsecond": 0}].
In case of 15 minutes frequency and self.start_time = {"minute": 20, "second": 0} the output will
be as follows:
[{"minute": 20, "second": 0, "microsecond": 0},
{"minute": 35, "second": 0, "microsecond": 0},
{"minute": 55, "second": 0, "microsecond": 0},
{"minute": 5, "second": 0, "microsecond": 0}].
Two most important parameters used as a base for generating the trigger time dictionaries are self.frequency
and the self.start_time.
self.start_time - this dictionary contains e.g. minute, second fields, which denote the shift which should be
applied to the returned values. It is necessary to provide the values of self.start_time to align the returned
        values to the start time (e.g. the market open time). If the market opens at 9:30, then without shifting
        the data, the returned list will be [{"minute": 0, "second": 0, "microsecond": 0}]. If we use
        this dictionary as the base for time triggering, the events will be triggered at 10:00, 11:00, etc.
        After providing a self.start_time shift equal to {"minute": 30}, it will be possible to trigger events at
9:30, 10:30, etc.
"""
# Helper dictionary consisting of number of minutes per hour, months per year, etc. used to compute the time
# shift for a given frequency
max_time_values = {
"microsecond": 1000000,
"second": 60,
"minute": 60,
"hour": 24,
"weekday": 7,
"month": 12,
}
def _generate_time_dict(slice_field: str, slice_field_value: int, shift: Dict) -> Dict:
"""
Generate a single time dictionary, e.g. {"minute": 13, "second": 0, "microsecond": 0}
Parameters:
===========
slice_field
The first meaningful time parameter, which will appear in the created time dictionary. E.g. in case of
minutely frequency, we would like to receive {"second": 0, "microsecond": 0} as function output, as this
dictionary may be used to trigger events for every minute, when seconds and microseconds part are equal to
0. Thus, in case of 1 minute frequency, slice_field, the first meaningful time field is "second".
slice_field_value
The value, which should appear in the final dictionary for the given slice_field, e.g. {"second": 14}. This
value may be changed in final dictionary, to align with the provided shift parameter.
shift
Time dictionary, denoting values of time shift, that should be applied for each time parameter ("hour",
"minute" etc.). E.g. In case of slice_field = "minute", slice_field_value = 45 and shift = {"minute": 20}
the following dictionary will be returned:
{"minute": 5, "second": 0, "microsecond": 0}.
"""
# All fields that may appear in the output dictionary
fields = ("year", "month", "day", "weekday", "hour", "minute", "second", "microsecond")
# Create dictionary from consecutive fields, starting from the first most meaningful - slice_field. Set all
            # values to 0, except for dictionary[slice_field], which is set to the desired slice_field_value.
slice_index = fields.index(slice_field)
time_dict = dict.fromkeys(fields[slice_index:], 0)
time_dict[slice_field] = slice_field_value
# Shift the values according to the shift dictionary - the resulting dictionary should contain all these
# fields, which appear in time_dict. As some of the keys may appear in both dictionaries,
# e.g. time_dict["minute"] = 20, shift["minute"] = 30, we have to sum them to receive
# time_dict["minute"] = 50.
# In order to prevent this sum from exceeding the maximum values, we additionally use modulo function with
# the maximum value given by the max_time_values dictionary.
# E.g. time_dict["minute"] = 40, shift["minute"] = 40,
# time_dict["minute"] = 80 % max_time_values["minute"] = 20
result = {
key: (time_dict.get(key, 0) + shift.get(key, 0)) % max_time_values[key]
for key in time_dict.keys()
}
return result
# Dictionary containing the first meaningful time parameters and their corresponding values for each frequency
# The fields values are used to compute the frequency
frequency_to_time_dict = {
Frequency.MIN_1: ("second", 0),
Frequency.MIN_5: ("minute", 5),
Frequency.MIN_10: ("minute", 10),
Frequency.MIN_15: ("minute", 15),
Frequency.MIN_30: ("minute", 30),
Frequency.MIN_60: ("minute", 0),
Frequency.DAILY: ("hour", 0),
Frequency.WEEKLY: ("weekday", 0),
Frequency.MONTHLY: ("day", 0),
Frequency.QUARTERLY: ("month", 3),
Frequency.SEMI_ANNUALLY: ("month", 6),
Frequency.YEARLY: ("month", 0)
}
# Get the first meaningful field and the field value
field, time_freq = frequency_to_time_dict[self.frequency]
        # Create a range of field_values (e.g. in case of 15 minutes frequency: (0, 15, 30, 45))
freq_range = range(0, max_time_values[field], time_freq) if time_freq > 0 else (0,)
# Create a list of time dictionaries for each field and its value, given by the freq_range
time_dictionaries = [_generate_time_dict(field, field_value, self.start_time) for field_value in freq_range]
return time_dictionaries
| # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod, ABCMeta
from datetime import datetime
from typing import Dict, List
from qf_lib.backtesting.events.time_event.regular_date_time_rule import RegularDateTimeRule
from qf_lib.backtesting.events.time_event.regular_time_event.regular_time_event import RegularTimeEvent
from qf_lib.backtesting.events.time_event.time_event import TimeEvent
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta
class PeriodicEvent(TimeEvent, metaclass=ABCMeta):
"""
TimeEvent which is triggered every certain amount of time (given a certain frequency) within a predefined time
range.
    As some of the available frequencies (e.g. Frequency.MIN_15) cannot be implemented using only one
RegularTimeEvent, the PeriodicEvent contains a list of multiple custom RegularTimeEvents (_events_list).
Each of these RegularTimeEvents contains its own _trigger_time_rule. These rules are used to define the desired
frequency of PeriodicEvent.
E.g. PeriodicEvent, which has the frequency parameter set to Frequency.MIN_15 is further translated to a list
of multiple RegularTimeEvents, which correspond to RegularDateTimeRules with following trigger time parameters:
{"minute": 0, "second": 0, "microsecond": 0} - event triggered at every full hour, eg. 12:00, 13:00 etc.
{"minute": 15, "second": 0, "microsecond": 0} - event triggered at 12:15, 13:15 etc.
{"minute": 30, "second": 0, "microsecond": 0} - event triggered at 12:30, 13:30 etc.
{"minute": 45, "second": 0, "microsecond": 0} - event triggered at 12:45, 13:45 etc.
All the above defined Events together, create an Event which is triggered every 15 minutes.
The PeriodicEvent is triggered only within the [start_time, end_time] time range with the given frequency.
    It is always triggered at the start_time, but not necessarily at the end_time.
E.g.
start_time = {"hour": 13, "minute": 20, "second": 0, "microsecond": 0},
end_time = {"hour": 16, "minute": 0, "second": 0, "microsecond": 0},
frequency = Frequency.MIN_30
This event will be triggered at 13:20, 13:50, 14:20, 14:50, 15:20, 15:50, but not at 16:00.
Both start_time and end_time dictionaries should contain "hour", "minute", "second" and "microsecond" fields.
"""
start_time = None # type: Dict[str, int] # {"hour": 13, "minute": 20, "second": 0, "microsecond": 0}
end_time = None # type: Dict[str, int] # {"hour": 20, "minute": 0, "second": 0, "microsecond": 0}
frequency = None # type: Frequency
_events_list = None # List[RegularTimeEvent]
def __init__(self):
if None in (self.start_time, self.end_time):
raise ValueError("set up the start and end time by calling set_start_and_end_time() before using the event")
if self.frequency is None:
raise ValueError("set up the frequency by calling set_frequency() before using the event")
self._init_events_list()
@classmethod
def set_start_and_end_time(cls, start_time: Dict[str, int], end_time: Dict[str, int]):
cls.start_time = start_time
cls.end_time = end_time
@classmethod
def set_frequency(cls, frequency: Frequency):
cls.frequency = frequency
def _init_events_list(self):
"""
Updates the _events_list - list consisting of custom RegularTimeEvents (PeriodicRegularEvent), which are used to
compute the next trigger time of PeriodicEvent. The notify function of PeriodicRegularEvent is empty, as it is
not currently used - the purpose of defining the PeriodicRegularEvents is to easily obtain the next trigger
time and then notify the listener attached to the PeriodicEvent.
"""
self._events_list = []
# Generate the list of time dictionaries
_trigger_time_list = self._frequency_to_trigger_time()
for _trigger_time in _trigger_time_list:
# Define a custom regular time event
self._events_list.append(self._PeriodicRegularEvent(_trigger_time, self.start_time, self.end_time))
class _PeriodicRegularEvent(RegularTimeEvent):
_trigger_time = None
def __init__(self, trigger_time, start_time, end_time):
self._trigger_time = trigger_time
self._start_time = start_time
self._end_time = end_time
self._trigger_time_rule = RegularDateTimeRule(**trigger_time)
@classmethod
def trigger_time(cls) -> RelativeDelta:
return RelativeDelta(**cls._trigger_time)
def next_trigger_time(self, now: datetime) -> datetime:
def _within_time_frame(_time):
return (_time + RelativeDelta(**self._end_time) >= _time) and \
(_time + RelativeDelta(**self._start_time) <= _time)
_start_time_rule = RegularDateTimeRule(**self._start_time)
# Before midnight and after end time or after midnight and before start time (e.g. after the market
# close and before the market open), the next trigger time should always point to the next time
# triggered by the _start_time_rule (e.g. to the market open time)
if not _within_time_frame(now):
_next_trigger_time = _start_time_rule.next_trigger_time(now)
else:
_next_trigger_time = self._trigger_time_rule.next_trigger_time(now)
# If the next trigger time is outside the desired time frame, find the next event time moving
# current time to the next start_time date.
if not _within_time_frame(_next_trigger_time):
_next_trigger_time = _start_time_rule.next_trigger_time(_next_trigger_time)
# Check if the next trigger time points to Saturday or Sunday and if so, shift it to Monday
if _next_trigger_time.weekday() in (5, 6):
_next_trigger_time_shifted = _next_trigger_time + RelativeDelta(weekday=0)
assert _next_trigger_time_shifted >= _next_trigger_time
_next_trigger_time = _next_trigger_time_shifted
return _next_trigger_time
def notify(self, listener) -> None:
pass
def next_trigger_time(self, now: datetime) -> datetime:
# List of times triggered by any of the constituent of the PeriodicEvent. The next trigger time of PeriodicEvent
# is the first of the times listed in next_trigger_times list.
next_trigger_times = [event.next_trigger_time(now) for event in self._events_list]
return min(next_trigger_times)
@abstractmethod
def notify(self, listener) -> None:
pass
def _frequency_to_trigger_time(self) -> List[Dict]:
"""
Helper function, which creates a list of regular time dictionaries, which are compliant with the certain
given self.frequency and shifted according to the self.start_time.
E.g. in case of frequency equal to 30 minutes and zeroed self.start_time = {"minute": 0, "second": 0}
the function will create the following list of dictionaries:
[{"minute": 0, "second": 0, "microsecond": 0},
{"minute": 30, "second": 0, "microsecond": 0}].
In case of 15 minutes frequency and self.start_time = {"minute": 20, "second": 0} the output will
be as follows:
[{"minute": 20, "second": 0, "microsecond": 0},
{"minute": 35, "second": 0, "microsecond": 0},
{"minute": 55, "second": 0, "microsecond": 0},
{"minute": 5, "second": 0, "microsecond": 0}].
Two most important parameters used as a base for generating the trigger time dictionaries are self.frequency
and the self.start_time.
self.start_time - this dictionary contains e.g. minute, second fields, which denote the shift which should be
applied to the returned values. It is necessary to provide the values of self.start_time to align the returned
        values to the start time (e.g. the market open time). If the market opens at 9:30, then without shifting
        the data, the returned list will be [{"minute": 0, "second": 0, "microsecond": 0}]. If we use
        this dictionary as the base for time triggering, the events will be triggered at 10:00, 11:00, etc.
        After providing a self.start_time shift equal to {"minute": 30}, it will be possible to trigger events at
9:30, 10:30, etc.
"""
# Helper dictionary consisting of number of minutes per hour, months per year, etc. used to compute the time
# shift for a given frequency
max_time_values = {
"microsecond": 1000000,
"second": 60,
"minute": 60,
"hour": 24,
"weekday": 7,
"month": 12,
}
def _generate_time_dict(slice_field: str, slice_field_value: int, shift: Dict) -> Dict:
"""
Generate a single time dictionary, e.g. {"minute": 13, "second": 0, "microsecond": 0}
Parameters:
===========
slice_field
The first meaningful time parameter, which will appear in the created time dictionary. E.g. in case of
minutely frequency, we would like to receive {"second": 0, "microsecond": 0} as function output, as this
dictionary may be used to trigger events for every minute, when seconds and microseconds part are equal to
0. Thus, in case of 1 minute frequency, slice_field, the first meaningful time field is "second".
slice_field_value
The value, which should appear in the final dictionary for the given slice_field, e.g. {"second": 14}. This
value may be changed in final dictionary, to align with the provided shift parameter.
shift
Time dictionary, denoting values of time shift, that should be applied for each time parameter ("hour",
"minute" etc.). E.g. In case of slice_field = "minute", slice_field_value = 45 and shift = {"minute": 20}
the following dictionary will be returned:
{"minute": 5, "second": 0, "microsecond": 0}.
"""
# All fields that may appear in the output dictionary
fields = ("year", "month", "day", "weekday", "hour", "minute", "second", "microsecond")
# Create dictionary from consecutive fields, starting from the first most meaningful - slice_field. Set all
            # values to 0, except for dictionary[slice_field], which is set to the desired slice_field_value.
slice_index = fields.index(slice_field)
time_dict = dict.fromkeys(fields[slice_index:], 0)
time_dict[slice_field] = slice_field_value
# Shift the values according to the shift dictionary - the resulting dictionary should contain all these
# fields, which appear in time_dict. As some of the keys may appear in both dictionaries,
# e.g. time_dict["minute"] = 20, shift["minute"] = 30, we have to sum them to receive
# time_dict["minute"] = 50.
# In order to prevent this sum from exceeding the maximum values, we additionally use modulo function with
# the maximum value given by the max_time_values dictionary.
# E.g. time_dict["minute"] = 40, shift["minute"] = 40,
# time_dict["minute"] = 80 % max_time_values["minute"] = 20
result = {
key: (time_dict.get(key, 0) + shift.get(key, 0)) % max_time_values[key]
for key in time_dict.keys()
}
return result
# Dictionary containing the first meaningful time parameters and their corresponding values for each frequency
# The fields values are used to compute the frequency
frequency_to_time_dict = {
Frequency.MIN_1: ("second", 0),
Frequency.MIN_5: ("minute", 5),
Frequency.MIN_10: ("minute", 10),
Frequency.MIN_15: ("minute", 15),
Frequency.MIN_30: ("minute", 30),
Frequency.MIN_60: ("minute", 0),
Frequency.DAILY: ("hour", 0),
Frequency.WEEKLY: ("weekday", 0),
Frequency.MONTHLY: ("day", 0),
Frequency.QUARTERLY: ("month", 3),
Frequency.SEMI_ANNUALLY: ("month", 6),
Frequency.YEARLY: ("month", 0)
}
# Get the first meaningful field and the field value
field, time_freq = frequency_to_time_dict[self.frequency]
        # Create a range of field_values (e.g. in case of 15 minutes frequency: (0, 15, 30, 45))
freq_range = range(0, max_time_values[field], time_freq) if time_freq > 0 else (0,)
# Create a list of time dictionaries for each field and its value, given by the freq_range
time_dictionaries = [_generate_time_dict(field, field_value, self.start_time) for field_value in freq_range]
return time_dictionaries
| en | 0.823512 | # Copyright 2016-present CERN – European Organization for Nuclear Research # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. TimeEvent which is triggered every certain amount of time (given a certain frequency) within a predefined time range. As some of the available frequencies (like e.g. Frequency.MIN_15) can not be implemented using only one RegularTimeEvent, the PeriodicEvent contains a list of multiple custom RegularTimeEvents (_events_list). Each of these RegularTimeEvents contains its own _trigger_time_rule. These rules are used to define the desired frequency of PeriodicEvent. E.g. PeriodicEvent, which has the frequency parameter set to Frequency.MIN_15 is further translated to a list of multiple RegularTimeEvents, which correspond to RegularDateTimeRules with following trigger time parameters: {"minute": 0, "second": 0, "microsecond": 0} - event triggered at every full hour, eg. 12:00, 13:00 etc. {"minute": 15, "second": 0, "microsecond": 0} - event triggered at 12:15, 13:15 etc. {"minute": 30, "second": 0, "microsecond": 0} - event triggered at 12:30, 13:30 etc. {"minute": 45, "second": 0, "microsecond": 0} - event triggered at 12:45, 13:45 etc. All the above defined Events together, create an Event which is triggered every 15 minutes. The PeriodicEvent is triggered only within the [start_time, end_time] time range with the given frequency. It is triggered always at the start_time, but not necessarily at the end_time. E.g. start_time = {"hour": 13, "minute": 20, "second": 0, "microsecond": 0}, end_time = {"hour": 16, "minute": 0, "second": 0, "microsecond": 0}, frequency = Frequency.MIN_30 This event will be triggered at 13:20, 13:50, 14:20, 14:50, 15:20, 15:50, but not at 16:00. Both start_time and end_time dictionaries should contain "hour", "minute", "second" and "microsecond" fields. # type: Dict[str, int] # {"hour": 13, "minute": 20, "second": 0, "microsecond": 0} # type: Dict[str, int] # {"hour": 20, "minute": 0, "second": 0, "microsecond": 0} # type: Frequency # List[RegularTimeEvent] Updates the _events_list - list consisting of custom RegularTimeEvents (PeriodicRegularEvent), which are used to compute the next trigger time of PeriodicEvent. The notify function of PeriodicRegularEvent is empty, as it is not currently used - the purpose of defining the PeriodicRegularEvents is to easily obtain the next trigger time and then notify the listener attached to the PeriodicEvent. # Generate the list of time dictionaries # Define a custom regular time event # Before midnight and after end time or after midnight and before start time (e.g. after the market # close and before the market open), the next trigger time should always point to the next time # triggered by the _start_time_rule (e.g. to the market open time) # If the next trigger time is outside the desired time frame, find the next event time moving # current time to the next start_time date. 
# Check if the next trigger time points to Saturday or Sunday and if so, shift it to Monday # List of times triggered by any of the constituent of the PeriodicEvent. The next trigger time of PeriodicEvent # is the first of the times listed in next_trigger_times list. Helper function, which creates a list of regular time dictionaries, which are compliant with the certain given self.frequency and shifted according to the self.start_time. E.g. in case of frequency equal to 30 minutes and zeroed self.start_time = {"minute": 0, "second": 0} the function will create the following list of dictionaries: [{"minute": 0, "second": 0, "microsecond": 0}, {"minute": 30, "second": 0, "microsecond": 0}]. In case of 15 minutes frequency and self.start_time = {"minute": 20, "second": 0} the output will be as follows: [{"minute": 20, "second": 0, "microsecond": 0}, {"minute": 35, "second": 0, "microsecond": 0}, {"minute": 55, "second": 0, "microsecond": 0}, {"minute": 5, "second": 0, "microsecond": 0}]. Two most important parameters used as a base for generating the trigger time dictionaries are self.frequency and the self.start_time. self.start_time - this dictionary contains e.g. minute, second fields, which denote the shift which should be applied to the returned values. It is necessary to provide the values of self.start_time to align the returned values to the start time (e.g. market open time). If the market open time opens at 9:30, then without shifting the data, the returned list will be as follows: [{"minute": 0, "second": 0, "microsecond": 0}]. If we will use this dictionary as the base for time triggering, the events will be triggered at 10:00, 11:00 etc. After providing the self.start_time shift equal to {"minute": 30"}, it will be possible to trigger events at 9:30, 10:30, etc. # Helper dictionary consisting of number of minutes per hour, months per year, etc. used to compute the time # shift for a given frequency Generate a single time dictionary, e.g. {"minute": 13, "second": 0, "microsecond": 0} Parameters: =========== slice_field The first meaningful time parameter, which will appear in the created time dictionary. E.g. in case of minutely frequency, we would like to receive {"second": 0, "microsecond": 0} as function output, as this dictionary may be used to trigger events for every minute, when seconds and microseconds part are equal to 0. Thus, in case of 1 minute frequency, slice_field, the first meaningful time field is "second". slice_field_value The value, which should appear in the final dictionary for the given slice_field, e.g. {"second": 14}. This value may be changed in final dictionary, to align with the provided shift parameter. shift Time dictionary, denoting values of time shift, that should be applied for each time parameter ("hour", "minute" etc.). E.g. In case of slice_field = "minute", slice_field_value = 45 and shift = {"minute": 20} the following dictionary will be returned: {"minute": 5, "second": 0, "microsecond": 0}. # All fields that may appear in the output dictionary # Create dictionary from consecutive fields, starting from the first most meaningful - slice_field. Set all # values to 0, except of the dictionary[slice_field], which should be set to the desired slice_field_value. # Shift the values according to the shift dictionary - the resulting dictionary should contain all these # fields, which appear in time_dict. As some of the keys may appear in both dictionaries, # e.g. 
time_dict["minute"] = 20, shift["minute"] = 30, we have to sum them to receive # time_dict["minute"] = 50. # In order to prevent this sum from exceeding the maximum values, we additionally use modulo function with # the maximum value given by the max_time_values dictionary. # E.g. time_dict["minute"] = 40, shift["minute"] = 40, # time_dict["minute"] = 80 % max_time_values["minute"] = 20 # Dictionary containing the first meaningful time parameters and their corresponding values for each frequency # The fields values are used to compute the frequency # Get the first meaningful field and the field value # Create a range of field_values (e.g. in case of 15 minutes frequency: (0, 15, 30, 45) # Create a list of time dictionaries for each field and its value, given by the freq_range | 2.197229 | 2 |
storage_integration/controller.py | frappe/storage_integration | 8 | 6632562 | import frappe
from frappe.utils.password import get_decrypted_password
import os
import re
from minio import Minio
from urllib.parse import urlparse, parse_qs
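# Glue between the Frappe "File" doctype and a MinIO/S3 bucket: uploads attachments,
# rewrites their URLs to a whitelisted download endpoint, and supports backups,
# migration of existing files, and cloning/restoring them back to local storage.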
class MinioConnection:
def __init__(self, doc):
self.settings = frappe.get_doc("Storage Integration Settings", None)
self.file = doc
self.client = Minio(
self.settings.ip,
access_key=self.settings.access_key,
secret_key=get_decrypted_password(
"Storage Integration Settings", "Storage Integration Settings", "secret_key"
),
region=self.settings.region,
secure=False,
)
def upload_file(self):
if re.search(r"\bhttps://\b", self.file.file_url):
# if file already on s3
return
key = self.get_object_key()
with open("./" + key, "rb") as f:
self.client.put_object(
self.settings.bucket_name, key, f, length=-1, part_size=10 * 1024 * 1024
)
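        # Rewrite file_url to the whitelisted download_from_s3 endpoint; the original
        # local path is preserved in the local_file_url query parameter so that
        # get_object_key() can reconstruct the S3 object key later.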
method = "storage_integration.controller.download_from_s3"
self.file.file_url = f"https://{frappe.local.site}/api/method/{method}?doc_name={self.file.name}&local_file_url={self.file.file_url}"
os.remove("./" + key)
self.file.save()
frappe.db.commit()
def upload_backup(self, path):
with open(path, "rb") as f:
self.client.fput_object(self.settings.bucket_name, path[2:], path)
# on upload successful create storage_backup_doc()
url = re.search(r"\bbackups/\b", path)
doc = frappe.get_doc(
{
"doctype": "Storage Backup",
"file_name": path[url.span()[1] :],
"key": path[2:],
"available": 1,
}
)
doc.insert()
os.remove(path)
frappe.db.commit()
def download_backup(self, file_name):
key = frappe.local.site + "/private" + "/backups/" + file_name
        response = self.client.get_object(self.settings.bucket_name, key)
        try:
frappe.local.response["filename"] = file_name
frappe.local.response["filecontent"] = response.read()
frappe.local.response["type"] = "download"
finally:
response.close()
response.release_conn()
def delete_file(self):
obj_key = self.get_object_key()
self.client.remove_object(self.settings.bucket_name, obj_key)
def download_file(self, action_type):
obj_key = self.get_object_key()
        response = self.client.get_object(
            self.settings.bucket_name, obj_key, self.file.file_name
        )
        try:
if action_type == "download":
frappe.local.response["filename"] = self.file.file_name
frappe.local.response["filecontent"] = response.read()
frappe.local.response["type"] = "download"
elif action_type == "clone":
with open("./" + obj_key, "wb") as f:
f.write(response.read())
elif action_type == "restore":
with open("./" + obj_key, "wb") as f:
f.write(response.read())
if self.file.is_private:
pattern = frappe.local.site
else:
pattern = "/public"
match = re.search(rf"\b{pattern}\b", obj_key)
obj_key = obj_key[match.span()[1] :]
self.file.file_url = obj_key
self.file.save()
frappe.db.commit()
finally:
response.close()
response.release_conn()
def get_object_key(self):
match = re.search(r"\bhttps://\b", self.file.file_url)
if match:
query = urlparse(self.file.file_url).query
self.file.file_url = parse_qs(query)["local_file_url"][0]
if not self.file.is_private:
key = frappe.local.site + "/public" + self.file.file_url
else:
key = frappe.local.site + self.file.file_url
return key
def upload_to_s3(doc, method):
conn = MinioConnection(doc)
conn.upload_file()
def delete_from_s3(doc, method):
conn = MinioConnection(doc)
conn.delete_file()
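# upload_to_s3 and delete_from_s3 take (doc, method) signatures, i.e. they are meant to be
# registered as File doctype event hooks (presumably via doc_events in the app's hooks.py).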
@frappe.whitelist()
def download_from_s3(doc_name):
doc = frappe.get_doc("File", doc_name)
conn = MinioConnection(doc)
conn.download_file(action_type="download")
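# NOTE: allow_guest=True below exposes backup downloads to unauthenticated requests;
# callers should make sure this is intentional for their deployment.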
@frappe.whitelist(allow_guest=True)
def download_backup(file_name):
conn = MinioConnection(None)
conn.download_backup(file_name)
@frappe.whitelist()
def migrate_existing_files():
files = frappe.get_all("File", pluck="name", filters={"file_url": ("!=", "")})
for file in files:
doc = frappe.get_doc("File", file)
upload_to_s3(doc, None)
@frappe.whitelist()
def delete_all_remote():
files = frappe.get_all("File", pluck="name", filters={"file_url": ("!=", "")})
for file in files:
doc = frappe.get_doc("File", file)
delete_from_s3(doc, None)
@frappe.whitelist()
def clone_files(action_type):
files = frappe.get_all("File", pluck="name", filters={"file_url": ("!=", "")})
for file in files:
doc = frappe.get_doc("File", file)
conn = MinioConnection(doc)
conn.download_file(action_type=action_type)
| import frappe
from frappe.utils.password import get_decrypted_password
import os
import re
from minio import Minio
from urllib.parse import urlparse, parse_qs
class MinioConnection:
def __init__(self, doc):
self.settings = frappe.get_doc("Storage Integration Settings", None)
self.file = doc
self.client = Minio(
self.settings.ip,
access_key=self.settings.access_key,
secret_key=get_decrypted_password(
"Storage Integration Settings", "Storage Integration Settings", "secret_key"
),
region=self.settings.region,
secure=False,
)
def upload_file(self):
if re.search(r"\bhttps://\b", self.file.file_url):
# if file already on s3
return
key = self.get_object_key()
with open("./" + key, "rb") as f:
self.client.put_object(
self.settings.bucket_name, key, f, length=-1, part_size=10 * 1024 * 1024
)
method = "storage_integration.controller.download_from_s3"
self.file.file_url = f"https://{frappe.local.site}/api/method/{method}?doc_name={self.file.name}&local_file_url={self.file.file_url}"
os.remove("./" + key)
self.file.save()
frappe.db.commit()
def upload_backup(self, path):
with open(path, "rb") as f:
self.client.fput_object(self.settings.bucket_name, path[2:], path)
# on upload successful create storage_backup_doc()
url = re.search(r"\bbackups/\b", path)
doc = frappe.get_doc(
{
"doctype": "Storage Backup",
"file_name": path[url.span()[1] :],
"key": path[2:],
"available": 1,
}
)
doc.insert()
os.remove(path)
frappe.db.commit()
def download_backup(self, file_name):
key = frappe.local.site + "/private" + "/backups/" + file_name
        response = self.client.get_object(self.settings.bucket_name, key)
        try:
frappe.local.response["filename"] = file_name
frappe.local.response["filecontent"] = response.read()
frappe.local.response["type"] = "download"
finally:
response.close()
response.release_conn()
def delete_file(self):
obj_key = self.get_object_key()
self.client.remove_object(self.settings.bucket_name, obj_key)
def download_file(self, action_type):
obj_key = self.get_object_key()
        response = self.client.get_object(
            self.settings.bucket_name, obj_key, self.file.file_name
        )
        try:
if action_type == "download":
frappe.local.response["filename"] = self.file.file_name
frappe.local.response["filecontent"] = response.read()
frappe.local.response["type"] = "download"
elif action_type == "clone":
with open("./" + obj_key, "wb") as f:
f.write(response.read())
elif action_type == "restore":
with open("./" + obj_key, "wb") as f:
f.write(response.read())
if self.file.is_private:
pattern = frappe.local.site
else:
pattern = "/public"
match = re.search(rf"\b{pattern}\b", obj_key)
obj_key = obj_key[match.span()[1] :]
self.file.file_url = obj_key
self.file.save()
frappe.db.commit()
finally:
response.close()
response.release_conn()
def get_object_key(self):
match = re.search(r"\bhttps://\b", self.file.file_url)
if match:
query = urlparse(self.file.file_url).query
self.file.file_url = parse_qs(query)["local_file_url"][0]
if not self.file.is_private:
key = frappe.local.site + "/public" + self.file.file_url
else:
key = frappe.local.site + self.file.file_url
return key
def upload_to_s3(doc, method):
conn = MinioConnection(doc)
conn.upload_file()
def delete_from_s3(doc, method):
conn = MinioConnection(doc)
conn.delete_file()
@frappe.whitelist()
def download_from_s3(doc_name):
doc = frappe.get_doc("File", doc_name)
conn = MinioConnection(doc)
conn.download_file(action_type="download")
@frappe.whitelist(allow_guest=True)
def download_backup(file_name):
conn = MinioConnection(None)
conn.download_backup(file_name)
@frappe.whitelist()
def migrate_existing_files():
files = frappe.get_all("File", pluck="name", filters={"file_url": ("!=", "")})
for file in files:
doc = frappe.get_doc("File", file)
upload_to_s3(doc, None)
@frappe.whitelist()
def delete_all_remote():
files = frappe.get_all("File", pluck="name", filters={"file_url": ("!=", "")})
for file in files:
doc = frappe.get_doc("File", file)
delete_from_s3(doc, None)
@frappe.whitelist()
def clone_files(action_type):
files = frappe.get_all("File", pluck="name", filters={"file_url": ("!=", "")})
for file in files:
doc = frappe.get_doc("File", file)
conn = MinioConnection(doc)
conn.download_file(action_type=action_type) | en | 0.849396 | # if file already on s3 # on upload successful create storage_backup_doc() | 2.285351 | 2 |
scripts/migration/set_unique_challenge_slug.py | kaustubh-s1/EvalAI | 1,470 | 6632563 | # To run the file:
# 1. Open django shell using -- python manage.py shell
# 2. Run the script in shell -- exec(open('scripts/migration/set_unique_challenge_slug.py').read())
from challenges.models import Challenge
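# Builds each slug as "<lowercased-title-with-hyphens>-<pk>" and truncates it to 199
# characters, presumably to stay within the slug field's max_length.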
def set_challenge_slug_as_unique():
challenges = Challenge.objects.all()
try:
for challenge in challenges:
slug = "{}-{}".format(
challenge.title.replace(" ", "-").lower(), challenge.pk
)[:199]
print(
"Adding challenge slug: `%s` --> `%s` "
% (challenge.title, slug)
)
challenge.slug = slug
challenge.save()
print("Successfully added challenge slug")
except Exception as e:
print(e)
set_challenge_slug_as_unique()
| # To run the file:
# 1. Open django shell using -- python manage.py shell
# 2. Run the script in shell -- exec(open('scripts/migration/set_unique_challenge_slug.py').read())
from challenges.models import Challenge
def set_challenge_slug_as_unique():
challenges = Challenge.objects.all()
try:
for challenge in challenges:
slug = "{}-{}".format(
challenge.title.replace(" ", "-").lower(), challenge.pk
)[:199]
print(
"Adding challenge slug: `%s` --> `%s` "
% (challenge.title, slug)
)
challenge.slug = slug
challenge.save()
print("Successfully added challenge slug")
except Exception as e:
print(e)
set_challenge_slug_as_unique() | en | 0.389295 | # To run the file: # 1. Open django shell using -- python manage.py shell # 2. Run the script in shell -- exec(open('scripts/migration/set_unique_challenge_slug.py').read()) | 2.60505 | 3 |
tests/pasio_parallel_wrapper.py | autosome-ru/pasio | 0 | 6632564 | import argparse
import os
import logging
import tempfile
import pasio
logger = logging.getLogger(__name__)
stderr = logging.StreamHandler()
stderr.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
stderr.setFormatter(formatter)
logger.addHandler(stderr)
logger.setLevel(logging.INFO)
def write_bedgraph(outfilename, chromosome_label, data):
with open(outfilename, 'w') as outfile:
for line in data:
outfile.write('\t'.join((chromosome_label, line[0], line[1], line[2])) + '\n')
def parse_bedgraph_compact(bedgraph_filename):
with open(bedgraph_filename) as infile:
for line in infile:
chrom, start, stop, coverage = line.strip().split()
yield chrom, start, stop, coverage
def split_by_chromosomes_and_get_sizes(source_filename, tmppath):
logger.info('Parsing {}'.format(source_filename) )
previous_chrom = None
chromosome_index = 0
chromosome_metadata = {}
chromosome_data = []
for chromosome_label, start, stop, coverage in parse_bedgraph_compact(source_filename):
if previous_chrom is not None and previous_chrom != chromosome_label:
            # Flush the data collected for the chromosome that has just ended
            outfile_path = os.path.join(tmppath, '{}_{}'.format(previous_chrom,
                                                                os.path.basename(source_filename)))
            write_bedgraph(outfile_path,
                           previous_chrom,
                           chromosome_data)
chromosome_length = int(chromosome_data[-1][1])
chromosome_metadata[previous_chrom] = {'length':chromosome_length,
'index': chromosome_index,
'label': previous_chrom,
'outfile': outfile_path}
logger.info('Written {} of length {}'.format(previous_chrom,
chromosome_length) )
chromosome_data = []
chromosome_index += 1
chromosome_data.append((start, stop, coverage))
previous_chrom = chromosome_label
outfile_path = os.path.join(tmppath, '{}_{}'.format(chromosome_label,
os.path.basename(source_filename)))
write_bedgraph(outfile_path,
chromosome_label,
chromosome_data)
chromosome_length = int(chromosome_data[-1][1])
chromosome_metadata[chromosome_label] = {'length':chromosome_length,
'index': chromosome_index,
'label': chromosome_label,
'outfile': outfile_path}
logger.info('Written {} of length {}'.format(chromosome_label, chromosome_length) )
return chromosome_metadata
def generate_commandlines_for_each_chrom(executable_path, chrom_data, arguments):
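    # Emit the longest chromosomes first so that, when the commands are run in parallel,
    # the slowest jobs start as early as possible.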
chrom_data = sorted(chrom_data, key = lambda x:x['length'], reverse=True)
commandlines = []
for chrom in chrom_data:
commandline = [executable_path] + arguments + ['--out_bedgraph', chrom['outfile']]
commandlines.append(commandline)
return commandlines
def add_parallel_arguments_to_parser(argparser, include_outfile = False):
parallel_arguments = argparser.add_argument_group('Pasio parallel wrapper arguments')
parallel_arguments.add_argument('--tmpdir')
parallel_arguments.add_argument('--out_script', help="File to write bash script to",
required=True)
parallel_arguments.add_argument('--path_to_pasio', help="Path for pasio script",
required=True)
if include_outfile:
parallel_arguments.add_argument('-o', '--out_bedgraph', help="Output begraph path",
required=True)
return argparser
def get_parallel_parsers():
joined_argparser = pasio.get_argparser()
joined_argparser = add_parallel_arguments_to_parser(joined_argparser)
parallel_parser = argparse.ArgumentParser()
parallel_parser = add_parallel_arguments_to_parser(parallel_parser,
include_outfile=True)
return joined_argparser, parallel_parser
def get_args():
joined_parser, parallel_parser = get_parallel_parsers()
args = joined_parser.parse_args()
parallel_args, pasio_cmdline = parallel_parser.parse_known_args()
return args, pasio_cmdline
def generate_sequential(cmdlines, outfilename):
with open(outfilename, 'w') as outfile:
for line in cmdlines:
outfile.write(' '.join(line))
outfile.write('\n')
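# NOTE: currently identical to generate_sequential -- each command is written on its own line,
# so the resulting script still runs the commands one after another.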
def generate_parallel(cmdlines, outfilename):
with open(outfilename, 'w') as outfile:
for line in cmdlines:
outfile.write(' '.join(line))
outfile.write('\n')
def main():
args, pasio_args_list = get_args()
tmpdir = args.tmpdir
if tmpdir is None:
tmpdir = tempfile.mkdtemp(prefix = 'pasio_' + args.bedgraph, dir='.')
else:
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
logger.info('Pasio temporary directory is chosen to be %s' % tmpdir)
chrom_data = split_by_chromosomes_and_get_sizes(args.bedgraph, tmpdir)
commandlines = generate_commandlines_for_each_chrom(args.path_to_pasio, chrom_data.values(), pasio_args_list)
generate_sequential(commandlines, args.out_script)
if __name__=='__main__':
    main()
| none | 1 | 2.549735 | 3 |
|
projetos/aula/operators/logical-operators.py | GiuseppeMP/udacity-fundamentos-ia-machine-learning | 0 | 6632565 | '''and True if both the operands are true x and y
or True if either of the operands is true x or y
not True if operand is false (complements the operand) not x'''
x = True
y = False
# Output: x and y is False
print('x and y is',x and y)
# Output: x or y is True
print('x or y is',x or y)
# Output: not x is False
print('not x is',not x)
| en | 0.779289 | and True if both the operands are true x and y or True if either of the operands is true x or y not True if operand is false (complements the operand) not x # Output: x and y is False # Output: x or y is True # Output: not x is False | 4.329314 | 4 |
tests/test_models.py | dopry/orm | 0 | 6632566 | <filename>tests/test_models.py
import asyncio
import functools
import pytest
import sqlalchemy
import databases
import orm
from tests.settings import DATABASE_URL
database = databases.Database(DATABASE_URL, force_rollback=True)
metadata = sqlalchemy.MetaData()
class User(orm.Model):
__tablename__ = "users"
__metadata__ = metadata
__database__ = database
id = orm.Integer(primary_key=True)
name = orm.String(max_length=100)
class Product(orm.Model):
__tablename__ = "product"
__metadata__ = metadata
__database__ = database
id = orm.Integer(primary_key=True)
name = orm.String(max_length=100)
rating = orm.Integer(minimum=1, maximum=5)
in_stock = orm.Boolean(default=False)
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
def async_adapter(wrapped_func):
"""
Decorator used to run async test cases.
"""
@functools.wraps(wrapped_func)
def run_sync(*args, **kwargs):
loop = asyncio.get_event_loop()
task = wrapped_func(*args, **kwargs)
return loop.run_until_complete(task)
return run_sync
def test_model_class():
assert list(User.fields.keys()) == ["id", "name"]
assert isinstance(User.fields["id"], orm.Integer)
assert User.fields["id"].primary_key is True
assert isinstance(User.fields["name"], orm.String)
assert User.fields["name"].max_length == 100
assert isinstance(User.__table__, sqlalchemy.Table)
def test_model_pk():
user = User(pk=1)
assert user.pk == 1
assert user.id == 1
@async_adapter
async def test_model_crud():
async with database:
users = await User.objects.all()
assert users == []
user = await User.objects.create(name="Tom")
users = await User.objects.all()
assert user.name == "Tom"
assert user.pk is not None
assert users == [user]
lookup = await User.objects.get()
assert lookup == user
await user.update(name="Jane")
users = await User.objects.all()
assert user.name == "Jane"
assert user.pk is not None
assert users == [user]
await user.delete()
users = await User.objects.all()
assert users == []
@async_adapter
async def test_model_get():
async with database:
with pytest.raises(orm.NoMatch):
await User.objects.get()
user = await User.objects.create(name="Tom")
lookup = await User.objects.get()
assert lookup == user
user = await User.objects.create(name="Jane")
with pytest.raises(orm.MultipleMatches):
await User.objects.get()
@async_adapter
async def test_model_filter():
async with database:
await User.objects.create(name="Tom")
await User.objects.create(name="Jane")
await User.objects.create(name="Lucy")
user = await User.objects.get(name="Lucy")
assert user.name == "Lucy"
with pytest.raises(orm.NoMatch):
await User.objects.get(name="Jim")
await Product.objects.create(name="T-Shirt", rating=5, in_stock=True)
await Product.objects.create(name="Dress", rating=4)
await Product.objects.create(name="Coat", rating=3, in_stock=True)
product = await Product.objects.get(name__iexact="t-shirt", rating=5)
assert product.pk is not None
assert product.name == "T-Shirt"
assert product.rating == 5
products = await Product.objects.all(rating__gte=2, in_stock=True)
assert len(products) == 2
products = await Product.objects.all(name__icontains="T")
assert len(products) == 2
# Test escaping % character from icontains, contains, and iexact
await Product.objects.create(name="100%-Cotton", rating=3)
await Product.objects.create(name="Cotton-100%-Egyptian", rating=3)
await Product.objects.create(name="Cotton-100%", rating=3)
products = Product.objects.filter(name__iexact="100%-cotton")
assert await products.count() == 1
products = Product.objects.filter(name__contains="%")
assert await products.count() == 3
products = Product.objects.filter(name__icontains="%")
assert await products.count() == 3
@async_adapter
async def test_model_exists():
async with database:
await User.objects.create(name="Tom")
assert await User.objects.filter(name="Tom").exists() is True
assert await User.objects.filter(name="Jane").exists() is False
@async_adapter
async def test_model_count():
async with database:
await User.objects.create(name="Tom")
await User.objects.create(name="Jane")
await User.objects.create(name="Lucy")
assert await User.objects.count() == 3
assert await User.objects.filter(name__icontains="T").count() == 1
@async_adapter
async def test_model_limit():
async with database:
await User.objects.create(name="Tom")
await User.objects.create(name="Jane")
await User.objects.create(name="Lucy")
assert len(await User.objects.limit(2).all()) == 2
@async_adapter
async def test_model_limit_with_filter():
async with database:
await User.objects.create(name="Tom")
await User.objects.create(name="Tom")
await User.objects.create(name="Tom")
        assert len(await User.objects.limit(2).filter(name__iexact='Tom').all()) == 2
| en | 0.734049 | Decorator used to run async test cases. # Test escaping % character from icontains, contains, and iexact | 2.286574 | 2 |
test.py | wgy5446/TIL | 1 | 6632567 | <filename>test.py
aaaa`:wq
aaaa
| none | 1 | 0.863636 | 1 |
|
customerio/__init__.py | hungrydk/customerio-python | 34 | 6632568 | <gh_stars>10-100
import warnings
from customerio.client_base import CustomerIOException
from customerio.track import CustomerIO
from customerio.api import APIClient, SendEmailRequest
from customerio.regions import Regions | none | 1 | 1.072237 | 1 |
|
pymetabolism/lpmodels.py | Midnighter/pymetabolism | 1 | 6632569 | <filename>pymetabolism/lpmodels.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
====================
LP Solver Interfaces
====================
:Authors:
<NAME>
<NAME>
<NAME>
:Date:
2011-03-28
:Copyright:
Copyright(c) 2011 Jacobs University of Bremen. All rights reserved.
:File:
lpmodels.py
"""
import os
import itertools
import copy
import logging
import tempfile
from . import miscellaneous as misc
from .errors import PyMetabolismError
logger = logging.getLogger(__name__)
logger.addHandler(misc.NullHandler())
options = misc.OptionsManager.get_instance()
class MetaLPModelFacade(type):
"""
Meta class that, according to the solver chosen in the options, populates
the given class with methods that are solver specific.
The general interface of the created class is supposed to look like the one
described by `FBAModel`.
"""
def __new__(mcls, name, bases, dct):
"""
"""
if options.lp_solver.lower() == "gurobi":
_grb_populate(dct)
elif options.lp_solver.lower() == "cvxopt":
_cvx_populate(dct)
elif options.lp_solver.lower() == "glpk":
_cvx_populate(dct)
_cvx_glpk(dct)
elif options.lp_solver.lower() == "mosek":
_cvx_populate(dct)
_cvx_mosek(dct)
return super(MetaLPModelFacade, mcls).__new__(mcls, name, bases, dct)
################################################################################
# Gurobi Facade
################################################################################
def _grb_populate(attrs):
grb = misc.load_module("gurobipy", "Gurobi", "http://www.gurobi.com/")
# suppress reports to stdout
grb.setParam("OutputFlag", 0)
# deal with Gurobi's annoying log file
tmp_file = tempfile.mkstemp()[1] # absolute path component
grb.setParam("LogFile", tmp_file)
os.remove("gurobi.log")
# set the number of processes
grb.setParam("Threads", options.num_proc)
    # set the feasibility tolerance (smaller is more accurate but harder)
grb.setParam("FeasibilityTol", options.numeric_threshold)
# set class attributes
attrs["_grb"] = grb
for (key, value) in attrs.iteritems():
if key.startswith("_"):
continue
try:
attrs[key] = eval("_grb_" + key)
attrs[key].__doc__ = value.__doc__
except NameError:
pass
# gurobi helper functions
attrs["__init__"] = _grb___init__
# attrs["__copy__"] = _grb___copy__
# attrs["__deepcopy__"] = _grb___deepcopy__
attrs["__str__"] = _grb___str__
attrs["_add_compound"] = _grb__add_compound
attrs["_change_participation"] = _grb__change_participation
attrs["_make_binary"] = _grb__make_binary
attrs["_make_integer"] = _grb__make_integer
attrs["_add_reaction"] = _grb__add_reaction
attrs["_change_coefficients"] = _grb__change_coefficients
attrs["_adjust_bounds"] = _grb__adjust_bounds
attrs["_bounds"] = _grb__bounds
attrs["_fixed"] = _grb__fixed
attrs["_add_transport"] = _grb__add_transport
attrs["_add_source"] = _grb__add_source
attrs["_add_drain"] = _grb__add_drain
attrs["_var2reaction"] = _grb__var2reaction
attrs["_reset_objective"] = _grb__reset_objective
attrs["_changed_objective"] = _grb__changed_objective
attrs["_status"] = _grb__status
attrs["_flux"] = _grb__flux
attrs["_reduced_cost"] = _grb__reduced_cost
def _grb___init__(self, name):
self._model = self._grb.Model(name)
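    # bidirectional lookups between reactions/compounds and their gurobi variables/constraints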
self._rxn2var = dict()
self._var2rxn = dict()
self._rev2var = dict()
self._var2rev = dict()
self._cmpd2cnstrnt = dict()
self._cnstrnt2cmpd = dict()
self._sources = dict()
self._drains = dict()
self._objective = dict()
self._tmp_lb = dict()
#def _grb___copy__(self):
# # TODO
# cpy = self.__class__(self.name)
# cpy._model = self._model.copy()
# cpy._system2grb = dict()
# for col in cpy._model.getVars():
# cpy._system2grb[col.getAttr("VarName")] = col
# cpy._system2grb = dict()
# for row in cpy._model.getConstrs():
# cpy._system2grb[row.getAttr("ConstrName")] = row
# return cpy
#
#def _grb___deepcopy__(self, memo=dict()):
# # TODO
# return self.__copy__()
#
#def _grb_copy(self):
# # TODO
# return self.__deepcopy__()
def _grb___str__(self):
self._model.update()
message = list()
message.append("Objective:")
lin_expr = self._model.getObjective()
msg = ["%f %s" % (lin_expr.getCoeff(i), lin_expr.getVar(i).varName) for
i in range(lin_expr.size())]
message.append(" + ".join(msg))
message.append("Constraints:")
for cnstrnt in self._model.getConstrs():
lin_expr = self._model.getRow(cnstrnt)
msg = ["%f %s" % (lin_expr.getCoeff(i), lin_expr.getVar(i).varName) for
i in range(lin_expr.size())]
message.append("%s: %s = %s" % (cnstrnt.constrName, " + ".join(msg),
str(cnstrnt.rhs)))
message.append("Bounds:")
for var in self._model.getVars():
message.append("%f <= %s <= %f" % (var.lb, var.varName, var.ub))
return "\n".join(message)
def _grb__add_compound(self, compound):
if compound in self._cmpd2cnstrnt:
return False
cnstrnt = self._model.addConstr(0.0, self._grb.GRB.EQUAL,
0.0, name=str(compound))
self._cmpd2cnstrnt[compound] = cnstrnt
self._cnstrnt2cmpd[cnstrnt] = compound
return True
def _grb__change_participation(self, compound, coefficients):
cnstrnt = self._cmpd2cnstrnt[compound]
for (rxn, factor) in coefficients:
# will throw KeyError if reaction doesn't exist yet
var = self._rxn2var[rxn]
self._model.chgCoeff(cnstrnt, var, factor)
if rxn.reversible:
var = self._rev2var[rxn]
self._model.chgCoeff(cnstrnt, var, -factor)
def _grb_add_compound(self, compound, coefficients=None):
if hasattr(compound, "__iter__"):
# we really add multiple compounds
if hasattr(coefficients, "__iter__"):
coefficients = itertools.repeat(coefficients)
changes = [self._add_compound(cmpd) for cmpd in compound]
if any(changes):
self._model.update()
if coefficients is None:
return
for (cmpd, coeff_iter) in itertools.izip(compound, coefficients):
if self._model.getRow(self._cmpd2cnstrnt[cmpd]).size() > 0:
# compound participates in existing reactions thus was added before
continue
self._change_participation(cmpd, coeff_iter)
else:
if self._add_compound(compound):
self._model.update()
if coefficients is None:
return
self._change_participation(compound, coefficients)
def _grb_iter_compounds(self, reaction=None, coefficients=False):
# reports model data (may require update to be current)
if reaction is None:
return self._cmpd2cnstrnt.iterkeys()
column = self._model.getCol(self._rxn2var[reaction])
if coefficients:
return ((self._cnstrnt2cmpd[column.getConstr(i)], column.getCoeff(i))\
for i in range(column.size()))
else:
return (self._cnstrnt2cmpd[column.getConstr(i)]\
for i in range(column.size()))
def _grb_modify_compound_coefficients(self, compound, coefficients):
if hasattr(compound, "__iter__"):
if hasattr(coefficients, "__iter__"):
coefficients = itertools.repeat(coefficients)
for (cmpd, coeff_iter) in itertools.izip(compound, coefficients):
self._change_participation(cmpd, coeff_iter)
else:
self._change_participation(compound, coefficients)
def _grb_free_compound(self, compound):
cnstrnt = self._cmpd2cnstrnt[compound]
lin_expr = self._model.getRow(cnstrnt)
for i in range(lin_expr.size()):
var = lin_expr.getVar(i)
self._model.chgCoeff(cnstrnt, var, 0.0)
def _grb_knockout_compound(self, compound):
cnstrnt = self._cmpd2cnstrnt[compound]
lin_expr = self._model.getRow(cnstrnt)
for i in range(lin_expr.size()):
var = lin_expr.getVar(i)
var.lb = 0.0
var.ub = 0.0
def _grb__del_compound(self, compound):
cnstrnt = self._cmpd2cnstrnt.pop(compound)
del self._cnstrnt2cmpd[cnstrnt]
self._model.remove(cnstrnt)
def _grb_delete_compound(self, compound):
if hasattr(compound, "__iter__"):
for cmpd in compound:
self._del_compound(cmpd)
else:
self._del_compound(compound)
self._model.update()
def _grb__make_binary(self, reaction):
if hasattr(reaction, "__iter__"):
for rxn in reaction:
var = self._rxn2var[rxn]
var.vType = "B"
if rxn.reversible:
var = self._rev2var[rxn]
var.vType = "B"
else:
var = self._rxn2var[reaction]
var.vType = "B"
if reaction.reversible:
var = self._rev2var[reaction]
var.vType = "B"
def _grb__make_integer(self, reaction):
if hasattr(reaction, "__iter__"):
for rxn in reaction:
var = self._rxn2var[rxn]
var.vType = "I"
if rxn.reversible:
var = self._rev2var[rxn]
var.vType = "I"
else:
var = self._rxn2var[reaction]
var.vType = "I"
if reaction.reversible:
var = self._rev2var[reaction]
var.vType = "I"
def _grb__add_reaction(self, reaction, lb, ub):
if self._rxn2var.has_key(reaction):
return False
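    # a reversible reaction is modelled as two non-negative variables, one per direction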
if reaction.reversible:
# we rely on lb being numeric here due to default options
if lb < 0:
var_rev = self._model.addVar(0.0, abs(lb), name=str(reaction) +
options.reversible_suffix)
var = self._model.addVar(0.0, ub, name=str(reaction))
else:
var_rev = self._model.addVar(lb, ub, name=str(reaction) +
options.reversible_suffix)
var = self._model.addVar(lb, ub, name=str(reaction))
self._rev2var[reaction] = var_rev
self._var2rev[var_rev] = reaction
else:
var = self._model.addVar(lb, ub, name=str(reaction))
self._rxn2var[reaction] = var
self._var2rxn[var] = reaction
return True
def _grb__change_coefficients(self, reaction, coefficients):
var = self._rxn2var[reaction]
for (cmpd, factor) in coefficients:
cnstrnt = self._cmpd2cnstrnt[cmpd]
self._model.chgCoeff(cnstrnt, var, factor)
if reaction.reversible:
var = self._rev2var[reaction]
for (cmpd, factor) in coefficients:
cnstrnt = self._cmpd2cnstrnt[cmpd]
self._model.chgCoeff(cnstrnt, var, -factor)
def _grb_add_reaction(self, reaction, coefficients=None, lb=None, ub=None):
if lb is None:
lb = options.lower_bound
if ub is None:
ub = options.upper_bound
if hasattr(reaction, "__iter__"):
# we really add multiple reactions
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
changes = [self._add_reaction(rxn, lb, ub) for (rxn, lb, ub)\
in itertools.izip(reaction, lb_iter, ub_iter)]
if any(changes):
self._model.update()
if coefficients is None:
return
# need to find out if we are dealing with a nested list or not
if not (isinstance(coefficients, list) and isinstance(coefficients[0],
list)):
coefficients = itertools.repeat(coefficients)
for (rxn, coeff_iter) in itertools.izip(reaction, coefficients):
changes = [self._add_compound(pair[0]) for pair in coeff_iter]
if any(changes):
self._model.update()
if self._model.getCol(self._rxn2var[rxn]).size() > 0:
# reaction has constraints and was added before
continue
self._change_coefficients(rxn, coeff_iter)
else:
if self._add_reaction(reaction, lb, ub):
self._model.update()
if coefficients is None:
return
changes = [self._add_compound(pair[0]) for pair in coefficients]
if any(changes):
self._model.update()
self._change_coefficients(reaction, coefficients)
def _grb_iter_reactions(self, compound=None, coefficients=False):
# reports model data (may require update to be current)
if not compound:
return self._rxn2var.iterkeys()
lin_expr = self._model.getRow(self._cmpd2cnstrnt[compound])
if coefficients:
return ((self._var2reaction(lin_expr.getVar(i)), lin_expr.getCoeff(i))\
for i in range(lin_expr.size()))
else:
return (self._var2reaction(lin_expr.getVar(i))\
for i in range(lin_expr.size()))
def _grb_modify_reaction_coefficients(self, reaction, coefficients):
# we allow for lazy updating of the model here (better not be a bug)
if hasattr(reaction, "__iter__"):
if hasattr(coefficients, "__iter__"):
coefficients = itertools.repeat(coefficients)
for (rxn, coeff_iter) in itertools.izip(reaction, coefficients):
self._change_coefficients(rxn, coeff_iter)
else:
self._change_coefficients(reaction, coefficients)
def _grb__adjust_bounds(self, reaction, lb, ub):
"""
Adjust the lower and upper bound for a reaction.
Reversible reactions are treated specially since bounds may be split for
both directions.
"""
numeric_ub = not ub is None
numeric_lb = not lb is None
if numeric_ub and numeric_lb and ub < lb:
raise PyMetabolismError("Trying to set an upper bound that is smaller"\
" than the lower bound for '%s'.", str(reaction))
var = self._rxn2var[reaction]
if reaction.reversible:
var_rev = self._rev2var[reaction]
if numeric_ub:
var.ub = ub
var_rev.ub = ub
if numeric_lb:
if lb < 0.0:
var_rev.lb = 0.0
var_rev.ub = abs(lb)
var.lb = 0.0
else:
var_rev.lb = lb
var.lb = lb
else:
if numeric_ub:
var.ub = ub
if numeric_lb:
var.lb = lb
def _grb_modify_reaction_bounds(self, reaction, lb=None, ub=None):
# we allow for lazy updating of the model here (better not be a bug)
if hasattr(reaction, "__iter__"):
# we really modify multiple reactions
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
for (rxn, lb, ub) in itertools.izip(reaction, lb_iter, ub_iter):
self._adjust_bounds(rxn, lb, ub)
else:
self._adjust_bounds(reaction, lb, ub)
# for some reasons lazy updating of bounds does not work
self._model.update()
def _grb__bounds(self, reaction):
var = self._rxn2var[reaction]
if reaction.reversible:
var_rev = self._rev2var[reaction]
return (-var_rev.ub, var.ub)
else:
return (var.lb, var.ub)
def _grb_iter_reaction_bounds(self, reaction=None):
# reports model data (may require update to be current)
# we rely on reversible reactions being treated in unison
if reaction is None:
reaction = self._rxn2var.iterkeys()
return ((rxn, self._bounds(rxn)) for rxn in reaction)
elif hasattr(reaction, "__iter__"):
# we really get multiple reactions
return (self._bounds(rxn) for rxn in reaction)
else:
return self._bounds(reaction)
def _grb__fixed(self, reaction):
    var = self._rxn2var[reaction]
    fixed = var.lb == var.ub
    if reaction.reversible:
        var = self._rev2var[reaction]
        fixed &= var.lb == var.ub
    return fixed
def _grb_is_fixed(self, reaction=None):
    # updating is the only way currently to return newly added information
    self._model.update()
    if reaction is None:
        reaction = self._rxn2var.iterkeys()
        return all(self._fixed(rxn) for rxn in reaction)
    elif hasattr(reaction, "__iter__"):
        # we really get multiple reactions
        return all(self._fixed(rxn) for rxn in reaction)
    else:
        return self._fixed(reaction)
def _grb_free_reaction(self, reaction):
self.modify_reaction_bounds(reaction, lb=-self._grb.GRB.INFINITY,
ub=self._grb.GRB.INFINITY)
def _grb__del_reaction(self, reaction):
var = self._rxn2var.pop(reaction)
del self._var2rxn[var]
self._model.remove(var)
if reaction.reversible:
var = self._rev2var.pop(reaction)
del self._var2rev[var]
self._model.remove(var)
def _grb_delete_reaction(self, reaction):
if hasattr(reaction, "__iter__"):
for rxn in reaction:
self._del_reaction(rxn)
else:
self._del_reaction(reaction)
self._model.update()
def _grb__add_transport(self, compound, var, factor):
if self._model.getCol(var).size() > 0:
# transport already added
return
cnstrnt = self._cmpd2cnstrnt[compound]
self._model.chgCoeff(cnstrnt, var, factor)
def _grb__add_source(self, compound, lb, ub):
if compound in self._sources:
return False
self._sources[compound] = self._model.addVar(lb, ub,
name=str(compound) + "_Source")
return True
def _grb_add_compound_source(self, compound, lb=None, ub=None):
if lb is None:
lb = options.lower_bound
if ub is None:
ub = options.upper_bound
if hasattr(compound, "__iter__"):
# we really add multiple compounds
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
changes = [self._add_source(cmpd, lb, ub) for (cmpd, lb, ub)\
in itertools.izip(compound, lb_iter, ub_iter)]
if any(changes):
self._model.update()
# we allow for lazy updating of the model here (better not be a bug)
for cmpd in compound:
var = self._sources[cmpd]
self._add_transport(cmpd, var, 1.0)
else:
if self._add_source(compound, lb, ub):
self._model.update()
var = self._sources[compound]
self._add_transport(compound, var, 1.0)
def _grb_iter_sources(self):
return self._sources.iterkeys()
def _grb_delete_source(self, compound):
if hasattr(compound, "__iter__"):
for cmpd in compound:
var = self._sources.pop(cmpd)
self._model.remove(var)
else:
var = self._sources.pop(compound)
self._model.remove(var)
self._model.update()
def _grb__add_drain(self, compound, lb, ub):
if compound in self._drains:
return False
self._drains[compound] = self._model.addVar(lb, ub,
name=str(compound) + "_Drain")
return True
def _grb_add_compound_drain(self, compound, lb=None, ub=None):
if lb is None:
lb = options.lower_bound
if ub is None:
ub = options.upper_bound
if hasattr(compound, "__iter__"):
# we really add multiple compounds
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
changes = [self._add_drain(cmpd, lb, ub) for (cmpd, lb, ub)\
in itertools.izip(compound, lb_iter, ub_iter)]
if any(changes):
self._model.update()
# we allow for lazy updating of the model here (better not be a bug)
for cmpd in compound:
var = self._drains[cmpd]
self._add_transport(cmpd, var, -1.0)
else:
if self._add_drain(compound, lb, ub):
self._model.update()
var = self._drains[compound]
self._add_transport(compound, var, -1.0)
def _grb_iter_drains(self):
return self._drains.iterkeys()
def _grb_delete_drain(self, compound):
if hasattr(compound, "__iter__"):
for cmpd in compound:
var = self._drains.pop(cmpd)
self._model.remove(var)
else:
var = self._drains.pop(compound)
self._model.remove(var)
self._model.update()
def _grb_set_objective_reaction(self, reaction, factor):
# we allow for lazy updating of the model here (better not be a bug)
self._objective = dict()
if hasattr(reaction, "__iter__"):
if hasattr(factor, "__iter__"):
fctr_iter = factor
else:
fctr_iter = itertools.repeat(factor)
for (rxn, factor) in itertools.izip(reaction, fctr_iter):
self._objective[rxn] = factor
else:
self._objective[reaction] = factor
self._reset_objective()
def _grb__var2reaction(self, var):
return self._var2rxn[var] if var in self._var2rxn else self._var2rev[var]
def _grb_iter_objective_reaction(self, coefficients=False):
if coefficients:
return self._objective.iteritems()
else:
return self._objective.iterkeys()
def _grb_set_medium(self, compound, lb=None, ub=None):
# we allow for lazy updating of the model here (better not be a bug)
if lb is None:
lb = options.lower_bound
if ub is None:
ub = options.upper_bound
# constrain all sources first
for source in self._sources.itervalues():
source.lb = 0.0
source.ub = 0.0
if hasattr(compound, "__iter__"):
# we really add multiple compounds
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
for (cmpd, lb, ub) in itertools.izip(compound, lb_iter, ub_iter):
var = self._sources[cmpd]
var.lb = lb
var.ub = ub
else:
var = self._sources[compound]
var.lb = lb
var.ub = ub
def _grb__reset_objective(self):
objective = list()
for (rxn, factor) in self._objective.iteritems():
var = self._rxn2var[rxn]
var.lb = self._tmp_lb.get(var, var.lb)
objective.append((factor, var))
if rxn.reversible:
var = self._rev2var[rxn]
var.lb = self._tmp_lb.pop(var, var.lb)
objective.append((factor, var))
if objective:
self._model.setObjective(self._grb.LinExpr(objective))
def _grb__changed_objective(self):
# test whether we need to reset the objective, because of parsimonious_fba
# before
lin_expr = self._model.getObjective()
current = set([lin_expr.getVar(i) for i in range(lin_expr.size())])
objective = set()
for rxn in self._objective.iterkeys():
var = self._rxn2var[rxn]
objective.add(var)
if rxn.reversible:
var = self._rev2var[rxn]
objective.add(var)
return current != objective
def _grb_fba(self, maximize=True):
if self._changed_objective():
self._reset_objective()
if maximize:
self._model.modelSense = self._grb.GRB.MAXIMIZE
else:
self._model.modelSense = self._grb.GRB.MINIMIZE
self._model.optimize()
def _grb_parsimonious_fba(self):
if self._changed_objective():
self._reset_objective()
self._model.modelSense = self._grb.GRB.MAXIMIZE
self._model.optimize()
# _status should catch all problems (monitor this)
self._status()
lin_expr = self._model.getObjective()
objective = set([lin_expr.getVar(i) for i in range(lin_expr.size())])
    for var in objective:
        # pin each objective variable at its optimal value before minimizing the remaining fluxes
        self._tmp_lb[var] = var.lb
        var.lb = var.x
# now minimize all other variables
minimize = list()
for var in itertools.chain(self._rxn2var.itervalues(),
self._rev2var.itervalues()):
if not var in objective:
minimize.append((1.0, var))
if minimize:
self._model.setObjective(self._grb.LinExpr(minimize))
self._model.modelSense = self._grb.GRB.MINIMIZE
self._model.optimize()
def _grb__status(self):
"""
Determine the current status of the Gurobi model.
"""
status = self._model.status
if status == self._grb.GRB.LOADED:
raise PyMetabolismError("optimize before accessing flux information", errorno=status)
elif status == self._grb.GRB.OPTIMAL:
pass
elif status == self._grb.GRB.INFEASIBLE:
raise PyMetabolismError("model is infeasible", errorno=status)
elif status == self._grb.GRB.INF_OR_UNBD:
raise PyMetabolismError("model is infeasible or unbounded", errorno=status)
elif status == self._grb.GRB.UNBOUNDED:
raise PyMetabolismError("model is unbounded", errorno=status)
elif status == self._grb.GRB.CUTOFF:
raise PyMetabolismError("model solution is worse than provided cut-off", errorno=status)
elif status == self._grb.GRB.ITERATION_LIMIT:
raise PyMetabolismError("iteration limit exceeded", errorno=status)
elif status == self._grb.GRB.NODE_LIMIT:
raise PyMetabolismError("node limit exceeded", errorno=status)
elif status == self._grb.GRB.TIME_LIMIT:
raise PyMetabolismError("time limit exceeded", errorno=status)
elif status == self._grb.GRB.SOLUTION_LIMIT:
raise PyMetabolismError("solution limit reached", errorno=status)
elif status == self._grb.GRB.INTERRUPTED:
raise PyMetabolismError("optimization process was interrupted", errorno=status)
elif status == self._grb.GRB.SUBOPTIMAL:
raise PyMetabolismError("solution is suboptimal", errorno=status)
elif status == self._grb.GRB.NUMERIC:
raise PyMetabolismError("optimization aborted due to numeric difficulties", errorno=status)
def _grb_get_objective_value(self, threshold=None):
# _status should catch all problems (monitor this)
self._status()
if threshold is None:
threshold = options.numeric_threshold
return sum(self._flux(rxn, threshold) * factor for (rxn, factor)\
in self._objective.iteritems())
def _grb__flux(self, reaction, threshold):
flux = self._rxn2var[reaction].x
if reaction.reversible:
flux -= self._rev2var[reaction].x
return flux if abs(flux) > threshold else 0.0
def _grb_iter_flux(self, reaction=None, threshold=None):
# _status should catch all problems (monitor this)
self._status()
if threshold is None:
threshold = options.numeric_threshold
if reaction is None:
return ((rxn, self._flux(rxn, threshold)) for rxn in\
self._rxn2var.iterkeys())
elif hasattr(reaction, "__iter__"):
return (self._flux(rxn, threshold) for rxn in reaction)
else:
return self._flux(reaction, threshold)
def _grb__reduced_cost(self, reaction, threshold):
cost = self._rxn2var[reaction].rc
if reaction.reversible:
cost -= self._rev2var[reaction].rc
return cost if abs(cost) > threshold else 0.0
def _grb_iter_reduced_cost(self, reaction=None, threshold=None):
# _status should catch all problems (monitor this)
self._status()
if threshold is None:
threshold = options.numeric_threshold
if reaction is None:
return ((rxn, self._reduced_cost(rxn, threshold)) for rxn in\
self._rxn2var.iterkeys())
elif hasattr(reaction, "__iter__"):
return (self._reduced_cost(rxn, threshold) for rxn in reaction)
else:
return self._reduced_cost(reaction, threshold)
def _grb_iter_shadow_price(self, compound=None):
# _status should catch all problems (monitor this)
self._status()
    if compound is None:
        return ((cmpd, cnstrnt.pi) for (cmpd, cnstrnt) in\
                self._cmpd2cnstrnt.iteritems())
    elif hasattr(compound, "__iter__"):
        return (self._cmpd2cnstrnt[cmpd].pi for cmpd in compound)
    else:
        return self._cmpd2cnstrnt[compound].pi
def _grb_export2lp(self, filename):
filename += ".lp"
self._model.write(filename)
################################################################################
# CVXOPT Facade
################################################################################
def _cvx_populate(attrs):
cvxopt = misc.load_module("cvxopt", "CVXOPT",
"http://abel.ee.ucla.edu/cvxopt/")
# set cvxopt solver options
cvxopt.solvers.options["show_progress"] = False
cvxopt.solvers.options["feastol"] = options.numeric_threshold
# set class attributes
attrs["_cvx"] = cvxopt
for (key, value) in attrs.iteritems():
if key.startswith("_"):
continue
try:
attrs[key] = eval("_cvx_" + key)
attrs[key].__doc__ = value.__doc__
except NameError:
pass
################################################################################
# GLPK Facade
################################################################################
def _cvx_glpk(attrs):
cvxopt = misc.load_module("cvxopt", "CVXOPT",
"http://abel.ee.ucla.edu/cvxopt/")
misc.load_module("cvxopt.glpk", "CVXOPT-GLPK",
"http://abel.ee.ucla.edu/cvxopt/")
cvxopt.solvers.options['LPX_K_MSGLEV'] = 0
################################################################################
# MOSEK Facade
################################################################################
def _cvx_mosek(attrs):
cvxopt = misc.load_module("cvxopt", "CVXOPT",
"http://abel.ee.ucla.edu/cvxopt/")
misc.load_module("cvxopt.msk", "CVXOPT-MOSEK",
"http://abel.ee.ucla.edu/cvxopt/")
mosek = misc.load_module("mosek", "CVXOPT-MOSEK",
"http://abel.ee.ucla.edu/cvxopt/")
cvxopt.solvers.options['MOSEK'] = {mosek.iparam.log: 0}
| <filename>pymetabolism/lpmodels.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
====================
LP Solver Interfaces
====================
:Authors:
<NAME>
<NAME>
<NAME>
:Date:
2011-03-28
:Copyright:
Copyright(c) 2011 Jacobs University of Bremen. All rights reserved.
:File:
lpmodels.py
"""
import os
import itertools
import copy
import logging
import tempfile
from . import miscellaneous as misc
from .errors import PyMetabolismError
logger = logging.getLogger(__name__)
logger.addHandler(misc.NullHandler())
options = misc.OptionsManager.get_instance()
class MetaLPModelFacade(type):
"""
Meta class that, according to the solver chosen in the options, populates
the given class with methods that are solver specific.
The general interface of the created class is supposed to look like the one
described by `FBAModel`.
"""
def __new__(mcls, name, bases, dct):
"""
"""
if options.lp_solver.lower() == "gurobi":
_grb_populate(dct)
elif options.lp_solver.lower() == "cvxopt":
_cvx_populate(dct)
elif options.lp_solver.lower() == "glpk":
_cvx_populate(dct)
_cvx_glpk(dct)
elif options.lp_solver.lower() == "mosek":
_cvx_populate(dct)
_cvx_mosek(dct)
return super(MetaLPModelFacade, mcls).__new__(mcls, name, bases, dct)
################################################################################
# Gurobi Facade
################################################################################
def _grb_populate(attrs):
grb = misc.load_module("gurobipy", "Gurobi", "http://www.gurobi.com/")
# suppress reports to stdout
grb.setParam("OutputFlag", 0)
# deal with Gurobi's annoying log file
tmp_file = tempfile.mkstemp()[1] # absolute path component
grb.setParam("LogFile", tmp_file)
os.remove("gurobi.log")
# set the number of processes
grb.setParam("Threads", options.num_proc)
    # set the feasibility tolerance (smaller is more accurate but harder)
grb.setParam("FeasibilityTol", options.numeric_threshold)
# set class attributes
attrs["_grb"] = grb
for (key, value) in attrs.iteritems():
if key.startswith("_"):
continue
try:
attrs[key] = eval("_grb_" + key)
attrs[key].__doc__ = value.__doc__
except NameError:
pass
# gurobi helper functions
attrs["__init__"] = _grb___init__
# attrs["__copy__"] = _grb___copy__
# attrs["__deepcopy__"] = _grb___deepcopy__
attrs["__str__"] = _grb___str__
attrs["_add_compound"] = _grb__add_compound
attrs["_change_participation"] = _grb__change_participation
attrs["_make_binary"] = _grb__make_binary
attrs["_make_integer"] = _grb__make_integer
attrs["_add_reaction"] = _grb__add_reaction
attrs["_change_coefficients"] = _grb__change_coefficients
attrs["_adjust_bounds"] = _grb__adjust_bounds
attrs["_bounds"] = _grb__bounds
attrs["_fixed"] = _grb__fixed
attrs["_add_transport"] = _grb__add_transport
attrs["_add_source"] = _grb__add_source
attrs["_add_drain"] = _grb__add_drain
attrs["_var2reaction"] = _grb__var2reaction
attrs["_reset_objective"] = _grb__reset_objective
attrs["_changed_objective"] = _grb__changed_objective
attrs["_status"] = _grb__status
attrs["_flux"] = _grb__flux
attrs["_reduced_cost"] = _grb__reduced_cost
def _grb___init__(self, name):
self._model = self._grb.Model(name)
self._rxn2var = dict()
self._var2rxn = dict()
self._rev2var = dict()
self._var2rev = dict()
self._cmpd2cnstrnt = dict()
self._cnstrnt2cmpd = dict()
self._sources = dict()
self._drains = dict()
self._objective = dict()
self._tmp_lb = dict()
#def _grb___copy__(self):
# # TODO
# cpy = self.__class__(self.name)
# cpy._model = self._model.copy()
# cpy._system2grb = dict()
# for col in cpy._model.getVars():
# cpy._system2grb[col.getAttr("VarName")] = col
# cpy._system2grb = dict()
# for row in cpy._model.getConstrs():
# cpy._system2grb[row.getAttr("ConstrName")] = row
# return cpy
#
#def _grb___deepcopy__(self, memo=dict()):
# # TODO
# return self.__copy__()
#
#def _grb_copy(self):
# # TODO
# return self.__deepcopy__()
def _grb___str__(self):
self._model.update()
message = list()
message.append("Objective:")
lin_expr = self._model.getObjective()
msg = ["%f %s" % (lin_expr.getCoeff(i), lin_expr.getVar(i).varName) for
i in range(lin_expr.size())]
message.append(" + ".join(msg))
message.append("Constraints:")
for cnstrnt in self._model.getConstrs():
lin_expr = self._model.getRow(cnstrnt)
msg = ["%f %s" % (lin_expr.getCoeff(i), lin_expr.getVar(i).varName) for
i in range(lin_expr.size())]
message.append("%s: %s = %s" % (cnstrnt.constrName, " + ".join(msg),
str(cnstrnt.rhs)))
message.append("Bounds:")
for var in self._model.getVars():
message.append("%f <= %s <= %f" % (var.lb, var.varName, var.ub))
return "\n".join(message)
def _grb__add_compound(self, compound):
if compound in self._cmpd2cnstrnt:
return False
cnstrnt = self._model.addConstr(0.0, self._grb.GRB.EQUAL,
0.0, name=str(compound))
self._cmpd2cnstrnt[compound] = cnstrnt
self._cnstrnt2cmpd[cnstrnt] = compound
return True
def _grb__change_participation(self, compound, coefficients):
cnstrnt = self._cmpd2cnstrnt[compound]
for (rxn, factor) in coefficients:
# will throw KeyError if reaction doesn't exist yet
var = self._rxn2var[rxn]
self._model.chgCoeff(cnstrnt, var, factor)
if rxn.reversible:
var = self._rev2var[rxn]
self._model.chgCoeff(cnstrnt, var, -factor)
def _grb_add_compound(self, compound, coefficients=None):
if hasattr(compound, "__iter__"):
# we really add multiple compounds
if hasattr(coefficients, "__iter__"):
coefficients = itertools.repeat(coefficients)
changes = [self._add_compound(cmpd) for cmpd in compound]
if any(changes):
self._model.update()
if coefficients is None:
return
for (cmpd, coeff_iter) in itertools.izip(compound, coefficients):
if self._model.getRow(self._cmpd2cnstrnt[cmpd]).size() > 0:
# compound participates in existing reactions thus was added before
continue
self._change_participation(cmpd, coeff_iter)
else:
if self._add_compound(compound):
self._model.update()
if coefficients is None:
return
self._change_participation(compound, coefficients)
def _grb_iter_compounds(self, reaction=None, coefficients=False):
# reports model data (may require update to be current)
if reaction is None:
return self._cmpd2cnstrnt.iterkeys()
column = self._model.getCol(self._rxn2var[reaction])
if coefficients:
return ((self._cnstrnt2cmpd[column.getConstr(i)], column.getCoeff(i))\
for i in range(column.size()))
else:
return (self._cnstrnt2cmpd[column.getConstr(i)]\
for i in range(column.size()))
def _grb_modify_compound_coefficients(self, compound, coefficients):
if hasattr(compound, "__iter__"):
if hasattr(coefficients, "__iter__"):
coefficients = itertools.repeat(coefficients)
for (cmpd, coeff_iter) in itertools.izip(compound, coefficients):
self._change_participation(cmpd, coeff_iter)
else:
self._change_participation(compound, coefficients)
def _grb_free_compound(self, compound):
cnstrnt = self._cmpd2cnstrnt[compound]
lin_expr = self._model.getRow(cnstrnt)
for i in range(lin_expr.size()):
var = lin_expr.getVar(i)
self._model.chgCoeff(cnstrnt, var, 0.0)
def _grb_knockout_compound(self, compound):
cnstrnt = self._cmpd2cnstrnt[compound]
lin_expr = self._model.getRow(cnstrnt)
for i in range(lin_expr.size()):
var = lin_expr.getVar(i)
var.lb = 0.0
var.ub = 0.0
def _grb__del_compound(self, compound):
cnstrnt = self._cmpd2cnstrnt.pop(compound)
del self._cnstrnt2cmpd[cnstrnt]
self._model.remove(cnstrnt)
def _grb_delete_compound(self, compound):
if hasattr(compound, "__iter__"):
for cmpd in compound:
self._del_compound(cmpd)
else:
self._del_compound(compound)
self._model.update()
def _grb__make_binary(self, reaction):
if hasattr(reaction, "__iter__"):
for rxn in reaction:
var = self._rxn2var[rxn]
var.vType = "B"
if rxn.reversible:
var = self._rev2var[rxn]
var.vType = "B"
else:
var = self._rxn2var[reaction]
var.vType = "B"
if reaction.reversible:
var = self._rev2var[reaction]
var.vType = "B"
def _grb__make_integer(self, reaction):
if hasattr(reaction, "__iter__"):
for rxn in reaction:
var = self._rxn2var[rxn]
var.vType = "I"
if rxn.reversible:
var = self._rev2var[rxn]
var.vType = "I"
else:
var = self._rxn2var[reaction]
var.vType = "I"
if reaction.reversible:
var = self._rev2var[reaction]
var.vType = "I"
def _grb__add_reaction(self, reaction, lb, ub):
if self._rxn2var.has_key(reaction):
return False
if reaction.reversible:
# we rely on lb being numeric here due to default options
if lb < 0:
var_rev = self._model.addVar(0.0, abs(lb), name=str(reaction) +
options.reversible_suffix)
var = self._model.addVar(0.0, ub, name=str(reaction))
else:
var_rev = self._model.addVar(lb, ub, name=str(reaction) +
options.reversible_suffix)
var = self._model.addVar(lb, ub, name=str(reaction))
self._rev2var[reaction] = var_rev
self._var2rev[var_rev] = reaction
else:
var = self._model.addVar(lb, ub, name=str(reaction))
self._rxn2var[reaction] = var
self._var2rxn[var] = reaction
return True
def _grb__change_coefficients(self, reaction, coefficients):
var = self._rxn2var[reaction]
for (cmpd, factor) in coefficients:
cnstrnt = self._cmpd2cnstrnt[cmpd]
self._model.chgCoeff(cnstrnt, var, factor)
if reaction.reversible:
var = self._rev2var[reaction]
for (cmpd, factor) in coefficients:
cnstrnt = self._cmpd2cnstrnt[cmpd]
self._model.chgCoeff(cnstrnt, var, -factor)
def _grb_add_reaction(self, reaction, coefficients=None, lb=None, ub=None):
if lb is None:
lb = options.lower_bound
if ub is None:
ub = options.upper_bound
if hasattr(reaction, "__iter__"):
# we really add multiple reactions
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
changes = [self._add_reaction(rxn, lb, ub) for (rxn, lb, ub)\
in itertools.izip(reaction, lb_iter, ub_iter)]
if any(changes):
self._model.update()
if coefficients is None:
return
# need to find out if we are dealing with a nested list or not
if not (isinstance(coefficients, list) and isinstance(coefficients[0],
list)):
coefficients = itertools.repeat(coefficients)
for (rxn, coeff_iter) in itertools.izip(reaction, coefficients):
changes = [self._add_compound(pair[0]) for pair in coeff_iter]
if any(changes):
self._model.update()
if self._model.getCol(self._rxn2var[rxn]).size() > 0:
# reaction has constraints and was added before
continue
self._change_coefficients(rxn, coeff_iter)
else:
if self._add_reaction(reaction, lb, ub):
self._model.update()
if coefficients is None:
return
changes = [self._add_compound(pair[0]) for pair in coefficients]
if any(changes):
self._model.update()
self._change_coefficients(reaction, coefficients)
def _grb_iter_reactions(self, compound=None, coefficients=False):
# reports model data (may require update to be current)
if not compound:
return self._rxn2var.iterkeys()
lin_expr = self._model.getRow(self._cmpd2cnstrnt[compound])
if coefficients:
return ((self._var2reaction(lin_expr.getVar(i)), lin_expr.getCoeff(i))\
for i in range(lin_expr.size()))
else:
return (self._var2reaction(lin_expr.getVar(i))\
for i in range(lin_expr.size()))
def _grb_modify_reaction_coefficients(self, reaction, coefficients):
# we allow for lazy updating of the model here (better not be a bug)
if hasattr(reaction, "__iter__"):
if hasattr(coefficients, "__iter__"):
coefficients = itertools.repeat(coefficients)
for (rxn, coeff_iter) in itertools.izip(reaction, coefficients):
self._change_coefficients(rxn, coeff_iter)
else:
self._change_coefficients(reaction, coefficients)
def _grb__adjust_bounds(self, reaction, lb, ub):
"""
Adjust the lower and upper bound for a reaction.
Reversible reactions are treated specially since bounds may be split for
both directions.
"""
numeric_ub = not ub is None
numeric_lb = not lb is None
if numeric_ub and numeric_lb and ub < lb:
raise PyMetabolismError("Trying to set an upper bound that is smaller"\
" than the lower bound for '%s'.", str(reaction))
var = self._rxn2var[reaction]
if reaction.reversible:
var_rev = self._rev2var[reaction]
if numeric_ub:
var.ub = ub
var_rev.ub = ub
if numeric_lb:
if lb < 0.0:
var_rev.lb = 0.0
var_rev.ub = abs(lb)
var.lb = 0.0
else:
var_rev.lb = lb
var.lb = lb
else:
if numeric_ub:
var.ub = ub
if numeric_lb:
var.lb = lb
def _grb_modify_reaction_bounds(self, reaction, lb=None, ub=None):
# we allow for lazy updating of the model here (better not be a bug)
if hasattr(reaction, "__iter__"):
# we really modify multiple reactions
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
for (rxn, lb, ub) in itertools.izip(reaction, lb_iter, ub_iter):
self._adjust_bounds(rxn, lb, ub)
else:
self._adjust_bounds(reaction, lb, ub)
# for some reasons lazy updating of bounds does not work
self._model.update()
def _grb__bounds(self, reaction):
var = self._rxn2var[reaction]
if reaction.reversible:
var_rev = self._rev2var[reaction]
return (-var_rev.ub, var.ub)
else:
return (var.lb, var.ub)
def _grb_iter_reaction_bounds(self, reaction=None):
# reports model data (may require update to be current)
# we rely on reversible reactions being treated in unison
if reaction is None:
reaction = self._rxn2var.iterkeys()
return ((rxn, self._bounds(rxn)) for rxn in reaction)
elif hasattr(reaction, "__iter__"):
# we really get multiple reactions
return (self._bounds(rxn) for rxn in reaction)
else:
return self._bounds(reaction)
def _grb__fixed(self, reaction):
    var = self._rxn2var[reaction]
    fixed = var.lb == var.ub
    if reaction.reversible:
        var = self._rev2var[reaction]
        fixed &= var.lb == var.ub
    return fixed
def _grb_is_fixed(self, reaction=None):
    # updating is the only way currently to return newly added information
    self._model.update()
    if reaction is None:
        reaction = self._rxn2var.iterkeys()
        return all(self._fixed(rxn) for rxn in reaction)
    elif hasattr(reaction, "__iter__"):
        # we really get multiple reactions
        return all(self._fixed(rxn) for rxn in reaction)
    else:
        return self._fixed(reaction)
def _grb_free_reaction(self, reaction):
self.modify_reaction_bounds(reaction, lb=-self._grb.GRB.INFINITY,
ub=self._grb.GRB.INFINITY)
def _grb__del_reaction(self, reaction):
var = self._rxn2var.pop(reaction)
del self._var2rxn[var]
self._model.remove(var)
if reaction.reversible:
var = self._rev2var.pop(reaction)
del self._var2rev[var]
self._model.remove(var)
def _grb_delete_reaction(self, reaction):
if hasattr(reaction, "__iter__"):
for rxn in reaction:
self._del_reaction(rxn)
else:
self._del_reaction(reaction)
self._model.update()
def _grb__add_transport(self, compound, var, factor):
if self._model.getCol(var).size() > 0:
# transport already added
return
cnstrnt = self._cmpd2cnstrnt[compound]
self._model.chgCoeff(cnstrnt, var, factor)
def _grb__add_source(self, compound, lb, ub):
if compound in self._sources:
return False
self._sources[compound] = self._model.addVar(lb, ub,
name=str(compound) + "_Source")
return True
def _grb_add_compound_source(self, compound, lb=None, ub=None):
if lb is None:
lb = options.lower_bound
if ub is None:
ub = options.upper_bound
if hasattr(compound, "__iter__"):
# we really add multiple compounds
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
changes = [self._add_source(cmpd, lb, ub) for (cmpd, lb, ub)\
in itertools.izip(compound, lb_iter, ub_iter)]
if any(changes):
self._model.update()
# we allow for lazy updating of the model here (better not be a bug)
for cmpd in compound:
var = self._sources[cmpd]
self._add_transport(cmpd, var, 1.0)
else:
if self._add_source(compound, lb, ub):
self._model.update()
var = self._sources[compound]
self._add_transport(compound, var, 1.0)
def _grb_iter_sources(self):
return self._sources.iterkeys()
def _grb_delete_source(self, compound):
if hasattr(compound, "__iter__"):
for cmpd in compound:
var = self._sources.pop(cmpd)
self._model.remove(var)
else:
var = self._sources.pop(compound)
self._model.remove(var)
self._model.update()
def _grb__add_drain(self, compound, lb, ub):
if compound in self._drains:
return False
self._drains[compound] = self._model.addVar(lb, ub,
name=str(compound) + "_Drain")
return True
def _grb_add_compound_drain(self, compound, lb=None, ub=None):
if lb is None:
lb = options.lower_bound
if ub is None:
ub = options.upper_bound
if hasattr(compound, "__iter__"):
# we really add multiple compounds
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
changes = [self._add_drain(cmpd, lb, ub) for (cmpd, lb, ub)\
in itertools.izip(compound, lb_iter, ub_iter)]
if any(changes):
self._model.update()
# we allow for lazy updating of the model here (better not be a bug)
for cmpd in compound:
var = self._drains[cmpd]
self._add_transport(cmpd, var, -1.0)
else:
if self._add_drain(compound, lb, ub):
self._model.update()
var = self._drains[compound]
self._add_transport(compound, var, -1.0)
def _grb_iter_drains(self):
return self._drains.iterkeys()
def _grb_delete_drain(self, compound):
if hasattr(compound, "__iter__"):
for cmpd in compound:
var = self._drains.pop(cmpd)
self._model.remove(var)
else:
var = self._drains.pop(compound)
self._model.remove(var)
self._model.update()
def _grb_set_objective_reaction(self, reaction, factor):
# we allow for lazy updating of the model here (better not be a bug)
self._objective = dict()
if hasattr(reaction, "__iter__"):
if hasattr(factor, "__iter__"):
fctr_iter = factor
else:
fctr_iter = itertools.repeat(factor)
for (rxn, factor) in itertools.izip(reaction, fctr_iter):
self._objective[rxn] = factor
else:
self._objective[reaction] = factor
self._reset_objective()
def _grb__var2reaction(self, var):
return self._var2rxn[var] if var in self._var2rxn else self._var2rev[var]
def _grb_iter_objective_reaction(self, coefficients=False):
if coefficients:
return self._objective.iteritems()
else:
return self._objective.iterkeys()
def _grb_set_medium(self, compound, lb=None, ub=None):
# we allow for lazy updating of the model here (better not be a bug)
if lb is None:
lb = options.lower_bound
if ub is None:
ub = options.upper_bound
# constrain all sources first
for source in self._sources.itervalues():
source.lb = 0.0
source.ub = 0.0
if hasattr(compound, "__iter__"):
# we really add multiple compounds
if hasattr(lb, "__iter__"):
lb_iter = lb
else:
lb_iter = itertools.repeat(lb)
if hasattr(ub, "__iter__"):
ub_iter = ub
else:
ub_iter = itertools.repeat(ub)
for (cmpd, lb, ub) in itertools.izip(compound, lb_iter, ub_iter):
var = self._sources[cmpd]
var.lb = lb
var.ub = ub
else:
var = self._sources[compound]
var.lb = lb
var.ub = ub
def _grb__reset_objective(self):
objective = list()
for (rxn, factor) in self._objective.iteritems():
var = self._rxn2var[rxn]
var.lb = self._tmp_lb.get(var, var.lb)
objective.append((factor, var))
if rxn.reversible:
var = self._rev2var[rxn]
var.lb = self._tmp_lb.pop(var, var.lb)
objective.append((factor, var))
if objective:
self._model.setObjective(self._grb.LinExpr(objective))
def _grb__changed_objective(self):
# test whether we need to reset the objective, because of parsimonious_fba
# before
lin_expr = self._model.getObjective()
current = set([lin_expr.getVar(i) for i in range(lin_expr.size())])
objective = set()
for rxn in self._objective.iterkeys():
var = self._rxn2var[rxn]
objective.add(var)
if rxn.reversible:
var = self._rev2var[rxn]
objective.add(var)
return current != objective
def _grb_fba(self, maximize=True):
if self._changed_objective():
self._reset_objective()
if maximize:
self._model.modelSense = self._grb.GRB.MAXIMIZE
else:
self._model.modelSense = self._grb.GRB.MINIMIZE
self._model.optimize()
def _grb_parsimonious_fba(self):
if self._changed_objective():
self._reset_objective()
self._model.modelSense = self._grb.GRB.MAXIMIZE
self._model.optimize()
# _status should catch all problems (monitor this)
self._status()
lin_expr = self._model.getObjective()
objective = set([lin_expr.getVar(i) for i in range(lin_expr.size())])
    for var in objective:
        # remember the original lower bound and fix the variable at its optimum
        self._tmp_lb[var] = var.lb
        var.lb = var.x
# now minimize all other variables
minimize = list()
for var in itertools.chain(self._rxn2var.itervalues(),
self._rev2var.itervalues()):
if not var in objective:
minimize.append((1.0, var))
if minimize:
self._model.setObjective(self._grb.LinExpr(minimize))
self._model.modelSense = self._grb.GRB.MINIMIZE
self._model.optimize()
def _grb__status(self):
"""
Determine the current status of the Gurobi model.
"""
status = self._model.status
if status == self._grb.GRB.LOADED:
raise PyMetabolismError("optimize before accessing flux information", errorno=status)
elif status == self._grb.GRB.OPTIMAL:
pass
elif status == self._grb.GRB.INFEASIBLE:
raise PyMetabolismError("model is infeasible", errorno=status)
elif status == self._grb.GRB.INF_OR_UNBD:
raise PyMetabolismError("model is infeasible or unbounded", errorno=status)
elif status == self._grb.GRB.UNBOUNDED:
raise PyMetabolismError("model is unbounded", errorno=status)
elif status == self._grb.GRB.CUTOFF:
raise PyMetabolismError("model solution is worse than provided cut-off", errorno=status)
elif status == self._grb.GRB.ITERATION_LIMIT:
raise PyMetabolismError("iteration limit exceeded", errorno=status)
elif status == self._grb.GRB.NODE_LIMIT:
raise PyMetabolismError("node limit exceeded", errorno=status)
elif status == self._grb.GRB.TIME_LIMIT:
raise PyMetabolismError("time limit exceeded", errorno=status)
elif status == self._grb.GRB.SOLUTION_LIMIT:
raise PyMetabolismError("solution limit reached", errorno=status)
elif status == self._grb.GRB.INTERRUPTED:
raise PyMetabolismError("optimization process was interrupted", errorno=status)
elif status == self._grb.GRB.SUBOPTIMAL:
raise PyMetabolismError("solution is suboptimal", errorno=status)
elif status == self._grb.GRB.NUMERIC:
raise PyMetabolismError("optimization aborted due to numeric difficulties", errorno=status)
def _grb_get_objective_value(self, threshold=None):
# _status should catch all problems (monitor this)
self._status()
if threshold is None:
threshold = options.numeric_threshold
return sum(self._flux(rxn, threshold) * factor for (rxn, factor)\
in self._objective.iteritems())
def _grb__flux(self, reaction, threshold):
flux = self._rxn2var[reaction].x
if reaction.reversible:
flux -= self._rev2var[reaction].x
return flux if abs(flux) > threshold else 0.0
def _grb_iter_flux(self, reaction=None, threshold=None):
# _status should catch all problems (monitor this)
self._status()
if threshold is None:
threshold = options.numeric_threshold
if reaction is None:
return ((rxn, self._flux(rxn, threshold)) for rxn in\
self._rxn2var.iterkeys())
elif hasattr(reaction, "__iter__"):
return (self._flux(rxn, threshold) for rxn in reaction)
else:
return self._flux(reaction, threshold)
def _grb__reduced_cost(self, reaction, threshold):
cost = self._rxn2var[reaction].rc
if reaction.reversible:
cost -= self._rev2var[reaction].rc
return cost if abs(cost) > threshold else 0.0
def _grb_iter_reduced_cost(self, reaction=None, threshold=None):
# _status should catch all problems (monitor this)
self._status()
if threshold is None:
threshold = options.numeric_threshold
if reaction is None:
return ((rxn, self._reduced_cost(rxn, threshold)) for rxn in\
self._rxn2var.iterkeys())
elif hasattr(reaction, "__iter__"):
return (self._reduced_cost(rxn, threshold) for rxn in reaction)
else:
return self._reduced_cost(reaction, threshold)
def _grb_iter_shadow_price(self, compound=None):
# _status should catch all problems (monitor this)
self._status()
    if compound is None:
        return ((cmpd, cnstrnt.pi) for (cmpd, cnstrnt) in\
                self._cmpd2cnstrnt.iteritems())
    elif hasattr(compound, "__iter__"):
        return (self._cmpd2cnstrnt[cmpd].pi for cmpd in compound)
    else:
        return self._cmpd2cnstrnt[compound].pi
def _grb_export2lp(self, filename):
filename += ".lp"
self._model.write(filename)
################################################################################
# CVXOPT Facade
################################################################################
def _cvx_populate(attrs):
cvxopt = misc.load_module("cvxopt", "CVXOPT",
"http://abel.ee.ucla.edu/cvxopt/")
# set cvxopt solver options
cvxopt.solvers.options["show_progress"] = False
cvxopt.solvers.options["feastol"] = options.numeric_threshold
# set class attributes
attrs["_cvx"] = cvxopt
for (key, value) in attrs.iteritems():
if key.startswith("_"):
continue
try:
attrs[key] = eval("_cvx_" + key)
attrs[key].__doc__ = value.__doc__
except NameError:
pass
################################################################################
# GLPK Facade
################################################################################
def _cvx_glpk(attrs):
cvxopt = misc.load_module("cvxopt", "CVXOPT",
"http://abel.ee.ucla.edu/cvxopt/")
misc.load_module("cvxopt.glpk", "CVXOPT-GLPK",
"http://abel.ee.ucla.edu/cvxopt/")
cvxopt.solvers.options['LPX_K_MSGLEV'] = 0
################################################################################
# MOSEK Facade
################################################################################
def _cvx_mosek(attrs):
cvxopt = misc.load_module("cvxopt", "CVXOPT",
"http://abel.ee.ucla.edu/cvxopt/")
misc.load_module("cvxopt.msk", "CVXOPT-MOSEK",
"http://abel.ee.ucla.edu/cvxopt/")
mosek = misc.load_module("mosek", "CVXOPT-MOSEK",
"http://abel.ee.ucla.edu/cvxopt/")
cvxopt.solvers.options['MOSEK'] = {mosek.iparam.log: 0}
| en | 0.638232 | #!/usr/bin/env python # -*- coding: utf-8 -*- ==================== LP Solver Interfaces ==================== :Authors: <NAME> <NAME> <NAME> :Date: 2011-03-28 :Copyright: Copyright(c) 2011 Jacobs University of Bremen. All rights reserved. :File: lpmodels.py Meta class that, according to the solver chosen in the options, populates the given class with methods that are solver specific. The general interface of the created class is supposed to look like the one described by `FBAModel`. ################################################################################ # Gurobi Facade ################################################################################ # suppress reports to stdout # deal with Gurobi's annoying log file # absolute path component # set the number of processes # set the feasability tolerance (smaller is more accurate but harder) # set class attributes # gurobi helper functions # attrs["__copy__"] = _grb___copy__ # attrs["__deepcopy__"] = _grb___deepcopy__ #def _grb___copy__(self): # # TODO # cpy = self.__class__(self.name) # cpy._model = self._model.copy() # cpy._system2grb = dict() # for col in cpy._model.getVars(): # cpy._system2grb[col.getAttr("VarName")] = col # cpy._system2grb = dict() # for row in cpy._model.getConstrs(): # cpy._system2grb[row.getAttr("ConstrName")] = row # return cpy # #def _grb___deepcopy__(self, memo=dict()): # # TODO # return self.__copy__() # #def _grb_copy(self): # # TODO # return self.__deepcopy__() # will throw KeyError if reaction doesn't exist yet # we really add multiple compounds # compound participates in existing reactions thus was added before # reports model data (may require update to be current) # we rely on lb being numeric here due to default options # we really add multiple reactions # need to find out if we are dealing with a nested list or not # reaction has constraints and was added before # reports model data (may require update to be current) # we allow for lazy updating of the model here (better not be a bug) Adjust the lower and upper bound for a reaction. Reversible reactions are treated specially since bounds may be split for both directions. # we allow for lazy updating of the model here (better not be a bug) # we really modify multiple reactions # for some reasons lazy updating of bounds does not work # reports model data (may require update to be current) # we rely on reversible reactions being treated in unison # we really get multiple reactions # updating is the only way currently to return newly added information # we really get multiple reactions # transport already added # we really add multiple compounds # we allow for lazy updating of the model here (better not be a bug) # we really add multiple compounds # we allow for lazy updating of the model here (better not be a bug) # we allow for lazy updating of the model here (better not be a bug) # we allow for lazy updating of the model here (better not be a bug) # constrain all sources first # we really add multiple compounds # test whether we need to reset the objective, because of parsimonious_fba # before # _status should catch all problems (monitor this) # now minimize all other variables Determine the current status of the Gurobi model. 
# _status should catch all problems (monitor this) # _status should catch all problems (monitor this) # _status should catch all problems (monitor this) # _status should catch all problems (monitor this) ################################################################################ # CVXOPT Facade ################################################################################ # set cvxopt solver options # set class attributes ################################################################################ # GLPK Facade ################################################################################ ################################################################################ # MOSEK Facade ################################################################################ | 2.458836 | 2 |
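For orientation, the snippet below exercises gurobipy directly with the same calls the facade above wraps (addVar, addConstr, setObjective, optimize) on a toy two-reaction network; it is an illustration added here, assumes a local Gurobi installation, and is not part of the module.

import gurobipy as grb

m = grb.Model("toy")
r1 = m.addVar(lb=0.0, ub=10.0, name="R1")   # A -> B
r2 = m.addVar(lb=0.0, ub=10.0, name="R2")   # B -> C
m.update()
balance_b = m.addConstr(r1 - r2 == 0.0, name="B")  # steady state for metabolite B
m.setObjective(grb.LinExpr([(1.0, r2)]), grb.GRB.MAXIMIZE)
m.optimize()
print(m.status == grb.GRB.OPTIMAL, r1.x, r2.x, balance_b.pi)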
backend/api/battles/serializers.py | pantoja/PokeBattle | 0 | 6632570 | from rest_framework import exceptions, serializers
from api.battles.helpers import order_pokemon_in_team
from api.pokemon.serializers import PokemonSerializer
from api.users.serializers import UserSerializer
from battles.choices import POKEMON_ORDER_CHOICES
from battles.helpers.common import duplicate_in_set, pokemon_team_exceeds_limit
from battles.models import Battle, Team
class DetailTeamSerializer(serializers.ModelSerializer):
trainer = UserSerializer()
team = serializers.SerializerMethodField()
def get_team(self, obj):
team_set = [obj.first_pokemon, obj.second_pokemon, obj.third_pokemon]
return PokemonSerializer(team_set, many=True).data
class Meta:
model = Team
fields = ["trainer", "team"]
class CreateTeamSerializer(serializers.ModelSerializer):
trainer = serializers.HiddenField(default=serializers.CurrentUserDefault())
choice_1 = serializers.ChoiceField(choices=POKEMON_ORDER_CHOICES, initial=1, write_only=True)
choice_2 = serializers.ChoiceField(choices=POKEMON_ORDER_CHOICES, initial=2, write_only=True)
choice_3 = serializers.ChoiceField(choices=POKEMON_ORDER_CHOICES, initial=3, write_only=True)
class Meta:
model = Team
fields = [
"battle",
"trainer",
"first_pokemon",
"second_pokemon",
"third_pokemon",
"choice_1",
"choice_2",
"choice_3",
]
def validate(self, attrs):
team = (attrs["first_pokemon"], attrs["second_pokemon"], attrs["third_pokemon"])
choices = (attrs["choice_1"], attrs["choice_2"], attrs["choice_3"])
if attrs["trainer"] not in (attrs["battle"].user_creator, attrs["battle"].user_opponent):
raise exceptions.NotFound()
if duplicate_in_set(team):
raise serializers.ValidationError("Your team has duplicates, please use unique pokemon")
if duplicate_in_set(choices):
raise serializers.ValidationError("Please allocate one pokemon per round")
if pokemon_team_exceeds_limit(team):
raise serializers.ValidationError(
"Your team exceeds the 600 points limit, please choose another team"
)
attrs = order_pokemon_in_team(attrs)
return attrs
class DetailBattleSerializer(serializers.ModelSerializer):
creator = serializers.SerializerMethodField()
opponent = serializers.SerializerMethodField()
winner = serializers.SerializerMethodField()
created = serializers.DateTimeField(format="%d/%m/%y")
def get_winner(self, obj):
if not obj.winner:
return None
return obj.winner.email
def get_creator(self, obj):
team = obj.teams.get(trainer=obj.user_creator)
serializer = DetailTeamSerializer(team)
return serializer.data
def get_opponent(self, obj):
if not obj.settled:
return {"trainer": UserSerializer(obj.user_opponent).data, "team": None}
team = obj.teams.get(trainer=obj.user_opponent)
serializer = DetailTeamSerializer(team)
return serializer.data
class Meta:
model = Battle
fields = ["id", "winner", "created", "creator", "opponent"]
class CreateBattleSerializer(serializers.ModelSerializer):
user_creator = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Battle
fields = ["user_creator", "user_opponent"]
def validate(self, attrs):
if self.context["request"].user == attrs["user_opponent"]:
raise serializers.ValidationError("You can't battle yourself")
return attrs
| none | 1 | 2.362895 | 2 |
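A possible way to expose the serializers above through DRF generic views is sketched below; the view class and the method-based serializer switch are assumptions for illustration, not code from the PokeBattle repository.

from rest_framework import generics

from api.battles.serializers import CreateBattleSerializer, DetailBattleSerializer
from battles.models import Battle

class BattleListCreateView(generics.ListCreateAPIView):
    queryset = Battle.objects.all()

    def get_serializer_class(self):
        # read with the detail serializer, create with the write serializer
        if self.request.method == "GET":
            return DetailBattleSerializer
        return CreateBattleSerializer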
|
env/lib/python3.6/site-packages/scipy/stats/tests/test_tukeylambda_stats.py | anthowen/duplify | 6,989 | 6632571 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from scipy.stats._tukeylambda_stats import (tukeylambda_variance,
tukeylambda_kurtosis)
def test_tukeylambda_stats_known_exact():
"""Compare results with some known exact formulas."""
# Some exact values of the Tukey Lambda variance and kurtosis:
# lambda var kurtosis
# 0 pi**2/3 6/5 (logistic distribution)
# 0.5 4 - pi (5/3 - pi/2)/(pi/4 - 1)**2 - 3
# 1 1/3 -6/5 (uniform distribution on (-1,1))
# 2 1/12 -6/5 (uniform distribution on (-1/2, 1/2))
# lambda = 0
var = tukeylambda_variance(0)
assert_allclose(var, np.pi**2 / 3, atol=1e-12)
kurt = tukeylambda_kurtosis(0)
assert_allclose(kurt, 1.2, atol=1e-10)
# lambda = 0.5
var = tukeylambda_variance(0.5)
assert_allclose(var, 4 - np.pi, atol=1e-12)
kurt = tukeylambda_kurtosis(0.5)
desired = (5./3 - np.pi/2) / (np.pi/4 - 1)**2 - 3
assert_allclose(kurt, desired, atol=1e-10)
# lambda = 1
var = tukeylambda_variance(1)
assert_allclose(var, 1.0 / 3, atol=1e-12)
kurt = tukeylambda_kurtosis(1)
assert_allclose(kurt, -1.2, atol=1e-10)
# lambda = 2
var = tukeylambda_variance(2)
assert_allclose(var, 1.0 / 12, atol=1e-12)
kurt = tukeylambda_kurtosis(2)
assert_allclose(kurt, -1.2, atol=1e-10)
def test_tukeylambda_stats_mpmath():
"""Compare results with some values that were computed using mpmath."""
a10 = dict(atol=1e-10, rtol=0)
a12 = dict(atol=1e-12, rtol=0)
data = [
# lambda variance kurtosis
[-0.1, 4.78050217874253547, 3.78559520346454510],
[-0.0649, 4.16428023599895777, 2.52019675947435718],
[-0.05, 3.93672267890775277, 2.13129793057777277],
[-0.001, 3.30128380390964882, 1.21452460083542988],
[0.001, 3.27850775649572176, 1.18560634779287585],
[0.03125, 2.95927803254615800, 0.804487555161819980],
[0.05, 2.78281053405464501, 0.611604043886644327],
[0.0649, 2.65282386754100551, 0.476834119532774540],
[1.2, 0.242153920578588346, -1.23428047169049726],
[10.0, 0.00095237579757703597, 2.37810697355144933],
[20.0, 0.00012195121951131043, 7.37654321002709531],
]
for lam, var_expected, kurt_expected in data:
var = tukeylambda_variance(lam)
assert_allclose(var, var_expected, **a12)
kurt = tukeylambda_kurtosis(lam)
assert_allclose(kurt, kurt_expected, **a10)
# Test with vector arguments (most of the other tests are for single
# values).
lam, var_expected, kurt_expected = zip(*data)
var = tukeylambda_variance(lam)
assert_allclose(var, var_expected, **a12)
kurt = tukeylambda_kurtosis(lam)
assert_allclose(kurt, kurt_expected, **a10)
def test_tukeylambda_stats_invalid():
"""Test values of lambda outside the domains of the functions."""
lam = [-1.0, -0.5]
var = tukeylambda_variance(lam)
assert_equal(var, np.array([np.nan, np.inf]))
lam = [-1.0, -0.25]
kurt = tukeylambda_kurtosis(lam)
assert_equal(kurt, np.array([np.nan, np.inf]))
| en | 0.799724 | Compare results with some known exact formulas. # Some exact values of the Tukey Lambda variance and kurtosis: # lambda var kurtosis # 0 pi**2/3 6/5 (logistic distribution) # 0.5 4 - pi (5/3 - pi/2)/(pi/4 - 1)**2 - 3 # 1 1/3 -6/5 (uniform distribution on (-1,1)) # 2 1/12 -6/5 (uniform distribution on (-1/2, 1/2)) # lambda = 0 # lambda = 0.5 # lambda = 1 # lambda = 2 Compare results with some values that were computed using mpmath. # lambda variance kurtosis # Test with vector arguments (most of the other tests are for single # values). Test values of lambda outside the domains of the functions. | 2.38423 | 2 |
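The exact-value check for the logistic case (lambda = 0) can be reproduced interactively; the snippet below only reuses the private functions the test module already imports.

import numpy as np
from scipy.stats._tukeylambda_stats import tukeylambda_variance, tukeylambda_kurtosis

print(tukeylambda_variance(0.0), np.pi**2 / 3)  # both approximately 3.2899
print(tukeylambda_kurtosis(0.0))                # approximately 1.2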
application/models/event.py | ukayaj620/itevenz | 0 | 6632572 |
from application.app import db
class Event(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), unique=False, nullable=False)
title = db.Column(db.String(255), unique=False, nullable=False)
description = db.Column(db.Text, unique=False, nullable=False)
poster_filename = db.Column(db.String(255), unique=False, nullable=False)
start_date = db.Column(db.Date, unique=False, nullable=False)
end_date = db.Column(db.Date, unique=False, nullable=False)
category = db.Column(db.String(255), unique=False, nullable=False)
start_time = db.Column(db.Time, unique=False, nullable=False)
end_time = db.Column(db.Time, unique=False, nullable=False)
due_date = db.Column(db.Date, nullable=False)
speaker = db.Column(db.String(255), unique=False, nullable=False)
participates = db.relationship('Participation', backref='event', lazy=True)
def __repr__(self):
return '<Event %r>' % self.title
def create(self, title, description, start_date, end_date, category, start_time, end_time, due_date, poster_filename, user_id, speaker):
event = Event(
title=title,
description=description,
start_date=start_date,
end_date=end_date,
category=category,
start_time=start_time,
end_time=end_time,
due_date=due_date,
poster_filename=poster_filename,
user_id=user_id,
speaker=speaker
)
db.session.add(event)
db.session.commit()
def update(self, title, description, start_date, end_date, category, start_time, end_time, due_date, event_id, speaker, poster_filename=None):
event = Event.query.filter_by(id=event_id).first()
event.title = title
event.description = description
event.start_date = start_date
event.start_time = start_time
event.end_date = end_date
event.end_time = end_time
event.due_date = due_date
event.speaker = speaker
event.category = category
if poster_filename is not None:
event.poster_filename = poster_filename
db.session.commit()
def delete(self, event_id):
event = Event.query.filter_by(id=event_id).first()
db.session.delete(event)
db.session.commit()
| none | 1 | 2.355268 | 2 |
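A hypothetical call to the create() helper inside a Flask application context is shown below; the app import and the concrete field values are assumptions, only the keyword arguments follow the model definition above.

from datetime import date, time

from application.app import app  # assumed application factory/module
from application.models.event import Event

with app.app_context():
    Event().create(
        title="IT Meetup",
        description="Monthly community tech event",
        start_date=date(2021, 5, 1),
        end_date=date(2021, 5, 1),
        category="Seminar",
        start_time=time(9, 0),
        end_time=time(12, 0),
        due_date=date(2021, 4, 28),
        poster_filename="poster.png",
        user_id=1,
        speaker="Jane Doe",
    )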
|
test/internet_tests/test_post_submit.py | brikkho-net/windmill | 61 | 6632573 | from windmill.authoring import WindmillTestClient
def test_post_submit():
client = WindmillTestClient(__name__)
client.open(url=u'http://tutorial.getwindmill.com/windmill-unittests/domain_switcher.html')
client.type(text=u'simpletest', name=u'search_theme_form')
client.click(name=u'op')
client.waits.forPageLoad(timeout=20000)
client.waits.forElement(xpath="//div[@id='squeeze']/h1", timeout=8000)
client.asserts.assertJS(js=u"windmill.testWin().document.title == 'Search | drupal.org'")
def test_get_submit():
client = WindmillTestClient(__name__)
client.open(url=u'http://tutorial.getwindmill.com/windmill-unittests/domain_switcher.html')
client.type(text=u'simpletest', name=u'q')
client.click(name=u'btnG')
client.waits.forPageLoad(timeout=20000)
    client.waits.forElement(link=u'SimpleTest - Unit Testing for PHP', timeout=u'8000') | none | 1 | 2.531265 | 3
|
tests/db/test_client.py | Veritaris/fastapi_contrib | 504 | 6632574 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from fastapi import FastAPI
from fastapi_contrib.db.client import MongoDBClient
from fastapi_contrib.db.models import MongoDBModel, MongoDBTimeStampedModel
from tests.mock import MongoDBMock
from tests.utils import override_settings, AsyncMock, AsyncIterator
from unittest.mock import patch
app = FastAPI()
app.mongodb = MongoDBMock()
class Model(MongoDBModel):
class Meta:
collection = "collection"
@override_settings(fastapi_app="tests.db.test_client.app")
def test_mongodbclient_is_singleton():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
assert client == MongoDBClient()
@override_settings(fastapi_app="tests.db.test_client.app")
def test_get_collection():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
collection = client.get_collection("collection")
assert collection.name == "collection"
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_insert():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
insert_result = await client.insert(model)
assert insert_result.inserted_id == model.id
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_count():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
count = await client.count(model, id=1)
assert count == 1
# Test whether it correctly handles filter by non-id
count = await client.count(model, field="value")
assert count == 1
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_delete():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
delete_result = await client.delete(model, id=1)
assert delete_result.raw_result == {}
# Test whether it correctly handles filter by non-id
delete_result = await client.delete(model, field="value")
assert delete_result.raw_result == {}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_update_one():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
update_result = await client.update_one(
model, filter_kwargs={"id": 1}, id=2
)
assert update_result.raw_result == {}
# Test whether it correctly handles filter by non-id
update_result = await client.update_one(
model, filter_kwargs={"field": "value"}, field="value2"
)
assert update_result.raw_result == {}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_update_one_params():
with patch(
'fastapi_contrib.db.client.MongoDBClient.update_one',
new_callable=AsyncMock) as mock_update:
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
client = MongoDBClient()
model = Model()
await model.update_one(
filter_kwargs={"id": 1}, kwargs={'$set': {'bla': 1}}
)
mock_update.mock.assert_called_with(
client,
Model,
filter_kwargs={'id': 1},
kwargs={'$set': {'bla': 1}}
)
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_update_many_params():
with patch(
'fastapi_contrib.db.client.MongoDBClient.update_many',
new_callable=AsyncMock) as mock_update:
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
client = MongoDBClient()
model = Model()
await model.update_many(
filter_kwargs={"id": 1}, kwargs={'$set': {'bla': 1}}
)
mock_update.mock.assert_called_with(
client,
Model,
filter_kwargs={'id': 1}, kwargs={'$set': {'bla': 1}}
)
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_update_many():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
update_result = await client.update_many(
model, filter_kwargs={"id": 1}, id=2
)
assert update_result.raw_result == {}
# Test whether it correctly handles filter by non-id
update_result = await client.update_many(
model, filter_kwargs={"field": "value"}, field="value2"
)
assert update_result.raw_result == {}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_get():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
_dict = await client.get(model, id=1)
assert _dict == {"_id": 1}
# Test whether it correctly handles filter by non-id
_dict = await client.get(model, field="value")
assert _dict == {"_id": 1}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_list():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
cursor = client.list(model, id=1)
assert cursor
# Test whether it correctly handles filter by non-id
_dict = client.list(model, field="value")
assert _dict
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_list_with_sort():
with patch('fastapi_contrib.db.client.MongoDBClient.list') as mock_list:
mock_list.return_value = AsyncIterator([])
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
model = Model()
await model.list(model, _limit=0, _offset=0, _sort=[('i', -1)])
mock_list.assert_called_with(
Model, _limit=0, _offset=0, _sort=[('i', -1)]
)
await model.list(model)
mock_list.assert_called_with(Model, _limit=0, _offset=0, _sort=None)
| en | 0.636026 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Test whether it correctly handles filter by non-id # Test whether it correctly handles filter by non-id # Test whether it correctly handles filter by non-id # Test whether it correctly handles filter by non-id # Test whether it correctly handles filter by non-id # Test whether it correctly handles filter by non-id | 2.134696 | 2 |
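The singleton reset that opens nearly every test above could be centralized; the fixture below is a refactoring sketch, not code from fastapi_contrib.

import pytest

from fastapi_contrib.db.client import MongoDBClient

@pytest.fixture
def mongo_client():
    # drop the cached singleton so each test builds a fresh client
    MongoDBClient.__instance = None
    MongoDBClient._MongoDBClient__instance = None
    return MongoDBClient()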
setup.py | holmosapien/PyGerbv | 0 | 6632575 | from setuptools import setup, find_packages
import os, sys
stable = os.environ.get('PYGERBV_STABLE', "YES")
branch_name = os.environ.get('PYGERBV_BRANCH', None)
if stable == "YES":
version_format = '{tag}'
classifiers = [
'Development Status :: 5 - Production/Stable',
'License :: Other/Proprietary License',
'Programming Language :: Python :: 3.6',
    ]
elif branch_name:
version_format = '{tag}b{commitcount}+%s' % branch_name
classifiers = [
'Development Status :: 4 - Beta',
'License :: Other/Proprietary License',
'Programming Language :: Python :: 3.6',
    ]
else:
version_format = '{tag}b{commitcount}+{gitsha}'
classifiers = [
'Development Status :: 4 - Beta',
'License :: Other/Proprietary License',
'Programming Language :: Python :: 3.6',
    ]
install_requires = []
# Add enum34 **ONLY** if necessary -- installing in 3.5+ will break things
if (sys.version_info.major < 3) or (sys.version_info.major == 3 and sys.version_info.minor < 4):
install_requires.append('enum34')
setup(
name = 'pygerbv',
version_format=version_format,
description = 'Python wrapper for libgerbv',
url = 'https://github.com/holmosapien/PyGerbv',
author = 'Elephantec',
author_email = '<EMAIL>',
license = 'Other/Proprietary License',
classifiers = classifiers,
packages = find_packages(),
install_requires = install_requires,
extras_require = {},
package_data = {},
data_files = [],
entry_points = {},
setup_requires=['setuptools-git-version']
)
| en | 0.226674 | # Add enum34 **ONLY** if necessary -- installing in 3.5+ will break things | 1.769689 | 2 |
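The environment-variable branching above selects one of three version templates; the standalone function below merely restates that selection so it can be checked in isolation (an illustration, not part of the package).

def pick_version_format(stable, branch):
    # mirrors the PYGERBV_STABLE / PYGERBV_BRANCH logic in setup.py
    if stable == "YES":
        return "{tag}"
    if branch:
        return "{tag}b{commitcount}+%s" % branch
    return "{tag}b{commitcount}+{gitsha}"

assert pick_version_format("YES", None) == "{tag}"
assert pick_version_format("NO", "dev") == "{tag}b{commitcount}+dev"
assert pick_version_format("NO", None) == "{tag}b{commitcount}+{gitsha}"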
drhard/star/prepare_hardneg.py | tonellotto/drhard | 69 | 6632576 |
import sys
sys.path.append('./')
import os
import json
import random
import argparse
import faiss
import logging
import numpy as np
import torch
import subprocess
from transformers import RobertaConfig
from tqdm import tqdm
from model import RobertaDot
from dataset import load_rank, load_rel
from retrieve_utils import (
construct_flatindex_from_embeddings,
index_retrieve, convert_index_to_gpu
)
from star.inference import doc_inference, query_inference
logger = logging.Logger(__name__)
def retrieve_top(args):
config = RobertaConfig.from_pretrained(args.model_path, gradient_checkpointing=False)
model = RobertaDot.from_pretrained(args.model_path, config=config)
output_embedding_size = model.output_embedding_size
model = model.to(args.device)
query_inference(model, args, output_embedding_size)
doc_inference(model, args, output_embedding_size)
model = None
torch.cuda.empty_cache()
doc_embeddings = np.memmap(args.doc_memmap_path,
dtype=np.float32, mode="r")
doc_ids = np.memmap(args.docid_memmap_path,
dtype=np.int32, mode="r")
doc_embeddings = doc_embeddings.reshape(-1, output_embedding_size)
query_embeddings = np.memmap(args.query_memmap_path,
dtype=np.float32, mode="r")
query_embeddings = query_embeddings.reshape(-1, output_embedding_size)
query_ids = np.memmap(args.queryids_memmap_path,
dtype=np.int32, mode="r")
index = construct_flatindex_from_embeddings(doc_embeddings, doc_ids)
if torch.cuda.is_available() and not args.not_faiss_cuda:
index = convert_index_to_gpu(index, list(range(args.n_gpu)), False)
else:
faiss.omp_set_num_threads(32)
nearest_neighbors = index_retrieve(index, query_embeddings, args.topk + 10, batch=320)
with open(args.output_rank_file, 'w') as outputfile:
for qid, neighbors in zip(query_ids, nearest_neighbors):
for idx, pid in enumerate(neighbors):
outputfile.write(f"{qid}\t{pid}\t{idx+1}\n")
def gen_static_hardnegs(args):
rank_dict = load_rank(args.output_rank_file)
rel_dict = load_rel(args.label_path)
query_ids_set = sorted(rel_dict.keys())
for k in tqdm(query_ids_set, desc="gen hard negs"):
v = rank_dict[k]
v = list(filter(lambda x:x not in rel_dict[k], v))
v = v[:args.topk]
assert len(v) == args.topk
rank_dict[k] = v
json.dump(rank_dict, open(args.output_hard_path, 'w'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_type", choices=["passage", 'doc'], type=str, required=True)
parser.add_argument("--max_query_length", type=int, default=32)
parser.add_argument("--max_doc_length", type=int, default=512)
parser.add_argument("--eval_batch_size", type=int, default=128)
parser.add_argument("--mode", type=str, choices=["train", "dev", "test", "lead"], required=True)
parser.add_argument("--topk", type=int, default=200)
parser.add_argument("--not_faiss_cuda", action="store_true")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.preprocess_dir = f"./data/{args.data_type}/preprocess"
args.model_path = f"./data/{args.data_type}/warmup"
args.output_dir = f"./data/{args.data_type}/warmup_retrieve"
args.label_path = os.path.join(args.preprocess_dir, f"{args.mode}-qrel.tsv")
args.query_memmap_path = os.path.join(args.output_dir, f"{args.mode}-query.memmap")
args.queryids_memmap_path = os.path.join(args.output_dir, f"{args.mode}-query-id.memmap")
args.doc_memmap_path = os.path.join(args.output_dir, "passages.memmap")
args.docid_memmap_path = os.path.join(args.output_dir, "passages-id.memmap")
args.output_rank_file = os.path.join(args.output_dir, f"{args.mode}.rank.tsv")
args.output_hard_path = os.path.join(args.output_dir, "hard.json")
logger.info(args)
os.makedirs(args.output_dir, exist_ok=True)
retrieve_top(args)
gen_static_hardnegs(args)
results = subprocess.check_output(["python", "msmarco_eval.py", args.label_path, args.output_rank_file])
    print(results) | none | 1 | 1.938685 | 2
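Downstream training code reads the hard.json written by gen_static_hardnegs; the loader below is a hypothetical consumer that samples one hard negative per query (the path assumes the passage setting).

import json
import random

with open("./data/passage/warmup_retrieve/hard.json") as f:
    hard = json.load(f)  # qid (as a string key) -> list of top-k non-relevant pids

qid, negatives = next(iter(hard.items()))
print(qid, random.choice(negatives))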
|
tests/test_apk.py | pytxxy/creditutils | 0 | 6632577 | # -*- coding:UTF-8 -*-
'''
Created on 2013-10-28
@author: work_cfh
'''
import unittest
import creditutils.apk_util as apk
import pprint
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testName(self):
pass
def test_extract_apk_info():
apk_path = r'D:\temp\83860_544781eb-1c79-4b3b-a49d-89ff124c220d.apk'
# apk_path = r'D:\temp\qing.apk'
exePath = r'D:\workspace\ApkInstalling\build\exe.win32-2.7'
apkInfoPath = r'D:\temp\apkInfo'
extractor = apk.Extractor(exePath, apkInfoPath)
apkInfo = extractor.extract_apk_info(apk_path)
pprint.pprint(apkInfo)
def test_sign_apk():
keystore = r'D:\auto_build\pytxxy\projects\pytxxy\pytxxy\pycreditKeystore'
storepass = '<PASSWORD>'
store_alias = 'pycreditKeystoreAlias'
signer = apk.ApkSigner(keystore, storepass, store_alias)
src_path = r'D:\programs\shieldpy_v4\upload\3.0.0beta_p_12-294-20170401_sec.apk'
dst_path = r'D:\programs\shieldpy_v4\upload\3.0.0beta_p_12-294-20170401_sec_signed.apk'
signer.sign(src_path, dst_path)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
# unittest.main()
# test_extract_apk_info()
test_sign_apk()
print('to the end!')
| # -*- coding:UTF-8 -*-
'''
Created on 2013-10-28
@author: work_cfh
'''
import unittest
import creditutils.apk_util as apk
import pprint
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testName(self):
pass
def test_extract_apk_info():
apk_path = r'D:\temp\83860_544781eb-1c79-4b3b-a49d-89ff124c220d.apk'
# apk_path = r'D:\temp\qing.apk'
exePath = r'D:\workspace\ApkInstalling\build\exe.win32-2.7'
apkInfoPath = r'D:\temp\apkInfo'
extractor = apk.Extractor(exePath, apkInfoPath)
apkInfo = extractor.extract_apk_info(apk_path)
pprint.pprint(apkInfo)
def test_sign_apk():
keystore = r'D:\auto_build\pytxxy\projects\pytxxy\pytxxy\pycreditKeystore'
storepass = '<PASSWORD>'
store_alias = 'pycreditKeystoreAlias'
signer = apk.ApkSigner(keystore, storepass, store_alias)
src_path = r'D:\programs\shieldpy_v4\upload\3.0.0beta_p_12-294-20170401_sec.apk'
dst_path = r'D:\programs\shieldpy_v4\upload\3.0.0beta_p_12-294-20170401_sec_signed.apk'
signer.sign(src_path, dst_path)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
# unittest.main()
# test_extract_apk_info()
test_sign_apk()
print('to the end!')
| zh | 0.32762 | # -*- coding:UTF-8 -*- Created on 2013年10月28日 @author: work_cfh # apk_path = r'D:\temp\qing.apk' # import sys;sys.argv = ['', 'Test.testName'] # unittest.main() # test_extract_apk_info() | 2.244165 | 2 |
seqal/data.py | tech-sketch/SeqAL | 0 | 6632578 | <filename>seqal/data.py
import functools
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from flair.data import Span
@dataclass
class Entity:
"""Entity with predicted information"""
id: int # Entity id in the same sentence
sent_id: int # Sentence id
span: Span # Entity span
cluster: Optional[int] = None # Cluster number
@property
def vector(self) -> torch.Tensor:
"""Get entity embeddings"""
embeddings = [token.embedding for token in self.span.tokens]
if not any([e.nelement() != 0 for e in embeddings]):
raise TypeError(
"Tokens have no embeddings. Make sure embedding sentence first."
)
return torch.mean(torch.stack(embeddings), dim=0)
@property
def label(self) -> str:
"""Get entity label"""
return self.span.tag
@property
def text(self) -> str:
"""Get entity text"""
return self.span.text
class Entities:
"""Entity list"""
def __init__(self):
self.entities = []
def add(self, entity: Entity):
"""Add entity to list"""
self.entities.append(entity)
@functools.cached_property
def group_by_sentence(self) -> Dict[int, List[Entity]]:
"""Group entities by sentence"""
entities_per_sentence = defaultdict(list)
for entity in self.entities:
entities_per_sentence[entity.sent_id].append(entity)
return entities_per_sentence
@functools.cached_property
def group_by_label(self) -> Dict[str, List[Entity]]:
"""Group entities by label"""
entities_per_label = defaultdict(list)
for entity in self.entities:
entities_per_label[entity.label].append(entity)
return entities_per_label
@functools.cached_property
    def group_by_cluster(self) -> Dict[int, List[Entity]]:
"""Group entities by cluster"""
entities_per_cluster = defaultdict(list)
for entity in self.entities:
entities_per_cluster[entity.cluster].append(entity)
return entities_per_cluster
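# --- Usage sketch (editor's addition, not part of the original module). It assumes
# a flair ~0.8-style API where `Sentence.get_spans("ner")` returns `Span` objects
# exposing `.tag`, and that sentences are embedded so `Entity.vector` can average
# token embeddings; the model names below are illustrative, not prescriptive.
def _example_build_entities(sentences):
    from flair.embeddings import TransformerWordEmbeddings
    from flair.models import SequenceTagger
    tagger = SequenceTagger.load("ner")       # pretrained NER tagger (downloads a model)
    embedder = TransformerWordEmbeddings()    # token embeddings backing Entity.vector
    entities = Entities()
    for sent_id, sentence in enumerate(sentences):
        tagger.predict(sentence)              # attach NER spans to the sentence
        embedder.embed(sentence)              # attach token embeddings
        for ent_id, span in enumerate(sentence.get_spans("ner")):
            entities.add(Entity(id=ent_id, sent_id=sent_id, span=span))
    return entities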
| <filename>seqal/data.py
import functools
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from flair.data import Span
@dataclass
class Entity:
"""Entity with predicted information"""
id: int # Entity id in the same sentence
sent_id: int # Sentence id
span: Span # Entity span
cluster: Optional[int] = None # Cluster number
@property
def vector(self) -> torch.Tensor:
"""Get entity embeddings"""
embeddings = [token.embedding for token in self.span.tokens]
if not any([e.nelement() != 0 for e in embeddings]):
raise TypeError(
"Tokens have no embeddings. Make sure embedding sentence first."
)
return torch.mean(torch.stack(embeddings), dim=0)
@property
def label(self) -> str:
"""Get entity label"""
return self.span.tag
@property
def text(self) -> str:
"""Get entity text"""
return self.span.text
class Entities:
"""Entity list"""
def __init__(self):
self.entities = []
def add(self, entity: Entity):
"""Add entity to list"""
self.entities.append(entity)
@functools.cached_property
def group_by_sentence(self) -> Dict[int, List[Entity]]:
"""Group entities by sentence"""
entities_per_sentence = defaultdict(list)
for entity in self.entities:
entities_per_sentence[entity.sent_id].append(entity)
return entities_per_sentence
@functools.cached_property
def group_by_label(self) -> Dict[str, List[Entity]]:
"""Group entities by label"""
entities_per_label = defaultdict(list)
for entity in self.entities:
entities_per_label[entity.label].append(entity)
return entities_per_label
@functools.cached_property
    def group_by_cluster(self) -> Dict[int, List[Entity]]:
"""Group entities by cluster"""
entities_per_cluster = defaultdict(list)
for entity in self.entities:
entities_per_cluster[entity.cluster].append(entity)
return entities_per_cluster
| en | 0.685299 | Entity with predicted information # Entity id in the same sentence # Sentence id # Entity span # Cluster number Get entity embeddings Get entity label Get entity text Entity list Add entity to list Group entities by sentence Group entities by label Group entities by cluster | 2.496592 | 2 |
tensorflow_federated/python/aggregators/primitives.py | truthiswill/federated | 1 | 6632579 | <reponame>truthiswill/federated
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A package of primitive (stateless) aggregations."""
import collections
import attr
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.federated_context import value_impl
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
def _validate_value_on_clients(value):
py_typecheck.check_type(value, value_impl.Value)
py_typecheck.check_type(value.type_signature, computation_types.FederatedType)
if value.type_signature.placement is not placements.CLIENTS:
raise TypeError(
'`value` argument must be a tff.Value placed at CLIENTS. Got: {!s}'
.format(value.type_signature))
def _validate_dtype_is_min_max_compatible(dtype):
if not (dtype.is_integer or dtype.is_floating):
raise TypeError(
f'Unsupported dtype. The dtype for min and max must be either an '
        f'integer or floating. Got: {dtype}.')
def _federated_reduce_with_func(value, tf_func, zeros):
  """Applies `tf_func` to accumulated `value`s.
This utility provides a generic aggregation for accumulating a value and
applying a simple aggregation (like minimum or maximum aggregations).
Args:
value: A `tff.Value` placed on the `tff.CLIENTS`.
tf_func: A function to be applied to the accumulated values. Must be a
binary operation where both parameters are of type `U` and the return type
is also `U`.
zeros: The zero of the same type as `value` in the algebra of reduction
operators.
Returns:
A representation on the `tff.SERVER` of the result of aggregating `value`.
"""
value_type = value.type_signature.member
@computations.tf_computation(value_type, value_type)
def accumulate(current, value):
return tf.nest.map_structure(tf_func, current, value)
@computations.tf_computation(value_type)
def report(value):
return value
return intrinsics.federated_aggregate(value, zeros, accumulate, accumulate,
report)
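# Note (editor's addition): `accumulate` is passed as both the accumulate and the
# merge function above; this is valid here because the element-wise reduction
# operators used with it (`tf.minimum`/`tf.maximum`) are associative and commutative.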
def _initial_values(initial_value_fn, member_type):
"""Create a nested structure of initial values for the reduction.
Args:
initial_value_fn: A function that maps a tff.TensorType to a specific value
constant for initialization.
member_type: A `tff.Type` representing the member components of the
federated type.
Returns:
A function of the result of reducing a value with no constituents.
"""
@computations.tf_computation
def zeros_fn():
if member_type.is_struct():
structure.map_structure(
lambda v: _validate_dtype_is_min_max_compatible(v.dtype), member_type)
return structure.map_structure(
lambda v: tf.fill(v.shape, value=initial_value_fn(v)), member_type)
_validate_dtype_is_min_max_compatible(member_type.dtype)
return tf.fill(member_type.shape, value=initial_value_fn(member_type))
return zeros_fn()
def federated_min(value):
"""Computes the minimum at `tff.SERVER` of a `value` placed at `tff.CLIENTS`.
The minimum is computed element-wise, for each scalar and every scalar in a
tensor contained in `value`.
In the degenerate scenario that the `value` is aggregated over an empty set
of `tff.CLIENTS`, the tensor constituents of the result are set to the
maximum of the underlying numeric data type.
Args:
value: A value of a TFF federated type placed at the tff.CLIENTS.
Returns:
A representation of the min of the member constituents of `value` placed at
`tff.SERVER`.
"""
_validate_value_on_clients(value)
member_type = value.type_signature.member
# Explicit cast because v.dtype.max returns a Python constant, which could be
# implicitly converted to a tensor of different dtype by TensorFlow.
zeros = _initial_values(lambda v: tf.cast(v.dtype.max, v.dtype), member_type)
return _federated_reduce_with_func(value, tf.minimum, zeros)
def federated_max(value):
"""Computes the maximum at `tff.SERVER` of a `value` placed at `tff.CLIENTS`.
The maximum is computed element-wise, for each scalar and every scalar in a
tensor contained in `value`.
In the degenerate scenario that the `value` is aggregated over an empty set
of `tff.CLIENTS`, the tensor constituents of the result are set to the
minimum of the underlying numeric data type.
Args:
value: A value of a TFF federated type placed at the tff.CLIENTS.
Returns:
    A representation of the max of the member constituents of `value` placed at
`tff.SERVER`.
"""
_validate_value_on_clients(value)
member_type = value.type_signature.member
# Explicit cast because v.dtype.min returns a Python constant, which could be
# implicitly converted to a tensor of different dtype by TensorFlow.
zeros = _initial_values(lambda v: tf.cast(v.dtype.min, v.dtype), member_type)
return _federated_reduce_with_func(value, tf.maximum, zeros)
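# --- Usage sketch (editor's addition): wiring `federated_min`/`federated_max` into
# a federated computation. The float32 member type and the example inputs mentioned
# below are illustrative only.
@computations.federated_computation(
    computation_types.FederatedType(tf.float32, placements.CLIENTS))
def _example_min_max(client_values):
  return federated_min(client_values), federated_max(client_values)
# With a local execution context installed, `_example_min_max([1.0, 5.0, -2.0])`
# should evaluate to `(-2.0, 5.0)`.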
@attr.s
class _Samples(object):
"""Class representing internal sample data structure.
The class contains two parts, `accumulators` and `rands`, that are parallel
lists (e.g. the i-th index in one corresponds to the i-th index in the other).
These two lists are used to sample from the accumulators with equal
probability.
"""
accumulators = attr.ib()
rands = attr.ib()
def _zeros_for_sample(member_type):
"""Create an empty nested structure for the sample aggregation.
Args:
member_type: A `tff.Type` representing the member components of the
federated type.
Returns:
    An empty `_Samples` accumulator used to initialize the sample aggregation.
"""
@computations.tf_computation
def accumlator_type_fn():
"""Gets the type for the accumulators."""
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if member_type.is_struct():
a = structure.map_structure(
lambda v: tf.zeros([0] + v.shape.dims, v.dtype), member_type)
return _Samples(structure.to_odict(a, True), tf.zeros([0], tf.float32))
if member_type.shape:
s = [0] + member_type.shape.dims
return _Samples(tf.zeros(s, member_type.dtype), tf.zeros([0], tf.float32))
return accumlator_type_fn()
def _get_accumulator_type(member_type):
"""Constructs a `tff.Type` for the accumulator in sample aggregation.
Args:
member_type: A `tff.Type` representing the member components of the
federated type.
Returns:
The `tff.StructType` associated with the accumulator. The tuple contains
two parts, `accumulators` and `rands`, that are parallel lists (e.g. the
i-th index in one corresponds to the i-th index in the other). These two
lists are used to sample from the accumulators with equal probability.
"""
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if member_type.is_struct():
a = structure.map_structure(
lambda v: computation_types.TensorType(v.dtype, [None] + v.shape.dims),
member_type)
return computation_types.StructType(
collections.OrderedDict({
'accumulators':
computation_types.StructType(structure.to_odict(a, True)),
'rands':
computation_types.TensorType(tf.float32, shape=[None]),
}))
return computation_types.StructType(
collections.OrderedDict({
'accumulators':
computation_types.TensorType(
member_type.dtype, shape=[None] + member_type.shape.dims),
'rands':
computation_types.TensorType(tf.float32, shape=[None]),
}))
def federated_sample(value, max_num_samples=100):
"""Aggregation to produce uniform sample of at most `max_num_samples` values.
Each client value is assigned a random number when it is examined during each
accumulation. Each accumulate and merge only keeps the top N values based
on the random number. Report drops the random numbers and only returns the
at most N values sampled from the accumulated client values using standard
reservoir sampling (https://en.wikipedia.org/wiki/Reservoir_sampling), where
N is user provided `max_num_samples`.
Args:
value: A `tff.Value` placed on the `tff.CLIENTS`.
max_num_samples: The maximum number of samples to collect from client
values. If fewer clients than the defined max sample size participated in
the round of computation, the actual number of samples will equal the
number of clients in the round.
Returns:
At most `max_num_samples` samples of the value from the `tff.CLIENTS`.
"""
_validate_value_on_clients(value)
member_type = value.type_signature.member
accumulator_type = _get_accumulator_type(member_type)
zeros = _zeros_for_sample(member_type)
@tf.function
def fed_concat_expand_dims(a, b):
b = tf.expand_dims(b, axis=0)
return tf.concat([a, b], axis=0)
@tf.function
def fed_concat(a, b):
return tf.concat([a, b], axis=0)
@tf.function
def fed_gather(value, indices):
return tf.gather(value, indices)
def apply_sampling(accumulators, rands):
size = tf.shape(rands)[0]
k = tf.minimum(size, max_num_samples)
indices = tf.math.top_k(rands, k=k).indices
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if member_type.is_struct():
return structure.map_structure(lambda v: fed_gather(v, indices),
accumulators), fed_gather(rands, indices)
return fed_gather(accumulators, indices), fed_gather(rands, indices)
@computations.tf_computation(accumulator_type, value.type_signature.member)
def accumulate(current, value):
"""Accumulates samples through concatenation."""
rands = fed_concat_expand_dims(current.rands, tf.random.uniform(shape=()))
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if member_type.is_struct():
accumulators = structure.map_structure(
fed_concat_expand_dims, _ensure_structure(current.accumulators),
_ensure_structure(value))
else:
accumulators = fed_concat_expand_dims(current.accumulators, value)
accumulators, rands = apply_sampling(accumulators, rands)
return _Samples(accumulators, rands)
@computations.tf_computation(accumulator_type, accumulator_type)
def merge(a, b):
"""Merges accumulators through concatenation."""
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if accumulator_type.is_struct():
samples = structure.map_structure(fed_concat, _ensure_structure(a),
_ensure_structure(b))
else:
samples = fed_concat(a, b)
accumulators, rands = apply_sampling(samples.accumulators, samples.rands)
return _Samples(accumulators, rands)
@computations.tf_computation(accumulator_type)
def report(value):
return value.accumulators
return intrinsics.federated_aggregate(value, zeros, accumulate, merge, report)
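# --- Illustrative sketch (editor's addition): the "random key + top-k" reservoir
# idea used by `federated_sample`, reproduced outside TFF with NumPy. The function
# name and seed below are hypothetical, not part of the library API.
def _reservoir_sample_sketch(values, max_num_samples, seed=0):
  import numpy as np
  rng = np.random.default_rng(seed)
  keys = rng.random(len(values))              # one uniform random key per value
  keep = np.argsort(-keys)[:max_num_samples]  # keep the values with the largest keys
  return [values[i] for i in keep]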
def _ensure_structure(obj):
return structure.from_container(obj, True)
# Lower precision types are not supported to avoid potential hard to discover
# numerical issues in conversion to/from format compatible with secure sum.
_SECURE_QUANTIZED_SUM_ALLOWED_DTYPES = (tf.int32, tf.int64, tf.float32,
tf.float64)
# The largest integer value provided to federated_secure_sum_bitwidth operator.
_SECAGG_MAX = 2**32 - 1
class BoundsDifferentTypesError(TypeError):
def __init__(self, lower_bound, upper_bound):
message = (f'Both lower_bound and upper_bound must be either federated '
f'values or Python constants. Found: type(lower_bound): '
f'{type(lower_bound)}, type(upper_bound): {type(upper_bound)}')
super().__init__(message)
class BoundsDifferentSignaturesError(TypeError):
def __init__(self, lower_bound, upper_bound):
message = (f'Provided lower_bound and upper_bound must have the same '
f'type_signature. Found: lower_bound signature: '
f'{lower_bound.type_signature}, upper_bound signature: '
f'{upper_bound.type_signature}.')
super().__init__(message)
class BoundsNotPlacedAtServerError(TypeError):
def __init__(self, placement):
message = (f'Provided lower_bound and upper_bound must be placed at '
f'tff.SERVER. Placement found: {placement}')
super().__init__(message)
class StructuredBoundsTypeMismatchError(TypeError):
def __init__(self, value_type, bounds_type):
message = (f'If bounds are specified as structures (not scalars), the '
f'structures must match the structure of provided client_value, '
f'with identical dtypes, but not necessarily shapes. Found: '
f'client_value type: {value_type}, bounds type: {bounds_type}.')
super().__init__(message)
class ScalarBoundStructValueDTypeError(TypeError):
def __init__(self, value_type, bounds_type):
message = (f'If scalar bounds are provided, all parts of client_value must '
f'be of matching dtype. Found: client_value type: {value_type}, '
f'bounds type: {bounds_type}.')
super().__init__(message)
class ScalarBoundSimpleValueDTypeError(TypeError):
def __init__(self, value_type, bounds_type):
message = (f'Bounds must have the same dtype as client_value. Found: '
f'client_value type: {value_type}, bounds type: {bounds_type}.')
super().__init__(message)
class UnsupportedDTypeError(TypeError):
def __init__(self, dtype):
message = (f'Value is of unsupported dtype {dtype}. Currently supported '
f'types are {_SECURE_QUANTIZED_SUM_ALLOWED_DTYPES}.')
super().__init__(message)
def _check_secure_quantized_sum_dtype(dtype):
if dtype not in _SECURE_QUANTIZED_SUM_ALLOWED_DTYPES:
raise UnsupportedDTypeError(dtype)
# pylint: disable=g-doc-exception
def _normalize_secure_quantized_sum_args(client_value, lower_bound,
upper_bound):
"""Normalizes inputs to `secure_quantized_sum` method.
  Validates the expected structure of arguments, as documented in the docstring
of `secure_quantized_sum` method. The arguments provided are also returned,
possibly normalized to meet those expectations. In particular, if
`lower_bound` and `upper_bound` are Python constants, these are converted to
`tff.SERVER`-placed federated values.
Args:
client_value: A `tff.Value` placed at `tff.CLIENTS`.
lower_bound: The smallest possible value for `client_value` (inclusive).
Values smaller than this bound will be clipped. Must be either a scalar or
a nested structure of scalars, matching the structure of `client_value`.
Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`,
with dtype matching that of `client_value`.
upper_bound: The largest possible value for `client_value` (inclusive).
Values greater than this bound will be clipped. Must be either a scalar or
a nested structure of scalars, matching the structure of `client_value`.
Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`,
with dtype matching that of `client_value`.
Returns:
Normalized `(client_value, lower_bound, upper_bound)` tuple.
"""
# Validation of client_value.
_validate_value_on_clients(client_value)
client_value_member = client_value.type_signature.member
if client_value.type_signature.member.is_struct():
dtypes = [v.dtype for v in structure.flatten(client_value_member)]
for dtype in dtypes:
_check_secure_quantized_sum_dtype(dtype)
else:
dtypes = client_value_member.dtype
_check_secure_quantized_sum_dtype(dtypes)
# Validation of bounds.
if isinstance(lower_bound, value_impl.Value) != isinstance(
upper_bound, value_impl.Value):
raise BoundsDifferentTypesError(lower_bound, upper_bound)
elif not isinstance(lower_bound, value_impl.Value):
# Normalization of bounds to federated values.
lower_bound = intrinsics.federated_value(lower_bound, placements.SERVER)
upper_bound = intrinsics.federated_value(upper_bound, placements.SERVER)
if lower_bound.type_signature != upper_bound.type_signature:
raise BoundsDifferentSignaturesError(lower_bound, upper_bound)
# The remaining type checks only use lower_bound as the upper_bound has
  # identical type_signature.
if lower_bound.type_signature.placement != placements.SERVER:
raise BoundsNotPlacedAtServerError(lower_bound.type_signature.placement)
# Validation of client_value and bounds compatibility.
bound_member = lower_bound.type_signature.member
if bound_member.is_struct():
if not client_value_member.is_struct() or (structure.map_structure(
lambda v: v.dtype, bound_member) != structure.map_structure(
lambda v: v.dtype, client_value_member)):
raise StructuredBoundsTypeMismatchError(client_value_member, bound_member)
else:
# If bounds are scalar, must be compatible with all tensors in client_value.
if client_value_member.is_struct():
if len(set(dtypes)) > 1 or (bound_member.dtype != dtypes[0]):
raise ScalarBoundStructValueDTypeError(client_value_member,
bound_member)
else:
if bound_member.dtype != client_value_member.dtype:
raise ScalarBoundSimpleValueDTypeError(client_value_member,
bound_member)
return client_value, lower_bound, upper_bound
@tf.function
def _client_tensor_shift_for_secure_sum(value, lower_bound, upper_bound):
"""Mapping to be applied to every tensor before secure sum.
This operation is performed on `tff.CLIENTS` to prepare values to format
compatible with `tff.federated_secure_sum_bitwidth` operator.
This clips elements of `value` to `[lower_bound, upper_bound]`, shifts and
scales it to range `[0, 2**32-1]` and casts it to `tf.int64`. The specific
operation depends on dtype of `value`.
Args:
value: A Tensor to be shifted for compatibility with
`federated_secure_sum_bitwidth`.
lower_bound: The smallest value expected in `value`.
upper_bound: The largest value expected in `value`.
Returns:
Shifted value of dtype `tf.int64`.
"""
tf.Assert(lower_bound <= upper_bound, [lower_bound, upper_bound])
if value.dtype == tf.int32:
clipped_val = tf.clip_by_value(value, lower_bound, upper_bound)
# Cast BEFORE shift in order to avoid overflow if full int32 range is used.
return tf.cast(clipped_val, tf.int64) - tf.cast(lower_bound, tf.int64)
elif value.dtype == tf.int64:
clipped_val = tf.clip_by_value(value, lower_bound, upper_bound)
range_span = upper_bound - lower_bound
scale_factor = tf.math.floordiv(range_span, _SECAGG_MAX) + 1
shifted_value = tf.cond(
scale_factor > 1,
lambda: tf.math.floordiv(clipped_val - lower_bound, scale_factor),
lambda: clipped_val - lower_bound)
return shifted_value
else:
# This should be ensured earlier and thus not user-facing.
assert value.dtype in [tf.float32, tf.float64]
clipped_value = tf.clip_by_value(value, lower_bound, upper_bound)
# Prevent NaNs if `lower_bound` and `upper_bound` are the same.
scale_factor = tf.math.divide_no_nan(
tf.constant(_SECAGG_MAX, tf.float64),
tf.cast(upper_bound - lower_bound, tf.float64))
scaled_value = tf.cast(clipped_value, tf.float64) * scale_factor
# Perform deterministic rounding here, which may introduce bias as every
# value may be rounded in the same direction for some input data.
rounded_value = tf.saturate_cast(tf.round(scaled_value), tf.int64)
# Perform shift in integer space to minimize float precision errors.
shifted_value = rounded_value - tf.saturate_cast(
tf.round(tf.cast(lower_bound, tf.float64) * scale_factor), tf.int64)
# Clip to expected range in case of numerical stability issues.
quantized_value = tf.clip_by_value(shifted_value,
tf.constant(0, dtype=tf.int64),
tf.constant(_SECAGG_MAX, dtype=tf.int64))
return quantized_value
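# Worked example (editor's addition, illustrative numbers): for a tf.float32 value of
# 2.5 with lower_bound=0.0 and upper_bound=10.0, scale_factor = (2**32 - 1) / 10
# ≈ 4.295e8, so the quantized integer is round(2.5 * 4.295e8) ≈ 1073741824, i.e.
# roughly a quarter of the [0, 2**32 - 1] range.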
@tf.function
def _server_tensor_shift_for_secure_sum(num_summands, value, lower_bound,
upper_bound, output_dtype):
"""Mapping to be applied to every tensor after secure sum.
This operation is performed on `tff.SERVER` to dequantize outputs of the
`tff.federated_secure_sum_bitwidth` operator.
It is reverse of `_client_tensor_shift_for_secure_sum` taking into account
that `num_summands` elements were summed, so the inverse shift needs to be
appropriately scaled.
Args:
num_summands: The number of summands that formed `value`.
value: A summed Tensor to be shifted to original representation.
lower_bound: The smallest value expected in `value` before it was summed.
upper_bound: The largest value expected in `value` before it was summed.
output_dtype: The dtype of value after being shifted.
Returns:
Shifted value of dtype `output_dtype`.
"""
# Ensure summed `value` is within the expected range given `num_summands`.
min_valid_value = tf.constant(0, tf.int64)
# Cast to tf.int64 before multiplication to prevent overflow.
max_valid_value = tf.constant(_SECAGG_MAX, tf.int64) * tf.cast(
num_summands, tf.int64)
tf.Assert(
tf.math.logical_and(
tf.math.reduce_min(value) >= min_valid_value,
tf.math.reduce_max(value) <= max_valid_value),
[value, min_valid_value, max_valid_value])
if output_dtype == tf.int32:
value = value + tf.cast(num_summands, tf.int64) * tf.cast(
lower_bound, tf.int64)
elif output_dtype == tf.int64:
range_span = upper_bound - lower_bound
scale_factor = tf.math.floordiv(range_span, _SECAGG_MAX) + 1
num_summands = tf.cast(num_summands, tf.int64)
value = tf.cond(scale_factor > 1,
lambda: value * scale_factor + num_summands * lower_bound,
lambda: value + num_summands * lower_bound)
else:
# This should be ensured earlier and thus not user-facing.
assert output_dtype in [tf.float32, tf.float64]
# Use exactly the same `scale_factor` as during client quantization so that
# float precision errors (which are deterministic) cancel out. This ensures
# that the sum of [0] is exactly 0 for any clipping range.
scale_factor = tf.math.divide_no_nan(
tf.constant(_SECAGG_MAX, tf.float64),
tf.cast(upper_bound - lower_bound, tf.float64))
# Scale the shift by `num_summands` as an integer to prevent additional
# float precision errors for multiple summands. This also ensures that the
# sum of [0] * num_summands is exactly 0 for any clipping range.
value = value + tf.saturate_cast(
tf.round(tf.cast(lower_bound, tf.float64) * scale_factor),
tf.int64) * tf.cast(num_summands, tf.int64)
value = tf.cast(value, tf.float64)
value = value * (
tf.cast(upper_bound - lower_bound, tf.float64) / _SECAGG_MAX)
# If `lower_bound` and `upper_bound` are the same, the above shift had no
# effect since `scale_factor` is 0. Shift here instead.
shifted_value = value + tf.cast(num_summands, tf.float64) * tf.cast(
lower_bound, tf.float64)
value = tf.cond(
tf.equal(lower_bound, upper_bound), lambda: shifted_value,
lambda: value)
return tf.cast(value, output_dtype)
def secure_quantized_sum(client_value, lower_bound, upper_bound):
"""Quantizes and sums values securely.
Provided `client_value` can be either a Tensor or a nested structure of
Tensors. If it is a nested structure, `lower_bound` and `upper_bound` must be
either both scalars, or both have the same structure as `client_value`, with
each element being a scalar, representing the bounds to be used for each
corresponding Tensor in `client_value`.
This method converts each Tensor in provided `client_value` to appropriate
format and uses the `tff.federated_secure_sum_bitwidth` operator to realize
the sum.
The dtype of Tensors in provided `client_value` can be one of `[tf.int32,
tf.int64, tf.float32, tf.float64]`.
If the dtype of `client_value` is `tf.int32` or `tf.int64`, the summation is
possibly exact, depending on `lower_bound` and `upper_bound`: In the case that
`upper_bound - lower_bound < 2**32`, the summation will be exact. If it is
not, `client_value` will be quantized to precision of 32 bits, so the worst
case error introduced for the value of each client will be approximately
`(upper_bound - lower_bound) / 2**32`. Deterministic rounding to nearest value
is used in such cases.
If the dtype of `client_value` is `tf.float32` or `tf.float64`, the summation
is generally *not* accurate up to full floating point precision. Instead, the
values are first clipped to the `[lower_bound, upper_bound]` range. These
values are then uniformly quantized to 32 bit resolution, using deterministic
rounding to round the values to the quantization points. Rounding happens
roughly as follows (implementation is a bit more complex to mitigate numerical
stability issues):
```
values = tf.round(
      (client_value - lower_bound) * ((2**32 - 1) / (upper_bound - lower_bound)))
```
  After summation, the inverse operation is performed, so the return value
is of the same dtype as the input `client_value`.
In terms of accuracy, it is safe to assume accuracy within 7-8 significant
digits for `tf.float32` inputs, and 8-9 significant digits for `tf.float64`
inputs, where the significant digits refer to precision relative to the range
of the provided bounds. Thus, these bounds should not be set extremely wide.
Accuracy losses arise due to (1) quantization within the given clipping range,
(2) float precision of final outputs (e.g. `tf.float32` has 23 bits in its
mantissa), and (3) precision losses that arise in doing math on `tf.float32`
and `tf.float64` inputs.
As a concrete example, if the range is `+/- 1000`, errors up to `1e-4` per
element should be expected for `tf.float32` and up to `1e-5` for `tf.float64`.
Args:
client_value: A `tff.Value` placed at `tff.CLIENTS`.
lower_bound: The smallest possible value for `client_value` (inclusive).
Values smaller than this bound will be clipped. Must be either a scalar or
a nested structure of scalars, matching the structure of `client_value`.
Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`,
with dtype matching that of `client_value`.
upper_bound: The largest possible value for `client_value` (inclusive).
Values greater than this bound will be clipped. Must be either a scalar or
a nested structure of scalars, matching the structure of `client_value`.
Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`,
with dtype matching that of `client_value`.
Returns:
Summed `client_value` placed at `tff.SERVER`, of the same dtype as
`client_value`.
Raises:
TypeError (or its subclasses): If input arguments do not satisfy the type
constraints specified above.
"""
# Possibly converts Python constants to federated values.
client_value, lower_bound, upper_bound = _normalize_secure_quantized_sum_args(
client_value, lower_bound, upper_bound)
# This object is used during decoration of the `client_shift` method, and the
# value stored in this mutable container is used during decoration of the
# `server_shift` method. The reason for this is that we cannot currently get
# the needed information out of `client_value.type_signature.member` as we
# need both the `TensorType` information as well as the Python container
# attached to them.
temp_box = []
# These tf_computations assume the inputs were already validated. In
# particular, that lower_bnd and upper_bnd have the same structure, and if not
# scalar, the structure matches the structure of value.
@computations.tf_computation()
def client_shift(value, lower_bnd, upper_bnd):
assert not temp_box
temp_box.append(tf.nest.map_structure(lambda v: v.dtype, value))
fn = _client_tensor_shift_for_secure_sum
if tf.is_tensor(lower_bnd):
return tf.nest.map_structure(lambda v: fn(v, lower_bnd, upper_bnd), value)
else:
return tf.nest.map_structure(fn, value, lower_bnd, upper_bnd)
@computations.tf_computation()
def server_shift(value, lower_bnd, upper_bnd, summands):
fn = _server_tensor_shift_for_secure_sum
if tf.is_tensor(lower_bnd):
return tf.nest.map_structure(
lambda v, dtype: fn(summands, v, lower_bnd, upper_bnd, dtype), value,
temp_box[0])
else:
return tf.nest.map_structure(lambda *args: fn(summands, *args), value,
lower_bnd, upper_bnd, temp_box[0])
client_one = intrinsics.federated_value(1, placements.CLIENTS)
# Orchestration.
client_lower_bound = intrinsics.federated_broadcast(lower_bound)
client_upper_bound = intrinsics.federated_broadcast(upper_bound)
value = intrinsics.federated_map(
client_shift, (client_value, client_lower_bound, client_upper_bound))
num_summands = intrinsics.federated_secure_sum_bitwidth(
client_one, bitwidth=1)
secagg_value_type = value.type_signature.member
assert secagg_value_type.is_tensor() or secagg_value_type.is_struct()
if secagg_value_type.is_tensor():
bitwidths = 32
else:
bitwidths = structure.map_structure(lambda t: 32, secagg_value_type)
value = intrinsics.federated_secure_sum_bitwidth(value, bitwidth=bitwidths)
value = intrinsics.federated_map(
server_shift, (value, lower_bound, upper_bound, num_summands))
return value
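# --- Usage sketch (editor's addition): securely summing clipped client floats with
# fixed [-1000, 1000] bounds; the member type and the bounds are illustrative only.
@computations.federated_computation(
    computation_types.FederatedType(tf.float32, placements.CLIENTS))
def _example_secure_sum(client_values):
  return secure_quantized_sum(client_values, lower_bound=-1000.0, upper_bound=1000.0)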
| # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A package of primitive (stateless) aggregations."""
import collections
import attr
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.federated_context import value_impl
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
def _validate_value_on_clients(value):
py_typecheck.check_type(value, value_impl.Value)
py_typecheck.check_type(value.type_signature, computation_types.FederatedType)
if value.type_signature.placement is not placements.CLIENTS:
raise TypeError(
'`value` argument must be a tff.Value placed at CLIENTS. Got: {!s}'
.format(value.type_signature))
def _validate_dtype_is_min_max_compatible(dtype):
if not (dtype.is_integer or dtype.is_floating):
raise TypeError(
f'Unsupported dtype. The dtype for min and max must be either an '
        f'integer or floating. Got: {dtype}.')
def _federated_reduce_with_func(value, tf_func, zeros):
  """Applies `tf_func` to accumulated `value`s.
This utility provides a generic aggregation for accumulating a value and
applying a simple aggregation (like minimum or maximum aggregations).
Args:
value: A `tff.Value` placed on the `tff.CLIENTS`.
tf_func: A function to be applied to the accumulated values. Must be a
binary operation where both parameters are of type `U` and the return type
is also `U`.
zeros: The zero of the same type as `value` in the algebra of reduction
operators.
Returns:
A representation on the `tff.SERVER` of the result of aggregating `value`.
"""
value_type = value.type_signature.member
@computations.tf_computation(value_type, value_type)
def accumulate(current, value):
return tf.nest.map_structure(tf_func, current, value)
@computations.tf_computation(value_type)
def report(value):
return value
return intrinsics.federated_aggregate(value, zeros, accumulate, accumulate,
report)
def _initial_values(initial_value_fn, member_type):
"""Create a nested structure of initial values for the reduction.
Args:
initial_value_fn: A function that maps a tff.TensorType to a specific value
constant for initialization.
member_type: A `tff.Type` representing the member components of the
federated type.
Returns:
A function of the result of reducing a value with no constituents.
"""
@computations.tf_computation
def zeros_fn():
if member_type.is_struct():
structure.map_structure(
lambda v: _validate_dtype_is_min_max_compatible(v.dtype), member_type)
return structure.map_structure(
lambda v: tf.fill(v.shape, value=initial_value_fn(v)), member_type)
_validate_dtype_is_min_max_compatible(member_type.dtype)
return tf.fill(member_type.shape, value=initial_value_fn(member_type))
return zeros_fn()
def federated_min(value):
"""Computes the minimum at `tff.SERVER` of a `value` placed at `tff.CLIENTS`.
The minimum is computed element-wise, for each scalar and every scalar in a
tensor contained in `value`.
In the degenerate scenario that the `value` is aggregated over an empty set
of `tff.CLIENTS`, the tensor constituents of the result are set to the
maximum of the underlying numeric data type.
Args:
value: A value of a TFF federated type placed at the tff.CLIENTS.
Returns:
A representation of the min of the member constituents of `value` placed at
`tff.SERVER`.
"""
_validate_value_on_clients(value)
member_type = value.type_signature.member
# Explicit cast because v.dtype.max returns a Python constant, which could be
# implicitly converted to a tensor of different dtype by TensorFlow.
zeros = _initial_values(lambda v: tf.cast(v.dtype.max, v.dtype), member_type)
return _federated_reduce_with_func(value, tf.minimum, zeros)
def federated_max(value):
"""Computes the maximum at `tff.SERVER` of a `value` placed at `tff.CLIENTS`.
The maximum is computed element-wise, for each scalar and every scalar in a
tensor contained in `value`.
In the degenerate scenario that the `value` is aggregated over an empty set
of `tff.CLIENTS`, the tensor constituents of the result are set to the
minimum of the underlying numeric data type.
Args:
value: A value of a TFF federated type placed at the tff.CLIENTS.
Returns:
    A representation of the max of the member constituents of `value` placed at
`tff.SERVER`.
"""
_validate_value_on_clients(value)
member_type = value.type_signature.member
# Explicit cast because v.dtype.min returns a Python constant, which could be
# implicitly converted to a tensor of different dtype by TensorFlow.
zeros = _initial_values(lambda v: tf.cast(v.dtype.min, v.dtype), member_type)
return _federated_reduce_with_func(value, tf.maximum, zeros)
@attr.s
class _Samples(object):
"""Class representing internal sample data structure.
The class contains two parts, `accumulators` and `rands`, that are parallel
lists (e.g. the i-th index in one corresponds to the i-th index in the other).
These two lists are used to sample from the accumulators with equal
probability.
"""
accumulators = attr.ib()
rands = attr.ib()
def _zeros_for_sample(member_type):
"""Create an empty nested structure for the sample aggregation.
Args:
member_type: A `tff.Type` representing the member components of the
federated type.
Returns:
    An empty `_Samples` accumulator used to initialize the sample aggregation.
"""
@computations.tf_computation
def accumlator_type_fn():
"""Gets the type for the accumulators."""
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if member_type.is_struct():
a = structure.map_structure(
lambda v: tf.zeros([0] + v.shape.dims, v.dtype), member_type)
return _Samples(structure.to_odict(a, True), tf.zeros([0], tf.float32))
if member_type.shape:
s = [0] + member_type.shape.dims
return _Samples(tf.zeros(s, member_type.dtype), tf.zeros([0], tf.float32))
return accumlator_type_fn()
def _get_accumulator_type(member_type):
"""Constructs a `tff.Type` for the accumulator in sample aggregation.
Args:
member_type: A `tff.Type` representing the member components of the
federated type.
Returns:
The `tff.StructType` associated with the accumulator. The tuple contains
two parts, `accumulators` and `rands`, that are parallel lists (e.g. the
i-th index in one corresponds to the i-th index in the other). These two
lists are used to sample from the accumulators with equal probability.
"""
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if member_type.is_struct():
a = structure.map_structure(
lambda v: computation_types.TensorType(v.dtype, [None] + v.shape.dims),
member_type)
return computation_types.StructType(
collections.OrderedDict({
'accumulators':
computation_types.StructType(structure.to_odict(a, True)),
'rands':
computation_types.TensorType(tf.float32, shape=[None]),
}))
return computation_types.StructType(
collections.OrderedDict({
'accumulators':
computation_types.TensorType(
member_type.dtype, shape=[None] + member_type.shape.dims),
'rands':
computation_types.TensorType(tf.float32, shape=[None]),
}))
def federated_sample(value, max_num_samples=100):
"""Aggregation to produce uniform sample of at most `max_num_samples` values.
Each client value is assigned a random number when it is examined during each
accumulation. Each accumulate and merge only keeps the top N values based
on the random number. Report drops the random numbers and only returns the
at most N values sampled from the accumulated client values using standard
reservoir sampling (https://en.wikipedia.org/wiki/Reservoir_sampling), where
N is user provided `max_num_samples`.
Args:
value: A `tff.Value` placed on the `tff.CLIENTS`.
max_num_samples: The maximum number of samples to collect from client
values. If fewer clients than the defined max sample size participated in
the round of computation, the actual number of samples will equal the
number of clients in the round.
Returns:
At most `max_num_samples` samples of the value from the `tff.CLIENTS`.
"""
_validate_value_on_clients(value)
member_type = value.type_signature.member
accumulator_type = _get_accumulator_type(member_type)
zeros = _zeros_for_sample(member_type)
@tf.function
def fed_concat_expand_dims(a, b):
b = tf.expand_dims(b, axis=0)
return tf.concat([a, b], axis=0)
@tf.function
def fed_concat(a, b):
return tf.concat([a, b], axis=0)
@tf.function
def fed_gather(value, indices):
return tf.gather(value, indices)
def apply_sampling(accumulators, rands):
size = tf.shape(rands)[0]
k = tf.minimum(size, max_num_samples)
indices = tf.math.top_k(rands, k=k).indices
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if member_type.is_struct():
return structure.map_structure(lambda v: fed_gather(v, indices),
accumulators), fed_gather(rands, indices)
return fed_gather(accumulators, indices), fed_gather(rands, indices)
@computations.tf_computation(accumulator_type, value.type_signature.member)
def accumulate(current, value):
"""Accumulates samples through concatenation."""
rands = fed_concat_expand_dims(current.rands, tf.random.uniform(shape=()))
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if member_type.is_struct():
accumulators = structure.map_structure(
fed_concat_expand_dims, _ensure_structure(current.accumulators),
_ensure_structure(value))
else:
accumulators = fed_concat_expand_dims(current.accumulators, value)
accumulators, rands = apply_sampling(accumulators, rands)
return _Samples(accumulators, rands)
@computations.tf_computation(accumulator_type, accumulator_type)
def merge(a, b):
"""Merges accumulators through concatenation."""
# TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed.
if accumulator_type.is_struct():
samples = structure.map_structure(fed_concat, _ensure_structure(a),
_ensure_structure(b))
else:
samples = fed_concat(a, b)
accumulators, rands = apply_sampling(samples.accumulators, samples.rands)
return _Samples(accumulators, rands)
@computations.tf_computation(accumulator_type)
def report(value):
return value.accumulators
return intrinsics.federated_aggregate(value, zeros, accumulate, merge, report)
def _ensure_structure(obj):
return structure.from_container(obj, True)
# Lower precision types are not supported to avoid potential hard to discover
# numerical issues in conversion to/from format compatible with secure sum.
_SECURE_QUANTIZED_SUM_ALLOWED_DTYPES = (tf.int32, tf.int64, tf.float32,
tf.float64)
# The largest integer value provided to federated_secure_sum_bitwidth operator.
_SECAGG_MAX = 2**32 - 1
class BoundsDifferentTypesError(TypeError):
def __init__(self, lower_bound, upper_bound):
message = (f'Both lower_bound and upper_bound must be either federated '
f'values or Python constants. Found: type(lower_bound): '
f'{type(lower_bound)}, type(upper_bound): {type(upper_bound)}')
super().__init__(message)
class BoundsDifferentSignaturesError(TypeError):
def __init__(self, lower_bound, upper_bound):
message = (f'Provided lower_bound and upper_bound must have the same '
f'type_signature. Found: lower_bound signature: '
f'{lower_bound.type_signature}, upper_bound signature: '
f'{upper_bound.type_signature}.')
super().__init__(message)
class BoundsNotPlacedAtServerError(TypeError):
def __init__(self, placement):
message = (f'Provided lower_bound and upper_bound must be placed at '
f'tff.SERVER. Placement found: {placement}')
super().__init__(message)
class StructuredBoundsTypeMismatchError(TypeError):
def __init__(self, value_type, bounds_type):
message = (f'If bounds are specified as structures (not scalars), the '
f'structures must match the structure of provided client_value, '
f'with identical dtypes, but not necessarily shapes. Found: '
f'client_value type: {value_type}, bounds type: {bounds_type}.')
super().__init__(message)
class ScalarBoundStructValueDTypeError(TypeError):
def __init__(self, value_type, bounds_type):
message = (f'If scalar bounds are provided, all parts of client_value must '
f'be of matching dtype. Found: client_value type: {value_type}, '
f'bounds type: {bounds_type}.')
super().__init__(message)
class ScalarBoundSimpleValueDTypeError(TypeError):
def __init__(self, value_type, bounds_type):
message = (f'Bounds must have the same dtype as client_value. Found: '
f'client_value type: {value_type}, bounds type: {bounds_type}.')
super().__init__(message)
class UnsupportedDTypeError(TypeError):
def __init__(self, dtype):
message = (f'Value is of unsupported dtype {dtype}. Currently supported '
f'types are {_SECURE_QUANTIZED_SUM_ALLOWED_DTYPES}.')
super().__init__(message)
def _check_secure_quantized_sum_dtype(dtype):
if dtype not in _SECURE_QUANTIZED_SUM_ALLOWED_DTYPES:
raise UnsupportedDTypeError(dtype)
# pylint: disable=g-doc-exception
def _normalize_secure_quantized_sum_args(client_value, lower_bound,
upper_bound):
"""Normalizes inputs to `secure_quantized_sum` method.
  Validates the expected structure of arguments, as documented in the docstring
of `secure_quantized_sum` method. The arguments provided are also returned,
possibly normalized to meet those expectations. In particular, if
`lower_bound` and `upper_bound` are Python constants, these are converted to
`tff.SERVER`-placed federated values.
Args:
client_value: A `tff.Value` placed at `tff.CLIENTS`.
lower_bound: The smallest possible value for `client_value` (inclusive).
Values smaller than this bound will be clipped. Must be either a scalar or
a nested structure of scalars, matching the structure of `client_value`.
Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`,
with dtype matching that of `client_value`.
upper_bound: The largest possible value for `client_value` (inclusive).
Values greater than this bound will be clipped. Must be either a scalar or
a nested structure of scalars, matching the structure of `client_value`.
Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`,
with dtype matching that of `client_value`.
Returns:
Normalized `(client_value, lower_bound, upper_bound)` tuple.
"""
# Validation of client_value.
_validate_value_on_clients(client_value)
client_value_member = client_value.type_signature.member
if client_value.type_signature.member.is_struct():
dtypes = [v.dtype for v in structure.flatten(client_value_member)]
for dtype in dtypes:
_check_secure_quantized_sum_dtype(dtype)
else:
dtypes = client_value_member.dtype
_check_secure_quantized_sum_dtype(dtypes)
# Validation of bounds.
if isinstance(lower_bound, value_impl.Value) != isinstance(
upper_bound, value_impl.Value):
raise BoundsDifferentTypesError(lower_bound, upper_bound)
elif not isinstance(lower_bound, value_impl.Value):
# Normalization of bounds to federated values.
lower_bound = intrinsics.federated_value(lower_bound, placements.SERVER)
upper_bound = intrinsics.federated_value(upper_bound, placements.SERVER)
if lower_bound.type_signature != upper_bound.type_signature:
raise BoundsDifferentSignaturesError(lower_bound, upper_bound)
# The remaining type checks only use lower_bound as the upper_bound has
  # identical type_signature.
if lower_bound.type_signature.placement != placements.SERVER:
raise BoundsNotPlacedAtServerError(lower_bound.type_signature.placement)
# Validation of client_value and bounds compatibility.
bound_member = lower_bound.type_signature.member
if bound_member.is_struct():
if not client_value_member.is_struct() or (structure.map_structure(
lambda v: v.dtype, bound_member) != structure.map_structure(
lambda v: v.dtype, client_value_member)):
raise StructuredBoundsTypeMismatchError(client_value_member, bound_member)
else:
# If bounds are scalar, must be compatible with all tensors in client_value.
if client_value_member.is_struct():
if len(set(dtypes)) > 1 or (bound_member.dtype != dtypes[0]):
raise ScalarBoundStructValueDTypeError(client_value_member,
bound_member)
else:
if bound_member.dtype != client_value_member.dtype:
raise ScalarBoundSimpleValueDTypeError(client_value_member,
bound_member)
return client_value, lower_bound, upper_bound
@tf.function
def _client_tensor_shift_for_secure_sum(value, lower_bound, upper_bound):
"""Mapping to be applied to every tensor before secure sum.
This operation is performed on `tff.CLIENTS` to prepare values to format
compatible with `tff.federated_secure_sum_bitwidth` operator.
This clips elements of `value` to `[lower_bound, upper_bound]`, shifts and
scales it to range `[0, 2**32-1]` and casts it to `tf.int64`. The specific
operation depends on dtype of `value`.
Args:
value: A Tensor to be shifted for compatibility with
`federated_secure_sum_bitwidth`.
lower_bound: The smallest value expected in `value`.
upper_bound: The largest value expected in `value`.
Returns:
Shifted value of dtype `tf.int64`.
"""
tf.Assert(lower_bound <= upper_bound, [lower_bound, upper_bound])
if value.dtype == tf.int32:
clipped_val = tf.clip_by_value(value, lower_bound, upper_bound)
# Cast BEFORE shift in order to avoid overflow if full int32 range is used.
return tf.cast(clipped_val, tf.int64) - tf.cast(lower_bound, tf.int64)
elif value.dtype == tf.int64:
clipped_val = tf.clip_by_value(value, lower_bound, upper_bound)
range_span = upper_bound - lower_bound
scale_factor = tf.math.floordiv(range_span, _SECAGG_MAX) + 1
shifted_value = tf.cond(
scale_factor > 1,
lambda: tf.math.floordiv(clipped_val - lower_bound, scale_factor),
lambda: clipped_val - lower_bound)
return shifted_value
else:
# This should be ensured earlier and thus not user-facing.
assert value.dtype in [tf.float32, tf.float64]
clipped_value = tf.clip_by_value(value, lower_bound, upper_bound)
# Prevent NaNs if `lower_bound` and `upper_bound` are the same.
scale_factor = tf.math.divide_no_nan(
tf.constant(_SECAGG_MAX, tf.float64),
tf.cast(upper_bound - lower_bound, tf.float64))
scaled_value = tf.cast(clipped_value, tf.float64) * scale_factor
# Perform deterministic rounding here, which may introduce bias as every
# value may be rounded in the same direction for some input data.
rounded_value = tf.saturate_cast(tf.round(scaled_value), tf.int64)
# Perform shift in integer space to minimize float precision errors.
shifted_value = rounded_value - tf.saturate_cast(
tf.round(tf.cast(lower_bound, tf.float64) * scale_factor), tf.int64)
# Clip to expected range in case of numerical stability issues.
quantized_value = tf.clip_by_value(shifted_value,
tf.constant(0, dtype=tf.int64),
tf.constant(_SECAGG_MAX, dtype=tf.int64))
return quantized_value
@tf.function
def _server_tensor_shift_for_secure_sum(num_summands, value, lower_bound,
upper_bound, output_dtype):
"""Mapping to be applied to every tensor after secure sum.
This operation is performed on `tff.SERVER` to dequantize outputs of the
`tff.federated_secure_sum_bitwidth` operator.
It is reverse of `_client_tensor_shift_for_secure_sum` taking into account
that `num_summands` elements were summed, so the inverse shift needs to be
appropriately scaled.
Args:
num_summands: The number of summands that formed `value`.
value: A summed Tensor to be shifted to original representation.
lower_bound: The smallest value expected in `value` before it was summed.
upper_bound: The largest value expected in `value` before it was summed.
output_dtype: The dtype of value after being shifted.
Returns:
Shifted value of dtype `output_dtype`.
"""
# Ensure summed `value` is within the expected range given `num_summands`.
min_valid_value = tf.constant(0, tf.int64)
# Cast to tf.int64 before multiplication to prevent overflow.
max_valid_value = tf.constant(_SECAGG_MAX, tf.int64) * tf.cast(
num_summands, tf.int64)
tf.Assert(
tf.math.logical_and(
tf.math.reduce_min(value) >= min_valid_value,
tf.math.reduce_max(value) <= max_valid_value),
[value, min_valid_value, max_valid_value])
if output_dtype == tf.int32:
value = value + tf.cast(num_summands, tf.int64) * tf.cast(
lower_bound, tf.int64)
elif output_dtype == tf.int64:
range_span = upper_bound - lower_bound
scale_factor = tf.math.floordiv(range_span, _SECAGG_MAX) + 1
num_summands = tf.cast(num_summands, tf.int64)
value = tf.cond(scale_factor > 1,
lambda: value * scale_factor + num_summands * lower_bound,
lambda: value + num_summands * lower_bound)
else:
# This should be ensured earlier and thus not user-facing.
assert output_dtype in [tf.float32, tf.float64]
# Use exactly the same `scale_factor` as during client quantization so that
# float precision errors (which are deterministic) cancel out. This ensures
# that the sum of [0] is exactly 0 for any clipping range.
scale_factor = tf.math.divide_no_nan(
tf.constant(_SECAGG_MAX, tf.float64),
tf.cast(upper_bound - lower_bound, tf.float64))
# Scale the shift by `num_summands` as an integer to prevent additional
# float precision errors for multiple summands. This also ensures that the
# sum of [0] * num_summands is exactly 0 for any clipping range.
value = value + tf.saturate_cast(
tf.round(tf.cast(lower_bound, tf.float64) * scale_factor),
tf.int64) * tf.cast(num_summands, tf.int64)
value = tf.cast(value, tf.float64)
value = value * (
tf.cast(upper_bound - lower_bound, tf.float64) / _SECAGG_MAX)
# If `lower_bound` and `upper_bound` are the same, the above shift had no
# effect since `scale_factor` is 0. Shift here instead.
shifted_value = value + tf.cast(num_summands, tf.float64) * tf.cast(
lower_bound, tf.float64)
value = tf.cond(
tf.equal(lower_bound, upper_bound), lambda: shifted_value,
lambda: value)
return tf.cast(value, output_dtype)
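# Illustrative sketch (not part of the implementation above): a pure-NumPy
# version of the float quantize -> sum -> dequantize round trip realized by the
# two shift functions, assuming lower_bound < upper_bound, no modular wraparound
# in the secure sum, and that _SECAGG_MAX is 2**32 - 1. The final clip to
# [0, _SECAGG_MAX] is omitted for brevity.
def _numpy_float_round_trip_sketch(client_values, lower_bound, upper_bound):
  """Returns an approximation of the sum of the clipped `client_values`."""
  import numpy as np
  secagg_max = 2**32 - 1  # Assumed value of the _SECAGG_MAX module constant.
  scale = secagg_max / (upper_bound - lower_bound)
  shift = int(round(lower_bound * scale))
  quantized = [
      int(round(float(np.clip(v, lower_bound, upper_bound)) * scale)) - shift
      for v in client_values
  ]
  summed = sum(quantized)  # Stands in for federated_secure_sum_bitwidth.
  num_summands = len(client_values)
  return (summed + num_summands * shift) * (
      (upper_bound - lower_bound) / secagg_max)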
def secure_quantized_sum(client_value, lower_bound, upper_bound):
"""Quantizes and sums values securely.
Provided `client_value` can be either a Tensor or a nested structure of
Tensors. If it is a nested structure, `lower_bound` and `upper_bound` must be
either both scalars, or both have the same structure as `client_value`, with
each element being a scalar, representing the bounds to be used for each
corresponding Tensor in `client_value`.
This method converts each Tensor in provided `client_value` to appropriate
format and uses the `tff.federated_secure_sum_bitwidth` operator to realize
the sum.
The dtype of Tensors in provided `client_value` can be one of `[tf.int32,
tf.int64, tf.float32, tf.float64]`.
If the dtype of `client_value` is `tf.int32` or `tf.int64`, the summation is
possibly exact, depending on `lower_bound` and `upper_bound`: In the case that
`upper_bound - lower_bound < 2**32`, the summation will be exact. If it is
not, `client_value` will be quantized to precision of 32 bits, so the worst
case error introduced for the value of each client will be approximately
`(upper_bound - lower_bound) / 2**32`. Deterministic rounding to nearest value
is used in such cases.
If the dtype of `client_value` is `tf.float32` or `tf.float64`, the summation
is generally *not* accurate up to full floating point precision. Instead, the
values are first clipped to the `[lower_bound, upper_bound]` range. These
values are then uniformly quantized to 32 bit resolution, using deterministic
rounding to round the values to the quantization points. Rounding happens
roughly as follows (implementation is a bit more complex to mitigate numerical
stability issues):
```
values = tf.round(
    (client_value - lower_bound) * ((2**32 - 1) / (upper_bound - lower_bound)))
  ```
  After summation, the inverse operation is performed, so the return value
is of the same dtype as the input `client_value`.
In terms of accuracy, it is safe to assume accuracy within 7-8 significant
digits for `tf.float32` inputs, and 8-9 significant digits for `tf.float64`
inputs, where the significant digits refer to precision relative to the range
of the provided bounds. Thus, these bounds should not be set extremely wide.
Accuracy losses arise due to (1) quantization within the given clipping range,
(2) float precision of final outputs (e.g. `tf.float32` has 23 bits in its
mantissa), and (3) precision losses that arise in doing math on `tf.float32`
and `tf.float64` inputs.
As a concrete example, if the range is `+/- 1000`, errors up to `1e-4` per
element should be expected for `tf.float32` and up to `1e-5` for `tf.float64`.
Args:
client_value: A `tff.Value` placed at `tff.CLIENTS`.
lower_bound: The smallest possible value for `client_value` (inclusive).
Values smaller than this bound will be clipped. Must be either a scalar or
a nested structure of scalars, matching the structure of `client_value`.
Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`,
with dtype matching that of `client_value`.
upper_bound: The largest possible value for `client_value` (inclusive).
Values greater than this bound will be clipped. Must be either a scalar or
a nested structure of scalars, matching the structure of `client_value`.
Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`,
with dtype matching that of `client_value`.
Returns:
Summed `client_value` placed at `tff.SERVER`, of the same dtype as
`client_value`.
Raises:
TypeError (or its subclasses): If input arguments do not satisfy the type
constraints specified above.
"""
# Possibly converts Python constants to federated values.
client_value, lower_bound, upper_bound = _normalize_secure_quantized_sum_args(
client_value, lower_bound, upper_bound)
# This object is used during decoration of the `client_shift` method, and the
# value stored in this mutable container is used during decoration of the
# `server_shift` method. The reason for this is that we cannot currently get
# the needed information out of `client_value.type_signature.member` as we
# need both the `TensorType` information as well as the Python container
# attached to them.
temp_box = []
# These tf_computations assume the inputs were already validated. In
# particular, that lower_bnd and upper_bnd have the same structure, and if not
# scalar, the structure matches the structure of value.
@computations.tf_computation()
def client_shift(value, lower_bnd, upper_bnd):
assert not temp_box
temp_box.append(tf.nest.map_structure(lambda v: v.dtype, value))
fn = _client_tensor_shift_for_secure_sum
if tf.is_tensor(lower_bnd):
return tf.nest.map_structure(lambda v: fn(v, lower_bnd, upper_bnd), value)
else:
return tf.nest.map_structure(fn, value, lower_bnd, upper_bnd)
@computations.tf_computation()
def server_shift(value, lower_bnd, upper_bnd, summands):
fn = _server_tensor_shift_for_secure_sum
if tf.is_tensor(lower_bnd):
return tf.nest.map_structure(
lambda v, dtype: fn(summands, v, lower_bnd, upper_bnd, dtype), value,
temp_box[0])
else:
return tf.nest.map_structure(lambda *args: fn(summands, *args), value,
lower_bnd, upper_bnd, temp_box[0])
client_one = intrinsics.federated_value(1, placements.CLIENTS)
# Orchestration.
client_lower_bound = intrinsics.federated_broadcast(lower_bound)
client_upper_bound = intrinsics.federated_broadcast(upper_bound)
value = intrinsics.federated_map(
client_shift, (client_value, client_lower_bound, client_upper_bound))
num_summands = intrinsics.federated_secure_sum_bitwidth(
client_one, bitwidth=1)
secagg_value_type = value.type_signature.member
assert secagg_value_type.is_tensor() or secagg_value_type.is_struct()
if secagg_value_type.is_tensor():
bitwidths = 32
else:
bitwidths = structure.map_structure(lambda t: 32, secagg_value_type)
value = intrinsics.federated_secure_sum_bitwidth(value, bitwidth=bitwidths)
value = intrinsics.federated_map(
server_shift, (value, lower_bound, upper_bound, num_summands))
return value | en | 0.823154 | # Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pytype: skip-file # This modules disables the Pytype analyzer, see # https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more # information. A package of primitive (stateless) aggregations. Applies to `tf_func` to accumulated `value`s. This utility provides a generic aggregation for accumulating a value and applying a simple aggregation (like minimum or maximum aggregations). Args: value: A `tff.Value` placed on the `tff.CLIENTS`. tf_func: A function to be applied to the accumulated values. Must be a binary operation where both parameters are of type `U` and the return type is also `U`. zeros: The zero of the same type as `value` in the algebra of reduction operators. Returns: A representation on the `tff.SERVER` of the result of aggregating `value`. Create a nested structure of initial values for the reduction. Args: initial_value_fn: A function that maps a tff.TensorType to a specific value constant for initialization. member_type: A `tff.Type` representing the member components of the federated type. Returns: A function of the result of reducing a value with no constituents. Computes the minimum at `tff.SERVER` of a `value` placed at `tff.CLIENTS`. The minimum is computed element-wise, for each scalar and every scalar in a tensor contained in `value`. In the degenerate scenario that the `value` is aggregated over an empty set of `tff.CLIENTS`, the tensor constituents of the result are set to the maximum of the underlying numeric data type. Args: value: A value of a TFF federated type placed at the tff.CLIENTS. Returns: A representation of the min of the member constituents of `value` placed at `tff.SERVER`. # Explicit cast because v.dtype.max returns a Python constant, which could be # implicitly converted to a tensor of different dtype by TensorFlow. Computes the maximum at `tff.SERVER` of a `value` placed at `tff.CLIENTS`. The maximum is computed element-wise, for each scalar and every scalar in a tensor contained in `value`. In the degenerate scenario that the `value` is aggregated over an empty set of `tff.CLIENTS`, the tensor constituents of the result are set to the minimum of the underlying numeric data type. Args: value: A value of a TFF federated type placed at the tff.CLIENTS. Returns: A representation of the min of the member constituents of `value` placed at `tff.SERVER`. # Explicit cast because v.dtype.min returns a Python constant, which could be # implicitly converted to a tensor of different dtype by TensorFlow. Class representing internal sample data structure. The class contains two parts, `accumulators` and `rands`, that are parallel lists (e.g. the i-th index in one corresponds to the i-th index in the other). These two lists are used to sample from the accumulators with equal probability. Create an empty nested structure for the sample aggregation. 
Args: member_type: A `tff.Type` representing the member components of the federated type. Returns: A function of the result of zeros to first concatenate. Gets the type for the accumulators. # TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed. Constructs a `tff.Type` for the accumulator in sample aggregation. Args: member_type: A `tff.Type` representing the member components of the federated type. Returns: The `tff.StructType` associated with the accumulator. The tuple contains two parts, `accumulators` and `rands`, that are parallel lists (e.g. the i-th index in one corresponds to the i-th index in the other). These two lists are used to sample from the accumulators with equal probability. # TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed. Aggregation to produce uniform sample of at most `max_num_samples` values. Each client value is assigned a random number when it is examined during each accumulation. Each accumulate and merge only keeps the top N values based on the random number. Report drops the random numbers and only returns the at most N values sampled from the accumulated client values using standard reservoir sampling (https://en.wikipedia.org/wiki/Reservoir_sampling), where N is user provided `max_num_samples`. Args: value: A `tff.Value` placed on the `tff.CLIENTS`. max_num_samples: The maximum number of samples to collect from client values. If fewer clients than the defined max sample size participated in the round of computation, the actual number of samples will equal the number of clients in the round. Returns: At most `max_num_samples` samples of the value from the `tff.CLIENTS`. # TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed. Accumulates samples through concatenation. # TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed. Merges accumulators through concatenation. # TODO(b/121288403): Special-casing anonymous tuple shouldn't be needed. # Lower precision types are not supported to avoid potential hard to discover # numerical issues in conversion to/from format compatible with secure sum. # The largest integer value provided to federated_secure_sum_bitwidth operator. # pylint: disable=g-doc-exception Normalizes inputs to `secure_quantized_sum` method. Validates the epxected structure of arguments, as documented in the docstring of `secure_quantized_sum` method. The arguments provided are also returned, possibly normalized to meet those expectations. In particular, if `lower_bound` and `upper_bound` are Python constants, these are converted to `tff.SERVER`-placed federated values. Args: client_value: A `tff.Value` placed at `tff.CLIENTS`. lower_bound: The smallest possible value for `client_value` (inclusive). Values smaller than this bound will be clipped. Must be either a scalar or a nested structure of scalars, matching the structure of `client_value`. Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`, with dtype matching that of `client_value`. upper_bound: The largest possible value for `client_value` (inclusive). Values greater than this bound will be clipped. Must be either a scalar or a nested structure of scalars, matching the structure of `client_value`. Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`, with dtype matching that of `client_value`. Returns: Normalized `(client_value, lower_bound, upper_bound)` tuple. # Validation of client_value. # Validation of bounds. # Normalization of bounds to federated values. 
# The remaining type checks only use lower_bound as the upper_bound has # itendical type_signature. # Validation of client_value and bounds compatibility. # If bounds are scalar, must be compatible with all tensors in client_value. Mapping to be applied to every tensor before secure sum. This operation is performed on `tff.CLIENTS` to prepare values to format compatible with `tff.federated_secure_sum_bitwidth` operator. This clips elements of `value` to `[lower_bound, upper_bound]`, shifts and scales it to range `[0, 2**32-1]` and casts it to `tf.int64`. The specific operation depends on dtype of `value`. Args: value: A Tensor to be shifted for compatibility with `federated_secure_sum_bitwidth`. lower_bound: The smallest value expected in `value`. upper_bound: The largest value expected in `value`. Returns: Shifted value of dtype `tf.int64`. # Cast BEFORE shift in order to avoid overflow if full int32 range is used. # This should be ensured earlier and thus not user-facing. # Prevent NaNs if `lower_bound` and `upper_bound` are the same. # Perform deterministic rounding here, which may introduce bias as every # value may be rounded in the same direction for some input data. # Perform shift in integer space to minimize float precision errors. # Clip to expected range in case of numerical stability issues. Mapping to be applied to every tensor after secure sum. This operation is performed on `tff.SERVER` to dequantize outputs of the `tff.federated_secure_sum_bitwidth` operator. It is reverse of `_client_tensor_shift_for_secure_sum` taking into account that `num_summands` elements were summed, so the inverse shift needs to be appropriately scaled. Args: num_summands: The number of summands that formed `value`. value: A summed Tensor to be shifted to original representation. lower_bound: The smallest value expected in `value` before it was summed. upper_bound: The largest value expected in `value` before it was summed. output_dtype: The dtype of value after being shifted. Returns: Shifted value of dtype `output_dtype`. # Ensure summed `value` is within the expected range given `num_summands`. # Cast to tf.int64 before multiplication to prevent overflow. # This should be ensured earlier and thus not user-facing. # Use exactly the same `scale_factor` as during client quantization so that # float precision errors (which are deterministic) cancel out. This ensures # that the sum of [0] is exactly 0 for any clipping range. # Scale the shift by `num_summands` as an integer to prevent additional # float precision errors for multiple summands. This also ensures that the # sum of [0] * num_summands is exactly 0 for any clipping range. # If `lower_bound` and `upper_bound` are the same, the above shift had no # effect since `scale_factor` is 0. Shift here instead. Quantizes and sums values securely. Provided `client_value` can be either a Tensor or a nested structure of Tensors. If it is a nested structure, `lower_bound` and `upper_bound` must be either both scalars, or both have the same structure as `client_value`, with each element being a scalar, representing the bounds to be used for each corresponding Tensor in `client_value`. This method converts each Tensor in provided `client_value` to appropriate format and uses the `tff.federated_secure_sum_bitwidth` operator to realize the sum. The dtype of Tensors in provided `client_value` can be one of `[tf.int32, tf.int64, tf.float32, tf.float64]`. 
If the dtype of `client_value` is `tf.int32` or `tf.int64`, the summation is possibly exact, depending on `lower_bound` and `upper_bound`: In the case that `upper_bound - lower_bound < 2**32`, the summation will be exact. If it is not, `client_value` will be quantized to precision of 32 bits, so the worst case error introduced for the value of each client will be approximately `(upper_bound - lower_bound) / 2**32`. Deterministic rounding to nearest value is used in such cases. If the dtype of `client_value` is `tf.float32` or `tf.float64`, the summation is generally *not* accurate up to full floating point precision. Instead, the values are first clipped to the `[lower_bound, upper_bound]` range. These values are then uniformly quantized to 32 bit resolution, using deterministic rounding to round the values to the quantization points. Rounding happens roughly as follows (implementation is a bit more complex to mitigate numerical stability issues): ``` values = tf.round( (client_value - lower_bound) * ((2**32 - 1) / (upper_bound - lower_bound)) ``` After summation, the inverse operation if performed, so the return value is of the same dtype as the input `client_value`. In terms of accuracy, it is safe to assume accuracy within 7-8 significant digits for `tf.float32` inputs, and 8-9 significant digits for `tf.float64` inputs, where the significant digits refer to precision relative to the range of the provided bounds. Thus, these bounds should not be set extremely wide. Accuracy losses arise due to (1) quantization within the given clipping range, (2) float precision of final outputs (e.g. `tf.float32` has 23 bits in its mantissa), and (3) precision losses that arise in doing math on `tf.float32` and `tf.float64` inputs. As a concrete example, if the range is `+/- 1000`, errors up to `1e-4` per element should be expected for `tf.float32` and up to `1e-5` for `tf.float64`. Args: client_value: A `tff.Value` placed at `tff.CLIENTS`. lower_bound: The smallest possible value for `client_value` (inclusive). Values smaller than this bound will be clipped. Must be either a scalar or a nested structure of scalars, matching the structure of `client_value`. Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`, with dtype matching that of `client_value`. upper_bound: The largest possible value for `client_value` (inclusive). Values greater than this bound will be clipped. Must be either a scalar or a nested structure of scalars, matching the structure of `client_value`. Must be either a Python constant or a `tff.Value` placed at `tff.SERVER`, with dtype matching that of `client_value`. Returns: Summed `client_value` placed at `tff.SERVER`, of the same dtype as `client_value`. Raises: TypeError (or its subclasses): If input arguments do not satisfy the type constraints specified above. # Possibly converts Python constants to federated values. # This object is used during decoration of the `client_shift` method, and the # value stored in this mutable container is used during decoration of the # `server_shift` method. The reason for this is that we cannot currently get # the needed information out of `client_value.type_signature.member` as we # need both the `TensorType` information as well as the Python container # attached to them. # These tf_computations assume the inputs were already validated. In # particular, that lower_bnd and upper_bnd have the same structure, and if not # scalar, the structure matches the structure of value. # Orchestration. | 1.77151 | 2 |
maven/tests/dicts_test.bzl | KelvinLu/bazel_maven_repository | 0 | 6632580 | <reponame>KelvinLu/bazel_maven_repository<filename>maven/tests/dicts_test.bzl
load(":testing.bzl", "asserts", "test_suite")
load("//maven:utils.bzl", "dicts")
def encode_test(env):
nested = {
"foo": "bar",
"blah": {
"a": "b",
"c": "d",
}
}
expected = {
"foo": "bar",
"blah": ["a>>>b", "c>>>d"],
}
asserts.equals(env, expected, dicts.encode_nested(nested))
# Roll-up function.
def suite():
return test_suite("dicts", tests = [encode_test]) | en | 0.535715 | # Roll-up function. | 2.189579 | 2 |
2onnx_resnet.py | Yifanfanfanfan/flops-counter.pytorch | 0 | 6632581 | <filename>2onnx_resnet.py
import os, sys, time, shutil, argparse
from functools import partial
import pickle
sys.path.append('../')
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
#import torchvision.models as models
import torch.optim as optim
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
import torch.multiprocessing as mp
from collections import OrderedDict
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx as torch_onnx
import onnx
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import lab2rgb
from skimage import io
# import prune_util
# from prune_util import GradualWarmupScheduler
# from prune_util import CrossEntropyLossMaybeSmooth
# from prune_util import mixup_data, mixup_criterion
# from utils import save_checkpoint, AverageMeter, visualize_image, GrayscaleImageFolder
# from model import ColorNet
#from wdsr_b import *
#from args import *
import captioning.utils.opts as opts
import captioning.models as models
from captioning.data.dataloader import *
from captioning.utils.resnet_utils import myResnet
from captioning.utils import resnet
import onnxruntime
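# Note: `to_numpy` is called in check() below but is not defined in this file.
# A minimal helper is added here as an assumption, following the common
# PyTorch -> ONNX Runtime verification pattern.
def to_numpy(tensor):
    # Detach from autograd if necessary, move to CPU and convert to numpy.
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()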
def main():
use_gpu = torch.cuda.is_available()
# Create model
# models.resnet18(num_classes=365)
# model = ColorNet()
#args = get_args()
#model = MODEL(args)
# state_dict = torch.load("./checkpoint/checkpoint6/model_epoch133_step1.pth")
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# k = k.replace('module.', '')
# new_state_dict[k] = v
# model = torch.nn.DataParallel(model)
# model.load_state_dict(new_state_dict)
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--cnn_model', type=str, default='resnet101',
help='resnet101, resnet152')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
opt = parser.parse_args()
cnn_model = 'resnet101'
my_resnet = getattr(resnet, cnn_model)()
state_dict = torch.load('/home/zzgyf/github_yifan/ImageCaptioning.pytorch/data/imagenet_weights/'+ cnn_model+'.pth')
new_state_dict = OrderedDict()
for k, v in state_dict.items():
k = k.replace('module.', '')
new_state_dict[k] = v
my_resnet.load_state_dict(new_state_dict)
model = myResnet(my_resnet)
# model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
# if isinstance(model, torch.nn.DataParallel):
# model = model.module
#checkpoint = torch.load("/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth")
#model.load_state_dict(checkpoint)
# cocotest_bu_fc_size = (10, 2048)
# cocotest_bu_att_size = (10, 0, 0)
# labels_size = (10, 5, 18)
# masks_size = (10, 5, 18)
model_onnx_path = "./resnet101.onnx"
input_shape = (3, 640, 480)
model.train(False)
model.eval()
# Export the model to an ONNX file
# dummy_input = Variable(torch.randn(1, *input_shape))
dummy_input = Variable(torch.randn(*input_shape))
#output = torch_onnx.export(model, dummy_input, model_onnx_path, verbose=False)
output = torch_onnx.export(model, dummy_input, model_onnx_path, verbose=False)
print("Export of torch_model.onnx complete!")
def check():
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--cnn_model', type=str, default='resnet101',
help='resnet101, resnet152')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
opt = parser.parse_args()
cnn_model = 'resnet101'
my_resnet = getattr(resnet, cnn_model)()
my_resnet.load_state_dict(torch.load('/home/zzgyf/github_yifan/ImageCaptioning.pytorch/data/imagenet_weights/'+ cnn_model+'.pth'))
model = myResnet(my_resnet)
#checkpoint = torch.load("/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth")
#model.load_state_dict(checkpoint)
# torch.nn.utils.remove_weight_norm(model.head[0])
# for i in range(2):
# for j in [0,2,3]:
# torch.nn.utils.remove_weight_norm(model.body[i].body[j])
# torch.nn.utils.remove_weight_norm(model.tail[0])
# torch.nn.utils.remove_weight_norm(model.skip[0])
model.eval()
ort_session = onnxruntime.InferenceSession("resnet101.onnx")
x = Variable(torch.randn(3, 640, 480))
    #x = torch.randn(1, 3, 392, 392, requires_grad=False)
    torch_out = model(x)  # restored: needed by the assert_allclose check below
# # Load the ONNX model
# model = onnx.load("wdsr_b.onnx")
# # Check that the IR is well formed
# onnx.checker.check_model(model)
# # Print a human readable representation of the graph
# onnx.helper.printable_graph(model.graph)
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
if __name__ == '__main__':
main()
check()
| en | 0.400596 | #import torchvision.models as models # import prune_util # from prune_util import GradualWarmupScheduler # from prune_util import CrossEntropyLossMaybeSmooth # from prune_util import mixup_data, mixup_criterion # from utils import save_checkpoint, AverageMeter, visualize_image, GrayscaleImageFolder # from model import ColorNet #from wdsr_b import * #from args import * # Create model # models.resnet18(num_classes=365) # model = ColorNet() #args = get_args() #model = MODEL(args) # state_dict = torch.load("./checkpoint/checkpoint6/model_epoch133_step1.pth") # new_state_dict = OrderedDict() # for k, v in state_dict.items(): # k = k.replace('module.', '') # new_state_dict[k] = v # model = torch.nn.DataParallel(model) # model.load_state_dict(new_state_dict) # Input paths # model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count())) # if isinstance(model, torch.nn.DataParallel): # model = model.module #checkpoint = torch.load("/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth") #model.load_state_dict(checkpoint) # cocotest_bu_fc_size = (10, 2048) # cocotest_bu_att_size = (10, 0, 0) # labels_size = (10, 5, 18) # masks_size = (10, 5, 18) # Export the model to an ONNX file # dummy_input = Variable(torch.randn(1, *input_shape)) #output = torch_onnx.export(model, dummy_input, model_onnx_path, verbose=False) # Input paths #checkpoint = torch.load("/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth") #model.load_state_dict(checkpoint) # torch.nn.utils.remove_weight_norm(model.head[0]) # for i in range(2): # for j in [0,2,3]: # torch.nn.utils.remove_weight_norm(model.body[i].body[j]) # torch.nn.utils.remove_weight_norm(model.tail[0]) # torch.nn.utils.remove_weight_norm(model.skip[0]) #x = torch.randn(1, 3, 392, 392, requires_grad=False) #torch_out = model(x) # # Load the ONNX model # model = onnx.load("wdsr_b.onnx") # # Check that the IR is well formed # onnx.checker.check_model(model) # # Print a human readable representation of the graph # onnx.helper.printable_graph(model.graph) # compute ONNX Runtime output prediction # compare ONNX Runtime and PyTorch results | 1.973321 | 2 |
programs/witness_node/saltpass.py | techsharesteam/techshares | 0 | 6632582 | #!/usr/bin/env python3
import base64
import getpass
import hashlib
import json
import os
pw = getpass.getpass("enter your password: ")
pw_bytes = pw.encode("utf-8")
salt_bytes = os.urandom(8)
salt_b64 = base64.b64encode( salt_bytes )
pw_hash = hashlib.sha256( pw_bytes + salt_bytes ).digest()
pw_hash_b64 = base64.b64encode( pw_hash )
print(json.dumps(
{
"password_hash_b64" : pw_hash_b64.decode("ascii"),
"password_salt_b64" : salt_b64.decode("ascii"),
},
sort_keys=True,
indent=3, separators=(',', ' : ')
))
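# Hypothetical verification counterpart (not part of the original script): the
# stored hash can later be checked against a candidate password like this.
# def check_password(candidate, password_hash_b64, password_salt_b64):
#     salt = base64.b64decode(password_salt_b64)
#     digest = hashlib.sha256(candidate.encode("utf-8") + salt).digest()
#     return base64.b64encode(digest).decode("ascii") == password_hash_b64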
| fr | 0.221828 | #!/usr/bin/env python3 | 3.040607 | 3 |
shell/diskwalk_api.py | basicworld/studys | 0 | 6632583 | <filename>shell/diskwalk_api.py<gh_stars>0
#!/usr/bin/python
#coding:utf-8
#docs: this is a .py script with a function similar to 'tree'
import os
class diskwalk(object):
def __init__(self,path):
self.path=path
def enumeratepaths(self):
'''return the path to all the files in a directory'''
path_collection=[]
for dirpath, dirnames, filenames in os.walk(self.path):
for file in filenames:
fullpath=os.path.join(dirpath,file)
path_collection.append(fullpath)
return path_collection
def enumeratefiles(self):
'''return all the files in a directory'''
file_collection=[]
for dirpath, dirnames, filenames in os.walk(self.path):
for file in filenames:
file_collection.append(file)
return file_collection
def enumeratedir(self):
'''return all the directories in a directory'''
dir_collection=[]
for dirpath, dirnames, filenames in os.walk(self.path):
for dir in dirnames:
dir_collection.append(dir)
return dir_collection
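# Example module usage (a sketch; assumes this file is importable as diskwalk_api):
#   from diskwalk_api import diskwalk
#   d = diskwalk('/tmp')
#   for p in d.enumeratepaths():
#       print p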
if __name__=='__main__':
default_path=os.getcwd()
while True:
print 'please input your path([y|yes|Enter] for', default_path,'):'
path=raw_input()
if (path=='yes' or path=='y' or path==''): path ='./'
try:
os.chdir(path)
break
except OSError:
continue
disk=diskwalk(path)
print '---------Recursive listening of all paths in a dir---------'
if len(disk.enumeratepaths())>0:
for path in disk.enumeratepaths():
print path
else:
print '!!No result!!'
print '\n---------Recursive listening of all files in a dir---------'
if len(disk.enumeratefiles())>0:
for file in disk.enumeratefiles():
print file
else:
print '!!No result!!'
print '\n---------Recursive listening of all dirs in a dir---------'
if len(disk.enumeratedir())>0:
for dir in disk.enumeratedir():
print dir
else:
print '!!No result!!'
| en | 0.659191 | #!/usr/bin/python #coding:utf-8 #docs:this is a .sh with function similiar to 'tree' return the path to all the files in a directory return all the files in a directory return all the directories in a directory | 3.277084 | 3 |
gedit/.local/share/gedit/plugins/pair_char_lang.py | bradleyfrank/dotfiles-archive | 0 | 6632584 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Programming language pair char support
#
# The default set is used if the language is not specified below. The plain
# set is used for plain text, or when the document has no specified language.
#
lang('default', '(){}[]""\'\'``')
lang('changelog', '(){}[]""<>')
lang('html', '(){}[]""<>')
lang('ruby', '(){}[]""\'\'``||')
lang('xml', '(){}[]""<>')
lang('php', '(){}[]""<>')
lang('plain', '(){}[]""')
lang('latex', '(){}[]""$$`\'') | en | 0.675991 | # -*- coding: utf-8 -*- # # Programming language pair char support # # The default set is used if the language is not specified below. The plain # set is used for plain text, or when the document has no specified language. # | 1.881187 | 2 |
ibis/tests/expr/test_interactive.py | kvemani/ibis | 0 | 6632585 | <gh_stars>0
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
import ibis.config as config
from ibis.tests.expr.mocks import MockBackend
class TestInteractiveUse(unittest.TestCase):
def setUp(self):
self.con = MockBackend()
def test_interactive_execute_on_repr(self):
table = self.con.table('functional_alltypes')
expr = table.bigint_col.sum()
with config.option_context('interactive', True):
repr(expr)
assert len(self.con.executed_queries) > 0
def test_repr_png_is_none_in_interactive(self):
table = self.con.table('functional_alltypes')
with config.option_context('interactive', True):
assert table._repr_png_() is None
def test_repr_png_is_not_none_in_not_interactive(self):
pytest.importorskip('ibis.expr.visualize')
table = self.con.table('functional_alltypes')
with config.option_context(
'interactive', False
), config.option_context('graphviz_repr', True):
assert table._repr_png_() is not None
# XXX This test is failing in the OmniSciDB/Spark build, and working
    # in the rest, even though it does not seem to depend on the backend.
# For some reason in that build the statement does not contain
# the LIMIT. Xfailing with `strict=False` since in the other backends
# it does work. See #2337
@pytest.mark.xfail(
reason='Not obvious why this is failing for omnisci/spark, and this '
'was incorrectly skipped until now. Xfailing to restore the CI',
strict=False,
)
def test_default_limit(self):
table = self.con.table('functional_alltypes')
with config.option_context('interactive', True):
repr(table)
expected = """\
SELECT *
FROM functional_alltypes
LIMIT {}""".format(
config.options.sql.default_limit
)
assert self.con.executed_queries[0] == expected
def test_respect_set_limit(self):
table = self.con.table('functional_alltypes').limit(10)
with config.option_context('interactive', True):
repr(table)
expected = """\
SELECT *
FROM functional_alltypes
LIMIT 10"""
assert self.con.executed_queries[0] == expected
def test_disable_query_limit(self):
table = self.con.table('functional_alltypes')
with config.option_context('interactive', True):
with config.option_context('sql.default_limit', None):
repr(table)
expected = """\
SELECT *
FROM functional_alltypes"""
assert self.con.executed_queries[0] == expected
def test_interactive_non_compilable_repr_not_fail(self):
# #170
table = self.con.table('functional_alltypes')
expr = table.string_col.topk(3)
# it works!
with config.option_context('interactive', True):
repr(expr)
def test_histogram_repr_no_query_execute(self):
t = self.con.table('functional_alltypes')
tier = t.double_col.histogram(10).name('bucket')
expr = t.group_by(tier).size()
with config.option_context('interactive', True):
expr._repr()
assert self.con.executed_queries == []
def test_compile_no_execute(self):
t = self.con.table('functional_alltypes')
t.double_col.sum().compile()
assert self.con.executed_queries == []
    def test_isin_rule_suppressed_exception_repr_not_fail(self):
with config.option_context('interactive', True):
t = self.con.table('functional_alltypes')
bool_clause = t['string_col'].notin(['1', '4', '7'])
expr = t[bool_clause]['string_col'].value_counts()
repr(expr) | en | 0.848738 | # Copyright 2014 Cloudera Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # XXX This test is failing in the OmniSciDB/Spark build, and working # in the rest, even if does not seem to depend on the backend. # For some reason in that build the statement does not contain # the LIMIT. Xfailing with `strict=False` since in the other backends # it does work. See #2337 \ SELECT * FROM functional_alltypes LIMIT {} \ SELECT * FROM functional_alltypes LIMIT 10 \ SELECT * FROM functional_alltypes # #170 # it works! | 1.548487 | 2 |
tnn/efficientgaternn.py | neuroailab/tnn | 88 | 6632586 | <gh_stars>10-100
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMStateTuple
import tfutils.model
from tnn.cell import *
from tnn.main import _get_func_from_kwargs
try:
from tfutils.model import conv, depth_conv
except:
from tfutils.model_tool_old import conv, depth_conv
import copy
def ksize(val):
if isinstance(val, float):
return [int(val), int(val)]
elif isinstance(val, int):
return [val, val]
else:
return val
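# ksize normalizes a filter-size argument to a [height, width] pair, e.g.
# ksize(3) -> [3, 3], ksize(7.0) -> [7, 7], ksize([1, 7]) -> [1, 7].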
class ConvRNNCell(object):
"""Abstract object representing an Convolutional RNN cell.
"""
def __init__(self, shape, out_depth, scope):
self.shape = shape
self._out_depth = out_depth
self.scope = scope
def __call__(self, inputs, state=None, fb_input=None, res_input=None, **training_kwargs):
"""Run this RNN cell on inputs, starting from the given state.
"""
        with tf.variable_scope(type(self).__name__ + '_' + self.scope):  # scope: "ConvRNNCell_" + self.scope
self.next_state = state
output = tf.identity(inputs, name="convrnn_cell_passthrough")
return output, self.next_state
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
tensor of shape '[batch_size x shape[0] x shape[1] x out_depth]
filled with zeros
"""
return None
# shape = self.shape
# out_depth = self._out_depth
# zeros = tf.zeros([batch_size, shape[0], shape[1], out_depth], dtype=dtype)
# return zeros
class EfficientGateCell(ConvRNNCell):
"""
"""
def __init__(self,
shape,
out_depth,
tau_filter_size,
in_depth=None,
cell_depth=0,
bypass_state=False,
gate_filter_size=[3,3],
feedback_filter_size=[1,1],
activation="swish",
gate_nonlinearity=tf.nn.sigmoid,
kernel_initializer='normal',
kernel_initializer_kwargs={},
weight_decay=None,
batch_norm=False,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5,
batch_norm_gamma_init=1.0,
bn_trainable=True,
crossdevice_bn_kwargs={},
group_norm=False,
num_groups=32,
strides=1,
se_ratio=0,
residual_add=False,
res_cell=False
):
"""
Initialize the memory function of the EfficientGateCell.
"""
# shapes and filter sizes
self.shape = shape # [H, W]
self.tau_filter_size = ksize(tau_filter_size)
self.gate_filter_size = ksize(gate_filter_size)
self.feedback_filter_size = ksize(feedback_filter_size)
self.out_depth = out_depth
self.in_depth = self.out_depth if in_depth is None else in_depth
self.res_cell = res_cell
self.cell_depth = cell_depth
self.bypass_state = bypass_state
self.strides = strides
self.residual_add = residual_add
# functions
self._relu = activation
self._se_ratio = se_ratio
self._se = tf.identity if not se_ratio \
else lambda x, rC: squeeze_and_excitation(
x, rC, activation=self._relu,
kernel_init=kernel_initializer,
kernel_init_kwargs=kernel_initializer_kwargs
)
# function kwargs
self.bn_kwargs = {
'batch_norm': batch_norm and not group_norm,
'group_norm': group_norm,
'num_groups': num_groups,
'batch_norm_decay': batch_norm_decay,
'batch_norm_epsilon': batch_norm_epsilon,
'batch_norm_gamma_init': batch_norm_gamma_init,
'bn_trainable': bn_trainable,
'crossdevice_bn_kwargs': crossdevice_bn_kwargs
}
self.conv_kwargs = {
'strides': self.strides,
'kernel_init': kernel_initializer,
'kernel_init_kwargs': kernel_initializer_kwargs,
'weight_decay': weight_decay,
'data_format': 'channels_last',
'padding': 'SAME',
'use_bias': False
}
def zero_state(self, batch_size, dtype):
"""
Return zero-filled state tensor(s)
"""
return tf.zeros([batch_size, self.shape[0], self.shape[1], self.cell_depth + self.in_depth], dtype=dtype)
def _conv_bn(self, inputs, ksize, scope, out_depth=None, depthwise=False, activation=True):
if out_depth is None:
out_depth = inputs.shape.as_list()[-1]
kwargs = copy.deepcopy(self.conv_kwargs)
kwargs.update(self.bn_kwargs)
kwargs['ksize'] = ksize
kwargs['activation'] = self._relu if activation else None
# def _conv_op(x):
# return depth_conv(x, **kwargs) if depthwise \
# else conv(x, out_depth, **kwargs)
with tf.variable_scope(scope):
inputs = depth_conv(inputs, **kwargs) if depthwise else conv(inputs, out_depth, **kwargs)
return inputs
def __call__(self, inputs, state, fb_input, res_input, is_training=True, **training_kwargs):
"""
"""
# update training-specific kwargs
self.bn_kwargs['is_training'] = is_training
self.bn_kwargs.update({'time_suffix': training_kwargs.get('time_suffix', None),
'time_sep': training_kwargs.get('time_sep', True)}) # time suffix
self.res_depth = res_input.shape.as_list()[-1] if res_input is not None else self.out_depth
# print("bn kwargs", self.bn_kwargs['time_suffix'])
# get previous state
prev_cell, prev_state = tf.split(value=state, num_or_size_splits=[self.cell_depth, self.in_depth], axis=3, name="state_split")
# updates
with tf.variable_scope(type(self).__name__): # "EfficientGateCell"
update = tf.zeros_like(inputs)
# combine fb input with ff input
if fb_input is not None:
update += self._conv_bn(fb_input, self.feedback_filter_size, out_depth=self.in_depth, depthwise=False, activation=True, scope="feedback_to_state")
print("added feedback: %s of shape %s" % (fb_input.name, fb_input.shape.as_list()))
# update the state with a kxk depthwise conv/bn/relu
update += self._conv_bn(inputs + prev_state, self.tau_filter_size, depthwise=True, activation=True, scope="state_to_state")
next_state = prev_state + update
# update the cell TODO
if self.res_cell:
assert res_input is not None
next_cell = res_input
else:
next_cell = prev_cell
# depthwise conv on expanded state, then squeeze-excitation, channel reduction, residual add
inp = next_state if not self.bypass_state else (inputs + prev_state)
print("bypassed state?", self.bypass_state)
next_out = self._se(inp, self._se_ratio * self.res_depth)
next_out = self._conv_bn(next_out, [1,1], out_depth=self.out_depth, depthwise=False, activation=False, scope="state_to_out")
if (res_input is not None) and (res_input.shape.as_list() == next_out.shape.as_list()):
next_out = drop_connect(next_out, self.bn_kwargs['is_training'], training_kwargs['drop_connect_rate'])
print("drop connect/residual adding", training_kwargs['drop_connect_rate'], res_input.name, res_input.shape.as_list())
next_out = tf.add(next_out, res_input)
elif (res_input is not None) and self.residual_add: # add the matching channels with resize if necessary
next_out = drop_connect(next_out, self.bn_kwargs['is_training'], training_kwargs['drop_connect_rate'])
next_out, remainder = tf.split(next_out, [res_input.shape.as_list()[-1], -1], axis=-1)
if res_input.shape.as_list()[1:3] != self.shape:
res_input = tf.image.resize_images(res_input, size=self.shape)
next_out = tf.add(next_out, res_input)
next_out = tf.concat([next_out, remainder], axis=-1)
print("added matching channels", next_out.shape.as_list())
# concat back on the cell
next_state = tf.concat([next_cell, next_state], axis=3, name="cell_concat_next_state")
return next_out, next_state
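# Summary of the EfficientGateCell call contract (comments added for readability;
# the values shown are illustrative):
#   cell = EfficientGateCell(shape=[H, W], out_depth=C_out, in_depth=C_in,
#                            tau_filter_size=[3, 3])
#   state = cell.zero_state(batch_size, dtype)   # [B, H, W, cell_depth + C_in]
#   out, state = cell(inputs, state, fb_input, res_input, is_training=True,
#                     time_suffix='t0', time_sep=True, drop_connect_rate=1.0)
# fb_input (feedback) and res_input (residual/bypass) may be None; the cell is
# called once per model timestep with the returned state fed back in.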
class tnn_EfficientGateCell(ConvRNNCell):
def __init__(self,
harbor_shape,
harbor=(harbor, None),
pre_memory=None,
memory=(memory, None),
post_memory=None,
input_init=(tf.zeros, None),
state_init=(tf.zeros, None),
dtype=tf.float32,
name=None):
self.harbor_shape = harbor_shape
self.harbor = harbor if harbor[1] is not None else (harbor[0], {})
self.pre_memory = pre_memory
self.memory = memory if memory[1] is not None else (memory[0], {})
self.post_memory = post_memory
self.input_init = input_init if input_init[1] is not None else (input_init[0], {})
self.state_init = state_init if state_init[1] is not None else (state_init[0], {})
self.dtype_tmp = dtype
self.name_tmp = name
self._reuse = None
self.internal_time = 0
self.max_internal_time = self.memory[1].get('max_internal_time', None)
        # Kwargs for training/validation
self.is_training = self.memory[1].get('is_training', True)
self.training_kwargs = {
'time_sep': self.memory[1].get('time_sep', True),
'dropout_rate': self.memory[1].get('dropout_rate', 1),
'drop_connect_rate': self.memory[1].get('drop_connect_rate', 1)
}
### Memory includes both a typical ConvRNN cell (optional) and an IntegratedGraphCell (optional) ###
self.convrnn_cell_kwargs = self.memory[1].get('convrnn_cell_kwargs', {})
idx = [i for i in range(len(self.pre_memory)) if 'out_depth' in self.pre_memory[i][1]]
idx = idx[-1] if len(idx) else None
self.pre_memory_out_depth = self.pre_memory[idx][1]['out_depth'] if idx is not None else self.harbor_shape[-1]
self._pre_conv_idx = idx
if self.memory[1].get('convrnn_cell', None) == "EfficientGateCell":
self.convrnn_cell_kwargs['shape'] = self.harbor_shape[-3:-1] # set shape of memory layer
if 'in_depth' not in self.convrnn_cell_kwargs:
self.convrnn_cell_kwargs['in_depth'] = self.pre_memory_out_depth # the expansion width
if 'out_depth' not in self.convrnn_cell_kwargs:
self.convrnn_cell_kwargs['out_depth'] = self.harbor_shape[-1] # channels coming out of harbor
self.convrnn_cell = EfficientGateCell(**self.convrnn_cell_kwargs)
else:
self.convrnn_cell_kwargs['out_depth'] = self.pre_memory_out_depth
self.convrnn_cell = ConvRNNCell(shape=self.convrnn_cell_kwargs.get('shape', None),
out_depth=self.convrnn_cell_kwargs['out_depth'],
scope="0")
# not used in this cell
self.graph_cell = ConvRNNCell(shape=self.convrnn_cell_kwargs.get('shape', None),
out_depth=self.convrnn_cell_kwargs['out_depth'],
scope="1")
def __call__(self, inputs=None, state=None):
with tf.variable_scope(self.name_tmp, reuse=self._reuse):
# state initializer
if inputs is None:
inputs = [self.input_init[0](shape=self.harbor_shape, **self.input_init[1])]
# Pass inputs through harbor
fb_input = None
if self.memory[1].get('convrnn_cell', None) in ["EfficientGateCell"]:
if len(inputs) == 1:
ff_idx = 0
output = self.harbor[0](inputs, self.harbor_shape, self.name_tmp, reuse=self._reuse, **self.harbor[1])
elif len(inputs) > 1:
for j, inp in enumerate(inputs):
if self.pre_memory[self._pre_conv_idx][1]['input_name'] in inp.name:
ff_inpnm = inp.name
ff_idx = j
ff_depth = inputs[ff_idx].shape.as_list()[-1]
output = self.harbor[0](inputs, self.harbor_shape, self.name_tmp, ff_inpnm=ff_inpnm, reuse=self._reuse, **self.harbor[1])
fb_depth = output.shape.as_list()[-1] - ff_depth
if self.harbor[1]['channel_op'] == 'concat':
output, fb_input = tf.split(output, num_or_size_splits=[ff_depth, fb_depth], axis=3)
fb_size = fb_input.shape.as_list()[1:3]
else:
ff_idx = None
output = self.harbor[0](inputs, self.harbor_shape, self.name_tmp, reuse=self._reuse, **self.harbor[1])
# pre_memory usually contains feedforward convolutions, etc.
res_input = None
curr_time_suffix = 't' + str(self.internal_time)
pre_name_counter = 0
for function, kwargs in self.pre_memory:
with tf.variable_scope("pre_" + str(pre_name_counter), reuse=self._reuse):
if kwargs.get('time_sep', False):
kwargs['time_suffix'] = curr_time_suffix # for scoping unshared BN
if function.__name__ == "component_conv":
ff_inputs = [inputs[ff_idx]] if ff_idx is not None else inputs
if kwargs.get('return_input', False):
output, res_input = function(output, ff_inputs, **kwargs) # component_conv needs to know the inputs
else:
output = function(output, ff_inputs, **kwargs) # component_conv needs to know the inputs
else:
output = function(output, **kwargs)
# output = tf.Print(output, [tf.shape(output), output.name.split('/')[-1], tf.reduce_max(output)], message="output of %s" % tf.get_variable_scope().name)
pre_name_counter += 1
# memory for this TNN layer includes an optional convrnn_cell
self.next_state = {}
if state is None or state.get('convrnn_cell_state', None) is None:
batch_size = output.shape.as_list()[0]
convrnn_cell_state = self.convrnn_cell.zero_state(batch_size, dtype=self.dtype_tmp)
state = {'convrnn_cell_state': convrnn_cell_state}
# resize fb if there was a strided convolution, for instance
ff_size = output.shape.as_list()[1:3]
if fb_input is not None:
if fb_size != ff_size:
fb_input = tf.image.resize_images(fb_input, size=ff_size)
self.training_kwargs['time_suffix'] = curr_time_suffix
output, convrnn_cell_state = self.convrnn_cell(output, state['convrnn_cell_state'], fb_input=fb_input, res_input=res_input, is_training=self.is_training, **self.training_kwargs)
self.next_state['convrnn_cell_state'] = convrnn_cell_state
# graph cell is not used here currently
if state is None or state.get('graph_cell_state', None) is None:
batch_size = output.shape.as_list()[0]
graph_cell_state = self.graph_cell.zero_state(batch_size, dtype=self.dtype_tmp)
state = {'graph_cell_state': graph_cell_state}
output, graph_cell_state = self.graph_cell(output, state['graph_cell_state'])
self.next_state['graph_cell_state'] = graph_cell_state
# post memory functions (e.g. more convs, pooling)
post_name_counter = 0
for function, kwargs in self.post_memory:
with tf.variable_scope("post_" + str(post_name_counter), reuse=self._reuse):
if kwargs.get('time_sep', False):
kwargs['time_suffix'] = curr_time_suffix # for scoping unshared BN
if function.__name__ == "component_conv":
output = function(output, inputs, **kwargs)
elif function.__name__ == "residual_add":
output = function(output, res_input, is_training=self.is_training, drop_connect_rate=self.training_kwargs['drop_connect_rate'], **kwargs)
else:
output = function(output, **kwargs)
post_name_counter += 1
# layer output
self.output_tmp = tf.identity(tf.cast(output, self.dtype_tmp), name='output')
self._reuse = True
if (self.max_internal_time is None) or ((self.max_internal_time is not None) and (self.internal_time < self.max_internal_time)):
self.internal_time += 1
return self.output_tmp, self.next_state
| import tensorflow as tf
from tensorflow.contrib.rnn import LSTMStateTuple
import tfutils.model
from tnn.cell import *
from tnn.main import _get_func_from_kwargs
try:
from tfutils.model import conv, depth_conv
except ImportError:
from tfutils.model_tool_old import conv, depth_conv
import copy
def ksize(val):
if isinstance(val, float):
return [int(val), int(val)]
elif isinstance(val, int):
return [val, val]
else:
return val
class ConvRNNCell(object):
"""Abstract object representing an Convolutional RNN cell.
"""
def __init__(self, shape, out_depth, scope):
self.shape = shape
self._out_depth = out_depth
self.scope = scope
def __call__(self, inputs, state=None, fb_input=None, res_input=None, **training_kwargs):
"""Run this RNN cell on inputs, starting from the given state.
"""
with tf.variable_scope(type(self).__name__ + '_' + self.scope): # scope name is "ConvRNNCell_" + self.scope
self.next_state = state
output = tf.identity(inputs, name="convrnn_cell_passthrough")
return output, self.next_state
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
tensor of shape [batch_size, shape[0], shape[1], out_depth]
filled with zeros
"""
return None
# shape = self.shape
# out_depth = self._out_depth
# zeros = tf.zeros([batch_size, shape[0], shape[1], out_depth], dtype=dtype)
# return zeros
class EfficientGateCell(ConvRNNCell):
"""
"""
def __init__(self,
shape,
out_depth,
tau_filter_size,
in_depth=None,
cell_depth=0,
bypass_state=False,
gate_filter_size=[3,3],
feedback_filter_size=[1,1],
activation="swish",
gate_nonlinearity=tf.nn.sigmoid,
kernel_initializer='normal',
kernel_initializer_kwargs={},
weight_decay=None,
batch_norm=False,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5,
batch_norm_gamma_init=1.0,
bn_trainable=True,
crossdevice_bn_kwargs={},
group_norm=False,
num_groups=32,
strides=1,
se_ratio=0,
residual_add=False,
res_cell=False
):
"""
Initialize the memory function of the EfficientGateCell.
"""
# shapes and filter sizes
self.shape = shape # [H, W]
self.tau_filter_size = ksize(tau_filter_size)
self.gate_filter_size = ksize(gate_filter_size)
self.feedback_filter_size = ksize(feedback_filter_size)
self.out_depth = out_depth
self.in_depth = self.out_depth if in_depth is None else in_depth
self.res_cell = res_cell
self.cell_depth = cell_depth
self.bypass_state = bypass_state
self.strides = strides
self.residual_add = residual_add
# functions
self._relu = activation
self._se_ratio = se_ratio
self._se = tf.identity if not se_ratio \
else lambda x, rC: squeeze_and_excitation(
x, rC, activation=self._relu,
kernel_init=kernel_initializer,
kernel_init_kwargs=kernel_initializer_kwargs
)
# function kwargs
self.bn_kwargs = {
'batch_norm': batch_norm and not group_norm,
'group_norm': group_norm,
'num_groups': num_groups,
'batch_norm_decay': batch_norm_decay,
'batch_norm_epsilon': batch_norm_epsilon,
'batch_norm_gamma_init': batch_norm_gamma_init,
'bn_trainable': bn_trainable,
'crossdevice_bn_kwargs': crossdevice_bn_kwargs
}
self.conv_kwargs = {
'strides': self.strides,
'kernel_init': kernel_initializer,
'kernel_init_kwargs': kernel_initializer_kwargs,
'weight_decay': weight_decay,
'data_format': 'channels_last',
'padding': 'SAME',
'use_bias': False
}
def zero_state(self, batch_size, dtype):
"""
Return zero-filled state tensor(s)
"""
return tf.zeros([batch_size, self.shape[0], self.shape[1], self.cell_depth + self.in_depth], dtype=dtype)
def _conv_bn(self, inputs, ksize, scope, out_depth=None, depthwise=False, activation=True):
if out_depth is None:
out_depth = inputs.shape.as_list()[-1]
kwargs = copy.deepcopy(self.conv_kwargs)
kwargs.update(self.bn_kwargs)
kwargs['ksize'] = ksize
kwargs['activation'] = self._relu if activation else None
# def _conv_op(x):
# return depth_conv(x, **kwargs) if depthwise \
# else conv(x, out_depth, **kwargs)
with tf.variable_scope(scope):
inputs = depth_conv(inputs, **kwargs) if depthwise else conv(inputs, out_depth, **kwargs)
return inputs
def __call__(self, inputs, state, fb_input, res_input, is_training=True, **training_kwargs):
"""
"""
# update training-specific kwargs
self.bn_kwargs['is_training'] = is_training
self.bn_kwargs.update({'time_suffix': training_kwargs.get('time_suffix', None),
'time_sep': training_kwargs.get('time_sep', True)}) # time suffix
self.res_depth = res_input.shape.as_list()[-1] if res_input is not None else self.out_depth
# print("bn kwargs", self.bn_kwargs['time_suffix'])
# get previous state
prev_cell, prev_state = tf.split(value=state, num_or_size_splits=[self.cell_depth, self.in_depth], axis=3, name="state_split")
# updates
with tf.variable_scope(type(self).__name__): # "EfficientGateCell"
update = tf.zeros_like(inputs)
# combine fb input with ff input
if fb_input is not None:
update += self._conv_bn(fb_input, self.feedback_filter_size, out_depth=self.in_depth, depthwise=False, activation=True, scope="feedback_to_state")
print("added feedback: %s of shape %s" % (fb_input.name, fb_input.shape.as_list()))
# update the state with a kxk depthwise conv/bn/relu
update += self._conv_bn(inputs + prev_state, self.tau_filter_size, depthwise=True, activation=True, scope="state_to_state")
next_state = prev_state + update
# update the cell TODO
if self.res_cell:
assert res_input is not None
next_cell = res_input
else:
next_cell = prev_cell
# depthwise conv on expanded state, then squeeze-excitation, channel reduction, residual add
inp = next_state if not self.bypass_state else (inputs + prev_state)
print("bypassed state?", self.bypass_state)
next_out = self._se(inp, self._se_ratio * self.res_depth)
next_out = self._conv_bn(next_out, [1,1], out_depth=self.out_depth, depthwise=False, activation=False, scope="state_to_out")
if (res_input is not None) and (res_input.shape.as_list() == next_out.shape.as_list()):
next_out = drop_connect(next_out, self.bn_kwargs['is_training'], training_kwargs['drop_connect_rate'])
print("drop connect/residual adding", training_kwargs['drop_connect_rate'], res_input.name, res_input.shape.as_list())
next_out = tf.add(next_out, res_input)
elif (res_input is not None) and self.residual_add: # add the matching channels with resize if necessary
next_out = drop_connect(next_out, self.bn_kwargs['is_training'], training_kwargs['drop_connect_rate'])
next_out, remainder = tf.split(next_out, [res_input.shape.as_list()[-1], -1], axis=-1)
if res_input.shape.as_list()[1:3] != self.shape:
res_input = tf.image.resize_images(res_input, size=self.shape)
next_out = tf.add(next_out, res_input)
next_out = tf.concat([next_out, remainder], axis=-1)
print("added matching channels", next_out.shape.as_list())
# concat back on the cell
next_state = tf.concat([next_cell, next_state], axis=3, name="cell_concat_next_state")
return next_out, next_state
class tnn_EfficientGateCell(ConvRNNCell):
def __init__(self,
harbor_shape,
harbor=(harbor, None),
pre_memory=None,
memory=(memory, None),
post_memory=None,
input_init=(tf.zeros, None),
state_init=(tf.zeros, None),
dtype=tf.float32,
name=None):
self.harbor_shape = harbor_shape
self.harbor = harbor if harbor[1] is not None else (harbor[0], {})
self.pre_memory = pre_memory
self.memory = memory if memory[1] is not None else (memory[0], {})
self.post_memory = post_memory
self.input_init = input_init if input_init[1] is not None else (input_init[0], {})
self.state_init = state_init if state_init[1] is not None else (state_init[0], {})
self.dtype_tmp = dtype
self.name_tmp = name
self._reuse = None
self.internal_time = 0
self.max_internal_time = self.memory[1].get('max_internal_time', None)
# Kwargs for training/validation
self.is_training = self.memory[1].get('is_training', True)
self.training_kwargs = {
'time_sep': self.memory[1].get('time_sep', True),
'dropout_rate': self.memory[1].get('dropout_rate', 1),
'drop_connect_rate': self.memory[1].get('drop_connect_rate', 1)
}
### Memory includes both a typical ConvRNN cell (optional) and an IntegratedGraphCell (optional) ###
self.convrnn_cell_kwargs = self.memory[1].get('convrnn_cell_kwargs', {})
idx = [i for i in range(len(self.pre_memory)) if 'out_depth' in self.pre_memory[i][1]]
idx = idx[-1] if len(idx) else None
self.pre_memory_out_depth = self.pre_memory[idx][1]['out_depth'] if idx is not None else self.harbor_shape[-1]
self._pre_conv_idx = idx
if self.memory[1].get('convrnn_cell', None) == "EfficientGateCell":
self.convrnn_cell_kwargs['shape'] = self.harbor_shape[-3:-1] # set shape of memory layer
if 'in_depth' not in self.convrnn_cell_kwargs:
self.convrnn_cell_kwargs['in_depth'] = self.pre_memory_out_depth # the expansion width
if 'out_depth' not in self.convrnn_cell_kwargs:
self.convrnn_cell_kwargs['out_depth'] = self.harbor_shape[-1] # channels coming out of harbor
self.convrnn_cell = EfficientGateCell(**self.convrnn_cell_kwargs)
else:
self.convrnn_cell_kwargs['out_depth'] = self.pre_memory_out_depth
self.convrnn_cell = ConvRNNCell(shape=self.convrnn_cell_kwargs.get('shape', None),
out_depth=self.convrnn_cell_kwargs['out_depth'],
scope="0")
# not used in this cell
self.graph_cell = ConvRNNCell(shape=self.convrnn_cell_kwargs.get('shape', None),
out_depth=self.convrnn_cell_kwargs['out_depth'],
scope="1")
def __call__(self, inputs=None, state=None):
with tf.variable_scope(self.name_tmp, reuse=self._reuse):
# state initializer
if inputs is None:
inputs = [self.input_init[0](shape=self.harbor_shape, **self.input_init[1])]
# Pass inputs through harbor
fb_input = None
if self.memory[1].get('convrnn_cell', None) in ["EfficientGateCell"]:
if len(inputs) == 1:
ff_idx = 0
output = self.harbor[0](inputs, self.harbor_shape, self.name_tmp, reuse=self._reuse, **self.harbor[1])
elif len(inputs) > 1:
for j, inp in enumerate(inputs):
if self.pre_memory[self._pre_conv_idx][1]['input_name'] in inp.name:
ff_inpnm = inp.name
ff_idx = j
ff_depth = inputs[ff_idx].shape.as_list()[-1]
output = self.harbor[0](inputs, self.harbor_shape, self.name_tmp, ff_inpnm=ff_inpnm, reuse=self._reuse, **self.harbor[1])
fb_depth = output.shape.as_list()[-1] - ff_depth
if self.harbor[1]['channel_op'] == 'concat':
output, fb_input = tf.split(output, num_or_size_splits=[ff_depth, fb_depth], axis=3)
fb_size = fb_input.shape.as_list()[1:3]
else:
ff_idx = None
output = self.harbor[0](inputs, self.harbor_shape, self.name_tmp, reuse=self._reuse, **self.harbor[1])
# pre_memory usually contains feedforward convolutions, etc.
res_input = None
curr_time_suffix = 't' + str(self.internal_time)
pre_name_counter = 0
for function, kwargs in self.pre_memory:
with tf.variable_scope("pre_" + str(pre_name_counter), reuse=self._reuse):
if kwargs.get('time_sep', False):
kwargs['time_suffix'] = curr_time_suffix # for scoping unshared BN
if function.__name__ == "component_conv":
ff_inputs = [inputs[ff_idx]] if ff_idx is not None else inputs
if kwargs.get('return_input', False):
output, res_input = function(output, ff_inputs, **kwargs) # component_conv needs to know the inputs
else:
output = function(output, ff_inputs, **kwargs) # component_conv needs to know the inputs
else:
output = function(output, **kwargs)
# output = tf.Print(output, [tf.shape(output), output.name.split('/')[-1], tf.reduce_max(output)], message="output of %s" % tf.get_variable_scope().name)
pre_name_counter += 1
# memory for this TNN layer includes an optional convrnn_cell
self.next_state = {}
if state is None or state.get('convrnn_cell_state', None) is None:
batch_size = output.shape.as_list()[0]
convrnn_cell_state = self.convrnn_cell.zero_state(batch_size, dtype=self.dtype_tmp)
state = {'convrnn_cell_state': convrnn_cell_state}
# resize fb if there was a strided convolution, for instance
ff_size = output.shape.as_list()[1:3]
if fb_input is not None:
if fb_size != ff_size:
fb_input = tf.image.resize_images(fb_input, size=ff_size)
self.training_kwargs['time_suffix'] = curr_time_suffix
output, convrnn_cell_state = self.convrnn_cell(output, state['convrnn_cell_state'], fb_input=fb_input, res_input=res_input, is_training=self.is_training, **self.training_kwargs)
self.next_state['convrnn_cell_state'] = convrnn_cell_state
# graph cell is not used here currently
if state is None or state.get('graph_cell_state', None) is None:
batch_size = output.shape.as_list()[0]
graph_cell_state = self.graph_cell.zero_state(batch_size, dtype=self.dtype_tmp)
state = {'graph_cell_state': graph_cell_state}
output, graph_cell_state = self.graph_cell(output, state['graph_cell_state'])
self.next_state['graph_cell_state'] = graph_cell_state
# post memory functions (e.g. more convs, pooling)
post_name_counter = 0
for function, kwargs in self.post_memory:
with tf.variable_scope("post_" + str(post_name_counter), reuse=self._reuse):
if kwargs.get('time_sep', False):
kwargs['time_suffix'] = curr_time_suffix # for scoping unshared BN
if function.__name__ == "component_conv":
output = function(output, inputs, **kwargs)
elif function.__name__ == "residual_add":
output = function(output, res_input, is_training=self.is_training, drop_connect_rate=self.training_kwargs['drop_connect_rate'], **kwargs)
else:
output = function(output, **kwargs)
post_name_counter += 1
# layer output
self.output_tmp = tf.identity(tf.cast(output, self.dtype_tmp), name='output')
self._reuse = True
if (self.max_internal_time is None) or ((self.max_internal_time is not None) and (self.internal_time < self.max_internal_time)):
self.internal_time += 1
return self.output_tmp, self.next_state | en | 0.654634 | Abstract object representing an Convolutional RNN cell. Run this RNN cell on inputs, starting from the given state. # "ConvRNNCell + self.scope# size(s) of state(s) used by this cell. Integer or TensorShape: size of outputs produced by this cell. Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. dtype: the data type to use for the state. Returns: tensor of shape '[batch_size x shape[0] x shape[1] x out_depth] filled with zeros # shape = self.shape # out_depth = self._out_depth # zeros = tf.zeros([batch_size, shape[0], shape[1], out_depth], dtype=dtype) # return zeros Initialize the memory function of the EfficientGateCell. # shapes and filter sizes # [H, W] # functions # function kwargs Return zero-filled state tensor(s) # def _conv_op(x): # return depth_conv(x, **kwargs) if depthwise \ # else conv(x, out_depth, **kwargs) # update training-specific kwargs # time suffix # print("bn kwargs", self.bn_kwargs['time_suffix']) # get previous state # updates # "EfficientGateCell" # combine fb input with ff input # update the state with a kxk depthwise conv/bn/relu # update the cell TODO # depthwise conv on expanded state, then squeeze-excitation, channel reduction, residual add # add the matching channels with resize if necessary # concat back on the cell # Kwargs for trainining/validation" ### Memory includes both a typical ConvRNN cell (optional) and an IntegratedGraphCell (optional) ### # set shape of memory layer # the expansion width # channels coming out of harbor # not used in this cell # state initializer # Pass inputs through harbor # pre_memory usually contains feedforward convolutions, etc. # for scoping unshared BN # component_conv needs to know the inputs # component_conv needs to know the inputs # output = tf.Print(output, [tf.shape(output), output.name.split('/')[-1], tf.reduce_max(output)], message="output of %s" % tf.get_variable_scope().name) # memory for this TNN layer includes an optional convrnn_cell # resize fb if there was a strided convolution, for instance # graph cell is not used here currently # post memory functions (e.g. more convs, pooling) # for scoping unshared BN # layer output | 2.587086 | 3 |
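The recurrence implemented by EfficientGateCell.__call__ above is, at its core, an additive state update (a feedback projection plus a transform of inputs + previous state) followed by a 1x1 channel reduction and an optional residual add. The NumPy sketch below only illustrates that data flow under simplified assumptions — random matrices and tanh stand in for the learned conv/depth_conv/BN layers from tfutils, and drop_connect/squeeze-excitation are omitted — so it is an illustration of the recurrence, not the TensorFlow implementation.

import numpy as np

# Toy shapes: batch 2, 4x4 spatial, 8 state channels, 8 output channels.
B, H, W, C = 2, 4, 4, 8

def pointwise(x, w):
    # 1x1 "convolution" as a channel-mixing matmul, a stand-in for conv(..., ksize=[1, 1]).
    return x @ w

rng = np.random.default_rng(0)
w_fb = rng.normal(size=(C, C)) * 0.1   # stand-in for the feedback_to_state conv
w_out = rng.normal(size=(C, C)) * 0.1  # stand-in for the state_to_out conv

inputs = rng.normal(size=(B, H, W, C))
prev_state = np.zeros((B, H, W, C))
fb_input = rng.normal(size=(B, H, W, C))
res_input = inputs                      # residual path, as when res_input matches next_out

# Additive update, mirroring: update = feedback projection + transform(inputs + prev_state)
update = pointwise(fb_input, w_fb) + np.tanh(inputs + prev_state)  # tanh stands in for conv/bn/relu
next_state = prev_state + update

# Output head: channel reduction then residual add (drop_connect and SE omitted).
next_out = pointwise(next_state, w_out) + res_input
print(next_out.shape, next_state.shape)  # (2, 4, 4, 8) (2, 4, 4, 8)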
scripts/artifacts/sms.py | xperylabhub/iLEAPP | 0 | 6632587 | import os
import pandas as pd
import shutil
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly, sanitize_file_name
from scripts.chat_rendering import render_chat, chat_HTML
def get_sms(files_found, report_folder, seeker):
file_found = str(files_found[0])
db = open_sqlite_db_readonly(file_found)
sms_df = pd.read_sql_query('''
SELECT
CASE
WHEN LENGTH(MESSAGE.DATE)=18 THEN DATETIME(MESSAGE.DATE/1000000000+978307200,'UNIXEPOCH')
WHEN LENGTH(MESSAGE.DATE)=9 THEN DATETIME(MESSAGE.DATE + 978307200,'UNIXEPOCH')
ELSE "N/A"
END "MESSAGE DATE",
MESSAGE.ROWID as "MESSAGE ID",
CASE
WHEN LENGTH(MESSAGE.DATE_DELIVERED)=18 THEN DATETIME(MESSAGE.DATE_DELIVERED/1000000000+978307200,"UNIXEPOCH")
WHEN LENGTH(MESSAGE.DATE_DELIVERED)=9 THEN DATETIME(MESSAGE.DATE_DELIVERED+978307200,"UNIXEPOCH")
ELSE "N/A"
END "DATE DELIVERED",
CASE
WHEN LENGTH(MESSAGE.DATE_READ)=18 THEN DATETIME(MESSAGE.DATE_READ/1000000000+978307200,"UNIXEPOCH")
WHEN LENGTH(MESSAGE.DATE_READ)=9 THEN DATETIME(MESSAGE.DATE_READ+978307200,"UNIXEPOCH")
ELSE "N/A"
END "DATE READ",
MESSAGE.TEXT as "MESSAGE",
HANDLE.ID AS "CONTACT ID",
MESSAGE.SERVICE AS "SERVICE",
MESSAGE.ACCOUNT AS "ACCOUNT",
MESSAGE.IS_DELIVERED AS "IS DELIVERED",
MESSAGE.IS_FROM_ME AS "IS FROM ME",
ATTACHMENT.FILENAME AS "FILENAME",
ATTACHMENT.MIME_TYPE AS "MIME TYPE",
ATTACHMENT.TRANSFER_NAME AS "TRANSFER TYPE",
ATTACHMENT.TOTAL_BYTES AS "TOTAL BYTES"
FROM MESSAGE
LEFT OUTER JOIN MESSAGE_ATTACHMENT_JOIN ON MESSAGE.ROWID = MESSAGE_ATTACHMENT_JOIN.MESSAGE_ID
LEFT OUTER JOIN ATTACHMENT ON MESSAGE_ATTACHMENT_JOIN.ATTACHMENT_ID = ATTACHMENT.ROWID
LEFT OUTER JOIN HANDLE ON MESSAGE.HANDLE_ID = HANDLE.ROWID
''', db)
usageentries = sms_df.shape[0]
if usageentries > 0:
data_list = sms_df.to_records(index=False)
report = ArtifactHtmlReport('SMS - iMessage')
report.start_artifact_report(report_folder, 'SMS - iMessage')
report.add_script()
sms_df = sms_df.rename(
columns={"MESSAGE DATE": "data-time", 'MESSAGE ID': "message-id", "MESSAGE": "message", "CONTACT ID": "data-name", "IS FROM ME": "from_me", "MIME TYPE": "content-type"}
)
sms_df["data-time"] = pd.to_datetime(sms_df["data-time"])
def copyAttachments(rec):
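# Locate the attachment inside the extraction via the seeker, copy it next to the report,
# and return the relative path consumed by the chat renderer.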
pathToAttachment = None
if rec["FILENAME"]:
attachment = seeker.search('**'+rec["FILENAME"].replace('~', '', 1), return_on_first_hit=True)
if not attachment:
logfunc(' [!] Unable to extract attachment file: "{}"'.format(rec['FILENAME']))
return
if is_platform_windows():
destFileName = sanitize_file_name(os.path.basename(rec["FILENAME"]))
else:
destFileName = os.path.basename(rec["FILENAME"])
pathToAttachment = os.path.join((os.path.basename(os.path.abspath(report_folder))), destFileName)
shutil.copy(attachment[0], os.path.join(report_folder, destFileName))
return pathToAttachment
sms_df["file-path"] = sms_df.apply(lambda rec: copyAttachments(rec), axis=1)
num_entries = sms_df.shape[0]
report.write_minor_header(f'Total number of entries: {num_entries}', 'h6')
if file_found.startswith('\\\\?\\'):
file_found = file_found[4:]
report.write_lead_text(f'SMS - iMessage located at: {file_found}')
report.write_raw_html(chat_HTML)
report.add_script(render_chat(sms_df))
data_headers = ('Message Date', 'Message ID', 'Date Delivered', 'Date Read', 'Message', 'Contact ID', 'Service', 'Account', 'Is Delivered', 'Is from Me', 'Filename', 'MIME Type', 'Transfer Type', 'Total Bytes')
report.write_artifact_data_table(data_headers, data_list, file_found, write_total=False, write_location=False)
report.end_artifact_report()
tsvname = 'SMS - iMessage'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'SMS - iMessage'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No SMS & iMessage data available')
db.close()
return
| import os
import pandas as pd
import shutil
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly, sanitize_file_name
from scripts.chat_rendering import render_chat, chat_HTML
def get_sms(files_found, report_folder, seeker):
file_found = str(files_found[0])
db = open_sqlite_db_readonly(file_found)
sms_df = pd.read_sql_query('''
SELECT
CASE
WHEN LENGTH(MESSAGE.DATE)=18 THEN DATETIME(MESSAGE.DATE/1000000000+978307200,'UNIXEPOCH')
WHEN LENGTH(MESSAGE.DATE)=9 THEN DATETIME(MESSAGE.DATE + 978307200,'UNIXEPOCH')
ELSE "N/A"
END "MESSAGE DATE",
MESSAGE.ROWID as "MESSAGE ID",
CASE
WHEN LENGTH(MESSAGE.DATE_DELIVERED)=18 THEN DATETIME(MESSAGE.DATE_DELIVERED/1000000000+978307200,"UNIXEPOCH")
WHEN LENGTH(MESSAGE.DATE_DELIVERED)=9 THEN DATETIME(MESSAGE.DATE_DELIVERED+978307200,"UNIXEPOCH")
ELSE "N/A"
END "DATE DELIVERED",
CASE
WHEN LENGTH(MESSAGE.DATE_READ)=18 THEN DATETIME(MESSAGE.DATE_READ/1000000000+978307200,"UNIXEPOCH")
WHEN LENGTH(MESSAGE.DATE_READ)=9 THEN DATETIME(MESSAGE.DATE_READ+978307200,"UNIXEPOCH")
ELSE "N/A"
END "DATE READ",
MESSAGE.TEXT as "MESSAGE",
HANDLE.ID AS "CONTACT ID",
MESSAGE.SERVICE AS "SERVICE",
MESSAGE.ACCOUNT AS "ACCOUNT",
MESSAGE.IS_DELIVERED AS "IS DELIVERED",
MESSAGE.IS_FROM_ME AS "IS FROM ME",
ATTACHMENT.FILENAME AS "FILENAME",
ATTACHMENT.MIME_TYPE AS "MIME TYPE",
ATTACHMENT.TRANSFER_NAME AS "TRANSFER TYPE",
ATTACHMENT.TOTAL_BYTES AS "TOTAL BYTES"
FROM MESSAGE
LEFT OUTER JOIN MESSAGE_ATTACHMENT_JOIN ON MESSAGE.ROWID = MESSAGE_ATTACHMENT_JOIN.MESSAGE_ID
LEFT OUTER JOIN ATTACHMENT ON MESSAGE_ATTACHMENT_JOIN.ATTACHMENT_ID = ATTACHMENT.ROWID
LEFT OUTER JOIN HANDLE ON MESSAGE.HANDLE_ID = HANDLE.ROWID
''', db)
usageentries = sms_df.shape[0]
if usageentries > 0:
data_list = sms_df.to_records(index=False)
report = ArtifactHtmlReport('SMS - iMessage')
report.start_artifact_report(report_folder, 'SMS - iMessage')
report.add_script()
sms_df = sms_df.rename(
columns={"MESSAGE DATE": "data-time", 'MESSAGE ID': "message-id", "MESSAGE": "message", "CONTACT ID": "data-name", "IS FROM ME": "from_me", "MIME TYPE": "content-type"}
)
sms_df["data-time"] = pd.to_datetime(sms_df["data-time"])
def copyAttachments(rec):
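# Locate the attachment inside the extraction via the seeker, copy it next to the report,
# and return the relative path consumed by the chat renderer.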
pathToAttachment = None
if rec["FILENAME"]:
attachment = seeker.search('**'+rec["FILENAME"].replace('~', '', 1), return_on_first_hit=True)
if not attachment:
logfunc(' [!] Unable to extract attachment file: "{}"'.format(rec['FILENAME']))
return
if is_platform_windows():
destFileName = sanitize_file_name(os.path.basename(rec["FILENAME"]))
else:
destFileName = os.path.basename(rec["FILENAME"])
pathToAttachment = os.path.join((os.path.basename(os.path.abspath(report_folder))), destFileName)
shutil.copy(attachment[0], os.path.join(report_folder, destFileName))
return pathToAttachment
sms_df["file-path"] = sms_df.apply(lambda rec: copyAttachments(rec), axis=1)
num_entries = sms_df.shape[0]
report.write_minor_header(f'Total number of entries: {num_entries}', 'h6')
if file_found.startswith('\\\\?\\'):
file_found = file_found[4:]
report.write_lead_text(f'SMS - iMessage located at: {file_found}')
report.write_raw_html(chat_HTML)
report.add_script(render_chat(sms_df))
data_headers = ('Message Date', 'Message ID', 'Date Delivered', 'Date Read', 'Message', 'Contact ID', 'Service', 'Account', 'Is Delivered', 'Is from Me', 'Filename', 'MIME Type', 'Transfer Type', 'Total Bytes')
report.write_artifact_data_table(data_headers, data_list, file_found, write_total=False, write_location=False)
report.end_artifact_report()
tsvname = 'SMS - iMessage'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'SMS - iMessage'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No SMS & iMessage data available')
db.close()
return
| en | 0.323127 | SELECT CASE WHEN LENGTH(MESSAGE.DATE)=18 THEN DATETIME(MESSAGE.DATE/1000000000+978307200,'UNIXEPOCH') WHEN LENGTH(MESSAGE.DATE)=9 THEN DATETIME(MESSAGE.DATE + 978307200,'UNIXEPOCH') ELSE "N/A" END "MESSAGE DATE", MESSAGE.ROWID as "MESSAGE ID", CASE WHEN LENGTH(MESSAGE.DATE_DELIVERED)=18 THEN DATETIME(MESSAGE.DATE_DELIVERED/1000000000+978307200,"UNIXEPOCH") WHEN LENGTH(MESSAGE.DATE_DELIVERED)=9 THEN DATETIME(MESSAGE.DATE_DELIVERED+978307200,"UNIXEPOCH") ELSE "N/A" END "DATE DELIVERED", CASE WHEN LENGTH(MESSAGE.DATE_READ)=18 THEN DATETIME(MESSAGE.DATE_READ/1000000000+978307200,"UNIXEPOCH") WHEN LENGTH(MESSAGE.DATE_READ)=9 THEN DATETIME(MESSAGE.DATE_READ+978307200,"UNIXEPOCH") ELSE "N/A" END "DATE READ", MESSAGE.TEXT as "MESSAGE", HANDLE.ID AS "CONTACT ID", MESSAGE.SERVICE AS "SERVICE", MESSAGE.ACCOUNT AS "ACCOUNT", MESSAGE.IS_DELIVERED AS "IS DELIVERED", MESSAGE.IS_FROM_ME AS "IS FROM ME", ATTACHMENT.FILENAME AS "FILENAME", ATTACHMENT.MIME_TYPE AS "MIME TYPE", ATTACHMENT.TRANSFER_NAME AS "TRANSFER TYPE", ATTACHMENT.TOTAL_BYTES AS "TOTAL BYTES" FROM MESSAGE LEFT OUTER JOIN MESSAGE_ATTACHMENT_JOIN ON MESSAGE.ROWID = MESSAGE_ATTACHMENT_JOIN.MESSAGE_ID LEFT OUTER JOIN ATTACHMENT ON MESSAGE_ATTACHMENT_JOIN.ATTACHMENT_ID = ATTACHMENT.ROWID LEFT OUTER JOIN HANDLE ON MESSAGE.HANDLE_ID = HANDLE.ROWID | 2.176243 | 2 |
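The CASE expressions in the query above convert Apple Core Data timestamps — seconds or nanoseconds since 2001-01-01 UTC, hence the 978307200 offset — into readable dates inside SQLite. For checking individual values outside the query, a rough standalone equivalent might look like the sketch below; the 9-digit/18-digit heuristic mirrors the SQL and is an assumption about the data, not part of the iLEAPP API.

from datetime import datetime, timezone

APPLE_EPOCH_OFFSET = 978307200  # seconds between 1970-01-01 and 2001-01-01

def apple_ts_to_datetime(value):
    """Convert a Core Data timestamp (seconds or nanoseconds) to an aware datetime."""
    if value is None:
        return None
    digits = len(str(int(value)))
    if digits >= 18:          # nanosecond-resolution timestamp
        seconds = value / 1_000_000_000 + APPLE_EPOCH_OFFSET
    elif digits == 9:         # second-resolution timestamp
        seconds = value + APPLE_EPOCH_OFFSET
    else:
        return None
    return datetime.fromtimestamp(seconds, tz=timezone.utc)

print(apple_ts_to_datetime(600000000))  # 2020-01-06 10:40:00+00:00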
python_ncurses/curse_2.py | Perceu/tiktok | 0 | 6632588 | import sys,os
import curses
from time import sleep
def animate(screen, desenho):
screen.clear()
desenho = open('./desenhos/batman.txt', 'r')
lines = desenho.readlines()
linha = len(lines)
coluna = 0
for l in lines[::-1]:
linha -= 1
coluna = 0
for c in l:
sleep(0.004)
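# characters "1"/"2"/"3" switch the active colour pair; any other character is drawn with the current attribute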
if c == "1":
screen.attron(curses.color_pair(1))
elif c == "2":
screen.attron(curses.color_pair(2))
elif c == "3":
screen.attron(curses.color_pair(3))
else:
screen.addstr(linha, coluna, c)
screen.refresh()
coluna +=1
def draw_screen(stdscr):
k = 0
cursor_x = 0
cursor_y = 0
# Clear and refresh the screen for a blank canvas
stdscr.clear()
stdscr.refresh()
# Start colors in curses
curses.start_color()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_YELLOW)
# Loop where k is the last character pressed
while (k != ord('q')):
if k == ord('r'):
animate(stdscr, 2)
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
animate(stdscr, 2)
stdscr.refresh()
# Wait for next input
k = stdscr.getch()
def main():
curses.wrapper(draw_screen)
if __name__ == "__main__":
main() | import sys,os
import curses
from time import sleep
def animate(screen, desenho):
screen.clear()
desenho = open('./desenhos/batman.txt', 'r')
lines = desenho.readlines()
linha = len(lines)
coluna = 0
for l in lines[::-1]:
linha -= 1
coluna = 0
for c in l:
sleep(0.004)
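# characters "1"/"2"/"3" switch the active colour pair; any other character is drawn with the current attribute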
if c == "1":
screen.attron(curses.color_pair(1))
elif c == "2":
screen.attron(curses.color_pair(2))
elif c == "3":
screen.attron(curses.color_pair(3))
else:
screen.addstr(linha, coluna, c)
screen.refresh()
coluna +=1
def draw_screen(stdscr):
k = 0
cursor_x = 0
cursor_y = 0
# Clear and refresh the screen for a blank canvas
stdscr.clear()
stdscr.refresh()
# Start colors in curses
curses.start_color()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_YELLOW)
# Loop where k is the last character pressed
while (k != ord('q')):
if k == ord('r'):
animate(stdscr, 2)
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
animate(stdscr, 2)
stdscr.refresh()
# Wait for next input
k = stdscr.getch()
def main():
curses.wrapper(draw_screen)
if __name__ == "__main__":
main() | en | 0.869539 | # Clear and refresh the screen for a blank canvas # Start colors in curses # Loop where k is the last character pressed # Wait for next input | 3.621543 | 4 |
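The script above leans on two curses idioms: curses.wrapper, which initialises and restores the terminal even if the callback raises, and numbered colour pairs selected with attron/color_pair. A minimal self-contained sketch of that pattern (independent of the desenhos/batman.txt asset, which only exists in the original repo):

import curses

def demo(stdscr):
    curses.start_color()
    curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)  # pair 1: yellow on black
    stdscr.clear()
    stdscr.attron(curses.color_pair(1))
    stdscr.addstr(0, 0, "coloured text via pair 1")
    stdscr.attroff(curses.color_pair(1))
    stdscr.addstr(1, 0, "press any key to quit")
    stdscr.refresh()
    stdscr.getch()

if __name__ == "__main__":
    curses.wrapper(demo)  # sets up and tears down the terminal safely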
src/apps/esv_dashboard/visualise_esvs.py | tomstark99/epic-kitchens-100-fyrp | 0 | 6632589 |
import argparse
import os
from pathlib import Path
import pandas as pd
import dash
from dash import Dash
from dash.exceptions import PreventUpdate
import flask
from apps.esv_dashboard.visualisation import Visualiser
from apps.esv_dashboard.visualisation_mf import VisualiserMF
from apps.esv_dashboard.result import Result, ShapleyValueResultsMTRN, ShapleyValueResultsMF
parser = argparse.ArgumentParser(
description="Run web-based ESV visualisation tool",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("esvs_root", type=Path, help="Path to folder containing extracted ESVs")
parser.add_argument("dataset_root", type=Path, help="Path dataset folder of videos")
parser.add_argument("labels_root", type=Path, help="Path to labels root")
parser.add_argument("--debug", default=True, type=bool, help="Enable Dash debug capabilities")
parser.add_argument("--port", default=8050, type=int, help="Port for webserver to listen on")
parser.add_argument("--host", default="localhost", help="Host to bind to")
parser.add_argument("--motion-former", default=False, action='store_true', help="Display motion former ESVs")
parser.add_argument("--test", default=False, action='store_true', help="Display ESVs for test set")
def main(args):
args = parser.parse_args()
dataset_dir: Path = args.dataset_root
colours = {
'rgb': {
'yellow_20': 'rgba(244,160,0,0.1)',
'blue_20': 'rgba(66,133,244,0.05)'
},
'hex': {
'red': '#DB4437',
'red_mf': '#ff0000',
'blue': '#4285F4',
'blue_mf': '#000cff',
'yellow': '#F4B400',
'green': '#0F9D58'
}
}
verbs = pd.read_csv(args.labels_root / 'EPIC_100_verb_classes.csv')
nouns = pd.read_csv(args.labels_root / 'EPIC_100_noun_classes.csv')
verb2str = pd.Series(verbs.key.values,index=verbs.id).to_dict()
noun2str = pd.Series(nouns.key.values,index=nouns.id).to_dict()
verb_noun = pd.read_pickle(args.labels_root / 'verb_noun.pkl')
verb_noun_classes = pd.read_pickle(args.labels_root / 'verb_noun_classes.pkl')
verb_noun_narration = pd.read_pickle(args.labels_root /'verb_noun_classes_narration.pkl')
if args.test:
results_dict_mtrn = pd.read_pickle(args.esvs_root / 'f_val_mtrn-esv-min_frames=1-max_frames=8.pkl')
results_dict_mf = pd.read_pickle(args.esvs_root / 'f_val_mf-esv-min_frames=1-max_frames=8.pkl')
features_path = Path('datasets/epic-100/features/9668_val_features.pkl')
else:
results_dict_mtrn = pd.read_pickle(args.esvs_root / 'f_train_mtrn-esv-min_frames=1-max_frames=8.pkl')
results_dict_mf = pd.read_pickle(args.esvs_root / 'f_train_mf-esv-min_frames=1-max_frames=8.pkl')
features_path = Path('datasets/epic-100/features/67217_train_features.pkl')
if args.motion_former:
labels_dict = pd.read_pickle(features_path)['labels']
title = "ESV Dashboard"
results = ShapleyValueResultsMTRN(results_dict_mtrn)
if args.motion_former:
results_mf = ShapleyValueResultsMF(results_dict_mf, labels_dict)
visualisation = VisualiserMF(
results,
results_mf,
colours,
verb2str,
noun2str,
verb_noun,
verb_noun_classes,
verb_noun_narration,
dataset_dir,
title=title
)
else:
visualisation = Visualiser(
results,
colours,
verb2str,
noun2str,
verb_noun,
verb_noun_classes,
verb_noun_narration,
dataset_dir,
title=title
)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = Dash(
__name__,
title="ESV Visualiser",
update_title="Updating..." if args.debug else None,
external_stylesheets=external_stylesheets,
)
visualisation.attach_to_app(app)
app.run_server(host=args.host, debug=args.debug, port=args.port)
if __name__ == "__main__":
main(parser.parse_args())
| import argparse
import os
from pathlib import Path
import pandas as pd
import dash
from dash import Dash
from dash.exceptions import PreventUpdate
import flask
from apps.esv_dashboard.visualisation import Visualiser
from apps.esv_dashboard.visualisation_mf import VisualiserMF
from apps.esv_dashboard.result import Result, ShapleyValueResultsMTRN, ShapleyValueResultsMF
parser = argparse.ArgumentParser(
description="Run web-based ESV visualisation tool",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("esvs_root", type=Path, help="Path to folder containing extracted ESVs")
parser.add_argument("dataset_root", type=Path, help="Path dataset folder of videos")
parser.add_argument("labels_root", type=Path, help="Path to labels root")
parser.add_argument("--debug", default=True, type=bool, help="Enable Dash debug capabilities")
parser.add_argument("--port", default=8050, type=int, help="Port for webserver to listen on")
parser.add_argument("--host", default="localhost", help="Host to bind to")
parser.add_argument("--motion-former", default=False, action='store_true', help="Display motion former ESVs")
parser.add_argument("--test", default=False, action='store_true', help="Display ESVs for test set")
def main(args):
args = parser.parse_args()
dataset_dir: Path = args.dataset_root
colours = {
'rgb': {
'yellow_20': 'rgba(244,160,0,0.1)',
'blue_20': 'rgba(66,133,244,0.05)'
},
'hex': {
'red': '#DB4437',
'red_mf': '#ff0000',
'blue': '#4285F4',
'blue_mf': '#000cff',
'yellow': '#F4B400',
'green': '#0F9D58'
}
}
verbs = pd.read_csv(args.labels_root / 'EPIC_100_verb_classes.csv')
nouns = pd.read_csv(args.labels_root / 'EPIC_100_noun_classes.csv')
verb2str = pd.Series(verbs.key.values,index=verbs.id).to_dict()
noun2str = pd.Series(nouns.key.values,index=nouns.id).to_dict()
verb_noun = pd.read_pickle(args.labels_root / 'verb_noun.pkl')
verb_noun_classes = pd.read_pickle(args.labels_root / 'verb_noun_classes.pkl')
verb_noun_narration = pd.read_pickle(args.labels_root /'verb_noun_classes_narration.pkl')
if args.test:
results_dict_mtrn = pd.read_pickle(args.esvs_root / 'f_val_mtrn-esv-min_frames=1-max_frames=8.pkl')
results_dict_mf = pd.read_pickle(args.esvs_root / 'f_val_mf-esv-min_frames=1-max_frames=8.pkl')
features_path = Path('datasets/epic-100/features/9668_val_features.pkl')
else:
results_dict_mtrn = pd.read_pickle(args.esvs_root / 'f_train_mtrn-esv-min_frames=1-max_frames=8.pkl')
results_dict_mf = pd.read_pickle(args.esvs_root / 'f_train_mf-esv-min_frames=1-max_frames=8.pkl')
features_path = Path('datasets/epic-100/features/67217_train_features.pkl')
if args.motion_former:
labels_dict = pd.read_pickle(features_path)['labels']
title = "ESV Dashboard"
results = ShapleyValueResultsMTRN(results_dict_mtrn)
if args.motion_former:
results_mf = ShapleyValueResultsMF(results_dict_mf, labels_dict)
visualisation = VisualiserMF(
results,
results_mf,
colours,
verb2str,
noun2str,
verb_noun,
verb_noun_classes,
verb_noun_narration,
dataset_dir,
title=title
)
else:
visualisation = Visualiser(
results,
colours,
verb2str,
noun2str,
verb_noun,
verb_noun_classes,
verb_noun_narration,
dataset_dir,
title=title
)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = Dash(
__name__,
title="ESV Visualiser",
update_title="Updating..." if args.debug else None,
external_stylesheets=external_stylesheets,
)
visualisation.attach_to_app(app)
app.run_server(host=args.host, debug=args.debug, port=args.port)
if __name__ == "__main__":
main(parser.parse_args()) | none | 1 | 2.321673 | 2 |
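The dashboard wiring above follows the usual Dash pattern: construct the app, let a visualiser object register its layout and callbacks via attach_to_app, then serve it. The stripped-down sketch below mirrors that shape with a placeholder layout; the real Visualiser/VisualiserMF classes and their ESV plots are project-specific and are not reproduced here.

from dash import Dash, html

class MinimalVisualiser:
    def __init__(self, title):
        self.title = title

    def attach_to_app(self, app):
        # In the real tool this also registers the callbacks behind the ESV plots.
        app.layout = html.Div([html.H1(self.title), html.P("placeholder layout")])

app = Dash(__name__, title="ESV Visualiser")
MinimalVisualiser("ESV Dashboard").attach_to_app(app)

if __name__ == "__main__":
    app.run_server(host="localhost", port=8050, debug=True)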
hand.py | Devsharma431/Hand_Tracking | 0 | 6632590 |
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# For static images:
IMAGE_FILES = []
with mp_hands.Hands(
static_image_mode=True,
max_num_hands=2,
min_detection_confidence=0.5) as hands:
for idx, file in enumerate(IMAGE_FILES):
# Read an image, flip it around y-axis for correct handedness output (see
# above).
image = cv2.flip(cv2.imread(file), 1)
# Convert the BGR image to RGB before processing.
results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Print handedness and draw hand landmarks on the image.
print('Handedness:', results.multi_handedness)
if not results.multi_hand_landmarks:
continue
image_height, image_width, _ = image.shape
annotated_image = image.copy()
for hand_landmarks in results.multi_hand_landmarks:
print('hand_landmarks:', hand_landmarks)
print(
f'Index finger tip coordinates: (',
f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width}, '
f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_height})'
)
mp_drawing.draw_landmarks(
annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imwrite(
'/tmp/annotated_image' + str(idx) + '.png', cv2.flip(annotated_image, 1))
# For webcam input:
cap = cv2.VideoCapture(0)
with mp_hands.Hands(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as hands:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imshow('HACKER HANDS', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release() | import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# For static images:
IMAGE_FILES = []
with mp_hands.Hands(
static_image_mode=True,
max_num_hands=2,
min_detection_confidence=0.5) as hands:
for idx, file in enumerate(IMAGE_FILES):
# Read an image, flip it around y-axis for correct handedness output (see
# above).
image = cv2.flip(cv2.imread(file), 1)
# Convert the BGR image to RGB before processing.
results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Print handedness and draw hand landmarks on the image.
print('Handedness:', results.multi_handedness)
if not results.multi_hand_landmarks:
continue
image_height, image_width, _ = image.shape
annotated_image = image.copy()
for hand_landmarks in results.multi_hand_landmarks:
print('hand_landmarks:', hand_landmarks)
print(
f'Index finger tip coordinates: (',
f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width}, '
f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_height})'
)
mp_drawing.draw_landmarks(
annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imwrite(
'/tmp/annotated_image' + str(idx) + '.png', cv2.flip(annotated_image, 1))
# For webcam input:
cap = cv2.VideoCapture(0)
with mp_hands.Hands(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as hands:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imshow('HACKER HANDS', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release() | en | 0.785873 | # For static images: # Read an image, flip it around y-axis for correct handedness output (see # above). # Convert the BGR image to RGB before processing. # Print handedness and draw hand landmarks on the image. # For webcam input: # If loading a video, use 'break' instead of 'continue'. # Flip the image horizontally for a later selfie-view display, and convert # the BGR image to RGB. # To improve performance, optionally mark the image as not writeable to # pass by reference. # Draw the hand annotations on the image. | 2.783354 | 3 |
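MediaPipe returns landmark coordinates normalised to [0, 1]; the script above converts them to pixels by multiplying by the frame size. A small helper doing the same conversion for any landmark index is sketched below (it assumes the standard results object produced by hands.process):

def landmark_to_pixels(hand_landmarks, landmark_id, image_width, image_height):
    """Return (x, y) pixel coordinates for one landmark of a detected hand."""
    lm = hand_landmarks.landmark[landmark_id]
    return int(lm.x * image_width), int(lm.y * image_height)

# Example usage inside the webcam loop, after hands.process(image):
# if results.multi_hand_landmarks:
#     for hand_landmarks in results.multi_hand_landmarks:
#         x, y = landmark_to_pixels(hand_landmarks,
#                                   mp_hands.HandLandmark.INDEX_FINGER_TIP,
#                                   image.shape[1], image.shape[0])
#         cv2.circle(image, (x, y), 8, (0, 255, 0), -1)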
301-400/392.is-subsequence.py | guangxu-li/leetcode-in-python | 0 | 6632591 | #
# @lc app=leetcode id=392 lang=python3
#
# [392] Is Subsequence
#
# @lc code=start
class Solution:
def isSubsequence(self, s: str, t: str) -> bool:
i, j = 0, 0
while i < len(s) and j < len(t):
if s[i] == t[j]:
i += 1
j += 1
return i == len(s)
# @lc code=end
| #
# @lc app=leetcode id=392 lang=python3
#
# [392] Is Subsequence
#
# @lc code=start
class Solution:
def isSubsequence(self, s: str, t: str) -> bool:
i, j = 0, 0
while i < len(s) and j < len(t):
if s[i] == t[j]:
i += 1
j += 1
return i == len(s)
# @lc code=end
| en | 0.546223 | # # @lc app=leetcode id=392 lang=python3 # # [392] Is Subsequence # # @lc code=start # @lc code=end | 3.060883 | 3 |
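The two-pointer check above runs in O(|s| + |t|). Below is a quick sanity test (assuming the Solution class above is in scope) plus a sketch of the common follow-up — many candidate strings s against one fixed t — using per-character index lists and binary search; the indexed variant is added here for illustration and is not part of the submitted solution.

from bisect import bisect_left
from collections import defaultdict

assert Solution().isSubsequence("abc", "ahbgdc")
assert not Solution().isSubsequence("axc", "ahbgdc")

def is_subsequence_indexed(s: str, t: str) -> bool:
    # Preprocess t once: record positions of each character, then binary-search forward.
    positions = defaultdict(list)
    for i, ch in enumerate(t):
        positions[ch].append(i)
    prev = -1
    for ch in s:
        idx_list = positions.get(ch)
        if not idx_list:
            return False
        j = bisect_left(idx_list, prev + 1)
        if j == len(idx_list):
            return False
        prev = idx_list[j]
    return True

assert is_subsequence_indexed("abc", "ahbgdc") and not is_subsequence_indexed("axc", "ahbgdc")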
python/pyspark/daemon.py | xieyuchen/spark | 79 | 6632592 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
import os
import signal
import select
import socket
import sys
import traceback
import time
from errno import EINTR, ECHILD, EAGAIN
from socket import AF_INET, SOCK_STREAM, SOMAXCONN
from signal import SIGHUP, SIGTERM, SIGCHLD, SIG_DFL, SIG_IGN
from pyspark.worker import main as worker_main
from pyspark.serializers import read_int, write_int
def compute_real_exit_code(exit_code):
# SystemExit's code can be integer or string, but os._exit only accepts integers
if isinstance(exit_code, numbers.Integral):
return exit_code
else:
return 1
def worker(sock):
"""
Called by a worker process after the fork().
"""
# Redirect stdout to stderr
os.dup2(2, 1)
sys.stdout = sys.stderr # The sys.stdout object is different from file descriptor 1
signal.signal(SIGHUP, SIG_DFL)
signal.signal(SIGCHLD, SIG_DFL)
signal.signal(SIGTERM, SIG_DFL)
# Blocks until the socket is closed by draining the input stream
# until it raises an exception or returns EOF.
def waitSocketClose(sock):
try:
while True:
# Empty string is returned upon EOF (and only then).
if sock.recv(4096) == '':
return
except:
pass
# Read the socket using fdopen instead of socket.makefile() because the latter
# seems to be very slow; note that we need to dup() the file descriptor because
# otherwise writes also cause a seek that makes us miss data on the read side.
infile = os.fdopen(os.dup(sock.fileno()), "a+", 65536)
outfile = os.fdopen(os.dup(sock.fileno()), "a+", 65536)
exit_code = 0
try:
# Acknowledge that the fork was successful
write_int(os.getpid(), outfile)
outfile.flush()
worker_main(infile, outfile)
except SystemExit as exc:
exit_code = exc.code
finally:
outfile.flush()
# The Scala side will close the socket upon task completion.
waitSocketClose(sock)
os._exit(compute_real_exit_code(exit_code))
# Cleanup zombie children
def cleanup_dead_children():
try:
while True:
pid, _ = os.waitpid(0, os.WNOHANG)
if not pid:
break
except:
pass
def manager():
# Create a new process group to corral our children
os.setpgid(0, 0)
# Create a listening socket on the AF_INET loopback interface
listen_sock = socket.socket(AF_INET, SOCK_STREAM)
listen_sock.bind(('127.0.0.1', 0))
listen_sock.listen(max(1024, SOMAXCONN))
listen_host, listen_port = listen_sock.getsockname()
write_int(listen_port, sys.stdout)
def shutdown(code):
signal.signal(SIGTERM, SIG_DFL)
# Send SIGHUP to notify workers of shutdown
os.kill(0, SIGHUP)
exit(code)
def handle_sigterm(*args):
shutdown(1)
signal.signal(SIGTERM, handle_sigterm) # Gracefully exit on SIGTERM
signal.signal(SIGHUP, SIG_IGN) # Don't die on SIGHUP
# Initialization complete
sys.stdout.close()
try:
while True:
try:
ready_fds = select.select([0, listen_sock], [], [], 1)[0]
except select.error as ex:
if ex[0] == EINTR:
continue
else:
raise
# cleanup in signal handler will cause deadlock
cleanup_dead_children()
if 0 in ready_fds:
try:
worker_pid = read_int(sys.stdin)
except EOFError:
# Spark told us to exit by closing stdin
shutdown(0)
try:
os.kill(worker_pid, signal.SIGKILL)
except OSError:
pass # process already died
if listen_sock in ready_fds:
try:
sock, _ = listen_sock.accept()
except OSError as e:
if e.errno == EINTR:
continue
raise
# Launch a worker process
try:
pid = os.fork()
except OSError as e:
if e.errno in (EAGAIN, EINTR):
time.sleep(1)
pid = os.fork() # error here will shutdown daemon
else:
outfile = sock.makefile('w')
write_int(e.errno, outfile) # Signal that the fork failed
outfile.flush()
outfile.close()
sock.close()
continue
if pid == 0:
# in child process
listen_sock.close()
try:
worker(sock)
except:
traceback.print_exc()
os._exit(1)
else:
os._exit(0)
else:
sock.close()
finally:
shutdown(1)
if __name__ == '__main__':
manager()
| #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
import os
import signal
import select
import socket
import sys
import traceback
import time
from errno import EINTR, ECHILD, EAGAIN
from socket import AF_INET, SOCK_STREAM, SOMAXCONN
from signal import SIGHUP, SIGTERM, SIGCHLD, SIG_DFL, SIG_IGN
from pyspark.worker import main as worker_main
from pyspark.serializers import read_int, write_int
def compute_real_exit_code(exit_code):
# SystemExit's code can be integer or string, but os._exit only accepts integers
if isinstance(exit_code, numbers.Integral):
return exit_code
else:
return 1
def worker(sock):
"""
Called by a worker process after the fork().
"""
# Redirect stdout to stderr
os.dup2(2, 1)
sys.stdout = sys.stderr # The sys.stdout object is different from file descriptor 1
signal.signal(SIGHUP, SIG_DFL)
signal.signal(SIGCHLD, SIG_DFL)
signal.signal(SIGTERM, SIG_DFL)
# Blocks until the socket is closed by draining the input stream
# until it raises an exception or returns EOF.
def waitSocketClose(sock):
try:
while True:
# Empty string is returned upon EOF (and only then).
if sock.recv(4096) == '':
return
except:
pass
# Read the socket using fdopen instead of socket.makefile() because the latter
# seems to be very slow; note that we need to dup() the file descriptor because
# otherwise writes also cause a seek that makes us miss data on the read side.
infile = os.fdopen(os.dup(sock.fileno()), "a+", 65536)
outfile = os.fdopen(os.dup(sock.fileno()), "a+", 65536)
exit_code = 0
try:
# Acknowledge that the fork was successful
write_int(os.getpid(), outfile)
outfile.flush()
worker_main(infile, outfile)
except SystemExit as exc:
exit_code = exc.code
finally:
outfile.flush()
# The Scala side will close the socket upon task completion.
waitSocketClose(sock)
os._exit(compute_real_exit_code(exit_code))
# Cleanup zombie children
def cleanup_dead_children():
try:
while True:
pid, _ = os.waitpid(0, os.WNOHANG)
if not pid:
break
except:
pass
def manager():
# Create a new process group to corral our children
os.setpgid(0, 0)
# Create a listening socket on the AF_INET loopback interface
listen_sock = socket.socket(AF_INET, SOCK_STREAM)
listen_sock.bind(('127.0.0.1', 0))
listen_sock.listen(max(1024, SOMAXCONN))
listen_host, listen_port = listen_sock.getsockname()
write_int(listen_port, sys.stdout)
def shutdown(code):
signal.signal(SIGTERM, SIG_DFL)
# Send SIGHUP to notify workers of shutdown
os.kill(0, SIGHUP)
exit(code)
def handle_sigterm(*args):
shutdown(1)
signal.signal(SIGTERM, handle_sigterm) # Gracefully exit on SIGTERM
signal.signal(SIGHUP, SIG_IGN) # Don't die on SIGHUP
# Initialization complete
sys.stdout.close()
try:
while True:
try:
ready_fds = select.select([0, listen_sock], [], [], 1)[0]
except select.error as ex:
if ex[0] == EINTR:
continue
else:
raise
# cleanup in signal handler will cause deadlock
cleanup_dead_children()
if 0 in ready_fds:
try:
worker_pid = read_int(sys.stdin)
except EOFError:
# Spark told us to exit by closing stdin
shutdown(0)
try:
os.kill(worker_pid, signal.SIGKILL)
except OSError:
pass # process already died
if listen_sock in ready_fds:
try:
sock, _ = listen_sock.accept()
except OSError as e:
if e.errno == EINTR:
continue
raise
# Launch a worker process
try:
pid = os.fork()
except OSError as e:
if e.errno in (EAGAIN, EINTR):
time.sleep(1)
pid = os.fork() # error here will shutdown daemon
else:
outfile = sock.makefile('w')
write_int(e.errno, outfile) # Signal that the fork failed
outfile.flush()
outfile.close()
sock.close()
continue
if pid == 0:
# in child process
listen_sock.close()
try:
worker(sock)
except:
traceback.print_exc()
os._exit(1)
else:
os._exit(0)
else:
sock.close()
finally:
shutdown(1)
if __name__ == '__main__':
manager()
| en | 0.880666 | # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # SystemExit's code can be integer or string, but os._exit only accepts integers Called by a worker process after the fork(). # Redirect stdout to stderr # The sys.stdout object is different from file descriptor 1 # Blocks until the socket is closed by draining the input stream # until it raises an exception or returns EOF. # Empty string is returned upon EOF (and only then). # Read the socket using fdopen instead of socket.makefile() because the latter # seems to be very slow; note that we need to dup() the file descriptor because # otherwise writes also cause a seek that makes us miss data on the read side. # Acknowledge that the fork was successful # The Scala side will close the socket upon task completion. # Cleanup zombie children # Create a new process group to corral our children # Create a listening socket on the AF_INET loopback interface # Send SIGHUP to notify workers of shutdown # Gracefully exit on SIGTERM # Don't die on SIGHUP # Initialization complete # cleanup in signal handler will cause deadlock # Spark told us to exit by closing stdin # process already died # Launch a worker process # error here will shutdown daemon # Signal that the fork failed # in child process | 1.818976 | 2 |
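The daemon above and the JVM exchange 4-byte big-endian integers (the listening port, worker PIDs, fork error codes) via write_int/read_int. The sketch below shows that framing with struct purely for illustration; the real helpers live in pyspark.serializers and may differ in detail.

import io
import struct

def write_int(value, stream):
    stream.write(struct.pack("!i", value))   # 4 bytes, network (big-endian) order

def read_int(stream):
    data = stream.read(4)
    if len(data) < 4:
        raise EOFError
    return struct.unpack("!i", data)[0]

buf = io.BytesIO()
write_int(8050, buf)
buf.seek(0)
assert read_int(buf) == 8050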
ex02-mdps/ex02-mdps.py | christianreiser/reinforcement-learning-course-uni-stuttgart | 0 | 6632593 |
import gym
import numpy as np
# Init environment
# Lets use a smaller 3x3 custom map for faster computations
custom_map3x3 = [
'SFF',
'FFF',
'FHG',
]
# env = gym.make("FrozenLake-v0", desc=custom_map3x3)
# TODO: Uncomment the following line to try the default map (4x4):
env = gym.make("FrozenLake-v0")
# Uncomment the following lines for even larger maps:
# random_map = generate_random_map(size=5, p=0.8)
# env = gym.make("FrozenLake-v0", desc=random_map)
# Init some useful variables:
n_states = env.observation_space.n
#n_states_without_terminal = n_states - 2
n_actions = env.action_space.n
r = np.zeros(n_states) # the r vector is zero everywhere except for the goal state (last state)
r[-1] = 1.
gamma = 0.8
i = 0
def trans_matrix_for_policy(policy):
"""
This is a helper function that returns the transition probability matrix P for a policy
"""
transitions = np.zeros((n_states, n_states))
for s in range(n_states):
probs = env.P[s][policy[s]]
for el in probs:
transitions[s, el[1]] += el[0]
return transitions
def terminals():
"""
This is a helper function that returns terminal states
"""
terms = []
for s in range(n_states):
# terminal is when we end with probability 1 in terminal:
if env.P[s][0][0][0] == 1.0 and env.P[s][0][0][3] == True:
terms.append(s)
return terms
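# Enumerate every deterministic policy by writing the policy index as a base-n_actions string,
# one digit per state (n_actions ** n_states strings in total).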
def generate_list_of_all_policies(start, end, base, step=1):
def Convert(n, base):
string = "0123456789"
if n < base:
return string[n]
else:
return Convert(n//base,base) + string[n%base]
return (Convert(i, base) for i in range(start, end, step))
def value_policy(policy):
P = trans_matrix_for_policy(policy)
# TODO: sth is wrong: calculate and return v
# (P, r and gamma already given)
# v= (−γP+Id)^(-1) * r
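# (Id - gamma*P) v = r is the Bellman evaluation equation for a fixed policy, so inverting gives v directly.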
v = np.matmul(
np.linalg.inv(
np.multiply(-gamma, P) + np.identity(n_states)
)
, r
)
return v
def bruteforce_policies():
terms = terminals()
optimalpolicies = np.zeros((1, n_states))
num_optimal_policies = 0
optimalvalue = np.zeros(n_states, dtype=float)
# in the discrete case a policy is just an array with action = policy[state]
print('computing all possible policies... may take forever ...')
all_policies = list(generate_list_of_all_policies(0, n_actions ** n_states, n_actions))
print('computing all possible policies finished. Evaluating ...')
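# One iteration per deterministic policy, i.e. n_actions ** n_states iterations, so this scales exponentially with the number of states.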
for j in range(0, n_actions ** n_states):
a = [int(digit) for digit in all_policies[j]]
policy = np.zeros(n_states, dtype=int)
for ele in range(0, len(a)):
policy[n_states - ele - 1] = a[len(a) - ele - 1]
value = value_policy(policy)
#print('value=', value)
if np.sum(value) > np.sum(optimalvalue):
optimalvalue = value
optimalpolicies = np.zeros((1, n_states))
optimalpolicies[0] = policy
num_optimal_policies = 0
print('new best policy found. valuesum:', np.sum(value))
elif np.sum(value) == np.sum(optimalvalue):
num_optimal_policies += 1
optimalpolicies = np.concatenate((optimalpolicies, np.array([policy])), axis=0)
print('optimalvalue=', optimalvalue)
print('optimalvalueSum=', np.sum(optimalvalue))
# TODO: implement code that tries all possible policies, calculate the values using def value_policy.
# TODO: Find the optimal values and the optimal policies to answer the exercise questions.
print("Optimal value function:")
print(optimalvalue)
print("number optimal policies (INCLUDING TERMINAL STATES):")
print(len(optimalpolicies))
print("optimal policies:")
print(np.array(optimalpolicies))
return optimalpolicies
def main():
# print the environment
print("current environment: ")
env.render()
print("")
# Here a policy is just an array with the action for a state as element
policy_left = np.zeros(n_states, dtype=int) # 0 for all states
policy_right = np.ones(n_states, dtype=int) * 2 # 2 for all states
# Value functions:
print("Value function for policy_left (always going left):")
print(value_policy(policy_left))
print("Value function for policy_right (always going right):")
print (value_policy(policy_right))
optimalpolicies = bruteforce_policies()
# This code can be used to "rollout" a policy in the environment:
"""
print ("rollout policy:")
maxiter = 100
state = env.reset()
for i in range(maxiter):
new_state, reward, done, info = env.step(optimalpolicies[0][state])
env.render()
state=new_state
if done:
print ("Finished episode")
break"""
if __name__ == "__main__":
main()
| import gym
import numpy as np
# Init environment
# Lets use a smaller 3x3 custom map for faster computations
custom_map3x3 = [
'SFF',
'FFF',
'FHG',
]
# env = gym.make("FrozenLake-v0", desc=custom_map3x3)
# TODO: Uncomment the following line to try the default map (4x4):
env = gym.make("FrozenLake-v0")
# Uncomment the following lines for even larger maps:
# random_map = generate_random_map(size=5, p=0.8)
# env = gym.make("FrozenLake-v0", desc=random_map)
# Init some useful variables:
n_states = env.observation_space.n
#n_states_without_terminal = n_states - 2
n_actions = env.action_space.n
r = np.zeros(n_states) # the r vector is zero everywhere except for the goal state (last state)
r[-1] = 1.
gamma = 0.8
i = 0
def trans_matrix_for_policy(policy):
"""
This is a helper function that returns the transition probability matrix P for a policy
"""
transitions = np.zeros((n_states, n_states))
for s in range(n_states):
probs = env.P[s][policy[s]]
for el in probs:
transitions[s, el[1]] += el[0]
return transitions
def terminals():
"""
This is a helper function that returns terminal states
"""
terms = []
for s in range(n_states):
# terminal is when we end with probability 1 in terminal:
if env.P[s][0][0][0] == 1.0 and env.P[s][0][0][3] == True:
terms.append(s)
return terms
def generate_list_of_all_policies(start, end, base, step=1):
def Convert(n, base):
string = "0123456789"
if n < base:
return string[n]
else:
return Convert(n//base,base) + string[n%base]
return (Convert(i, base) for i in range(start, end, step))
def value_policy(policy):
P = trans_matrix_for_policy(policy)
# TODO: sth is wrong: calculate and return v
# (P, r and gamma already given)
# v= (−γP+Id)^(-1) * r
v = np.matmul(
np.linalg.inv(
np.multiply(-gamma, P) + np.identity(n_states)
)
, r
)
return v
def bruteforce_policies():
terms = terminals()
optimalpolicies = np.zeros((1, n_states))
num_optimal_policies = 0
optimalvalue = np.zeros(n_states, dtype=np.float)
# in the discrete case a policy is just an array with action = policy[state]
print('computing all possible policies... may take forever ...')
all_policies = list(generate_list_of_all_policies(0, n_actions ** n_states, n_actions))
print('computing all possible policies finished. Evaluating ...')
for j in range(0, n_actions ** n_states):
a = (list(map(int, [int for int in all_policies[j]])))
policy = np.zeros(n_states, dtype=np.int)
for ele in range(0, len(a)):
policy[n_states - ele - 1] = a[len(a) - ele - 1]
value = value_policy(policy)
#print('value=', value)
if np.sum(value) > np.sum(optimalvalue):
optimalvalue = value
optimalpolicies = np.zeros((1, n_states))
optimalpolicies[0] = policy
num_optimal_policies = 0
print('new best policy found. valuesum:', np.sum(value))
elif np.sum(value) == np.sum(optimalvalue):
num_optimal_policies += 1
optimalpolicies = np.concatenate((optimalpolicies, np.array([policy])), axis=0)
print('optimalvalue=', optimalvalue)
print('optimalvalueSum=', np.sum(optimalvalue))
# TODO: implement code that tries all possible policies, calculate the values using def value_policy.
# TODO: Find the optimal values and the optimal policies to answer the exercise questions.
print("Optimal value function:")
print(optimalvalue)
print("number optimal policies (INCLUDING TERMINAL STATES):")
print(len(optimalpolicies))
print("optimal policies:")
print(np.array(optimalpolicies))
return optimalpolicies
def main():
# print the environment
print("current environment: ")
env.render()
print("")
# Here a policy is just an array with the action for a state as element
policy_left = np.zeros(n_states, dtype=np.int) # 0 for all states
policy_right = np.ones(n_states, dtype=np.int) * 2 # 2 for all states
# Value functions:
print("Value function for policy_left (always going left):")
print(value_policy(policy_left))
print("Value function for policy_right (always going right):")
print (value_policy(policy_right))
optimalpolicies = bruteforce_policies()
# This code can be used to "rollout" a policy in the environment:
"""
print ("rollout policy:")
maxiter = 100
state = env.reset()
for i in range(maxiter):
new_state, reward, done, info = env.step(optimalpolicies[0][state])
env.render()
state=new_state
if done:
print ("Finished episode")
break"""
if __name__ == "__main__":
main() | en | 0.709468 | # Init environment # Lets use a smaller 3x3 custom map for faster computations # env = gym.make("FrozenLake-v0", desc=custom_map3x3) # TODO: Uncomment the following line to try the default map (4x4): # Uncomment the following lines for even larger maps: # random_map = generate_random_map(size=5, p=0.8) # env = gym.make("FrozenLake-v0", desc=random_map) # Init some useful variables: #n_states_without_terminal = n_states - 2 # the r vector is zero everywhere except for the goal state (last state) This is a helper function that returns the transition probability matrix P for a policy This is a helper function that returns terminal states # terminal is when we end with probability 1 in terminal: # TODO: sth is wrong: calculate and return v # (P, r and gamma already given) # v= (−γP+Id)^(-1) * r # in the discrete case a policy is just an array with action = policy[state] #print('value=', value) # TODO: implement code that tries all possible policies, calculate the values using def value_policy. # TODO: Find the optimal values and the optimal policies to answer the exercise questions. # print the environment # Here a policy is just an array with the action for a state as element # 0 for all states # 2 for all states # Value functions: # This code can be used to "rollout" a policy in the environment: print ("rollout policy:") maxiter = 100 state = env.reset() for i in range(maxiter): new_state, reward, done, info = env.step(optimalpolicies[0][state]) env.render() state=new_state if done: print ("Finished episode") break | 2.962636 | 3 |
py_server/db_ctrl.py | Makoto-hr-jp/makoto-site | 0 | 6632594 | '''
Author: TM
Purpose: database management
'''
#built-in
from hashlib import sha256
from datetime import datetime
# addons
import mysql.connector as sql
# internal
import db_config
from log import log_msg
forbidden=["SELECT","FROM","DELETE","INSERT"]
def connect():
global dbconn
log_msg('INFO','Connecting to database...')
dbconn='OK'
try:
ankete_cfg=db_config.get_db_cfg("ankete.cfg")
except:
log_msg('FAIL','Missing or broken cfg file "ankete.cfg".')
dbconn='ERSS'
try:
return sql.connect(**ankete_cfg,database='tjedne_ankete')
except:
log_msg('FAIL','Could not establish link to feedback database.')
def _check_field(field):
for f in forbidden:
if f in field:
log_msg('WARN',f'Possible SQL injection: "{field}"')
return False
return True
req_keys=set(('gradivo','listic','zadaca','komentar'))
def add_entry(link,data):
if type(data)!=dict: return False
if set(data.keys())!=req_keys: return False
str_repr=(f"{data['gradivo']}"
f"{data['listic']}"
f"{data['zadaca']}"
f"{data['komentar']}")
ID=sha256(str_repr.encode()).hexdigest()
date=datetime.now().strftime("%Y-%m-%d")
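# Note: the field values are interpolated directly into the SQL string below; _check_field is not applied to them here.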
query=("INSERT INTO odgovori "
"(ID, datum, gradivo, listic, zadaca, komentar) "
f"""VALUES ("{ID}", "{date}", {data['gradivo']}, """
f"""{data['listic']}, {data['zadaca']}, "{data['komentar']}")""")
print(query)
cursor=link.cursor()
cursor.execute(query)
link.commit()
cursor.close()
return True
# database init
ankete=connect()
| '''
Author: TM
Purpose: database management
'''
#built-in
from hashlib import sha256
from datetime import datetime
# addons
import mysql.connector as sql
# internal
import db_config
from log import log_msg
forbidden=["SELECT","FROM","DELETE","INSERT"]
def connect():
global dbconn
log_msg('INFO','Connecting to database...')
dbconn='OK'
try:
ankete_cfg=db_config.get_db_cfg("ankete.cfg")
except:
log_msg('FAIL','Missing or broken cfg file "ankete.cfg".')
dbconn='ERSS'
try:
return sql.connect(**ankete_cfg,database='tjedne_ankete')
except:
log_msg('FAIL','Could not establish link to feedback database.')
def _check_field(field):
for f in forbidden:
if f in field:
log_msg('WARN',f'Possible SQL injection: "{field}"')
return False
return True
req_keys=set(('gradivo','listic','zadaca','komentar'))
def add_entry(link,data):
if type(data)!=dict: return False
if set(data.keys())!=req_keys: return False
str_repr=(f"{data['gradivo']}"
f"{data['listic']}"
f"{data['zadaca']}"
f"{data['komentar']}")
ID=sha256(str_repr.encode()).hexdigest()
date=datetime.now().strftime("%Y-%m-%d")
query=("INSERT INTO odgovori "
"(ID, datum, gradivo, listic, zadaca, komentar) "
f"""VALUES ("{ID}", "{date}", {data['gradivo']}, """
f"""{data['listic']}, {data['zadaca']}, "{data['komentar']}")""")
print(query)
cursor=link.cursor()
cursor.execute(query)
link.commit()
cursor.close()
return True
# database init
ankete=connect()
| en | 0.146509 | Autor: TM Svrha: upravljanje bazom podataka #built-in # addons # internal VALUES ("{ID}", "{date}", {data['gradivo']}, {data['listic']}, {data['zadaca']}, "{data['komentar']}") # database init | 2.313505 | 2 |
tests/test_parser.py | AndrewAnnex/pvl | 0 | 6632595 | #!/usr/bin/env python
"""This module has new tests for the pvl decoder functions."""
# Copyright 2019, <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
from pvl.parser import PVLParser, ParseError, OmniParser, EmptyValueAtLine
from pvl.lexer import lexer as Lexer
from pvl.lexer import LexerError
from pvl._collections import Units, PVLModule, PVLGroup, PVLObject
class TestParse(unittest.TestCase):
def setUp(self):
self.p = PVLParser()
# def test_broken_assignment(self):
# self.assertRaises(DecodeError,
# self.d.broken_assignment, 'foo', 0)
# self.d.strict = False
# empty = EmptyValueAtLine(1)
# self.assertEqual(empty, self.d.broken_assignment('foo', 0))
# def test_parse_iterable(self):
# def pv(s, idx):
# (t, _, _) = s[idx:-1].partition(',')
# v = t.strip()
# i = s.find(v, idx)
# return v, i + len(v)
# with patch('pvl.decoder.PVLDecoder.parse_value', side_effect=pv):
# i = '( a, b, c, d )'
# v = ['a', 'b', 'c', 'd']
# self.assertEqual((v, len(i)),
# self.d.parse_iterable(i, 0, '(', ')'))
def test_parse_begin_aggregation_statement(self):
pairs = (('GROUP = name next', 'GROUP', 'name'),
('OBJECT=name next', 'OBJECT', 'name'),
('BEGIN_GROUP /*c1*/ = /*c2*/ name /*c3*/ next',
'BEGIN_GROUP', 'name'))
for x in pairs:
with self.subTest(pair=x):
tokens = Lexer(x[0])
# next_t = x[1].split()[-1]
self.assertEqual((x[1], x[2]),
self.p.parse_begin_aggregation_statement(tokens))
tokens = Lexer('Not-a-Begin-Aggegation-Statement = name')
self.assertRaises(ValueError,
self.p.parse_begin_aggregation_statement,
tokens)
strings = ('GROUP equals name', 'GROUP = 5')
for s in strings:
with self.subTest(string=s):
tokens = Lexer(s)
self.assertRaises(ValueError,
self.p.parse_begin_aggregation_statement,
tokens)
def test_parse_end_aggregation(self):
groups = (('END_GROUP', 'GROUP', 'name'),
('END_GROUP = name', 'BEGIN_GROUP', 'name'),
('END_OBJECT /*c1*/ = \n name', 'OBJECT', 'name'))
for g in groups:
with self.subTest(groups=g):
tokens = Lexer(g[0])
self.assertIsNone(self.p.parse_end_aggregation(g[1], g[2],
tokens))
bad_groups = (('END_GROUP', 'OBJECT', 'name'),
('END_GROUP = foo', 'BEGIN_GROUP', 'name'))
for g in bad_groups:
with self.subTest(groups=g):
tokens = Lexer(g[0])
self.assertRaises(ValueError,
self.p.parse_end_aggregation, g[1], g[2],
tokens)
tokens = Lexer('END_GROUP = ')
self.assertRaises(StopIteration, self.p.parse_end_aggregation,
'BEGIN_GROUP', 'name', tokens)
def test_parse_around_equals(self):
strings = ('=', ' = ', '/*c1*/ = /*c2*/')
for s in strings:
with self.subTest(string=s):
tokens = Lexer(s)
self.assertIsNone(self.p.parse_around_equals(tokens))
bad_strings = ('f', ' f ')
for s in bad_strings:
with self.subTest(string=s):
tokens = Lexer(s)
self.assertRaises(ValueError,
self.p.parse_around_equals, tokens)
tokens = Lexer('')
self.assertRaises(ParseError, self.p.parse_around_equals, tokens)
tokens = Lexer(' = f')
self.p.parse_around_equals(tokens)
f = next(tokens)
self.assertEqual(f, 'f')
def test_parse_units(self):
pairs = ((Units(5, 'm'), '<m>'),
(Units(5, 'm'), '< m >'),
(Units(5, 'm /* comment */'), '< m /* comment */>'),
(Units(5, 'm\nfoo'), '< m\nfoo >'))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[1])
self.assertEqual(p[0], self.p.parse_units(5, tokens))
def test_parse_set(self):
pairs = ((set(['a', 'b', 'c']), '{a,b,c}'),
(set(['a', 'b', 'c']), '{ a, b, c }'),
(set(['a', 'b', 'c']), '{ a, /* random */b, c }'),
(set(['a', frozenset(['x', 'y']), 'c']), '{ a, {x,y}, c }'))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[1])
self.assertEqual(p[0], self.p.parse_set(tokens))
def test_parse_sequence(self):
pairs = ((['a', 'b', 'c'], '(a,b,c)'),
(['a', 'b', 'c'], '( a, b, c )'),
(['a', 'b', 'c'], '( a, /* random */b, c )'),
(['a', ['x', 'y'], 'c'], '( a, (x,y), c )'))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[1])
self.assertEqual(p[0], self.p.parse_sequence(tokens))
def test_parse_WSC_until(self):
triplets = ((' stop <units>', 'stop', True),)
for t in triplets:
with self.subTest(triplet=t):
tokens = Lexer(t[0])
self.assertEqual(t[2], self.p.parse_WSC_until(t[1], tokens))
def test_parse_value(self):
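# Each pair maps a PVL-encoded value string to the Python object the parser is expected to return.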
pairs = (('(a,b,c)', ['a', 'b', 'c']),
('{ a, b, c }', set(['a', 'b', 'c'])),
('2001-01-01', datetime.date(2001, 1, 1)),
('2#0101#', 5),
('-79', -79),
('Unquoted', 'Unquoted'),
('"Quoted"', 'Quoted'),
('Null', None),
('TRUE', True),
('false', False),
('9 <planets>', Units(9, 'planets')))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[0])
self.assertEqual(p[1], self.p.parse_value(tokens))
def test_parse_assignment_statement(self):
pairs = (('a=b', 'a', 'b'),
('a =\tb', 'a', 'b'),
('a /*comment*/ = +80', 'a', 80),
('a = b c = d', 'a', 'b'),
('a = b; c = d', 'a', 'b'))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[0])
self.assertEqual((p[1], p[2]),
self.p.parse_assignment_statement(tokens))
tokens = Lexer('empty = 2##')
self.assertRaises(LexerError, self.p.parse_assignment_statement, tokens)
def test_parse_aggregation_block(self):
groups = (('GROUP = name bob = uncle END_GROUP',
('name', PVLGroup(bob='uncle'))),
('GROUP = name OBJECT = uncle name = bob END_OBJECT END_GROUP',
('name', PVLGroup(uncle=PVLObject(name='bob')))),
('GROUP = name bob = uncle END_GROUP = name next = token',
('name', PVLGroup(bob='uncle'))))
for g in groups:
with self.subTest(groups=g):
tokens = Lexer(g[0])
self.assertEqual(g[1],
self.p.parse_aggregation_block(tokens))
bad_blocks = ('Group = name bob = uncle END_OBJECT',
'GROUP= name = bob = uncle END_GROUP',
'')
for b in bad_blocks:
with self.subTest(block=b):
tokens = Lexer(b)
self.assertRaises(ValueError,
self.p.parse_aggregation_block, tokens)
def test_parse_end_statement(self):
strings = ('END;', 'End ; ', 'End /*comment*/', 'END next',
'END', 'end', 'END ', 'END\n\n')
for g in strings:
with self.subTest(groups=g):
tokens = Lexer(g)
# top = Lexer(g)
# for t in top:
# print(f'token : "{t}"')
self.assertIsNone(self.p.parse_end_statement(tokens))
tokens = Lexer('the_end')
self.assertRaises(ValueError, self.p.parse_end_statement, tokens)
def test_parse_module(self):
groups = (('a = b c = d END', PVLModule(a='b', c='d')),
('a =b GROUP = g f=g END_GROUP END',
PVLModule(a='b', g=PVLGroup(f='g'))),
('GROUP = g f=g END_GROUP END',
PVLModule(g=PVLGroup(f='g'))),
('GROUP = g f=g END_GROUP a = b OBJECT = o END_OBJECT END',
PVLModule(g=PVLGroup(f='g'), a='b', o=PVLObject())))
for g in groups:
with self.subTest(groups=g):
tokens = Lexer(g[0])
self.assertEqual(g[1],
self.p.parse_module(tokens))
tokens = Lexer('blob')
self.assertRaises(ParseError, self.p.parse_module, tokens)
tokens = Lexer('blob =')
self.assertRaises(ParseError, self.p.parse_module, tokens)
tokens = Lexer('GROUP GROUP')
self.assertRaises(LexerError, self.p.parse_module, tokens)
tokens = Lexer('BEGIN_OBJECT = foo END_OBJECT = bar')
self.assertRaises(ValueError, self.p.parse_module, tokens)
tokens = Lexer(""" mixed = 'mixed"\\'quotes'
number = '123' """)
self.assertRaises(LexerError, self.p.parse_module, tokens)
# tokens = Lexer('blob = foo = bar')
# self.p.parse_module(tokens)
def test_parse(self):
groups = (('a = b c = d END', PVLModule(a='b', c='d')),
('a =b GROUP = g f=g END_GROUP END',
PVLModule(a='b', g=PVLGroup(f='g'))),
('GROUP = g f=g END_GROUP END',
PVLModule(g=PVLGroup(f='g'))),
('GROUP = g f=g END_GROUP a = b OBJECT = o END_OBJECT END',
PVLModule(g=PVLGroup(f='g'), a='b', o=PVLObject())))
for g in groups:
with self.subTest(groups=g):
self.assertEqual(g[1], self.p.parse(g[0]))
self.assertRaises(ParseError, self.p.parse, 'blob')
class TestOmni(unittest.TestCase):
def setUp(self):
self.p = OmniParser()
def test_parse_module_post_hook(self):
m = PVLModule(a='b')
tokens = Lexer('c = d',
g=self.p.grammar, d=self.p.decoder)
self.assertRaises(Exception, self.p.parse_module_post_hook, m, tokens)
self.p.doc = ('a = b = d')
m = PVLModule(a='b')
tokens = Lexer('= d',
g=self.p.grammar, d=self.p.decoder)
mod = PVLModule(a=EmptyValueAtLine(0), b='d')
self.assertEqual((mod, False),
self.p.parse_module_post_hook(m, tokens))
self.p.doc = ('a = b =')
m = PVLModule(a='b')
tokens = Lexer('=',
g=self.p.grammar, d=self.p.decoder)
mod = PVLModule(a=EmptyValueAtLine(0),
b=EmptyValueAtLine(0))
self.assertEqual((mod, False),
self.p.parse_module_post_hook(m, tokens))
def test_comments(self):
some_pvl = ("""
/* comment on line */
# here is a line comment
/* here is a multi-
line comment */
foo = bar /* comment at end of line */
weird/* in the */=/*middle*/comments
baz = bang # end line comment
End
""")
self.assertEqual(PVLModule(foo='bar', weird='comments', baz='bang'),
self.p.parse(some_pvl))
| #!/usr/bin/env python
"""This module has new tests for the pvl decoder functions."""
# Copyright 2019, <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
from pvl.parser import PVLParser, ParseError, OmniParser, EmptyValueAtLine
from pvl.lexer import lexer as Lexer
from pvl.lexer import LexerError
from pvl._collections import Units, PVLModule, PVLGroup, PVLObject
class TestParse(unittest.TestCase):
def setUp(self):
self.p = PVLParser()
# def test_broken_assignment(self):
# self.assertRaises(DecodeError,
# self.d.broken_assignment, 'foo', 0)
# self.d.strict = False
# empty = EmptyValueAtLine(1)
# self.assertEqual(empty, self.d.broken_assignment('foo', 0))
# def test_parse_iterable(self):
# def pv(s, idx):
# (t, _, _) = s[idx:-1].partition(',')
# v = t.strip()
# i = s.find(v, idx)
# return v, i + len(v)
# with patch('pvl.decoder.PVLDecoder.parse_value', side_effect=pv):
# i = '( a, b, c, d )'
# v = ['a', 'b', 'c', 'd']
# self.assertEqual((v, len(i)),
# self.d.parse_iterable(i, 0, '(', ')'))
def test_parse_begin_aggregation_statement(self):
pairs = (('GROUP = name next', 'GROUP', 'name'),
('OBJECT=name next', 'OBJECT', 'name'),
('BEGIN_GROUP /*c1*/ = /*c2*/ name /*c3*/ next',
'BEGIN_GROUP', 'name'))
for x in pairs:
with self.subTest(pair=x):
tokens = Lexer(x[0])
# next_t = x[1].split()[-1]
self.assertEqual((x[1], x[2]),
self.p.parse_begin_aggregation_statement(tokens))
tokens = Lexer('Not-a-Begin-Aggegation-Statement = name')
self.assertRaises(ValueError,
self.p.parse_begin_aggregation_statement,
tokens)
strings = ('GROUP equals name', 'GROUP = 5')
for s in strings:
with self.subTest(string=s):
tokens = Lexer(s)
self.assertRaises(ValueError,
self.p.parse_begin_aggregation_statement,
tokens)
def test_parse_end_aggregation(self):
groups = (('END_GROUP', 'GROUP', 'name'),
('END_GROUP = name', 'BEGIN_GROUP', 'name'),
('END_OBJECT /*c1*/ = \n name', 'OBJECT', 'name'))
for g in groups:
with self.subTest(groups=g):
tokens = Lexer(g[0])
self.assertIsNone(self.p.parse_end_aggregation(g[1], g[2],
tokens))
bad_groups = (('END_GROUP', 'OBJECT', 'name'),
('END_GROUP = foo', 'BEGIN_GROUP', 'name'))
for g in bad_groups:
with self.subTest(groups=g):
tokens = Lexer(g[0])
self.assertRaises(ValueError,
self.p.parse_end_aggregation, g[1], g[2],
tokens)
tokens = Lexer('END_GROUP = ')
self.assertRaises(StopIteration, self.p.parse_end_aggregation,
'BEGIN_GROUP', 'name', tokens)
def test_parse_around_equals(self):
strings = ('=', ' = ', '/*c1*/ = /*c2*/')
for s in strings:
with self.subTest(string=s):
tokens = Lexer(s)
self.assertIsNone(self.p.parse_around_equals(tokens))
bad_strings = ('f', ' f ')
for s in bad_strings:
with self.subTest(string=s):
tokens = Lexer(s)
self.assertRaises(ValueError,
self.p.parse_around_equals, tokens)
tokens = Lexer('')
self.assertRaises(ParseError, self.p.parse_around_equals, tokens)
tokens = Lexer(' = f')
self.p.parse_around_equals(tokens)
f = next(tokens)
self.assertEqual(f, 'f')
def test_parse_units(self):
pairs = ((Units(5, 'm'), '<m>'),
(Units(5, 'm'), '< m >'),
(Units(5, 'm /* comment */'), '< m /* comment */>'),
(Units(5, 'm\nfoo'), '< m\nfoo >'))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[1])
self.assertEqual(p[0], self.p.parse_units(5, tokens))
def test_parse_set(self):
pairs = ((set(['a', 'b', 'c']), '{a,b,c}'),
(set(['a', 'b', 'c']), '{ a, b, c }'),
(set(['a', 'b', 'c']), '{ a, /* random */b, c }'),
(set(['a', frozenset(['x', 'y']), 'c']), '{ a, {x,y}, c }'))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[1])
self.assertEqual(p[0], self.p.parse_set(tokens))
def test_parse_sequence(self):
pairs = ((['a', 'b', 'c'], '(a,b,c)'),
(['a', 'b', 'c'], '( a, b, c )'),
(['a', 'b', 'c'], '( a, /* random */b, c )'),
(['a', ['x', 'y'], 'c'], '( a, (x,y), c )'))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[1])
self.assertEqual(p[0], self.p.parse_sequence(tokens))
def test_parse_WSC_until(self):
triplets = ((' stop <units>', 'stop', True),)
for t in triplets:
with self.subTest(triplet=t):
tokens = Lexer(t[0])
self.assertEqual(t[2], self.p.parse_WSC_until(t[1], tokens))
def test_parse_value(self):
pairs = (('(a,b,c)', ['a', 'b', 'c']),
('{ a, b, c }', set(['a', 'b', 'c'])),
('2001-01-01', datetime.date(2001, 1, 1)),
('2#0101#', 5),
('-79', -79),
('Unquoted', 'Unquoted'),
('"Quoted"', 'Quoted'),
('Null', None),
('TRUE', True),
('false', False),
('9 <planets>', Units(9, 'planets')))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[0])
self.assertEqual(p[1], self.p.parse_value(tokens))
def test_parse_assignment_statement(self):
pairs = (('a=b', 'a', 'b'),
('a =\tb', 'a', 'b'),
('a /*comment*/ = +80', 'a', 80),
('a = b c = d', 'a', 'b'),
('a = b; c = d', 'a', 'b'))
for p in pairs:
with self.subTest(pairs=p):
tokens = Lexer(p[0])
self.assertEqual((p[1], p[2]),
self.p.parse_assignment_statement(tokens))
tokens = Lexer('empty = 2##')
self.assertRaises(LexerError, self.p.parse_assignment_statement, tokens)
def test_parse_aggregation_block(self):
groups = (('GROUP = name bob = uncle END_GROUP',
('name', PVLGroup(bob='uncle'))),
('GROUP = name OBJECT = uncle name = bob END_OBJECT END_GROUP',
('name', PVLGroup(uncle=PVLObject(name='bob')))),
('GROUP = name bob = uncle END_GROUP = name next = token',
('name', PVLGroup(bob='uncle'))))
for g in groups:
with self.subTest(groups=g):
tokens = Lexer(g[0])
self.assertEqual(g[1],
self.p.parse_aggregation_block(tokens))
bad_blocks = ('Group = name bob = uncle END_OBJECT',
'GROUP= name = bob = uncle END_GROUP',
'')
for b in bad_blocks:
with self.subTest(block=b):
tokens = Lexer(b)
self.assertRaises(ValueError,
self.p.parse_aggregation_block, tokens)
def test_parse_end_statement(self):
strings = ('END;', 'End ; ', 'End /*comment*/', 'END next',
'END', 'end', 'END ', 'END\n\n')
for g in strings:
with self.subTest(groups=g):
tokens = Lexer(g)
# top = Lexer(g)
# for t in top:
# print(f'token : "{t}"')
self.assertIsNone(self.p.parse_end_statement(tokens))
tokens = Lexer('the_end')
self.assertRaises(ValueError, self.p.parse_end_statement, tokens)
def test_parse_module(self):
groups = (('a = b c = d END', PVLModule(a='b', c='d')),
('a =b GROUP = g f=g END_GROUP END',
PVLModule(a='b', g=PVLGroup(f='g'))),
('GROUP = g f=g END_GROUP END',
PVLModule(g=PVLGroup(f='g'))),
('GROUP = g f=g END_GROUP a = b OBJECT = o END_OBJECT END',
PVLModule(g=PVLGroup(f='g'), a='b', o=PVLObject())))
for g in groups:
with self.subTest(groups=g):
tokens = Lexer(g[0])
self.assertEqual(g[1],
self.p.parse_module(tokens))
tokens = Lexer('blob')
self.assertRaises(ParseError, self.p.parse_module, tokens)
tokens = Lexer('blob =')
self.assertRaises(ParseError, self.p.parse_module, tokens)
tokens = Lexer('GROUP GROUP')
self.assertRaises(LexerError, self.p.parse_module, tokens)
tokens = Lexer('BEGIN_OBJECT = foo END_OBJECT = bar')
self.assertRaises(ValueError, self.p.parse_module, tokens)
tokens = Lexer(""" mixed = 'mixed"\\'quotes'
number = '123' """)
self.assertRaises(LexerError, self.p.parse_module, tokens)
# tokens = Lexer('blob = foo = bar')
# self.p.parse_module(tokens)
def test_parse(self):
groups = (('a = b c = d END', PVLModule(a='b', c='d')),
('a =b GROUP = g f=g END_GROUP END',
PVLModule(a='b', g=PVLGroup(f='g'))),
('GROUP = g f=g END_GROUP END',
PVLModule(g=PVLGroup(f='g'))),
('GROUP = g f=g END_GROUP a = b OBJECT = o END_OBJECT END',
PVLModule(g=PVLGroup(f='g'), a='b', o=PVLObject())))
for g in groups:
with self.subTest(groups=g):
self.assertEqual(g[1], self.p.parse(g[0]))
self.assertRaises(ParseError, self.p.parse, 'blob')
class TestOmni(unittest.TestCase):
def setUp(self):
self.p = OmniParser()
def test_parse_module_post_hook(self):
m = PVLModule(a='b')
tokens = Lexer('c = d',
g=self.p.grammar, d=self.p.decoder)
self.assertRaises(Exception, self.p.parse_module_post_hook, m, tokens)
self.p.doc = ('a = b = d')
m = PVLModule(a='b')
tokens = Lexer('= d',
g=self.p.grammar, d=self.p.decoder)
mod = PVLModule(a=EmptyValueAtLine(0), b='d')
self.assertEqual((mod, False),
self.p.parse_module_post_hook(m, tokens))
self.p.doc = ('a = b =')
m = PVLModule(a='b')
tokens = Lexer('=',
g=self.p.grammar, d=self.p.decoder)
mod = PVLModule(a=EmptyValueAtLine(0),
b=EmptyValueAtLine(0))
self.assertEqual((mod, False),
self.p.parse_module_post_hook(m, tokens))
def test_comments(self):
some_pvl = ("""
/* comment on line */
# here is a line comment
/* here is a multi-
line comment */
foo = bar /* comment at end of line */
weird/* in the */=/*middle*/comments
baz = bang # end line comment
End
""")
self.assertEqual(PVLModule(foo='bar', weird='comments', baz='bang'),
self.p.parse(some_pvl))
| en | 0.596331 | #!/usr/bin/env python This module has new tests for the pvl decoder functions. # Copyright 2019, <NAME> (<EMAIL>) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # def test_broken_assignment(self): # self.assertRaises(DecodeError, # self.d.broken_assignment, 'foo', 0) # self.d.strict = False # empty = EmptyValueAtLine(1) # self.assertEqual(empty, self.d.broken_assignment('foo', 0)) # def test_parse_iterable(self): # def pv(s, idx): # (t, _, _) = s[idx:-1].partition(',') # v = t.strip() # i = s.find(v, idx) # return v, i + len(v) # with patch('pvl.decoder.PVLDecoder.parse_value', side_effect=pv): # i = '( a, b, c, d )' # v = ['a', 'b', 'c', 'd'] # self.assertEqual((v, len(i)), # self.d.parse_iterable(i, 0, '(', ')')) # next_t = x[1].split()[-1] #0101#', 5), ##') # top = Lexer(g) # for t in top: # print(f'token : "{t}"') mixed = 'mixed"\\'quotes' number = '123' # tokens = Lexer('blob = foo = bar') # self.p.parse_module(tokens) /* comment on line */ # here is a line comment /* here is a multi- line comment */ foo = bar /* comment at end of line */ weird/* in the */=/*middle*/comments baz = bang # end line comment End | 2.406891 | 2 |
delta/data/task/text_seq_label_task_test.py | didichuxing/delta | 1,442 | 6632596 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' text sequence labeling task unittest '''
import os
from pathlib import Path
import numpy as np
import delta.compat as tf
from absl import logging
from delta import utils
from delta.data.task.text_seq_label_task import TextSeqLabelTask
from delta.utils.register import import_all_modules_for_register
from delta import PACKAGE_ROOT_DIR
class TextSeqLabelTaskTest(tf.test.TestCase):
''' sequence labeling task test'''
def setUp(self):
super().setUp()
import_all_modules_for_register()
package_root = Path(PACKAGE_ROOT_DIR)
self.config_file = package_root.joinpath(
'../egs/mock_text_seq_label_data/seq-label/v1/config/seq-label-mock.yml'
)
def tearDown(self):
''' tear down '''
def test_english(self):
""" test seq label task of english data """
config = utils.load_config(self.config_file)
max_len = config["model"]["net"]["structure"]["max_len"]
config["data"]["task"]["language"] = "english"
task_config = config["data"]["task"]
task_config[
"text_vocab"] = "egs/mock_text_seq_label_data/seq-label/v1/data/text_vocab.txt"
task_config["need_shuffle"] = False
task = TextSeqLabelTask(config, utils.TRAIN)
# test offline data
data = task.dataset()
self.assertTrue("input_x_dict" in data and
"input_x" in data["input_x_dict"])
self.assertTrue("input_y_dict" in data and
"input_y" in data["input_y_dict"])
with self.cached_session(use_gpu=False, force_gpu=False) as sess:
sess.run(data["iterator"].initializer)
res = sess.run(
[data["input_x_dict"]["input_x"], data["input_y_dict"]["input_y"]])
logging.debug(res[0][0][:5])
logging.debug(res[1][0])
self.assertAllEqual(res[0][0][:5], [2, 3, 4, 5, 0])
self.assertEqual(np.shape(res[0]), (10, max_len))
self.assertEqual(np.shape(res[1]), (10, max_len))
# test online data
export_inputs = task.export_inputs()
self.assertTrue("export_inputs" in export_inputs and
"input_sentence" in export_inputs["export_inputs"])
input_sentence = export_inputs["export_inputs"]["input_sentence"]
input_x = export_inputs["model_inputs"]["input_x"]
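# Feed a raw sentence through the exported online graph and check that it is tokenized and padded to max_len.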
with self.cached_session(use_gpu=False, force_gpu=False) as sess:
sess.run(data["iterator"].initializer)
res = sess.run(input_x, feed_dict={input_sentence: ["I feel good ."]})
logging.debug(res[0][:5])
self.assertAllEqual(res[0][:5], [0, 3, 4, 5, 0])
self.assertEqual(np.shape(res[0]), (max_len,))
if __name__ == "__main__":
logging.set_verbosity(logging.DEBUG)
tf.test.main()
| # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' text sequence labeling task unittest '''
import os
from pathlib import Path
import numpy as np
import delta.compat as tf
from absl import logging
from delta import utils
from delta.data.task.text_seq_label_task import TextSeqLabelTask
from delta.utils.register import import_all_modules_for_register
from delta import PACKAGE_ROOT_DIR
class TextSeqLabelTaskTest(tf.test.TestCase):
''' sequence labeling task test'''
def setUp(self):
super().setUp()
import_all_modules_for_register()
package_root = Path(PACKAGE_ROOT_DIR)
self.config_file = package_root.joinpath(
'../egs/mock_text_seq_label_data/seq-label/v1/config/seq-label-mock.yml'
)
def tearDown(self):
''' tear down '''
def test_english(self):
""" test seq label task of english data """
config = utils.load_config(self.config_file)
max_len = config["model"]["net"]["structure"]["max_len"]
config["data"]["task"]["language"] = "english"
task_config = config["data"]["task"]
task_config[
"text_vocab"] = "egs/mock_text_seq_label_data/seq-label/v1/data/text_vocab.txt"
task_config["need_shuffle"] = False
task = TextSeqLabelTask(config, utils.TRAIN)
# test offline data
data = task.dataset()
self.assertTrue("input_x_dict" in data and
"input_x" in data["input_x_dict"])
self.assertTrue("input_y_dict" in data and
"input_y" in data["input_y_dict"])
with self.cached_session(use_gpu=False, force_gpu=False) as sess:
sess.run(data["iterator"].initializer)
res = sess.run(
[data["input_x_dict"]["input_x"], data["input_y_dict"]["input_y"]])
logging.debug(res[0][0][:5])
logging.debug(res[1][0])
self.assertAllEqual(res[0][0][:5], [2, 3, 4, 5, 0])
self.assertEqual(np.shape(res[0]), (10, max_len))
self.assertEqual(np.shape(res[1]), (10, max_len))
# test online data
export_inputs = task.export_inputs()
self.assertTrue("export_inputs" in export_inputs and
"input_sentence" in export_inputs["export_inputs"])
input_sentence = export_inputs["export_inputs"]["input_sentence"]
input_x = export_inputs["model_inputs"]["input_x"]
with self.cached_session(use_gpu=False, force_gpu=False) as sess:
sess.run(data["iterator"].initializer)
res = sess.run(input_x, feed_dict={input_sentence: ["I feel good ."]})
logging.debug(res[0][:5])
self.assertAllEqual(res[0][:5], [0, 3, 4, 5, 0])
self.assertEqual(np.shape(res[0]), (max_len,))
if __name__ == "__main__":
logging.set_verbosity(logging.DEBUG)
tf.test.main()
| en | 0.773585 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== text sequence labeling task unittest sequence labeling task test tear down test seq label task of english data # test offline data # test online data | 1.603457 | 2 |
brfast/measures/sensitivity/fpselect.py | tandriamil/BrFAST | 6 | 6632597 | <reponame>tandriamil/BrFAST
#!/usr/bin/python3
"""Module containing the sensitivity measures used in the FPSelect paper."""
import importlib
from typing import List
from loguru import logger
from brfast.data.attribute import AttributeSet
from brfast.data.dataset import FingerprintDataset
from brfast.measures import SensitivityMeasure
# from measures.similarity import TODO
# Import the engine of the analysis module (pandas or modin)
from brfast.config import params
pd = importlib.import_module(params.get('DataAnalysis', 'engine'))
PROPORTION_FIELD = 'proportion'
def _get_top_k_fingerprints(dataframe: pd.DataFrame,
attribute_names: List[str], k: int
) -> pd.DataFrame:
"""Get a DataFrame with the k-most common fingerprints.
Args:
dataframe: The fingerprint dataset.
attribute_names: The name of the attributes to consider.
k: The parameter to specify the k-most common fingerprints to hold.
Returns:
A DataFrame with only the most common fingerprints together with the
proportion of the users that share them.
"""
# Project the datafame on the wanted attributes
projected_dataframe = dataframe[attribute_names]
# Store the current dtypes of each column (needed to put back the dtypes,
# see below)
current_column_dtypes = {}
for column, dtype in projected_dataframe.dtypes.items():
current_column_dtypes[column] = dtype
# 1. We have to convert to strings due to the NaN values not being counted
# by the value_counts() function!
# 2. Use the value_counts function to count the unique/distinct rows.
# - Each distinct fingerprint is assigned the number of rows
# (= browsers) that share it.
# - The rows are sorted in descending manner: the most shared
# fingerprints are firsts.
# - The count is normalized: we have the proportion of users.
# 3. Rename the normalized count column as "proportion".
# 4. Only hold the k-most common/shared fingerprints.
# 5. Put back the previous dtypes of each column
return (projected_dataframe
.astype('str')
.value_counts(normalize=True, sort=True, ascending=False)
.reset_index(name=PROPORTION_FIELD)
.head(k)
.astype(current_column_dtypes)
)
# class SimilarAttributes(SensitivityMeasure):
# """The sensivity measure used in the FPSelect paper.
#
# This sensitivity measure considers that the impersonated users are those
# that have a fingerprint that is similar to the one of the k-most common
# fingerprints. Two fingerprints are considered similar if all their
# attributes are deemed similar using a similarity function.
# """
#
# def __init__(self, fingerprint_dataset: FingerprintDataset,
# most_common_fps: int,
# attr_dist_info: Dict[str, Dict[str, Any]]):
# """Initialize the sensivity measure used in the FPSelect paper.
#
# Args:
# dataset: The fingerprint dataset.
# most_common_fps: The number of the most common fingerprints that
# we should consider.
# attr_dist_info: The information about the attribute distances.
# For each attribute as a key, it should contain
# the type of the attribute and its TODO
# """
# # Initialize using the __init__ function of SensitivityMeasure
# super().__init__()
#
# # Set the variables used to compute the sensitivity
# self._fingerprint_dataset = fingerprint_dataset
# self._working_dataframe = (
# fingerprint_dataset.get_df_w_one_fp_per_browser())
# self._k = most_common_fps
# self._attr_dist_info = attr_dist_info
#
# def __repr__(self) -> str:
# """Provide a string representation of this sensitivity measure.
#
# Returns:
# A string representation of this sensitivity measure.
# """
# return (f'{self.__class__.__name__}({self._fingerprint_dataset}, '
# f'{self._k})')
#
# def evaluate(self, attribute_set: AttributeSet) -> float:
# """Measure the sensitivity of an attribute set.
#
# The sensitivity measure is required to be monotonically decreasing as
# we add attributes (see the FPSelect paper).
#
# Args:
# attribute_set: The attribute set which sensitivity is to be
# measured.
#
# Returns:
# The sensitivity of the attribute set.
# """
# # Get the names of the attributes that we consider
# attribute_names = [attribute.name for attribute in attribute_set]
#
# # Project these attributes on the fingerprint dataset
# projected_fingerprints = self._working_dataframe[attribute_names]
#
# # Get the k-most common/shared fingerprints
# top_k_fingerprints = _get_top_k_fingerprints(
# projected_fingerprints, attribute_names, self._k)
#
# # Return the proportion of users having a fingerprint that is similar
# # to the k-most common fingerprints considering similarity functions
# # return _get_proportion_similar_fingerprints(
# # projected_fingerprints, top_k_fingerprints)
# return None
class TopKFingerprints(SensitivityMeasure):
"""Simple sensitivity measure considering the k-most common fingerprints.
This sensitivity measure considers that the impersonated users are those
that share the k-most common fingerprints. No similarity function is
considered here.
"""
def __init__(self, fingerprint_dataset: FingerprintDataset,
most_common_fps: int):
"""Initialize the top-k simple sensitivity measure.
Args:
dataset: The fingerprint dataset.
most_common_fps: The number of the most common fingerprints that we
should consider.
"""
# Initialize using the __init__ function of SensitivityMeasure
super().__init__()
# Set the variables used to compute the sensitivity
self._fingerprint_dataset = fingerprint_dataset
self._working_dataframe = (
fingerprint_dataset.get_df_w_one_fp_per_browser())
self._k = most_common_fps
def __repr__(self) -> str:
"""Provide a string representation of this sensitivity measure.
Returns:
A string representation of this sensitivity measure.
"""
return (f'{self.__class__.__name__}({self._fingerprint_dataset}, '
f'{self._k})')
def evaluate(self, attribute_set: AttributeSet) -> float:
"""Measure the sensitivity of an attribute set.
The sensitivity measure is required to be monotonically decreasing as we
add attributes (see the FPSelect paper).
Args:
attribute_set: The attribute set which sensitivity is to be
measured.
Returns:
The sensitivity of the attribute set.
"""
# Get the names of the attributes that we consider
attribute_names = [attribute.name for attribute in attribute_set]
# Get the k-most common/shared fingerprints
top_k_fingerprints = _get_top_k_fingerprints(
self._working_dataframe, attribute_names, self._k)
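# The proportions of the k most common fingerprints sum to the fraction of browsers sharing one of them.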
browsers_sharing_top_k_fps = top_k_fingerprints[PROPORTION_FIELD].sum()
logger.debug(f'The top {self._k} fingerprints are shared by '
f'{browsers_sharing_top_k_fps} of the browsers.')
# Return the proportion of users sharing the k-most common fingerprints
return browsers_sharing_top_k_fps
| #!/usr/bin/python3
"""Module containing the sensitivity measures used in the FPSelect paper."""
import importlib
from typing import List
from loguru import logger
from brfast.data.attribute import AttributeSet
from brfast.data.dataset import FingerprintDataset
from brfast.measures import SensitivityMeasure
# from measures.similarity import TODO
# Import the engine of the analysis module (pandas or modin)
from brfast.config import params
pd = importlib.import_module(params.get('DataAnalysis', 'engine'))
PROPORTION_FIELD = 'proportion'
def _get_top_k_fingerprints(dataframe: pd.DataFrame,
attribute_names: List[str], k: int
) -> pd.DataFrame:
"""Get a DataFrame with the k-most common fingerprints.
Args:
dataframe: The fingerprint dataset.
attribute_names: The name of the attributes to consider.
k: The parameter to specify the k-most common fingerprints to hold.
Returns:
A DataFrame with only the most common fingerprints together with the
proportion of the users that share them.
"""
# Project the datafame on the wanted attributes
projected_dataframe = dataframe[attribute_names]
# Store the current dtypes of each column (needed to put back the dtypes,
# see below)
current_column_dtypes = {}
for column, dtype in projected_dataframe.dtypes.items():
current_column_dtypes[column] = dtype
# 1. We have to convert to strings due to the NaN values not being counted
# by the value_counts() function!
# 2. Use the value_counts function to count the unique/distinct rows.
# - Each distinct fingerprint is assigned the number of rows
# (= browsers) that share it.
# - The rows are sorted in descending manner: the most shared
# fingerprints are firsts.
# - The count is normalized: we have the proportion of users.
# 2. Rename the normalized count column as "proportion".
# 3. Only hold the k-most common/shared fingerprints.
# 4. Put back the previous dtypes of each column
return (projected_dataframe
.astype('str')
.value_counts(normalize=True, sort=True, ascending=False)
.reset_index(name=PROPORTION_FIELD)
.head(k)
.astype(current_column_dtypes)
)
# class SimilarAttributes(SensitivityMeasure):
# """The sensivity measure used in the FPSelect paper.
#
# This sensitivity measure considers that the impersonated users are those
# that have a fingerprint that is similar to the one of the k-most common
# fingerprints. Two fingerprints are considered similar if all their
# attributes are deemed similar using a similarity function.
# """
#
# def __init__(self, fingerprint_dataset: FingerprintDataset,
# most_common_fps: int,
# attr_dist_info: Dict[str, Dict[str, Any]]):
# """Initialize the sensivity measure used in the FPSelect paper.
#
# Args:
# dataset: The fingerprint dataset.
# most_common_fps: The number of the most common fingerprints that
# we should consider.
# attr_dist_info: The information about the attribute distances.
# For each attribute as a key, it should contain
# the type of the attribute and its TODO
# """
# # Initialize using the __init__ function of SensitivityMeasure
# super().__init__()
#
# # Set the variables used to compute the sensitivity
# self._fingerprint_dataset = fingerprint_dataset
# self._working_dataframe = (
# fingerprint_dataset.get_df_w_one_fp_per_browser())
# self._k = most_common_fps
# self._attr_dist_info = attr_dist_info
#
# def __repr__(self) -> str:
# """Provide a string representation of this sensitivity measure.
#
# Returns:
# A string representation of this sensitivity measure.
# """
# return (f'{self.__class__.__name__}({self._fingerprint_dataset}, '
# f'{self._k})')
#
# def evaluate(self, attribute_set: AttributeSet) -> float:
# """Measure the sensitivity of an attribute set.
#
# The sensitivity measure is required to be monotonously decreasing as
# we add attributes (see the FPSelect paper).
#
# Args:
# attribute_set: The attribute set which sensitivity is to be
# measured.
#
# Returns:
# The sensitivity of the attribute set.
# """
# # Get the names of the attributes that we consider
# attribute_names = [attribute.name for attribute in attribute_set]
#
# # Project these attributes on the fingerprint dataset
# projected_fingerprints = self._working_dataframe[attribute_names]
#
# # Get the k-most common/shared fingerprints
# top_k_fingerprints = _get_top_k_fingerprints(
# projected_fingerprints, attribute_names, self._k)
#
# # Return the proportion of users having a fingerprint that is similar
# # to the k-most common fingerprints considering similarity functions
# # return _get_proportion_similar_fingerprints(
# # projected_fingerprints, top_k_fingerprints)
# return None
class TopKFingerprints(SensitivityMeasure):
"""Simple sensitivity measure considering the k-most common fingerprints.
This sensitivity measure considers that the impersonated users are those
that share the k-most common fingerprints. No similarity function is
considered here.
"""
def __init__(self, fingerprint_dataset: FingerprintDataset,
most_common_fps: int):
"""Initialize the top-k simple sensitivity measure.
Args:
dataset: The fingerprint dataset.
most_common_fps: The number of the most common fingerprints that we
should consider.
"""
# Initialize using the __init__ function of SensitivityMeasure
super().__init__()
# Set the variables used to compute the sensitivity
self._fingerprint_dataset = fingerprint_dataset
self._working_dataframe = (
fingerprint_dataset.get_df_w_one_fp_per_browser())
self._k = most_common_fps
def __repr__(self) -> str:
"""Provide a string representation of this sensitivity measure.
Returns:
A string representation of this sensitivity measure.
"""
return (f'{self.__class__.__name__}({self._fingerprint_dataset}, '
f'{self._k})')
def evaluate(self, attribute_set: AttributeSet) -> float:
"""Measure the sensitivity of an attribute set.
The sensitivity measure is required to be monotonously decreasing as we
add attributes (see the FPSelect paper).
Args:
attribute_set: The attribute set which sensitivity is to be
measured.
Returns:
The sensitivity of the attribute set.
"""
# Get the names of the attributes that we consider
attribute_names = [attribute.name for attribute in attribute_set]
# Get the k-most common/shared fingerprints
top_k_fingerprints = _get_top_k_fingerprints(
self._working_dataframe, attribute_names, self._k)
browsers_sharing_top_k_fps = top_k_fingerprints[PROPORTION_FIELD].sum()
logger.debug(f'The top {self._k} fingerprints are shared by '
f'{browsers_sharing_top_k_fps} of the browsers.')
# Return the proportion of users sharing the k-most common fingerprints
return browsers_sharing_top_k_fps | en | 0.770538 | #!/usr/bin/python3 Module containing the sensitivity measures used in the FPSelect paper. # from measures.similarity import TODO # Import the engine of the analysis module (pandas or modin) Get a DataFrame with the k-most common fingerprints. Args: dataframe: The fingerprint dataset. attribute_names: The name of the attributes to consider. k: The parameter to specify the k-most common fingerprints to hold. Returns: A DataFrame with only the most common fingerprints together with the proportion of the users that share them. # Project the datafame on the wanted attributes # Store the current dtypes of each column (needed to put back the dtypes, # see below) # 1. We have to convert to strings due to the NaN values not being counted # by the value_counts() function! # 2. Use the value_counts function to count the unique/distinct rows. # - Each distinct fingerprint is assigned the number of rows # (= browsers) that share it. # - The rows are sorted in descending manner: the most shared # fingerprints are firsts. # - The count is normalized: we have the proportion of users. # 2. Rename the normalized count column as "proportion". # 3. Only hold the k-most common/shared fingerprints. # 4. Put back the previous dtypes of each column # class SimilarAttributes(SensitivityMeasure): # """The sensivity measure used in the FPSelect paper. # # This sensitivity measure considers that the impersonated users are those # that have a fingerprint that is similar to the one of the k-most common # fingerprints. Two fingerprints are considered similar if all their # attributes are deemed similar using a similarity function. # """ # # def __init__(self, fingerprint_dataset: FingerprintDataset, # most_common_fps: int, # attr_dist_info: Dict[str, Dict[str, Any]]): # """Initialize the sensivity measure used in the FPSelect paper. # # Args: # dataset: The fingerprint dataset. # most_common_fps: The number of the most common fingerprints that # we should consider. # attr_dist_info: The information about the attribute distances. # For each attribute as a key, it should contain # the type of the attribute and its TODO # """ # # Initialize using the __init__ function of SensitivityMeasure # super().__init__() # # # Set the variables used to compute the sensitivity # self._fingerprint_dataset = fingerprint_dataset # self._working_dataframe = ( # fingerprint_dataset.get_df_w_one_fp_per_browser()) # self._k = most_common_fps # self._attr_dist_info = attr_dist_info # # def __repr__(self) -> str: # """Provide a string representation of this sensitivity measure. # # Returns: # A string representation of this sensitivity measure. # """ # return (f'{self.__class__.__name__}({self._fingerprint_dataset}, ' # f'{self._k})') # # def evaluate(self, attribute_set: AttributeSet) -> float: # """Measure the sensitivity of an attribute set. # # The sensitivity measure is required to be monotonously decreasing as # we add attributes (see the FPSelect paper). # # Args: # attribute_set: The attribute set which sensitivity is to be # measured. # # Returns: # The sensitivity of the attribute set. 
# """ # # Get the names of the attributes that we consider # attribute_names = [attribute.name for attribute in attribute_set] # # # Project these attributes on the fingerprint dataset # projected_fingerprints = self._working_dataframe[attribute_names] # # # Get the k-most common/shared fingerprints # top_k_fingerprints = _get_top_k_fingerprints( # projected_fingerprints, attribute_names, self._k) # # # Return the proportion of users having a fingerprint that is similar # # to the k-most common fingerprints considering similarity functions # # return _get_proportion_similar_fingerprints( # # projected_fingerprints, top_k_fingerprints) # return None Simple sensitivity measure considering the k-most common fingerprints. This sensitivity measure considers that the impersonated users are those that share the k-most common fingerprints. No similarity function is considered here. Initialize the top-k simple sensitivity measure. Args: dataset: The fingerprint dataset. most_common_fps: The number of the most common fingerprints that we should consider. # Initialize using the __init__ function of SensitivityMeasure # Set the variables used to compute the sensitivity Provide a string representation of this sensitivity measure. Returns: A string representation of this sensitivity measure. Measure the sensitivity of an attribute set. The sensitivity measure is required to be monotonously decreasing as we add attributes (see the FPSelect paper). Args: attribute_set: The attribute set which sensitivity is to be measured. Returns: The sensitivity of the attribute set. # Get the names of the attributes that we consider # Get the k-most common/shared fingerprints # Return the proportion of users sharing the k-most common fingerprints | 2.553569 | 3 |
_352_scrape.py | bansallab/roundup | 0 | 6632598 | import csv
import re
from urllib.request import Request, urlopen
from dateutil import parser
from sys import argv
from bs4 import BeautifulSoup
import scrape_util
default_sale, base_url, prefix = scrape_util.get_market(argv)
report_path = 'main/actual_sales.idc.htm'
def get_sale_date(tag):
"""Return the date of the livestock sale."""
text = tag.get_text()
sale_date = parser.parse(text).date()
return sale_date
def is_header(line):
    """Return True if the line is a single-field section header ending with ':'."""
    is_succinct = len(line) < 2
match = False
if is_succinct:
match = re.search(r':$', line[-1])
return is_succinct and match
def is_sale(line):
    """Return True if the line ends with a dollar amount, i.e. looks like a sale."""
match = re.search(r'\$[\d\.]+$', line[-1])
return bool(match)
def get_sale_location(location):
    """Split a location string into a [city, state] pair."""
if ',' in location:
sale_location = location.split(',')
    elif ' ' not in location:
sale_location = [location, '']
else:
match = re.search(r'(.*?)(' + scrape_util.state + ')', location)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [location, '']
return sale_location
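# Example (hypothetical input): get_sale_location('Burley, ID') -> ['Burley', ' ID'];
# strings without a comma are split on a trailing state abbreviation when one is found.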
def get_sale(line, header):
    """Parse one sale line into a dict of consignor and cattle fields."""
sale_location = get_sale_location(line[1])
match = re.search(r'(?P<head>\d*)(?P<cattle>[^\d]+)(?P<weight>\d+)lbs\s\$(?P<price>[\d\.]+)', line[-1])
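    # Illustrative parse (hypothetical input, not taken from the real report):
    #   '12 blk strs 675lbs $151.50' -> head='12', cattle=' blk strs ',
    #   weight='675', price='151.50'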
sale = {
'consignor_name': line[0].strip(','),
'consignor_city': sale_location[0],
'consignor_state': sale_location[1],
'cattle_head': match.group('head'),
'cattle_avg_weight': match.group('weight').replace(',', ''),
'cattle_cattle': ' '.join([header, match.group('cattle').strip(', ')]),
'cattle_price_cwt': match.group('price').replace(',', ''),
}
sale = {k: v.strip() for k, v in sale.items() if v.strip()}
return sale
def write_sale(line, default_sale, writer):
    """Write each sale line to the CSV, tracking the current section header."""
header = None
for this_line in filter(bool, line):
if is_header(this_line):
header = this_line[0].strip(':')
elif header and is_sale(this_line):
sale = default_sale.copy()
sale.update(get_sale(this_line, header))
writer.writerow(sale)
def main():
# locate existing CSV files
archive = scrape_util.ArchiveFolder(argv, prefix)
# download Saturday sale page
request = Request(
base_url + report_path,
headers=scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'html5lib')
report = soup.find_all('td')
for this_report in report:
sale_date = get_sale_date(this_report.h3)
io_name = archive.new_csv(sale_date)
# Stop iteration if this report is already archived
if not io_name:
break
# Initialize the default sale dictionary
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
})
line = [[]]
for this_string in this_report.strings:
if re.match(r'\r|\n(?!\d)', this_string):
line.append([])
this_string = re.sub(r'\r|\n|\.{2,}', ' ', this_string).strip()
if this_string:
match = re.match(r'([^,]+),([^,]+),(.*)', this_string)
if match:
this_string = [match.group(i) for i in [1,2,3]]
line[-1].extend(this_string)
elif len(line[-1]) == 4:
this_line = line[-1][:2]
this_line.append(''.join(line[-1][2:]) + this_string)
line[-1] = this_line
else:
line[-1].append(this_string)
# open a new CSV file and write each sale
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main()
selfdrive/mapd/lib/osm.py | arneschwarck/openpilot | 4 | 6632599 | import overpy
class OSM:
  """Thin wrapper around the Overpass API for querying OSM road ways."""
def __init__(self):
self.api = overpy.Overpass()
def fetch_road_ways_around_location(self, location, radius):
lat, lon = location
# fetch all ways and nodes on this ways around location
around_str = f'{str(radius)},{str(lat)},{str(lon)}'
q = """
way(around:""" + around_str + """)
[highway]
[highway!~"^(footway|path|bridleway|steps|cycleway|construction|bus_guideway|escape|service)$"];
(._;>;);
out;
"""
try:
ways = self.api.query(q).ways
except Exception as e:
print(f'Exception while querying OSM:\n{e}')
ways = []
return ways
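# Minimal usage sketch (coordinates and radius below are illustrative):
#   osm = OSM()
#   ways = osm.fetch_road_ways_around_location((52.5200, 13.4050), radius=500)
#   for way in ways:
#       print(way.id, way.tags.get('highway'))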
dcm/cli.py | moloney/dcm | 11 | 6632600 | """Command line interface"""
from __future__ import annotations
import sys, os, logging, json, re
import asyncio
from contextlib import ExitStack
from copy import deepcopy
from datetime import datetime
from typing import Optional, Callable, Awaitable
import pydicom
from pydicom.dataset import Dataset
from pydicom.datadict import keyword_for_tag
from pynetdicom import evt
import click
import toml
from rich.console import Console
from rich.progress import Progress
from rich.logging import RichHandler
import dateparser
from . import __version__
from .conf import DcmConfig, _default_conf, NoLocalNodeError
from .util import str_to_tag, aclosing, json_serializer
from .lazyset import AllElems, LazySet
from .report import MultiListReport, RichProgressHook
from .query import QueryResult
from .net import DcmNode, LocalEntity, QueryLevel, EventFilter, make_queue_data_cb
from .filt import make_edit_filter, MultiFilter
from .route import StaticRoute, DynamicTransferReport, Router
from .store import TransferMethod
from .store.local_dir import LocalDir
from .store.net_repo import NetRepo
from .sync import SyncReport, make_basic_validator, sync_data
from .normalize import normalize
from .diff import diff_data_sets
log = logging.getLogger("dcm.cli")
def cli_error(msg, exit_code=1):
"""Print msg to stderr and exit with non-zero exit code"""
click.secho(msg, err=True, fg="red")
sys.exit(exit_code)
class QueryResponseFilter(logging.Filter):
def filter(self, record):
if (
record.name == "dcm.net"
and record.levelno == logging.DEBUG
and record.msg.startswith("Got query response:")
):
return False
return True
class PerformedQueryFilter(logging.Filter):
def filter(self, record):
if (
record.name == "dcm.net"
and record.levelno == logging.DEBUG
and record.msg.startswith("Performing query:")
):
return False
return True
debug_filters = {
"query_responses": QueryResponseFilter(),
"performed_queries": PerformedQueryFilter(),
}
@click.group()
@click.option(
"--config",
type=click.Path(dir_okay=False, readable=True, resolve_path=True),
envvar="DCM_CONFIG_PATH",
default=os.path.join(click.get_app_dir("dcm"), "dcm_conf.toml"),
help="Path to TOML config file",
)
@click.option(
"--log-path",
type=click.Path(dir_okay=False, readable=True, writable=True, resolve_path=True),
envvar="DCM_LOG_PATH",
help="Save logging output to this file",
)
@click.option(
"--file-log-level",
type=click.Choice(["DEBUG", "INFO", "WARN", "ERROR"], case_sensitive=False),
default="INFO",
help="Log level to use when logging to a file",
)
@click.option(
"--verbose", "-v", is_flag=True, default=False, help="Print INFO log messages"
)
@click.option("--debug", is_flag=True, default=False, help="Print DEBUG log messages")
@click.option(
"--debug-filter", multiple=True, help="Selectively filter debug log messages"
)
@click.option(
"--quiet", is_flag=True, default=False, help="Hide WARNING and below log messages"
)
@click.option(
"--pynetdicom-log-level",
type=click.Choice(["DEBUG", "INFO", "WARN", "ERROR"], case_sensitive=False),
default="WARN",
help="Control log level for lower level pynetdicom package",
)
@click.pass_context
def cli(
ctx,
config,
log_path,
file_log_level,
verbose,
debug,
debug_filter,
quiet,
pynetdicom_log_level,
):
"""High level DICOM file and network operations"""
if quiet:
if verbose or debug:
cli_error("Can't mix --quiet with --verbose/--debug")
    # Create Rich Console outputting to stderr for logging / progress bars
rich_con = Console(stderr=True)
# Setup logging
LOG_FORMAT = "%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s"
def_formatter = logging.Formatter(LOG_FORMAT)
root_logger = logging.getLogger("")
root_logger.setLevel(logging.DEBUG)
pynetdicom_logger = logging.getLogger("pynetdicom")
pynetdicom_logger.setLevel(getattr(logging, pynetdicom_log_level))
stream_formatter = logging.Formatter("%(threadName)s %(name)s %(message)s")
stream_handler = RichHandler(console=rich_con, enable_link_path=False)
stream_handler.setFormatter(stream_formatter)
# logging.getLogger("asyncio").setLevel(logging.DEBUG)
if debug:
stream_handler.setLevel(logging.DEBUG)
elif verbose:
stream_handler.setLevel(logging.INFO)
elif quiet:
stream_handler.setLevel(logging.ERROR)
else:
stream_handler.setLevel(logging.WARN)
root_logger.addHandler(stream_handler)
handlers = [stream_handler]
if log_path is not None:
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(def_formatter)
file_handler.setLevel(getattr(logging, file_log_level))
root_logger.addHandler(file_handler)
handlers.append(file_handler)
if len(debug_filter) > 0:
for filter_name in debug_filter:
if filter_name not in debug_filters:
cli_error("Unknown debug filter: %s" % filter_name)
for handler in handlers:
handler.addFilter(debug_filters[filter_name])
# Create global param dict for subcommands to use
ctx.obj = {}
ctx.obj["config_path"] = config
ctx.obj["config"] = DcmConfig(config, create_if_missing=True)
ctx.obj["rich_con"] = rich_con
@click.command()
@click.pass_obj
def version(params):
"""Print the version and exit"""
click.echo(__version__)
@click.command()
@click.pass_obj
@click.option("--show", is_flag=True, help="Just print the current config contents")
@click.option(
"--show-default", is_flag=True, help="Just print the default config contets"
)
@click.option("--path", is_flag=True, help="Just print the current config path")
def conf(params, show, show_default, path):
"""Open the config file with your $EDITOR"""
config_path = params["config_path"]
# TODO: Make these mutually exclusive? Or sub-commands?
if path:
click.echo(config_path)
if show:
with open(config_path, "r") as f:
click.echo(f.read())
if show_default:
click.echo(_default_conf)
if path or show or show_default:
return
err = False
while True:
click.edit(filename=config_path)
try:
with open(config_path, "r") as f:
_ = toml.load(f)
except toml.decoder.TomlDecodeError as e:
err = True
click.echo("The config file contains an error: %s" % e)
click.echo("The editor will be reopened so you can correct the error")
click.pause()
else:
if err:
click.echo("Config file is now valid")
break
@click.command()
@click.pass_obj
@click.argument("remote")
@click.option("--local", help="Local DICOM network node properties")
def echo(params, remote, local):
"""Test connectivity with remote node"""
local = params["config"].get_local_node(local)
remote_node = params["config"].get_remote_node(remote)
net_ent = LocalEntity(local)
res = asyncio.run(net_ent.echo(remote_node))
if res:
click.echo("Success")
else:
cli_error("Failed")
def _hr_to_dcm_date(in_str):
try:
dt = dateparser.parse(in_str)
except Exception:
cli_error(f"Unable to parse date: 'in_str'")
return dt.strftime("%Y%m%d")
def _build_study_date(since, before):
if since is not None:
since_str = _hr_to_dcm_date(since)
else:
since_str = ""
if before is not None:
before_str = _hr_to_dcm_date(before)
else:
before_str = ""
return f"{since_str}-{before_str}"
def _build_query(query_strs, since, before):
qdat = Dataset()
for query_input in query_strs:
try:
q_attr, q_val = query_input.split("=")
except Exception:
cli_error(f"Invalid query input string: {query_input}")
setattr(qdat, q_attr, q_val)
if since is not None or before is not None:
if hasattr(qdat, "StudyDate"):
cli_error("Do not specify 'StudyDate' when using '--since' or '--before'")
setattr(qdat, "StudyDate", _build_study_date(since, before))
return qdat
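# Example (hypothetical): query_strs=('PatientID=123', 'Modality=MR') with
# since='2021-01-01' yields a Dataset with PatientID, Modality and
# StudyDate='20210101-'.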
@click.command()
@click.pass_obj
@click.argument("remote")
@click.argument("query", nargs=-1)
@click.option(
"--level", default=None, help="Level of detail: patient/study/series/image"
)
@click.option(
"--query-res",
type=click.File("rb"),
help="A result from a previous query to refine",
)
@click.option("--since", help="Only return studies since this date")
@click.option("--before", help="Only return studies before this date")
@click.option("--local", help="Local DICOM network node properties")
@click.option("--out-format", default=None, help="Output format: tree/json")
@click.option(
"--assume-yes",
is_flag=True,
default=False,
help="Automatically answer all prompts with 'y'",
)
@click.option("--no-progress", is_flag=True, help="Don't display progress bars")
def query(
params,
remote,
query,
level,
query_res,
since,
before,
local,
out_format,
assume_yes,
no_progress,
):
"""Perform a query against a network node"""
if level is not None:
level = level.upper()
for q_lvl in QueryLevel:
if q_lvl.name == level:
level = q_lvl
break
else:
cli_error("Invalid level: %s" % level)
if query_res is None and not sys.stdin.isatty():
log.debug("Reading query_res from stdin")
query_res = sys.stdin
if query_res is not None:
in_str = query_res.read()
if in_str:
query_res = json_serializer.loads(in_str)
else:
query_res = None
if sys.stdout.isatty():
if out_format is None:
out_format = "tree"
else:
no_progress = True
if out_format is None:
out_format = "json"
if out_format not in ("tree", "json"):
cli_error("Invalid out-format: %s" % out_format)
local = params["config"].get_local_node(local)
remote_node = params["config"].get_remote_node(remote)
net_ent = LocalEntity(local)
qdat = _build_query(query, since, before)
if len(qdat) == 0 and query_res is None and not assume_yes:
if not click.confirm(
"This query hasn't been limited in any "
"way and may generate a huge result, "
"continue?"
):
return
with ExitStack() as estack:
if not no_progress:
prog = RichProgressHook(
estack.enter_context(
Progress(console=params["rich_con"], transient=True)
)
)
report = MultiListReport(description="query", prog_hook=prog)
else:
report = MultiListReport(description="query")
qr = asyncio.run(
net_ent.query(remote_node, level, qdat, query_res, report=report)
)
if out_format == "tree":
out = qr.to_tree()
elif out_format == "json":
out = json_serializer.dumps(qr, indent=4)
click.echo(out)
report.log_issues()
@click.command()
@click.pass_obj
@click.argument("dests", nargs=-1)
@click.option("--source", "-s", multiple=True, help="A data source")
@click.option("--query", "-q", multiple=True, help="Only sync data matching the query")
@click.option(
"--query-res",
type=click.File("rb"),
help="A result from a previous query to limit the data synced",
)
@click.option("--since", help="Only return studies since this date")
@click.option("--before", help="Only return studies before this date")
@click.option(
"--edit", "-e", multiple=True, help="Modify DICOM attribute in the synced data"
)
@click.option(
"--edit-json", type=click.File("rb"), help="Specify attribute modifications as JSON"
)
@click.option(
"--trust-level",
type=click.Choice([q.name for q in QueryLevel], case_sensitive=False),
default="IMAGE",
help="If sub-component counts match at this query level, assume "
"the data matches. Improves performance but sacrifices accuracy",
)
@click.option(
"--force-all",
"-f",
is_flag=True,
default=False,
help="Force all data on the source to be transfered, even if it "
"appears to already exist on the dest",
)
@click.option(
"--method",
"-m",
help="Transfer method to use",
type=click.Choice([m.name for m in TransferMethod], case_sensitive=False),
)
# Expose this when it is working
# @click.option('--validate', is_flag=True, default=False,
# help="All synced data is retrieved back from the dests and "
# "compared to the original data. Differing elements produce "
# "warnings.")
@click.option(
"--keep-errors",
is_flag=True,
default=False,
help="Don't skip inconsistent/unexpected incoming data",
)
@click.option(
"--dry-run",
"-n",
is_flag=True,
default=False,
help="Don't actually do any transfers, just print them",
)
@click.option("--local", help="Local DICOM network node properties")
@click.option("--dir-format", help="Output format for any local output directories")
@click.option(
"--recurse/--no-recurse",
default=None,
is_flag=True,
help="Don't recurse into input directories",
)
@click.option("--in-file-ext", help="File extension for local input directories")
@click.option("--out-file-ext", help="File extension for local output directories")
@click.option("--no-progress", is_flag=True, help="Don't display progress bars")
@click.option("--no-report", is_flag=True, help="Don't print report")
def sync(
params,
dests,
source,
query,
query_res,
since,
before,
edit,
edit_json,
trust_level,
force_all,
method,
keep_errors,
dry_run,
local,
dir_format,
recurse,
in_file_ext,
out_file_ext,
no_progress,
no_report,
):
"""Sync DICOM data from a one or more sources to one or more destinations
The `dests` can be a local directory, a DICOM network entity (given as
'hostname:aetitle:port'), or a named remote/route from your config file.
Generally you will need to use `--source` to specify the data source, unless
you pass in a query result which contains a source (e.g. when doing
'dcm query srcpacs ... | dcm sync destpacs'). The `--source` can be given
in the same way `dests` are specified, except it cannot be a 'route'.
"""
# Check for incompatible options
# if validate and dry_run:
# cli_error("Can't do validation on a dry run!")
# Disable progress for non-interactive output or dry runs
if not sys.stdout.isatty() or dry_run:
no_progress = True
# Build query dataset if needed
if len(query) != 0 or since is not None or before is not None:
query = _build_query(query, since, before)
else:
query = None
# Handle query-result options
if query_res is None and not sys.stdin.isatty():
query_res = sys.stdin
if query_res is not None:
in_str = query_res.read()
if in_str:
query_res = json_serializer.loads(in_str)
else:
query_res = None
# Determine the local node being used
try:
local = params["config"].get_local_node(local)
except NoLocalNodeError:
local = None
# Pass source options that override config through to the config parser
local_dir_kwargs = {"make_missing": False}
if recurse is not None:
local_dir_kwargs["recurse"] = recurse
if in_file_ext is not None:
local_dir_kwargs["file_ext"] = in_file_ext
params["config"].set_local_dir_kwargs(**local_dir_kwargs)
params["config"].set_net_repo_kwargs(local=local)
# Figure out source info
if len(source) == 0:
if query_res is None or query_res.prov.source is None:
cli_error("No data source specified")
if local is None:
raise NoLocalNodeError("No local DICOM node configured")
sources = [NetRepo(local, query_res.prov.source)]
else:
sources = []
for s in source:
try:
sources.append(params["config"].get_bucket(s))
except Exception as e:
cli_error(f"Error processing source '{s}': {e}")
# Pass dest options that override config through to the config parser
local_dir_kwargs = {}
if dir_format is not None:
local_dir_kwargs["out_fmt"] = dir_format
if out_file_ext is not None:
local_dir_kwargs["file_ext"] = out_file_ext
params["config"].set_local_dir_kwargs(**local_dir_kwargs)
# Some command line options override route configuration
static_route_kwargs = {}
dynamic_route_kwargs = {}
# Handle edit options
filt = None
if edit_json is not None:
edit_dict = json.load(edit_json)
edit_json.close()
else:
edit_dict = {}
if edit:
for edit_str in edit:
attr, val = edit_str.split("=")
edit_dict[attr] = val
if edit_dict:
filt = make_edit_filter(edit_dict)
static_route_kwargs["filt"] = filt
dynamic_route_kwargs["filt"] = filt
# Convert dests/filters to a StaticRoute
if method is not None:
method = TransferMethod[method.upper()]
static_route_kwargs["methods"] = (method,)
dynamic_route_kwargs["methods"] = {None: (method,)}
# Pass route options that override config through to the config parser
params["config"].set_static_route_kwargs(**static_route_kwargs)
params["config"].set_dynamic_route_kwargs(**dynamic_route_kwargs)
    # Do a sanity check that no sources are in dests. This is an especially easy
    # mistake to make, as earlier versions took the first positional arg to be
    # the source
for dest in dests:
try:
d_bucket = params["config"].get_bucket(dest)
except Exception:
pass
else:
if any(s == d_bucket for s in sources):
cli_error(f"The dest {dest} is also a source!")
continue
try:
static_route = params["config"].get_static_route(dest)
except Exception:
pass
else:
for d in static_route.dests:
if any(s == d_bucket for s in sources):
cli_error(f"The dest {d} is also a source!")
continue
try:
sel_dest_map = params["config"].get_selector_dest_map(dest)
except Exception:
pass
else:
for _, s_dests in sel_dest_map.routing_map:
for d in s_dests:
if any(s == d_bucket for s in sources):
cli_error(f"The dest {d} is also a source!")
continue
cli_error(f"Unknown dest: {dest}")
# Convert dests to routes
dests = params["config"].get_routes(dests)
# Handle validate option
# if validate:
# validators = [make_basic_validator()]
# else:
# validators = None
# Handle trust-level option
trust_level = QueryLevel[trust_level.upper()]
# Setup reporting/progress hooks and do the transfer
with ExitStack() as estack:
if not no_progress:
prog_hook = RichProgressHook(
estack.enter_context(
Progress(console=params["rich_con"], transient=True)
)
)
qr_reports = None
if query is not None or query_res is not None:
qr_reports = []
for src in sources:
if not no_progress:
report = MultiListReport(
description="init-query", prog_hook=prog_hook
)
else:
report = None
qr_reports.append(report)
if query_res is None:
qrs = None
else:
qrs = [deepcopy(query_res) for _ in sources]
base_kwargs = {
"trust_level": trust_level,
"force_all": force_all,
"keep_errors": keep_errors,
#'validators': validators,
}
sm_kwargs = []
sync_reports = []
for src in sources:
if not no_progress:
sync_report = SyncReport(prog_hook=prog_hook)
else:
sync_report = SyncReport()
kwargs = deepcopy(base_kwargs)
kwargs["report"] = sync_report
sm_kwargs.append(kwargs)
sync_reports.append(sync_report)
asyncio.run(
sync_data(sources, dests, query, qrs, qr_reports, sm_kwargs, dry_run)
)
for report in sync_reports:
report.log_issues()
if not no_report:
click.echo(report)
click.echo("\n")
def _make_route_data_cb(
res_q: asyncio.Queue[Dataset],
) -> Callable[[evt.Event], Awaitable[int]]:
"""Return callback that queues dataset/metadata from incoming events"""
async def callback(event: evt.Event) -> int:
# TODO: Do we need to embed the file_meta here?
await res_q.put(event.dataset)
return 0x0 # Success
return callback
async def _do_route(
local: DcmNode, router: Router, inactive_timeout: Optional[int] = None
) -> None:
local_ent = LocalEntity(local)
event_filter = EventFilter(event_types=frozenset((evt.EVT_C_STORE,)))
report = DynamicTransferReport()
last_update = None
if inactive_timeout:
last_update = datetime.now()
last_reported = 0
async with router.route(report=report) as route_q:
fwd_cb = _make_route_data_cb(route_q)
async with local_ent.listen(fwd_cb, event_filter=event_filter):
print("Listener started, hit Ctrl-c to exit")
try:
while True:
await asyncio.sleep(1.0)
if last_update is not None:
n_reported = report.n_reported
if n_reported != last_reported:
last_update = datetime.now()
last_reported = n_reported
elif inactive_timeout is not None:
if (
datetime.now() - last_update
).total_seconds() > inactive_timeout:
print("Timeout due to inactivity")
break
finally:
print("Listener shutting down")
@click.command()
@click.pass_obj
@click.argument("dests", nargs=-1)
@click.option(
"--edit", "-e", multiple=True, help="Modify DICOM attribute in the synced data"
)
@click.option(
"--edit-json", type=click.File("rb"), help="Specify attribute modifications as JSON"
)
@click.option("--local", help="Local DICOM network node properties")
@click.option("--dir-format", help="Output format for any local output directories")
@click.option(
"--out-file-ext", default="dcm", help="File extension for local output directories"
)
@click.option(
"--inactive-timeout",
type=int,
help="Stop listening after this many seconds of inactivity",
)
def forward(
params, dests, edit, edit_json, local, dir_format, out_file_ext, inactive_timeout
):
"""Listen for incoming DICOM files on network and forward to dests"""
local = params["config"].get_local_node(local)
# Pass dest options that override config through to the config parser
params["config"].set_local_dir_kwargs(out_fmt=dir_format, file_ext=out_file_ext)
# Some command line options override route configuration
static_route_kwargs = {}
dynamic_route_kwargs = {}
# Handle edit options
filt = None
if edit_json is not None:
edit_dict = json.load(edit_json)
edit_json.close()
else:
edit_dict = {}
if edit:
for edit_str in edit:
attr, val = edit_str.split("=")
edit_dict[attr] = val
if edit_dict:
filt = make_edit_filter(edit_dict)
static_route_kwargs["filt"] = filt
dynamic_route_kwargs["filt"] = filt
# Pass route options that override config through to the config parser
params["config"].set_static_route_kwargs(**static_route_kwargs)
params["config"].set_dynamic_route_kwargs(**dynamic_route_kwargs)
# Convert dests to routes
dests = params["config"].get_routes(dests)
router = Router(dests)
asyncio.run(_do_route(local, router, inactive_timeout))
def make_print_cb(fmt, elem_filter=None):
def print_cb(ds, elem):
if elem_filter:
tag = elem.tag
keyword = keyword_for_tag(tag)
if not elem_filter(tag, keyword):
return
try:
print(fmt.format(elem=elem, ds=ds))
except Exception:
log.warn("Couldn't apply format to elem: %s", elem)
return print_cb
def _make_elem_filter(include, exclude, groups, kw_regex, exclude_private):
if len(include) == 0:
include_tags = LazySet(AllElems)
else:
include_tags = set()
for in_str in include:
include_tags.add(str_to_tag(in_str))
include_tags = LazySet(include_tags)
exclude_tags = set()
for in_str in exclude:
exclude_tags.add(str_to_tag(in_str))
exclude_tags = LazySet(exclude_tags)
if len(groups) == 0:
groups = LazySet(AllElems)
else:
groups = LazySet([int(x) for x in groups])
kw_regex = [re.compile(x) for x in kw_regex]
def elem_filter(tag, keyword):
if exclude_private and tag.group % 2 == 1:
return False
if tag in exclude_tags:
return False
if tag.group not in groups:
if include and tag in include_tags:
return True
return False
if kw_regex:
keyword = keyword_for_tag(tag)
if not any(r.search(keyword) for r in kw_regex):
if include and tag in include_tags:
return True
return False
if tag in include_tags:
return True
return False
return elem_filter
@click.command()
@click.pass_obj
@click.argument("dcm_files", type=click.Path(exists=True, readable=True), nargs=-1)
@click.option("--out-format", default="plain", help="Output format: plain/json")
@click.option(
"--plain-fmt",
default="{elem}",
help="Format string applied to each element for 'plain' output. Can "
"reference 'elem' (the pydicom Element) and 'ds' (the pydicom Dataset) "
"objects in the format string.",
)
@click.option(
"--include",
"-i",
multiple=True,
help="Include specific elements by keyword or tag",
)
@click.option("--group", "-g", multiple=True, help="Include elements by group number")
@click.option(
"--kw-regex",
multiple=True,
help="Include elements where the keyword matches a regex",
)
@click.option(
"--exclude", "-e", multiple=True, help="Exclude elements by keyword or tag"
)
@click.option(
"--exclude-private",
is_flag=True,
default=False,
help="Exclude all private elements",
)
def dump(
params,
dcm_files,
out_format,
plain_fmt,
include,
group,
kw_regex,
exclude,
exclude_private,
):
"""Dump contents of DICOM files to stdout
Default is to include all elements, but this can be overridden by various options.
The `--out-format` can be `plain` or `json`. If it is `plain` each included element
is used to format a string which can be overridden with `--plain-fmt`.
"""
elem_filter = _make_elem_filter(include, exclude, group, kw_regex, exclude_private)
if out_format == "plain":
print_cb = make_print_cb(plain_fmt, elem_filter)
for pth in dcm_files:
ds = pydicom.dcmread(pth)
ds.walk(print_cb)
elif out_format == "json":
for pth in dcm_files:
ds = pydicom.dcmread(pth)
click.echo(json.dumps(normalize(ds, elem_filter), indent=4))
else:
cli_error("Unknown out-format: '%s'" % out_format)
@click.command()
@click.pass_obj
@click.argument("left")
@click.argument("right")
def diff(params, left, right):
"""Show differences between two data sets"""
left = pydicom.dcmread(left)
right = pydicom.dcmread(right)
diffs = diff_data_sets(left, right)
for d in diffs:
click.echo(str(d))
# Add our subcommands to the CLI
cli.add_command(version)
cli.add_command(conf)
cli.add_command(echo)
cli.add_command(query)
cli.add_command(sync)
cli.add_command(forward)
cli.add_command(dump)
cli.add_command(diff)
# Entry point
if __name__ == "__main__":
cli()
| """Command line interface"""
from __future__ import annotations
import sys, os, logging, json, re
import asyncio
from contextlib import ExitStack
from copy import deepcopy
from datetime import datetime
from typing import Optional, Callable, Awaitable
import pydicom
from pydicom.dataset import Dataset
from pydicom.datadict import keyword_for_tag
from pynetdicom import evt
import click
import toml
from rich.console import Console
from rich.progress import Progress
from rich.logging import RichHandler
import dateparser
from . import __version__
from .conf import DcmConfig, _default_conf, NoLocalNodeError
from .util import str_to_tag, aclosing, json_serializer
from .lazyset import AllElems, LazySet
from .report import MultiListReport, RichProgressHook
from .query import QueryResult
from .net import DcmNode, LocalEntity, QueryLevel, EventFilter, make_queue_data_cb
from .filt import make_edit_filter, MultiFilter
from .route import StaticRoute, DynamicTransferReport, Router
from .store import TransferMethod
from .store.local_dir import LocalDir
from .store.net_repo import NetRepo
from .sync import SyncReport, make_basic_validator, sync_data
from .normalize import normalize
from .diff import diff_data_sets
log = logging.getLogger("dcm.cli")
def cli_error(msg, exit_code=1):
"""Print msg to stderr and exit with non-zero exit code"""
click.secho(msg, err=True, fg="red")
sys.exit(exit_code)
class QueryResponseFilter(logging.Filter):
def filter(self, record):
if (
record.name == "dcm.net"
and record.levelno == logging.DEBUG
and record.msg.startswith("Got query response:")
):
return False
return True
class PerformedQueryFilter(logging.Filter):
def filter(self, record):
if (
record.name == "dcm.net"
and record.levelno == logging.DEBUG
and record.msg.startswith("Performing query:")
):
return False
return True
debug_filters = {
"query_responses": QueryResponseFilter(),
"performed_queries": PerformedQueryFilter(),
}
@click.group()
@click.option(
"--config",
type=click.Path(dir_okay=False, readable=True, resolve_path=True),
envvar="DCM_CONFIG_PATH",
default=os.path.join(click.get_app_dir("dcm"), "dcm_conf.toml"),
help="Path to TOML config file",
)
@click.option(
"--log-path",
type=click.Path(dir_okay=False, readable=True, writable=True, resolve_path=True),
envvar="DCM_LOG_PATH",
help="Save logging output to this file",
)
@click.option(
"--file-log-level",
type=click.Choice(["DEBUG", "INFO", "WARN", "ERROR"], case_sensitive=False),
default="INFO",
help="Log level to use when logging to a file",
)
@click.option(
"--verbose", "-v", is_flag=True, default=False, help="Print INFO log messages"
)
@click.option("--debug", is_flag=True, default=False, help="Print DEBUG log messages")
@click.option(
"--debug-filter", multiple=True, help="Selectively filter debug log messages"
)
@click.option(
"--quiet", is_flag=True, default=False, help="Hide WARNING and below log messages"
)
@click.option(
"--pynetdicom-log-level",
type=click.Choice(["DEBUG", "INFO", "WARN", "ERROR"], case_sensitive=False),
default="WARN",
help="Control log level for lower level pynetdicom package",
)
@click.pass_context
def cli(
ctx,
config,
log_path,
file_log_level,
verbose,
debug,
debug_filter,
quiet,
pynetdicom_log_level,
):
"""High level DICOM file and network operations"""
if quiet:
if verbose or debug:
cli_error("Can't mix --quiet with --verbose/--debug")
# Create Rich Console outputing to stderr for logging / progress bars
rich_con = Console(stderr=True)
# Setup logging
LOG_FORMAT = "%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s"
def_formatter = logging.Formatter(LOG_FORMAT)
root_logger = logging.getLogger("")
root_logger.setLevel(logging.DEBUG)
pynetdicom_logger = logging.getLogger("pynetdicom")
pynetdicom_logger.setLevel(getattr(logging, pynetdicom_log_level))
stream_formatter = logging.Formatter("%(threadName)s %(name)s %(message)s")
stream_handler = RichHandler(console=rich_con, enable_link_path=False)
stream_handler.setFormatter(stream_formatter)
# logging.getLogger("asyncio").setLevel(logging.DEBUG)
if debug:
stream_handler.setLevel(logging.DEBUG)
elif verbose:
stream_handler.setLevel(logging.INFO)
elif quiet:
stream_handler.setLevel(logging.ERROR)
else:
stream_handler.setLevel(logging.WARN)
root_logger.addHandler(stream_handler)
handlers = [stream_handler]
if log_path is not None:
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(def_formatter)
file_handler.setLevel(getattr(logging, file_log_level))
root_logger.addHandler(file_handler)
handlers.append(file_handler)
if len(debug_filter) > 0:
for filter_name in debug_filter:
if filter_name not in debug_filters:
cli_error("Unknown debug filter: %s" % filter_name)
for handler in handlers:
handler.addFilter(debug_filters[filter_name])
# Create global param dict for subcommands to use
ctx.obj = {}
ctx.obj["config_path"] = config
ctx.obj["config"] = DcmConfig(config, create_if_missing=True)
ctx.obj["rich_con"] = rich_con
@click.command()
@click.pass_obj
def version(params):
"""Print the version and exit"""
click.echo(__version__)
@click.command()
@click.pass_obj
@click.option("--show", is_flag=True, help="Just print the current config contents")
@click.option(
"--show-default", is_flag=True, help="Just print the default config contets"
)
@click.option("--path", is_flag=True, help="Just print the current config path")
def conf(params, show, show_default, path):
"""Open the config file with your $EDITOR"""
config_path = params["config_path"]
# TODO: Make these mutually exclusive? Or sub-commands?
if path:
click.echo(config_path)
if show:
with open(config_path, "r") as f:
click.echo(f.read())
if show_default:
click.echo(_default_conf)
if path or show or show_default:
return
err = False
while True:
click.edit(filename=config_path)
try:
with open(config_path, "r") as f:
_ = toml.load(f)
except toml.decoder.TomlDecodeError as e:
err = True
click.echo("The config file contains an error: %s" % e)
click.echo("The editor will be reopened so you can correct the error")
click.pause()
else:
if err:
click.echo("Config file is now valid")
break
@click.command()
@click.pass_obj
@click.argument("remote")
@click.option("--local", help="Local DICOM network node properties")
def echo(params, remote, local):
"""Test connectivity with remote node"""
local = params["config"].get_local_node(local)
remote_node = params["config"].get_remote_node(remote)
net_ent = LocalEntity(local)
res = asyncio.run(net_ent.echo(remote_node))
if res:
click.echo("Success")
else:
cli_error("Failed")
def _hr_to_dcm_date(in_str):
try:
dt = dateparser.parse(in_str)
except Exception:
cli_error(f"Unable to parse date: 'in_str'")
return dt.strftime("%Y%m%d")
def _build_study_date(since, before):
if since is not None:
since_str = _hr_to_dcm_date(since)
else:
since_str = ""
if before is not None:
before_str = _hr_to_dcm_date(before)
else:
before_str = ""
return f"{since_str}-{before_str}"
def _build_query(query_strs, since, before):
qdat = Dataset()
for query_input in query_strs:
try:
q_attr, q_val = query_input.split("=")
except Exception:
cli_error(f"Invalid query input string: {query_input}")
setattr(qdat, q_attr, q_val)
if since is not None or before is not None:
if hasattr(qdat, "StudyDate"):
cli_error("Do not specify 'StudyDate' when using '--since' or '--before'")
setattr(qdat, "StudyDate", _build_study_date(since, before))
return qdat
@click.command()
@click.pass_obj
@click.argument("remote")
@click.argument("query", nargs=-1)
@click.option(
"--level", default=None, help="Level of detail: patient/study/series/image"
)
@click.option(
"--query-res",
type=click.File("rb"),
help="A result from a previous query to refine",
)
@click.option("--since", help="Only return studies since this date")
@click.option("--before", help="Only return studies before this date")
@click.option("--local", help="Local DICOM network node properties")
@click.option("--out-format", default=None, help="Output format: tree/json")
@click.option(
"--assume-yes",
is_flag=True,
default=False,
help="Automatically answer all prompts with 'y'",
)
@click.option("--no-progress", is_flag=True, help="Don't display progress bars")
def query(
params,
remote,
query,
level,
query_res,
since,
before,
local,
out_format,
assume_yes,
no_progress,
):
"""Perform a query against a network node"""
if level is not None:
level = level.upper()
for q_lvl in QueryLevel:
if q_lvl.name == level:
level = q_lvl
break
else:
cli_error("Invalid level: %s" % level)
if query_res is None and not sys.stdin.isatty():
log.debug("Reading query_res from stdin")
query_res = sys.stdin
if query_res is not None:
in_str = query_res.read()
if in_str:
query_res = json_serializer.loads(in_str)
else:
query_res = None
if sys.stdout.isatty():
if out_format is None:
out_format = "tree"
else:
no_progress = True
if out_format is None:
out_format = "json"
if out_format not in ("tree", "json"):
cli_error("Invalid out-format: %s" % out_format)
local = params["config"].get_local_node(local)
remote_node = params["config"].get_remote_node(remote)
net_ent = LocalEntity(local)
qdat = _build_query(query, since, before)
if len(qdat) == 0 and query_res is None and not assume_yes:
if not click.confirm(
"This query hasn't been limited in any "
"way and may generate a huge result, "
"continue?"
):
return
with ExitStack() as estack:
if not no_progress:
prog = RichProgressHook(
estack.enter_context(
Progress(console=params["rich_con"], transient=True)
)
)
report = MultiListReport(description="query", prog_hook=prog)
else:
report = MultiListReport(description="query")
qr = asyncio.run(
net_ent.query(remote_node, level, qdat, query_res, report=report)
)
if out_format == "tree":
out = qr.to_tree()
elif out_format == "json":
out = json_serializer.dumps(qr, indent=4)
click.echo(out)
report.log_issues()
@click.command()
@click.pass_obj
@click.argument("dests", nargs=-1)
@click.option("--source", "-s", multiple=True, help="A data source")
@click.option("--query", "-q", multiple=True, help="Only sync data matching the query")
@click.option(
"--query-res",
type=click.File("rb"),
help="A result from a previous query to limit the data synced",
)
@click.option("--since", help="Only return studies since this date")
@click.option("--before", help="Only return studies before this date")
@click.option(
"--edit", "-e", multiple=True, help="Modify DICOM attribute in the synced data"
)
@click.option(
"--edit-json", type=click.File("rb"), help="Specify attribute modifications as JSON"
)
@click.option(
"--trust-level",
type=click.Choice([q.name for q in QueryLevel], case_sensitive=False),
default="IMAGE",
help="If sub-component counts match at this query level, assume "
"the data matches. Improves performance but sacrifices accuracy",
)
@click.option(
"--force-all",
"-f",
is_flag=True,
default=False,
help="Force all data on the source to be transfered, even if it "
"appears to already exist on the dest",
)
@click.option(
"--method",
"-m",
help="Transfer method to use",
type=click.Choice([m.name for m in TransferMethod], case_sensitive=False),
)
# Expose this when it is working
# @click.option('--validate', is_flag=True, default=False,
# help="All synced data is retrieved back from the dests and "
# "compared to the original data. Differing elements produce "
# "warnings.")
@click.option(
"--keep-errors",
is_flag=True,
default=False,
help="Don't skip inconsistent/unexpected incoming data",
)
@click.option(
"--dry-run",
"-n",
is_flag=True,
default=False,
help="Don't actually do any transfers, just print them",
)
@click.option("--local", help="Local DICOM network node properties")
@click.option("--dir-format", help="Output format for any local output directories")
@click.option(
"--recurse/--no-recurse",
default=None,
is_flag=True,
help="Don't recurse into input directories",
)
@click.option("--in-file-ext", help="File extension for local input directories")
@click.option("--out-file-ext", help="File extension for local output directories")
@click.option("--no-progress", is_flag=True, help="Don't display progress bars")
@click.option("--no-report", is_flag=True, help="Don't print report")
def sync(
params,
dests,
source,
query,
query_res,
since,
before,
edit,
edit_json,
trust_level,
force_all,
method,
keep_errors,
dry_run,
local,
dir_format,
recurse,
in_file_ext,
out_file_ext,
no_progress,
no_report,
):
"""Sync DICOM data from a one or more sources to one or more destinations
The `dests` can be a local directory, a DICOM network entity (given as
'hostname:aetitle:port'), or a named remote/route from your config file.
Generally you will need to use `--source` to specify the data source, unless
you pass in a query result which contains a source (e.g. when doing
'dcm query srcpacs ... | dcm sync destpacs'). The `--source` can be given
in the same way `dests` are specified, except it cannot be a 'route'.
"""
# Check for incompatible options
# if validate and dry_run:
# cli_error("Can't do validation on a dry run!")
# Disable progress for non-interactive output or dry runs
if not sys.stdout.isatty() or dry_run:
no_progress = True
# Build query dataset if needed
if len(query) != 0 or since is not None or before is not None:
query = _build_query(query, since, before)
else:
query = None
# Handle query-result options
if query_res is None and not sys.stdin.isatty():
query_res = sys.stdin
if query_res is not None:
in_str = query_res.read()
if in_str:
query_res = json_serializer.loads(in_str)
else:
query_res = None
# Determine the local node being used
try:
local = params["config"].get_local_node(local)
except NoLocalNodeError:
local = None
# Pass source options that override config through to the config parser
local_dir_kwargs = {"make_missing": False}
if recurse is not None:
local_dir_kwargs["recurse"] = recurse
if in_file_ext is not None:
local_dir_kwargs["file_ext"] = in_file_ext
params["config"].set_local_dir_kwargs(**local_dir_kwargs)
params["config"].set_net_repo_kwargs(local=local)
# Figure out source info
if len(source) == 0:
if query_res is None or query_res.prov.source is None:
cli_error("No data source specified")
if local is None:
raise NoLocalNodeError("No local DICOM node configured")
sources = [NetRepo(local, query_res.prov.source)]
else:
sources = []
for s in source:
try:
sources.append(params["config"].get_bucket(s))
except Exception as e:
cli_error(f"Error processing source '{s}': {e}")
# Pass dest options that override config through to the config parser
local_dir_kwargs = {}
if dir_format is not None:
local_dir_kwargs["out_fmt"] = dir_format
if out_file_ext is not None:
local_dir_kwargs["file_ext"] = out_file_ext
params["config"].set_local_dir_kwargs(**local_dir_kwargs)
# Some command line options override route configuration
static_route_kwargs = {}
dynamic_route_kwargs = {}
# Handle edit options
filt = None
if edit_json is not None:
edit_dict = json.load(edit_json)
edit_json.close()
else:
edit_dict = {}
if edit:
for edit_str in edit:
attr, val = edit_str.split("=")
edit_dict[attr] = val
if edit_dict:
filt = make_edit_filter(edit_dict)
static_route_kwargs["filt"] = filt
dynamic_route_kwargs["filt"] = filt
# Convert dests/filters to a StaticRoute
if method is not None:
method = TransferMethod[method.upper()]
static_route_kwargs["methods"] = (method,)
dynamic_route_kwargs["methods"] = {None: (method,)}
# Pass route options that override config through to the config parser
params["config"].set_static_route_kwargs(**static_route_kwargs)
params["config"].set_dynamic_route_kwargs(**dynamic_route_kwargs)
# Do samity check that no sources are in dests. This is especially easy
# mistake as earlier versions took the first positional arg to be the
# source
for dest in dests:
try:
d_bucket = params["config"].get_bucket(dest)
except Exception:
pass
else:
if any(s == d_bucket for s in sources):
cli_error(f"The dest {dest} is also a source!")
continue
try:
static_route = params["config"].get_static_route(dest)
except Exception:
pass
else:
for d in static_route.dests:
if any(s == d_bucket for s in sources):
cli_error(f"The dest {d} is also a source!")
continue
try:
sel_dest_map = params["config"].get_selector_dest_map(dest)
except Exception:
pass
else:
for _, s_dests in sel_dest_map.routing_map:
for d in s_dests:
if any(s == d_bucket for s in sources):
cli_error(f"The dest {d} is also a source!")
continue
cli_error(f"Unknown dest: {dest}")
# Convert dests to routes
dests = params["config"].get_routes(dests)
# Handle validate option
# if validate:
# validators = [make_basic_validator()]
# else:
# validators = None
# Handle trust-level option
trust_level = QueryLevel[trust_level.upper()]
# Setup reporting/progress hooks and do the transfer
with ExitStack() as estack:
if not no_progress:
prog_hook = RichProgressHook(
estack.enter_context(
Progress(console=params["rich_con"], transient=True)
)
)
qr_reports = None
if query is not None or query_res is not None:
qr_reports = []
for src in sources:
if not no_progress:
report = MultiListReport(
description="init-query", prog_hook=prog_hook
)
else:
report = None
qr_reports.append(report)
if query_res is None:
qrs = None
else:
qrs = [deepcopy(query_res) for _ in sources]
base_kwargs = {
"trust_level": trust_level,
"force_all": force_all,
"keep_errors": keep_errors,
#'validators': validators,
}
sm_kwargs = []
sync_reports = []
for src in sources:
if not no_progress:
sync_report = SyncReport(prog_hook=prog_hook)
else:
sync_report = SyncReport()
kwargs = deepcopy(base_kwargs)
kwargs["report"] = sync_report
sm_kwargs.append(kwargs)
sync_reports.append(sync_report)
asyncio.run(
sync_data(sources, dests, query, qrs, qr_reports, sm_kwargs, dry_run)
)
for report in sync_reports:
report.log_issues()
if not no_report:
click.echo(report)
click.echo("\n")
def _make_route_data_cb(
res_q: asyncio.Queue[Dataset],
) -> Callable[[evt.Event], Awaitable[int]]:
"""Return callback that queues dataset/metadata from incoming events"""
async def callback(event: evt.Event) -> int:
# TODO: Do we need to embed the file_meta here?
await res_q.put(event.dataset)
return 0x0 # Success
return callback
async def _do_route(
local: DcmNode, router: Router, inactive_timeout: Optional[int] = None
) -> None:
local_ent = LocalEntity(local)
event_filter = EventFilter(event_types=frozenset((evt.EVT_C_STORE,)))
report = DynamicTransferReport()
last_update = None
if inactive_timeout:
last_update = datetime.now()
last_reported = 0
async with router.route(report=report) as route_q:
fwd_cb = _make_route_data_cb(route_q)
async with local_ent.listen(fwd_cb, event_filter=event_filter):
print("Listener started, hit Ctrl-c to exit")
try:
while True:
await asyncio.sleep(1.0)
if last_update is not None:
n_reported = report.n_reported
if n_reported != last_reported:
last_update = datetime.now()
last_reported = n_reported
elif inactive_timeout is not None:
if (
datetime.now() - last_update
).total_seconds() > inactive_timeout:
print("Timeout due to inactivity")
break
finally:
print("Listener shutting down")
@click.command()
@click.pass_obj
@click.argument("dests", nargs=-1)
@click.option(
"--edit", "-e", multiple=True, help="Modify DICOM attribute in the synced data"
)
@click.option(
"--edit-json", type=click.File("rb"), help="Specify attribute modifications as JSON"
)
@click.option("--local", help="Local DICOM network node properties")
@click.option("--dir-format", help="Output format for any local output directories")
@click.option(
"--out-file-ext", default="dcm", help="File extension for local output directories"
)
@click.option(
"--inactive-timeout",
type=int,
help="Stop listening after this many seconds of inactivity",
)
def forward(
params, dests, edit, edit_json, local, dir_format, out_file_ext, inactive_timeout
):
"""Listen for incoming DICOM files on network and forward to dests"""
local = params["config"].get_local_node(local)
# Pass dest options that override config through to the config parser
params["config"].set_local_dir_kwargs(out_fmt=dir_format, file_ext=out_file_ext)
# Some command line options override route configuration
static_route_kwargs = {}
dynamic_route_kwargs = {}
# Handle edit options
filt = None
if edit_json is not None:
edit_dict = json.load(edit_json)
edit_json.close()
else:
edit_dict = {}
if edit:
for edit_str in edit:
attr, val = edit_str.split("=")
edit_dict[attr] = val
if edit_dict:
filt = make_edit_filter(edit_dict)
static_route_kwargs["filt"] = filt
dynamic_route_kwargs["filt"] = filt
# Pass route options that override config through to the config parser
params["config"].set_static_route_kwargs(**static_route_kwargs)
params["config"].set_dynamic_route_kwargs(**dynamic_route_kwargs)
# Convert dests to routes
dests = params["config"].get_routes(dests)
router = Router(dests)
asyncio.run(_do_route(local, router, inactive_timeout))
def make_print_cb(fmt, elem_filter=None):
def print_cb(ds, elem):
if elem_filter:
tag = elem.tag
keyword = keyword_for_tag(tag)
if not elem_filter(tag, keyword):
return
try:
print(fmt.format(elem=elem, ds=ds))
except Exception:
log.warn("Couldn't apply format to elem: %s", elem)
return print_cb
def _make_elem_filter(include, exclude, groups, kw_regex, exclude_private):
if len(include) == 0:
include_tags = LazySet(AllElems)
else:
include_tags = set()
for in_str in include:
include_tags.add(str_to_tag(in_str))
include_tags = LazySet(include_tags)
exclude_tags = set()
for in_str in exclude:
exclude_tags.add(str_to_tag(in_str))
exclude_tags = LazySet(exclude_tags)
if len(groups) == 0:
groups = LazySet(AllElems)
else:
groups = LazySet([int(x) for x in groups])
kw_regex = [re.compile(x) for x in kw_regex]
def elem_filter(tag, keyword):
if exclude_private and tag.group % 2 == 1:
return False
if tag in exclude_tags:
return False
if tag.group not in groups:
if include and tag in include_tags:
return True
return False
if kw_regex:
keyword = keyword_for_tag(tag)
if not any(r.search(keyword) for r in kw_regex):
if include and tag in include_tags:
return True
return False
if tag in include_tags:
return True
return False
return elem_filter
@click.command()
@click.pass_obj
@click.argument("dcm_files", type=click.Path(exists=True, readable=True), nargs=-1)
@click.option("--out-format", default="plain", help="Output format: plain/json")
@click.option(
"--plain-fmt",
default="{elem}",
help="Format string applied to each element for 'plain' output. Can "
"reference 'elem' (the pydicom Element) and 'ds' (the pydicom Dataset) "
"objects in the format string.",
)
@click.option(
"--include",
"-i",
multiple=True,
help="Include specific elements by keyword or tag",
)
@click.option("--group", "-g", multiple=True, help="Include elements by group number")
@click.option(
"--kw-regex",
multiple=True,
help="Include elements where the keyword matches a regex",
)
@click.option(
"--exclude", "-e", multiple=True, help="Exclude elements by keyword or tag"
)
@click.option(
"--exclude-private",
is_flag=True,
default=False,
help="Exclude all private elements",
)
def dump(
params,
dcm_files,
out_format,
plain_fmt,
include,
group,
kw_regex,
exclude,
exclude_private,
):
"""Dump contents of DICOM files to stdout
Default is to include all elements, but this can be overridden by various options.
The `--out-format` can be `plain` or `json`. If it is `plain` each included element
is used to format a string which can be overridden with `--plain-fmt`.
"""
elem_filter = _make_elem_filter(include, exclude, group, kw_regex, exclude_private)
if out_format == "plain":
print_cb = make_print_cb(plain_fmt, elem_filter)
for pth in dcm_files:
ds = pydicom.dcmread(pth)
ds.walk(print_cb)
elif out_format == "json":
for pth in dcm_files:
ds = pydicom.dcmread(pth)
click.echo(json.dumps(normalize(ds, elem_filter), indent=4))
else:
cli_error("Unknown out-format: '%s'" % out_format)
@click.command()
@click.pass_obj
@click.argument("left")
@click.argument("right")
def diff(params, left, right):
"""Show differences between two data sets"""
left = pydicom.dcmread(left)
right = pydicom.dcmread(right)
diffs = diff_data_sets(left, right)
for d in diffs:
click.echo(str(d))
# Add our subcommands ot the CLI
cli.add_command(version)
cli.add_command(conf)
cli.add_command(echo)
cli.add_command(query)
cli.add_command(sync)
cli.add_command(forward)
cli.add_command(dump)
cli.add_command(diff)
# Entry point
if __name__ == "__main__":
cli()
| en | 0.760044 | Command line interface Print msg to stderr and exit with non-zero exit code High level DICOM file and network operations # Create Rich Console outputing to stderr for logging / progress bars # Setup logging # logging.getLogger("asyncio").setLevel(logging.DEBUG) # Create global param dict for subcommands to use Print the version and exit Open the config file with your $EDITOR # TODO: Make these mutually exclusive? Or sub-commands? Test connectivity with remote node Perform a query against a network node # Expose this when it is working # @click.option('--validate', is_flag=True, default=False, # help="All synced data is retrieved back from the dests and " # "compared to the original data. Differing elements produce " # "warnings.") Sync DICOM data from a one or more sources to one or more destinations The `dests` can be a local directory, a DICOM network entity (given as 'hostname:aetitle:port'), or a named remote/route from your config file. Generally you will need to use `--source` to specify the data source, unless you pass in a query result which contains a source (e.g. when doing 'dcm query srcpacs ... | dcm sync destpacs'). The `--source` can be given in the same way `dests` are specified, except it cannot be a 'route'. # Check for incompatible options # if validate and dry_run: # cli_error("Can't do validation on a dry run!") # Disable progress for non-interactive output or dry runs # Build query dataset if needed # Handle query-result options # Determine the local node being used # Pass source options that override config through to the config parser # Figure out source info # Pass dest options that override config through to the config parser # Some command line options override route configuration # Handle edit options # Convert dests/filters to a StaticRoute # Pass route options that override config through to the config parser # Do samity check that no sources are in dests. This is especially easy # mistake as earlier versions took the first positional arg to be the # source # Convert dests to routes # Handle validate option # if validate: # validators = [make_basic_validator()] # else: # validators = None # Handle trust-level option # Setup reporting/progress hooks and do the transfer #'validators': validators, Return callback that queues dataset/metadata from incoming events # TODO: Do we need to embed the file_meta here? # Success Listen for incoming DICOM files on network and forward to dests # Pass dest options that override config through to the config parser # Some command line options override route configuration # Handle edit options # Pass route options that override config through to the config parser # Convert dests to routes Dump contents of DICOM files to stdout Default is to include all elements, but this can be overridden by various options. The `--out-format` can be `plain` or `json`. If it is `plain` each included element is used to format a string which can be overridden with `--plain-fmt`. Show differences between two data sets # Add our subcommands ot the CLI # Entry point | 1.847126 | 2 |
ipysheet/_version.py | DougRzz/ipysheet | 0 | 6632601 | __version_tuple__ = (0, 3, 0)
__version_tuple_js__ = (0, 3, 1)
__version__ = '0.3.0'
__version_js__ = '0.3.1'
version_info = __version_tuple__ # kept for backward compatibility
| __version_tuple__ = (0, 3, 0)
__version_tuple_js__ = (0, 3, 1)
__version__ = '0.3.0'
__version_js__ = '0.3.1'
version_info = __version_tuple__ # kept for backward compatibility
| en | 0.820226 | # kept for backward compatibility | 1.327737 | 1 |
tools/download_imagenet_weights.py | jinlmsft/Detectron.pytorch | 0 | 6632602 | <reponame>jinlmsft/Detectron.pytorch
"""Script to downlaod ImageNet pretrained weights from Google Drive
Extra packages required to run the script:
colorama, argparse_color_formatter
"""
import argparse
import os
import requests
#from argparse_color_formatter import ColorHelpFormatter
#from colorama import init, Fore
import _init_paths # pylint: disable=unused-import
from core.config import cfg
def parse_args():
"""Parser command line argumnets"""
parser = argparse.ArgumentParser() #(formatter_class=ColorHelpFormatter)
parser.add_argument('--output_dir', help='Directory to save downloaded weight files',
default=os.path.join(cfg.DATA_DIR, 'pretrained_model'))
parser.add_argument('-t', '--targets', nargs='+', metavar='file_name',
help='Files to download. Allowed values are: ',
choices=list(PRETRAINED_WEIGHTS.keys()),
default=list(PRETRAINED_WEIGHTS.keys()))
return parser.parse_args()
# ---------------------------------------------------------------------------- #
# Mapping from filename to google drive file_id
# ---------------------------------------------------------------------------- #
PRETRAINED_WEIGHTS = {
'resnet50_caffe.pth': '1wHSvusQ1CiEMc5Nx5R8adqoHQjIDWXl1',
'resnet101_caffe.pth': '1x2fTMqLrn63EMW0VuK4GEa2eQKzvJ_7l',
'resnet152_caffe.pth': '1NSCycOb7pU0KzluH326zmyMFUU55JslF',
'vgg16_caffe.pth': '19UphT53C0Ua9JAtICnw84PPTa3sZZ_9k',
}
# ---------------------------------------------------------------------------- #
# Helper functions for downloading files from Google Drive
# ---------------------------------------------------------------------------- #
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def main():
args = parse_args()
for filename in args.targets:
file_id = PRETRAINED_WEIGHTS[filename]
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
destination = os.path.join(args.output_dir, filename)
download_file_from_google_drive(file_id, destination)
print('Download {} to {}'.format(filename, destination))
if __name__ == "__main__":
main()
| """Script to downlaod ImageNet pretrained weights from Google Drive
Extra packages required to run the script:
colorama, argparse_color_formatter
"""
import argparse
import os
import requests
#from argparse_color_formatter import ColorHelpFormatter
#from colorama import init, Fore
import _init_paths # pylint: disable=unused-import
from core.config import cfg
def parse_args():
"""Parser command line argumnets"""
parser = argparse.ArgumentParser() #(formatter_class=ColorHelpFormatter)
parser.add_argument('--output_dir', help='Directory to save downloaded weight files',
default=os.path.join(cfg.DATA_DIR, 'pretrained_model'))
parser.add_argument('-t', '--targets', nargs='+', metavar='file_name',
help='Files to download. Allowed values are: ',
choices=list(PRETRAINED_WEIGHTS.keys()),
default=list(PRETRAINED_WEIGHTS.keys()))
return parser.parse_args()
# ---------------------------------------------------------------------------- #
# Mapping from filename to google drive file_id
# ---------------------------------------------------------------------------- #
PRETRAINED_WEIGHTS = {
'resnet50_caffe.pth': '1wHSvusQ1CiEMc5Nx5R8adqoHQjIDWXl1',
'resnet101_caffe.pth': '1x2fTMqLrn63EMW0VuK4GEa2eQKzvJ_7l',
'resnet152_caffe.pth': '1NSCycOb7pU0KzluH326zmyMFUU55JslF',
'vgg16_caffe.pth': '19UphT53C0Ua9JAtICnw84PPTa3sZZ_9k',
}
# ---------------------------------------------------------------------------- #
# Helper functions for downloading files from Google Drive
# ---------------------------------------------------------------------------- #
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def main():
args = parse_args()
for filename in args.targets:
file_id = PRETRAINED_WEIGHTS[filename]
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
destination = os.path.join(args.output_dir, filename)
download_file_from_google_drive(file_id, destination)
print('Download {} to {}'.format(filename, destination))
if __name__ == "__main__":
main() | en | 0.348289 | Script to downlaod ImageNet pretrained weights from Google Drive Extra packages required to run the script: colorama, argparse_color_formatter #from argparse_color_formatter import ColorHelpFormatter #from colorama import init, Fore # pylint: disable=unused-import Parser command line argumnets #(formatter_class=ColorHelpFormatter) # ---------------------------------------------------------------------------- # # Mapping from filename to google drive file_id # ---------------------------------------------------------------------------- # # ---------------------------------------------------------------------------- # # Helper fucntions for download file from google drive # ---------------------------------------------------------------------------- # # filter out keep-alive new chunks | 2.421883 | 2 |
vehicle_control/3_control/testing.py | tekjar/fcnd.projects | 0 | 6632603 | <filename>vehicle_control/3_control/testing.py
from controllers import PController, PDController, PIDController
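# Test helpers that compare a student-implemented controller class against the
# reference PController / PDController / PIDController: the two thrust_control
# outputs must agree to within 1e-4 (close_enough_floats) for a test to pass.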
def close_enough_floats(f1, f2):
return abs(f1 - f2) < 0.0001
def pct_diff(f, f_expected):
return 100.0 * (f - f_expected) / f_expected
def p_controller_test(StudentPC):
k_p = 1.5
m = 2.5
z_target = 2.0
z_actual = 3.2
pc = PController(k_p, m)
spc = StudentPC(k_p, m)
thrust = pc.thrust_control(z_target, z_actual)
s_thrust = spc.thrust_control(z_target, z_actual)
if close_enough_floats(thrust, s_thrust):
print("Tests pass")
else:
print("Tests fail. Off by %3.3f percent" % pct_diff(thrust, s_thrust))
def pd_controller_test(StudentPDC, feed_forward=False):
k_p = 1.5
k_d = 2.0
m = 2.5
z_target = 2.0
z_actual = 3.2
z_dot_target = -2.8
z_dot_actual = -2.7
ff = 1.1
controller = PDController(k_p, k_d, m)
scontroller = StudentPDC(k_p, k_d, m)
if feed_forward:
thrust = controller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual,
ff)
s_thrust = scontroller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual,
ff)
else:
thrust = controller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual)
s_thrust = scontroller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual)
if close_enough_floats(thrust, s_thrust):
print("Tests pass")
else:
print("Tests fail. Off by %3.3f percent" % pct_diff(thrust, s_thrust))
def pid_controller_test(StudentPIDC):
k_p = 1.5
k_d = 2.0
k_i = 1.2
m = 2.5
z_target = 2.0
z_actual = 3.2
z_dot_target = -2.8
z_dot_actual = -2.7
dt = 0.1
controller = PIDController(k_p, k_d, k_i, m)
scontroller = StudentPIDC(k_p, k_d, k_i, m)
for _ in range(3):
thrust = controller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual,
dt)
s_thrust = scontroller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual,
dt)
if close_enough_floats(thrust, s_thrust):
print("Tests pass")
else:
print("Tests fail. Off by %3.3f percent" % pct_diff(thrust, s_thrust)) | <filename>vehicle_control/3_control/testing.py
from controllers import PController, PDController, PIDController
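# Test helpers that compare a student-implemented controller class against the
# reference PController / PDController / PIDController: the two thrust_control
# outputs must agree to within 1e-4 (close_enough_floats) for a test to pass.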
def close_enough_floats(f1, f2):
return abs(f1 - f2) < 0.0001
def pct_diff(f, f_expected):
return 100.0 * (f - f_expected) / f_expected
def p_controller_test(StudentPC):
k_p = 1.5
m = 2.5
z_target = 2.0
z_actual = 3.2
pc = PController(k_p, m)
spc = StudentPC(k_p, m)
thrust = pc.thrust_control(z_target, z_actual)
s_thrust = spc.thrust_control(z_target, z_actual)
if close_enough_floats(thrust, s_thrust):
print("Tests pass")
else:
print("Tests fail. Off by %3.3f percent" % pct_diff(thrust, s_thrust))
def pd_controller_test(StudentPDC, feed_forward=False):
k_p = 1.5
k_d = 2.0
m = 2.5
z_target = 2.0
z_actual = 3.2
z_dot_target = -2.8
z_dot_actual = -2.7
ff = 1.1
controller = PDController(k_p, k_d, m)
scontroller = StudentPDC(k_p, k_d, m)
if feed_forward:
thrust = controller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual,
ff)
s_thrust = scontroller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual,
ff)
else:
thrust = controller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual)
s_thrust = scontroller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual)
if close_enough_floats(thrust, s_thrust):
print("Tests pass")
else:
print("Tests fail. Off by %3.3f percent" % pct_diff(thrust, s_thrust))
def pid_controller_test(StudentPIDC):
k_p = 1.5
k_d = 2.0
k_i = 1.2
m = 2.5
z_target = 2.0
z_actual = 3.2
z_dot_target = -2.8
z_dot_actual = -2.7
dt = 0.1
controller = PIDController(k_p, k_d, k_i, m)
scontroller = StudentPIDC(k_p, k_d, k_i, m)
for _ in range(3):
thrust = controller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual,
dt)
s_thrust = scontroller.thrust_control(z_target,
z_actual,
z_dot_target,
z_dot_actual,
dt)
if close_enough_floats(thrust, s_thrust):
print("Tests pass")
else:
print("Tests fail. Off by %3.3f percent" % pct_diff(thrust, s_thrust)) | none | 1 | 2.74602 | 3 |
|
commands/SinterBoxCommand/entry.py | tapnair/SinterBox | 0 | 6632604 | # Copyright 2022 by Autodesk, Inc.
# Permission to use, copy, modify, and distribute this software in object code form
# for any purpose and without fee is hereby granted, provided that the above copyright
# notice appears in all copies and that both that copyright notice and the limited
# warranty and restricted rights notice below appear in all supporting documentation.
#
# AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS. AUTODESK SPECIFICALLY
# DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE.
# AUTODESK, INC. DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR FREE.
import adsk.core
import adsk.fusion
import os
from .SinterBoxUtils import bounding_box_from_selections, get_default_thickness, auto_gaps
from .SinterBoxDefinition import SinterBoxDefinition
from ...lib import fusion360utils as futil
from ... import config
app = adsk.core.Application.get()
ui = app.userInterface
CMD_NAME = 'Sinterbox'
CMD_Description = 'Creates a rectangular sinterbox enclosing the selected geometry for 3D Printing with Selective ' \
'Laser Sintering (SLS) or Multi Jet Fusion (MJF).<br><br>' \
'Select the solid bodies to enclose then specify the dimensions of the sinterbox. ' \
'Use Move Bodies To New Component to consolidate all bodies in the same component. '
IS_PROMOTED = False
CMD_ID = f'{config.COMPANY_NAME}_{config.ADDIN_NAME}_{CMD_NAME}'
# TODO When workspace issues are fixed for working model. Also Add MESH when supported.
# WORKSPACE_IDS = ['FusionSolidEnvironment', 'MfgWorkingModelEnv', 'SimplifyWMEnv']
WORKSPACE_IDS = ['FusionSolidEnvironment']
PANEL_ID = 'SolidCreatePanel'
COMMAND_BESIDE_ID = 'PrimitivePipe'
ICON_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', '')
local_handlers = []
# Sinterbox specific global variables
IS_DRAGGING = False
AUTO_SIZE_GAPS = True
the_box: SinterBoxDefinition
the_box = None
def start():
cmd_def = ui.commandDefinitions.addButtonDefinition(CMD_ID, CMD_NAME, CMD_Description, ICON_FOLDER)
cmd_def.toolClipFilename = os.path.join(ICON_FOLDER, 'Sinterbox_Tooltip.png')
futil.add_handler(cmd_def.commandCreated, command_created)
for WORKSPACE_ID in WORKSPACE_IDS:
workspace = ui.workspaces.itemById(WORKSPACE_ID)
panel = workspace.toolbarPanels.itemById(PANEL_ID)
control = panel.controls.addCommand(cmd_def, COMMAND_BESIDE_ID, False)
control.isPromoted = IS_PROMOTED
def stop():
for WORKSPACE_ID in WORKSPACE_IDS:
workspace = ui.workspaces.itemById(WORKSPACE_ID)
panel = workspace.toolbarPanels.itemById(PANEL_ID)
command_control = panel.controls.itemById(CMD_ID)
command_definition = ui.commandDefinitions.itemById(CMD_ID)
if command_control:
command_control.deleteMe()
if command_definition:
command_definition.deleteMe()
def command_created(args: adsk.core.CommandCreatedEventArgs):
global the_box
futil.log(f'{CMD_NAME} Command Created Event')
futil.add_handler(args.command.execute, command_execute, local_handlers=local_handlers)
futil.add_handler(args.command.inputChanged, command_input_changed, local_handlers=local_handlers)
futil.add_handler(args.command.executePreview, command_preview, local_handlers=local_handlers)
futil.add_handler(args.command.destroy, command_destroy, local_handlers=local_handlers)
futil.add_handler(args.command.mouseDragEnd, mouse_drag_end, local_handlers=local_handlers)
futil.add_handler(args.command.mouseDragBegin, mouse_drag_begin, local_handlers=local_handlers)
inputs = args.command.commandInputs
design: adsk.fusion.Design = app.activeProduct
units = design.unitsManager.defaultLengthUnits
selection_input = inputs.addSelectionInput('body_select', "Input Bodies", "Bodies for Bounding Box")
selection_input.addSelectionFilter('Bodies')
# selection_input.addSelectionFilter('MeshBodies') # TODO When bounding box is supported for mesh bodies
selection_input.setSelectionLimits(1, 0)
default_selections = []
b_box = bounding_box_from_selections(default_selections)
default_thickness = get_default_thickness()
default_thickness_value = adsk.core.ValueInput.createByReal(default_thickness)
default_gap_value = adsk.core.ValueInput.createByReal(default_thickness * 4)
default_bar_value = adsk.core.ValueInput.createByReal(default_thickness * 2)
inputs.addValueInput('thick_input', "Cage Thickness", units, default_thickness_value)
inputs.addValueInput('bar', "Bar Width", units, default_bar_value)
inputs.addBoolValueInput('auto_gaps_input', 'Automatic Bar Spacing', True, '', True)
gap_input = inputs.addValueInput('gap', "Bar Spacing", units, default_gap_value)
gap_input.isEnabled = False
inputs.addBoolValueInput('full_preview_input', 'Preview', True, '', True)
inputs.addBoolValueInput('new_component_input', 'Move Bodies to New Component', True, '', True)
the_box = SinterBoxDefinition(b_box, inputs)
def command_execute(args: adsk.core.CommandEventArgs):
futil.log(f'{CMD_NAME} Command Execute Event')
inputs = args.command.commandInputs
selection_input: adsk.core.SelectionCommandInput = inputs.itemById('body_select')
new_component_input: adsk.core.BoolValueCommandInput = inputs.itemById('new_component_input')
bar_input: adsk.core.ValueCommandInput = inputs.itemById('bar')
thickness_input: adsk.core.ValueCommandInput = inputs.itemById('thick_input')
gap_input: adsk.core.ValueCommandInput = inputs.itemById('gap')
selection_bodies = [selection_input.selection(i).entity for i in range(selection_input.selectionCount)]
if len(selection_bodies) < 1:
return
design: adsk.fusion.Design = app.activeProduct
root_comp = design.rootComponent
group_start_index = 0
group_end_index = 0
is_parametric = design.designType == adsk.fusion.DesignTypes.ParametricDesignType
if is_parametric:
group_start_index = design.timeline.markerPosition
group_end_index = group_start_index + 2
the_box.clear_graphics()
the_box.update_selections(selection_bodies)
the_box.feature_values.bar = bar_input.value
the_box.feature_values.gap = gap_input.value
the_box.feature_values.shell_thickness = thickness_input.value
new_occurrence = the_box.create_brep()
if new_component_input.value:
body: adsk.fusion.BRepBody
for body in selection_bodies:
new_body = body.copyToComponent(new_occurrence)
# TODO think about Occurrences
# new_body.name = f'{body.parentComponent.name} - {body.name}'
for body in selection_bodies:
if body.isValid:
if is_parametric:
remove_feature = root_comp.features.removeFeatures.add(body)
group_end_index = remove_feature.timelineObject.index
else:
body.deleteMe()
if is_parametric:
t_group = design.timeline.timelineGroups.add(group_start_index, group_end_index)
t_group.name = 'Sinterbox'
def command_preview(args: adsk.core.CommandEventArgs):
futil.log(f'{CMD_NAME} Command Preview Event')
inputs = args.command.commandInputs
selection_input: adsk.core.SelectionCommandInput = inputs.itemById('body_select')
selection_bodies = [selection_input.selection(i).entity for i in range(selection_input.selectionCount)]
full_preview_input: adsk.core.BoolValueCommandInput = inputs.itemById('full_preview_input')
full_preview_value = full_preview_input.value
if len(selection_bodies) > 0:
the_box.update_selections(selection_bodies)
if (not IS_DRAGGING) and full_preview_value:
the_box.update_graphics_full()
else:
the_box.update_graphics()
def command_input_changed(args: adsk.core.InputChangedEventArgs):
global AUTO_SIZE_GAPS
changed_input = args.input
command: adsk.core.Command = args.firingEvent.sender
inputs = command.commandInputs
futil.log(f'{CMD_NAME} Input Changed Event fired from a change to {changed_input.id}')
selection_input: adsk.core.SelectionCommandInput = inputs.itemById('body_select')
selection_bodies = [selection_input.selection(i).entity for i in range(selection_input.selectionCount)]
bar_input: adsk.core.ValueCommandInput = inputs.itemById('bar')
bar_value = bar_input.value
thickness_input: adsk.core.ValueCommandInput = inputs.itemById('thick_input')
thickness_value = thickness_input.value
gap_input: adsk.core.ValueCommandInput = inputs.itemById('gap')
gap_value = gap_input.value
direction_group: adsk.core.GroupCommandInput = inputs.itemById('direction_group')
auto_gaps_input: adsk.core.BoolValueCommandInput = inputs.itemById('auto_gaps_input')
auto_gaps_value = auto_gaps_input.value
if changed_input.id == 'body_select':
if len(selection_bodies) > 0:
if direction_group is not None:
direction_input: adsk.core.DirectionCommandInput
for direction_input in direction_group.children:
if not direction_input.isVisible:
direction_input.isVisible = True
the_box.update_selections(selection_bodies)
if AUTO_SIZE_GAPS:
new_gap = auto_gaps(selection_bodies, the_box.modified_b_box, thickness_value, bar_value)
gap_input.value = new_gap
the_box.feature_values.gap = new_gap
else:
if direction_group is not None:
direction_input: adsk.core.DirectionCommandInput
for direction_input in direction_group.children:
if direction_input.isVisible:
direction_input.isVisible = False
elif changed_input.id == 'bar':
the_box.feature_values.bar = bar_value
elif changed_input.id == 'gap':
the_box.feature_values.gap = gap_value
elif changed_input.id == 'thick_input':
the_box.feature_values.shell_thickness = thickness_value
elif changed_input.id == 'auto_gaps_input':
AUTO_SIZE_GAPS = auto_gaps_value
if AUTO_SIZE_GAPS:
gap_input.isEnabled = False
if len(selection_bodies) > 0:
new_gap = auto_gaps(selection_bodies, the_box.modified_b_box, thickness_value, bar_value)
gap_input.value = new_gap
the_box.feature_values.gap = new_gap
else:
gap_input.isEnabled = True
def mouse_drag_begin(args: adsk.core.MouseEventArgs):
futil.log(f'{CMD_NAME} mouse_drag_begin')
global IS_DRAGGING
IS_DRAGGING = True
def mouse_drag_end(args: adsk.core.MouseEventArgs):
futil.log(f'{CMD_NAME} mouse_drag_end')
global IS_DRAGGING
IS_DRAGGING = False
command: adsk.core.Command = args.firingEvent.sender
inputs = command.commandInputs
full_preview_input: adsk.core.BoolValueCommandInput = inputs.itemById('full_preview_input')
full_preview_value = full_preview_input.value
if full_preview_value:
command.doExecutePreview()
def command_destroy(args: adsk.core.CommandEventArgs):
global local_handlers
futil.log(f'{CMD_NAME} Command Destroy Event')
the_box.clear_graphics()
local_handlers = []
| # Copyright 2022 by Autodesk, Inc.
# Permission to use, copy, modify, and distribute this software in object code form
# for any purpose and without fee is hereby granted, provided that the above copyright
# notice appears in all copies and that both that copyright notice and the limited
# warranty and restricted rights notice below appear in all supporting documentation.
#
# AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS. AUTODESK SPECIFICALLY
# DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE.
# AUTODESK, INC. DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR FREE.
import adsk.core
import adsk.fusion
import os
from .SinterBoxUtils import bounding_box_from_selections, get_default_thickness, auto_gaps
from .SinterBoxDefinition import SinterBoxDefinition
from ...lib import fusion360utils as futil
from ... import config
app = adsk.core.Application.get()
ui = app.userInterface
CMD_NAME = 'Sinterbox'
CMD_Description = 'Creates a rectangular sinterbox enclosing the selected geometry for 3D Printing with Selective ' \
'Laser Sintering (SLS) or Multi Jet Fusion (MJF).<br><br>' \
'Select the solid bodies to enclose then specify the dimensions of the sinterbox. ' \
'Use Move Bodies To New Component to consolidate all bodies in the same component. '
IS_PROMOTED = False
CMD_ID = f'{config.COMPANY_NAME}_{config.ADDIN_NAME}_{CMD_NAME}'
# TODO When workspace issues are fixed for working model. Also Add MESH when supported.
# WORKSPACE_IDS = ['FusionSolidEnvironment', 'MfgWorkingModelEnv', 'SimplifyWMEnv']
WORKSPACE_IDS = ['FusionSolidEnvironment']
PANEL_ID = 'SolidCreatePanel'
COMMAND_BESIDE_ID = 'PrimitivePipe'
ICON_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', '')
local_handlers = []
# Sinterbox specific global variables
IS_DRAGGING = False
AUTO_SIZE_GAPS = True
the_box: SinterBoxDefinition
the_box = None
def start():
cmd_def = ui.commandDefinitions.addButtonDefinition(CMD_ID, CMD_NAME, CMD_Description, ICON_FOLDER)
cmd_def.toolClipFilename = os.path.join(ICON_FOLDER, 'Sinterbox_Tooltip.png')
futil.add_handler(cmd_def.commandCreated, command_created)
for WORKSPACE_ID in WORKSPACE_IDS:
workspace = ui.workspaces.itemById(WORKSPACE_ID)
panel = workspace.toolbarPanels.itemById(PANEL_ID)
control = panel.controls.addCommand(cmd_def, COMMAND_BESIDE_ID, False)
control.isPromoted = IS_PROMOTED
def stop():
for WORKSPACE_ID in WORKSPACE_IDS:
workspace = ui.workspaces.itemById(WORKSPACE_ID)
panel = workspace.toolbarPanels.itemById(PANEL_ID)
command_control = panel.controls.itemById(CMD_ID)
command_definition = ui.commandDefinitions.itemById(CMD_ID)
if command_control:
command_control.deleteMe()
if command_definition:
command_definition.deleteMe()
def command_created(args: adsk.core.CommandCreatedEventArgs):
global the_box
futil.log(f'{CMD_NAME} Command Created Event')
futil.add_handler(args.command.execute, command_execute, local_handlers=local_handlers)
futil.add_handler(args.command.inputChanged, command_input_changed, local_handlers=local_handlers)
futil.add_handler(args.command.executePreview, command_preview, local_handlers=local_handlers)
futil.add_handler(args.command.destroy, command_destroy, local_handlers=local_handlers)
futil.add_handler(args.command.mouseDragEnd, mouse_drag_end, local_handlers=local_handlers)
futil.add_handler(args.command.mouseDragBegin, mouse_drag_begin, local_handlers=local_handlers)
inputs = args.command.commandInputs
design: adsk.fusion.Design = app.activeProduct
units = design.unitsManager.defaultLengthUnits
selection_input = inputs.addSelectionInput('body_select', "Input Bodies", "Bodies for Bounding Box")
selection_input.addSelectionFilter('Bodies')
# selection_input.addSelectionFilter('MeshBodies') # TODO When bounding box is supported for mesh bodies
selection_input.setSelectionLimits(1, 0)
default_selections = []
b_box = bounding_box_from_selections(default_selections)
default_thickness = get_default_thickness()
default_thickness_value = adsk.core.ValueInput.createByReal(default_thickness)
default_gap_value = adsk.core.ValueInput.createByReal(default_thickness * 4)
default_bar_value = adsk.core.ValueInput.createByReal(default_thickness * 2)
inputs.addValueInput('thick_input', "Cage Thickness", units, default_thickness_value)
inputs.addValueInput('bar', "Bar Width", units, default_bar_value)
inputs.addBoolValueInput('auto_gaps_input', 'Automatic Bar Spacing', True, '', True)
gap_input = inputs.addValueInput('gap', "Bar Spacing", units, default_gap_value)
gap_input.isEnabled = False
inputs.addBoolValueInput('full_preview_input', 'Preview', True, '', True)
inputs.addBoolValueInput('new_component_input', 'Move Bodies to New Component', True, '', True)
the_box = SinterBoxDefinition(b_box, inputs)
def command_execute(args: adsk.core.CommandEventArgs):
futil.log(f'{CMD_NAME} Command Execute Event')
inputs = args.command.commandInputs
selection_input: adsk.core.SelectionCommandInput = inputs.itemById('body_select')
new_component_input: adsk.core.BoolValueCommandInput = inputs.itemById('new_component_input')
bar_input: adsk.core.ValueCommandInput = inputs.itemById('bar')
thickness_input: adsk.core.ValueCommandInput = inputs.itemById('thick_input')
gap_input: adsk.core.ValueCommandInput = inputs.itemById('gap')
selection_bodies = [selection_input.selection(i).entity for i in range(selection_input.selectionCount)]
if len(selection_bodies) < 1:
return
design: adsk.fusion.Design = app.activeProduct
root_comp = design.rootComponent
group_start_index = 0
group_end_index = 0
is_parametric = design.designType == adsk.fusion.DesignTypes.ParametricDesignType
if is_parametric:
group_start_index = design.timeline.markerPosition
group_end_index = group_start_index + 2
the_box.clear_graphics()
the_box.update_selections(selection_bodies)
the_box.feature_values.bar = bar_input.value
the_box.feature_values.gap = gap_input.value
the_box.feature_values.shell_thickness = thickness_input.value
new_occurrence = the_box.create_brep()
if new_component_input.value:
body: adsk.fusion.BRepBody
for body in selection_bodies:
new_body = body.copyToComponent(new_occurrence)
# TODO think about Occurrences
# new_body.name = f'{body.parentComponent.name} - {body.name}'
for body in selection_bodies:
if body.isValid:
if is_parametric:
remove_feature = root_comp.features.removeFeatures.add(body)
group_end_index = remove_feature.timelineObject.index
else:
body.deleteMe()
if is_parametric:
t_group = design.timeline.timelineGroups.add(group_start_index, group_end_index)
t_group.name = 'Sinterbox'
def command_preview(args: adsk.core.CommandEventArgs):
futil.log(f'{CMD_NAME} Command Preview Event')
inputs = args.command.commandInputs
selection_input: adsk.core.SelectionCommandInput = inputs.itemById('body_select')
selection_bodies = [selection_input.selection(i).entity for i in range(selection_input.selectionCount)]
full_preview_input: adsk.core.BoolValueCommandInput = inputs.itemById('full_preview_input')
full_preview_value = full_preview_input.value
if len(selection_bodies) > 0:
the_box.update_selections(selection_bodies)
if (not IS_DRAGGING) and full_preview_value:
the_box.update_graphics_full()
else:
the_box.update_graphics()
def command_input_changed(args: adsk.core.InputChangedEventArgs):
global AUTO_SIZE_GAPS
changed_input = args.input
command: adsk.core.Command = args.firingEvent.sender
inputs = command.commandInputs
futil.log(f'{CMD_NAME} Input Changed Event fired from a change to {changed_input.id}')
selection_input: adsk.core.SelectionCommandInput = inputs.itemById('body_select')
selection_bodies = [selection_input.selection(i).entity for i in range(selection_input.selectionCount)]
bar_input: adsk.core.ValueCommandInput = inputs.itemById('bar')
bar_value = bar_input.value
thickness_input: adsk.core.ValueCommandInput = inputs.itemById('thick_input')
thickness_value = thickness_input.value
gap_input: adsk.core.ValueCommandInput = inputs.itemById('gap')
gap_value = gap_input.value
direction_group: adsk.core.GroupCommandInput = inputs.itemById('direction_group')
auto_gaps_input: adsk.core.BoolValueCommandInput = inputs.itemById('auto_gaps_input')
auto_gaps_value = auto_gaps_input.value
if changed_input.id == 'body_select':
if len(selection_bodies) > 0:
if direction_group is not None:
direction_input: adsk.core.DirectionCommandInput
for direction_input in direction_group.children:
if not direction_input.isVisible:
direction_input.isVisible = True
the_box.update_selections(selection_bodies)
if AUTO_SIZE_GAPS:
new_gap = auto_gaps(selection_bodies, the_box.modified_b_box, thickness_value, bar_value)
gap_input.value = new_gap
the_box.feature_values.gap = new_gap
else:
if direction_group is not None:
direction_input: adsk.core.DirectionCommandInput
for direction_input in direction_group.children:
if direction_input.isVisible:
direction_input.isVisible = False
elif changed_input.id == 'bar':
the_box.feature_values.bar = bar_value
elif changed_input.id == 'gap':
the_box.feature_values.gap = gap_value
elif changed_input.id == 'thick_input':
the_box.feature_values.shell_thickness = thickness_value
elif changed_input.id == 'auto_gaps_input':
AUTO_SIZE_GAPS = auto_gaps_value
if AUTO_SIZE_GAPS:
gap_input.isEnabled = False
if len(selection_bodies) > 0:
new_gap = auto_gaps(selection_bodies, the_box.modified_b_box, thickness_value, bar_value)
gap_input.value = new_gap
the_box.feature_values.gap = new_gap
else:
gap_input.isEnabled = True
def mouse_drag_begin(args: adsk.core.MouseEventArgs):
futil.log(f'{CMD_NAME} mouse_drag_begin')
global IS_DRAGGING
IS_DRAGGING = True
def mouse_drag_end(args: adsk.core.MouseEventArgs):
futil.log(f'{CMD_NAME} mouse_drag_end')
global IS_DRAGGING
IS_DRAGGING = False
command: adsk.core.Command = args.firingEvent.sender
inputs = command.commandInputs
full_preview_input: adsk.core.BoolValueCommandInput = inputs.itemById('full_preview_input')
full_preview_value = full_preview_input.value
if full_preview_value:
command.doExecutePreview()
def command_destroy(args: adsk.core.CommandEventArgs):
global local_handlers
futil.log(f'{CMD_NAME} Command Destroy Event')
the_box.clear_graphics()
local_handlers = []
| en | 0.728097 | # Copyright 2022 by Autodesk, Inc. # Permission to use, copy, modify, and distribute this software in object code form # for any purpose and without fee is hereby granted, provided that the above copyright # notice appears in all copies and that both that copyright notice and the limited # warranty and restricted rights notice below appear in all supporting documentation. # # AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS. AUTODESK SPECIFICALLY # DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE. # AUTODESK, INC. DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE # UNINTERRUPTED OR ERROR FREE. # TODO When workspace issues are fixed for working model. Also Add MESH when supported. # WORKSPACE_IDS = ['FusionSolidEnvironment', 'MfgWorkingModelEnv', 'SimplifyWMEnv'] # Sinterbox specific global variables # selection_input.addSelectionFilter('MeshBodies') # TODO When bounding box is supported for mesh bodies # TODO think about Occurrences # new_body.name = f'{body.parentComponent.name} - {body.name}' | 1.647796 | 2 |
qf_diamond_norm/info_test.py | gecrooks/qf-diamond-norm | 1 | 6632605 | # Copyright 2020-, <NAME> and contributors
#
# This source code is licensed under the Apache License 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import numpy as np
import pytest
import quantumflow as qf
from qf_diamond_norm import diamond_norm
def test_diamond_norm() -> None:
# Test cases borrowed from qutip,
# https://github.com/qutip/qutip/blob/master/qutip/tests/test_metrics.py
# which were in turn generated using QuantumUtils for MATLAB
# (https://goo.gl/oWXhO9)
RTOL = 0.01
chan0 = qf.I(0).aschannel()
chan1 = qf.X(0).aschannel()
dn = diamond_norm(chan0, chan1)
assert np.isclose(2.0, dn, rtol=RTOL)
turns_dnorm = [
[1.000000e-03, 3.141591e-03],
[3.100000e-03, 9.738899e-03],
[1.000000e-02, 3.141463e-02],
[3.100000e-02, 9.735089e-02],
[1.000000e-01, 3.128689e-01],
[3.100000e-01, 9.358596e-01],
]
for turns, target in turns_dnorm:
chan0 = qf.XPow(0.0, 0).aschannel()
chan1 = qf.XPow(turns, 0).aschannel()
dn = diamond_norm(chan0, chan1)
assert np.isclose(target, dn, rtol=RTOL)
hadamard_mixtures = [
[1.000000e-03, 2.000000e-03],
[3.100000e-03, 6.200000e-03],
[1.000000e-02, 2.000000e-02],
[3.100000e-02, 6.200000e-02],
[1.000000e-01, 2.000000e-01],
[3.100000e-01, 6.200000e-01],
]
for p, target in hadamard_mixtures:
tensor = qf.I(0).aschannel().tensor * (1 - p) + qf.H(0).aschannel().tensor * p
chan0 = qf.Channel(tensor, [0])
chan1 = qf.I(0).aschannel()
dn = diamond_norm(chan0, chan1)
assert np.isclose(dn, target, rtol=RTOL)
chan0 = qf.YPow(0.5, 0).aschannel()
chan1 = qf.I(0).aschannel()
dn = diamond_norm(chan0, chan1)
assert np.isclose(dn, np.sqrt(2), rtol=RTOL)
chan0 = qf.CNot(0, 1).aschannel()
chan1 = qf.CNot(1, 0).aschannel()
diamond_norm(chan0, chan1)
def test_diamond_norm_err() -> None:
with pytest.raises(ValueError):
chan0 = qf.I(0).aschannel()
chan1 = qf.I(1).aschannel()
diamond_norm(chan0, chan1)
# fin
| # Copyright 2020-, <NAME> and contributors
#
# This source code is licensed under the Apache License 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import numpy as np
import pytest
import quantumflow as qf
from qf_diamond_norm import diamond_norm
def test_diamond_norm() -> None:
# Test cases borrowed from qutip,
# https://github.com/qutip/qutip/blob/master/qutip/tests/test_metrics.py
# which were in turn generated using QuantumUtils for MATLAB
# (https://goo.gl/oWXhO9)
RTOL = 0.01
chan0 = qf.I(0).aschannel()
chan1 = qf.X(0).aschannel()
dn = diamond_norm(chan0, chan1)
assert np.isclose(2.0, dn, rtol=RTOL)
turns_dnorm = [
[1.000000e-03, 3.141591e-03],
[3.100000e-03, 9.738899e-03],
[1.000000e-02, 3.141463e-02],
[3.100000e-02, 9.735089e-02],
[1.000000e-01, 3.128689e-01],
[3.100000e-01, 9.358596e-01],
]
for turns, target in turns_dnorm:
chan0 = qf.XPow(0.0, 0).aschannel()
chan1 = qf.XPow(turns, 0).aschannel()
dn = diamond_norm(chan0, chan1)
assert np.isclose(target, dn, rtol=RTOL)
hadamard_mixtures = [
[1.000000e-03, 2.000000e-03],
[3.100000e-03, 6.200000e-03],
[1.000000e-02, 2.000000e-02],
[3.100000e-02, 6.200000e-02],
[1.000000e-01, 2.000000e-01],
[3.100000e-01, 6.200000e-01],
]
for p, target in hadamard_mixtures:
tensor = qf.I(0).aschannel().tensor * (1 - p) + qf.H(0).aschannel().tensor * p
chan0 = qf.Channel(tensor, [0])
chan1 = qf.I(0).aschannel()
dn = diamond_norm(chan0, chan1)
assert np.isclose(dn, target, rtol=RTOL)
chan0 = qf.YPow(0.5, 0).aschannel()
chan1 = qf.I(0).aschannel()
dn = diamond_norm(chan0, chan1)
assert np.isclose(dn, np.sqrt(2), rtol=RTOL)
chan0 = qf.CNot(0, 1).aschannel()
chan1 = qf.CNot(1, 0).aschannel()
diamond_norm(chan0, chan1)
def test_diamond_norm_err() -> None:
with pytest.raises(ValueError):
chan0 = qf.I(0).aschannel()
chan1 = qf.I(1).aschannel()
diamond_norm(chan0, chan1)
# fin
| en | 0.836755 | # Copyright 2020-, <NAME> and contributors # # This source code is licensed under the Apache License 2.0 found in # the LICENSE.txt file in the root directory of this source tree. # Test cases borrowed from qutip, # https://github.com/qutip/qutip/blob/master/qutip/tests/test_metrics.py # which were in turn generated using QuantumUtils for MATLAB # (https://goo.gl/oWXhO9) # fin | 1.859883 | 2 |
.history/ClassFiles/Control Flow/ForLoopRangeFunc_20210101223259.py | minefarmer/Comprehensive-Python | 0 | 6632606 | ''' Range Function
range( )
''' | ''' Range Function
range( )
''' | en | 0.324508 | Range Function range( ) | 1.201315 | 1 |
exportCellPopExcel.py | msw1293/HDF5-data-Mining | 0 | 6632607 | <filename>exportCellPopExcel.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 04 09:31:26 2018
@author: lfuchshofen
"""
#This Script will Demonstrate Pulling Data for a Population of Cells
#Using the loadHDF5 tool.
#Import packages and loadHDF5 custom script
import numpy as np
import loadHDF5
import matplotlib.pyplot as plt
import xlwings as xw
#Define custom "CellObj" object to store data
class CellObj:
def __init__(self, ID):
self.ID = ID
self.RTE = []
self.CLE = []
self.CE = []
self.DE = []
self.CC = []
self.DC = []
self.CT = []
self.DT = []
self.CP = []
self.DP = []
#Define a group of cells
IDs = range(13620,13643)
#Define empty list to populate
cellData = []
#Get Data for Cells
for ID in IDs:
ID = str(ID)
#Load the cycle stats dictionary
stats = loadHDF5.loadStat(ID)
#Check that load was successful
if type(stats) == str:
continue
#Create instance of CellObj to store data for cell number ID
cell = CellObj(ID)
#Filter cycling data by discharge energy
mask = [np.add(stats['CV_Discharge_Energy'],stats['CC_Discharge_Energy']) > 0.1]
#Populate CellObj container with cycling stats data
cell.RTE = stats['RT_Energy_Efficiency'][mask]*100
cell.CE = np.add(stats['CV_Charge_Energy'],stats['CC_Charge_Energy'])[mask]
cell.DE = np.add(stats['CV_Discharge_Energy'],stats['CC_Discharge_Energy'])[mask]
cell.CC = np.add(stats['CV_Charge_Capacity'],stats['CC_Charge_Capacity'])[mask]
cell.DC = np.add(stats['CV_Discharge_Capacity'],stats['CC_Discharge_Capacity'])[mask]
cell.CT = np.add(stats['CV_Charge_Time'],stats['CC_Charge_Time'])[mask]/3600.0
cell.DT = np.add(stats['CV_Discharge_Time'],stats['CC_Discharge_Time'])[mask]/3600.0
cell.CP = np.array([cell.CE[i]/cell.CT[i] for i in range(len(cell.RTE))])
cell.DP = np.array([cell.DE[i]/cell.DT[i] for i in range(len(cell.RTE))])
cell.CLE = np.array([cell.DC[i]/cell.CC[i] for i in range(len(cell.RTE))])*100
#Append instance of CellObj to cellData list
cellData.append(cell)
#Write Data to Excel Sheet
excelName = 'AAVG_Data.xlsx'
wb = xw.Book()
#Define the sheets to be written to the workbook and the respective units
sheets = ['RTE','CLE','CE','DE','CC','DC','CT','DT','CP','DP']
units = ['(%)','(%)','(Wh)','(Wh)','(Ah)','(Ah)','(h)','(h)','(W)','(W)']
#Add sheets to doc
for i in range(len(sheets)-1):
wb.sheets.add()
#Iterate over sheets and add data
for i,sheet in enumerate(sheets):
#Set name and activate corresponding sheet
wb.sheets[i].name = sheet +' - ' + units[i]
wb.sheets[i].activate()
#Iterate over cells
for j,cell in enumerate(cellData):
prefix = chr(66+j)
xw.Range(prefix+'1').value = cell.ID
indexes = [x+1 for x in range(len(cell.RTE))]
xw.Range('A2').value = np.array(indexes)[np.newaxis].T
xw.Range(prefix+'2').value = np.array(getattr(cell,sheet))[np.newaxis].T
#Save File. Add ".close()" to close it
wb.save(excelName)
| <filename>exportCellPopExcel.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 04 09:31:26 2018
@author: lfuchshofen
"""
#This Script will Demonstrate Pulling Data for a Population of Cells
#Using the loadHDF5 tool.
#Import packages and loadHDF5 custom script
import numpy as np
import loadHDF5
import matplotlib.pyplot as plt
import xlwings as xw
#Define custom "CellObj" object to store data
class CellObj:
def __init__(self, ID):
self.ID = ID
self.RTE = []
self.CLE = []
self.CE = []
self.DE = []
self.CC = []
self.DC = []
self.CT = []
self.DT = []
self.CP = []
self.DP = []
#Define a group of cells
IDs = range(13620,13643)
#Define empty list to populate
cellData = []
#Get Data for Cells
for ID in IDs:
ID = str(ID)
#Load the cycle stats dictionary
stats = loadHDF5.loadStat(ID)
#Check that load was successful
if type(stats) == str:
continue
#Create instance of CellObj to store data for cell number ID
cell = CellObj(ID)
#Filter cycling data by discharge energy
mask = [np.add(stats['CV_Discharge_Energy'],stats['CC_Discharge_Energy']) > 0.1]
#Populate CellObj container with cycling stats data
cell.RTE = stats['RT_Energy_Efficiency'][mask]*100
cell.CE = np.add(stats['CV_Charge_Energy'],stats['CC_Charge_Energy'])[mask]
cell.DE = np.add(stats['CV_Discharge_Energy'],stats['CC_Discharge_Energy'])[mask]
cell.CC = np.add(stats['CV_Charge_Capacity'],stats['CC_Charge_Capacity'])[mask]
cell.DC = np.add(stats['CV_Discharge_Capacity'],stats['CC_Discharge_Capacity'])[mask]
cell.CT = np.add(stats['CV_Charge_Time'],stats['CC_Charge_Time'])[mask]/3600.0
cell.DT = np.add(stats['CV_Discharge_Time'],stats['CC_Discharge_Time'])[mask]/3600.0
cell.CP = np.array([cell.CE[i]/cell.CT[i] for i in range(len(cell.RTE))])
cell.DP = np.array([cell.DE[i]/cell.DT[i] for i in range(len(cell.RTE))])
cell.CLE = np.array([cell.DC[i]/cell.CC[i] for i in range(len(cell.RTE))])*100
#Append instance of CellObj to cellData list
cellData.append(cell)
#Write Data to Excel Sheet
excelName = 'AAVG_Data.xlsx'
wb = xw.Book()
#Define the sheets to be written to the workbook and the respective units
sheets = ['RTE','CLE','CE','DE','CC','DC','CT','DT','CP','DP']
units = ['(%)','(%)','(Wh)','(Wh)','(Ah)','(Ah)','(h)','(h)','(W)','(W)']
#Add sheets to doc
for i in range(len(sheets)-1):
wb.sheets.add()
#Iterate over sheets and add data
for i,sheet in enumerate(sheets):
#Set name and activate corresponding sheet
wb.sheets[i].name = sheet +' - ' + units[i]
wb.sheets[i].activate()
#Iterate over cells
for j,cell in enumerate(cellData):
prefix = chr(66+j)
xw.Range(prefix+'1').value = cell.ID
indexes = [x+1 for x in range(len(cell.RTE))]
xw.Range('A2').value = np.array(indexes)[np.newaxis].T
xw.Range(prefix+'2').value = np.array(getattr(cell,sheet))[np.newaxis].T
#Save File. Add ".close()" to close it
wb.save(excelName)
| en | 0.746589 | # -*- coding: utf-8 -*- Created on Thu Jan 04 09:31:26 2018
@author: lfuchshofen #This Script will Demonstrate Pulling Data for a Single Cell #Using the loadHDF5 tool. #Import packages and loadHDF5 custom script #Define cystom "CellObj" object to store data #Define a group of cells #Define empty list to populate #Get Data for Cells #Load the cycle stats dictionary #Check that load was successful #Create instance of CellObj to store data for cell number ID #Filter cycling data by discharge energy #Populate CellObj container with cycling stats data #Append instance of CellObj to cellData list #Write Data to Excell Sheet #Define the sheets to be written in the sheet and the respective units #Add sheets to doc #Iterate over sheets and add data #Set name and activate corresponding sheet #Iterate over cells #Save File. Add ".close()" to close it | 2.519598 | 3 |
external/config.simple.py | eeyes-fsd/gymnasium-2019-11 | 0 | 6632608 | <gh_stars>0
class Config:
redis = {
'host': '127.0.0.1',
'port': '6379',
'password': <PASSWORD>,
'db': 0
}
app = {
'name': 'laravel_database_gym',
'tag': 'swap'
}
conf = Config()
| class Config:
redis = {
'host': '127.0.0.1',
'port': '6379',
'password': <PASSWORD>,
'db': 0
}
app = {
'name': 'laravel_database_gym',
'tag': 'swap'
}
conf = Config() | none | 1 | 1.616571 | 2 |
|
landlab/grid/tests/test_raster_grid/test_has_boundary_neighbor.py | awickert/landlab | 1 | 6632609 | import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import with_setup, assert_true, assert_false
try:
from nose.tools import assert_tuple_equal
except ImportError:
from landlab.testing.tools import assert_tuple_equal
from landlab import RasterModelGrid
def setup_grid():
globals().update({
'rmg': RasterModelGrid(4, 5)
})
def test_boundary_node():
rmg = RasterModelGrid(5, 6)
assert_true(rmg.node_has_boundary_neighbor(0))
assert_false(rmg.node_has_boundary_neighbor(14))
@with_setup(setup_grid)
def test_last_index():
assert_true(rmg.node_has_boundary_neighbor(-1))
@with_setup(setup_grid)
def test_id_as_list():
assert_array_equal(rmg.node_has_boundary_neighbor([-1, 0]),
np.array([True, True]))
@with_setup(setup_grid)
def test_id_as_array():
assert_array_equal(rmg.node_has_boundary_neighbor(np.arange(20)),
np.array([True, True, True, True, True,
True, True, True, True, True,
True, True, True, True, True,
True, True, True, True, True]))
def test_id_as_array_with_one_interior():
rmg = RasterModelGrid(5, 5)
assert_array_equal(rmg.node_has_boundary_neighbor(np.arange(25)),
np.array([True, True, True, True, True,
True, True, True, True, True,
True, True, False, True, True,
True, True, True, True, True,
True, True, True, True, True]))
| import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import with_setup, assert_true, assert_false
try:
from nose.tools import assert_tuple_equal
except ImportError:
from landlab.testing.tools import assert_tuple_equal
from landlab import RasterModelGrid
def setup_grid():
globals().update({
'rmg': RasterModelGrid(4, 5)
})
def test_boundary_node():
rmg = RasterModelGrid(5, 6)
assert_true(rmg.node_has_boundary_neighbor(0))
assert_false(rmg.node_has_boundary_neighbor(14))
@with_setup(setup_grid)
def test_last_index():
assert_true(rmg.node_has_boundary_neighbor(-1))
@with_setup(setup_grid)
def test_id_as_list():
assert_array_equal(rmg.node_has_boundary_neighbor([-1, 0]),
np.array([True, True]))
@with_setup(setup_grid)
def test_id_as_array():
assert_array_equal(rmg.node_has_boundary_neighbor(np.arange(20)),
np.array([True, True, True, True, True,
True, True, True, True, True,
True, True, True, True, True,
True, True, True, True, True]))
def test_id_as_array_with_one_interior():
rmg = RasterModelGrid(5, 5)
assert_array_equal(rmg.node_has_boundary_neighbor(np.arange(25)),
np.array([True, True, True, True, True,
True, True, True, True, True,
True, True, False, True, True,
True, True, True, True, True,
True, True, True, True, True]))
| none | 1 | 2.26941 | 2 |
|
renderButton.py | tsmaster/RayMarcher | 0 | 6632610 | <reponame>tsmaster/RayMarcher<gh_stars>0
import os
import math
import bdgmath
import camera
import scene
import plane
import sphere
import cylinder
import donut
import box
import repeated2d
import transform
import light
import material
import csg
import roundedge
cam = camera.FreeCamera(bdgmath.Vector3(5, -8, 6),
bdgmath.Vector3(0, 0, 0.75))
scene = scene.Scene()
scene.addLight(light.DirectionalLight(bdgmath.Vector3(-1, 0.5, -4), 0.8))
scene.addLight(light.AmbientLight(0.2))
floor = plane.ZPlane(0)
floor.squareSize = 2
scene.addObject(floor)
disk = roundedge.RoundEdge(0.1, cylinder.CappedCylinder(0.8, 3.8))
translatedDisk = transform.Translate3d(bdgmath.Vector3(0, 0, 1.5), disk)
#scene.addObject(translatedDisk)
negCyl1 = cylinder.ZCylinder(bdgmath.Vector2(1.5, 1.5), 1.1)
negCyl2 = cylinder.ZCylinder(bdgmath.Vector2(1.5, -1.5), 1.1)
negCyl3 = cylinder.ZCylinder(bdgmath.Vector2(-1.5, -1.5), 1.1)
negCyl4 = cylinder.ZCylinder(bdgmath.Vector2(-1.5, 1.5), 1.1)
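# Subtract each corner cylinder from the rounded disk with successive CSG differences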
w = translatedDisk
for c in [negCyl1, negCyl2, negCyl3, negCyl4]:
w = csg.Difference(w, c)
scene.addObject(w)
#cam.renderScene(scene, 640, 320, "raytest.png", 5)
#cam.renderScene(scene, 300, 240, "raytest.png", 5)
cam.renderScene(scene, 100, 80, "raytest.png", 5)
| import os
import math
import bdgmath
import camera
import scene
import plane
import sphere
import cylinder
import donut
import box
import repeated2d
import transform
import light
import material
import csg
import roundedge
cam = camera.FreeCamera(bdgmath.Vector3(5, -8, 6),
bdgmath.Vector3(0, 0, 0.75))
scene = scene.Scene()
scene.addLight(light.DirectionalLight(bdgmath.Vector3(-1, 0.5, -4), 0.8))
scene.addLight(light.AmbientLight(0.2))
floor = plane.ZPlane(0)
floor.squareSize = 2
scene.addObject(floor)
disk = roundedge.RoundEdge(0.1, cylinder.CappedCylinder(0.8, 3.8))
translatedDisk = transform.Translate3d(bdgmath.Vector3(0, 0, 1.5), disk)
#scene.addObject(translatedDisk)
negCyl1 = cylinder.ZCylinder(bdgmath.Vector2(1.5, 1.5), 1.1)
negCyl2 = cylinder.ZCylinder(bdgmath.Vector2(1.5, -1.5), 1.1)
negCyl3 = cylinder.ZCylinder(bdgmath.Vector2(-1.5, -1.5), 1.1)
negCyl4 = cylinder.ZCylinder(bdgmath.Vector2(-1.5, 1.5), 1.1)
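# Subtract each corner cylinder from the rounded disk with successive CSG differences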
w = translatedDisk
for c in [negCyl1, negCyl2, negCyl3, negCyl4]:
w = csg.Difference(w, c)
scene.addObject(w)
#cam.renderScene(scene, 640, 320, "raytest.png", 5)
#cam.renderScene(scene, 300, 240, "raytest.png", 5)
cam.renderScene(scene, 100, 80, "raytest.png", 5) | ko | 0.157037 | #scene.addObject(translatedDisk) #cam.renderScene(scene, 640, 320, "raytest.png", 5) #cam.renderScene(scene, 300, 240, "raytest.png", 5) | 2.133129 | 2 |
elifinokuzu/dictionary/apps.py | omerumur14/elifin-okuzu | 2 | 6632611 | from django.apps import AppConfig
class DictionaryConfig(AppConfig):
name = 'dictionary'
| from django.apps import AppConfig
class DictionaryConfig(AppConfig):
name = 'dictionary'
| none | 1 | 1.255409 | 1 |
|
predict/predict.py | hirune924/kaggle-SETI-2ndPlaceSolution | 0 | 6632612 | <reponame>hirune924/kaggle-SETI-2ndPlaceSolution
####################
# Import Libraries
####################
import os
import sys
from PIL import Image
import cv2
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning import loggers
from pytorch_lightning import seed_everything
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
import albumentations as A
import timm
from omegaconf import OmegaConf
import glob
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
import torch
####################
# Utils
####################
def get_score(y_true, y_pred):
score = roc_auc_score(y_true, y_pred)
return score
def load_pytorch_model(ckpt_name, model, ignore_suffix='model'):
state_dict = torch.load(ckpt_name, map_location='cpu')["state_dict"]
new_state_dict = {}
for k, v in state_dict.items():
name = k
if name.startswith(str(ignore_suffix)+"."):
name = name.replace(str(ignore_suffix)+".", "", 1) # remove `model.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict, strict=False)
return model
def rot180(input: torch.Tensor) -> torch.Tensor:
r"""Rotate a tensor image or a batch of tensor images
180 degrees. Input must be a tensor of shape (C, H, W)
or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The rotated image tensor
"""
return torch.flip(input, [-2, -1])
def hflip(input: torch.Tensor) -> torch.Tensor:
r"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
"""
w = input.shape[-1]
return input[..., torch.arange(w - 1, -1, -1, device=input.device)]
def vflip(input: torch.Tensor) -> torch.Tensor:
r"""Vertically flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The vertically flipped image tensor
"""
h = input.shape[-2]
return input[..., torch.arange(h - 1, -1, -1, device=input.device), :]
####################
# Config
####################
conf_dict = {'batch_size': 8,
'height': 512,
'width': 512,
'model_name': 'efficientnet_b0',
'data_dir': '../input/seti-breakthrough-listen',
'model_dir': None,
'output_dir': './',
'submission_fname': None}
conf_base = OmegaConf.create(conf_dict)
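# The config values above are overridden from the command line via OmegaConf.from_cli()
# in main(); e.g. (hypothetical paths):
#   python predict/predict.py model_dir=./checkpoints submission_fname=submission.csv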
####################
# Dataset
####################
class SETIDataset(Dataset):
def __init__(self, df, transform=None, conf=None):
self.df = df.reset_index(drop=True)
self.labels = df['target'].values
self.dir_names = df['dir'].values
self.transform = transform
self.conf = conf
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
img_id = self.df.loc[idx, 'id']
file_path = os.path.join(self.dir_names[idx],"{}/{}.npy".format(img_id[0], img_id))
image = np.load(file_path)
image = image.astype(np.float32)
image = np.vstack(image).transpose((1, 0))
img_pl = Image.fromarray(image).resize((self.conf.height, self.conf.width), resample=Image.BICUBIC)
image = np.array(img_pl)
if self.transform is not None:
image = self.transform(image=image)['image']
image = torch.from_numpy(image).unsqueeze(dim=0)
label = torch.tensor([self.labels[idx]]).float()
return image, label
####################
# Data Module
####################
class SETIDataModule(pl.LightningDataModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
# OPTIONAL, called only on 1 GPU/machine(for download or tokenize)
def prepare_data(self):
pass
# OPTIONAL, called for every GPU/machine
def setup(self, stage=None):
if stage == 'fit':
print('Not implemented')
elif stage == 'test':
test_df = pd.read_csv(os.path.join(self.conf.data_dir, "sample_submission.csv"))
test_df['dir'] = os.path.join(self.conf.data_dir, "test")
self.test_dataset = SETIDataset(test_df, transform=None, conf=self.conf)
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.conf.batch_size, num_workers=4, shuffle=True, pin_memory=True, drop_last=True)
def val_dataloader(self):
return DataLoader(self.valid_dataset, batch_size=self.conf.batch_size, num_workers=4, shuffle=False, pin_memory=True, drop_last=True)
def test_dataloader(self):
return DataLoader(self.test_dataset, batch_size=self.conf.batch_size, num_workers=4, shuffle=False, pin_memory=True, drop_last=False)
# ====================================================
# Inference function
# ====================================================
def inference(models, test_loader):
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
probs = []
with torch.no_grad():
for i, (images) in tk0:
images = images[0].cuda()
avg_preds = []
for model in models:
y_preds = model(images)/2.0
y_preds += model(vflip(images))/2.0
avg_preds.append(y_preds.sigmoid().to('cpu').numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
probs = np.concatenate(probs)
return probs
####################
# Train
####################
def main():
conf_cli = OmegaConf.from_cli()
conf = OmegaConf.merge(conf_base, conf_cli)
print(OmegaConf.to_yaml(conf))
seed_everything(2021)
# get model path
model_path = glob.glob(os.path.join(conf.model_dir, '*.ckpt'))
models = []
for ckpt in model_path:
m = timm.create_model(model_name=conf.model_name, num_classes=1, pretrained=False, in_chans=1)
m = load_pytorch_model(ckpt, m, ignore_suffix='model')
m.cuda()
m.eval()
models.append(m)
data_module = SETIDataModule(conf)
data_module.setup(stage='test')
test_dataset = data_module.test_dataset
test_loader = DataLoader(test_dataset, batch_size=conf.batch_size, num_workers=4, shuffle=False, pin_memory=True, drop_last=False)
predictions = inference(models, test_loader)
test = pd.read_csv(os.path.join(conf.data_dir, "sample_submission.csv"))
test['target'] = predictions
test[['id', 'target']].to_csv(os.path.join(conf.output_dir, conf.submission_fname), index=False)
print(test[['id', 'target']].head())
print(model_path)
if __name__ == "__main__":
main() | ####################
# Import Libraries
####################
import os
import sys
from PIL import Image
import cv2
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning import loggers
from pytorch_lightning import seed_everything
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
import albumentations as A
import timm
from omegaconf import OmegaConf
import glob
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
import torch
####################
# Utils
####################
def get_score(y_true, y_pred):
score = roc_auc_score(y_true, y_pred)
return score
def load_pytorch_model(ckpt_name, model, ignore_suffix='model'):
state_dict = torch.load(ckpt_name, map_location='cpu')["state_dict"]
new_state_dict = {}
for k, v in state_dict.items():
name = k
if name.startswith(str(ignore_suffix)+"."):
name = name.replace(str(ignore_suffix)+".", "", 1) # remove `model.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict, strict=False)
return model
def rot180(input: torch.Tensor) -> torch.Tensor:
r"""Rotate a tensor image or a batch of tensor images
180 degrees. Input must be a tensor of shape (C, H, W)
or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The rotated image tensor
"""
return torch.flip(input, [-2, -1])
def hflip(input: torch.Tensor) -> torch.Tensor:
r"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
"""
w = input.shape[-1]
return input[..., torch.arange(w - 1, -1, -1, device=input.device)]
def vflip(input: torch.Tensor) -> torch.Tensor:
r"""Vertically flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The vertically flipped image tensor
"""
h = input.shape[-2]
return input[..., torch.arange(h - 1, -1, -1, device=input.device), :]
####################
# Config
####################
conf_dict = {'batch_size': 8,
'height': 512,
'width': 512,
'model_name': 'efficientnet_b0',
'data_dir': '../input/seti-breakthrough-listen',
'model_dir': None,
'output_dir': './',
'submission_fname': None}
conf_base = OmegaConf.create(conf_dict)
####################
# Dataset
####################
class SETIDataset(Dataset):
def __init__(self, df, transform=None, conf=None):
self.df = df.reset_index(drop=True)
self.labels = df['target'].values
self.dir_names = df['dir'].values
self.transform = transform
self.conf = conf
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
img_id = self.df.loc[idx, 'id']
file_path = os.path.join(self.dir_names[idx],"{}/{}.npy".format(img_id[0], img_id))
image = np.load(file_path)
image = image.astype(np.float32)
image = np.vstack(image).transpose((1, 0))
img_pl = Image.fromarray(image).resize((self.conf.height, self.conf.width), resample=Image.BICUBIC)
image = np.array(img_pl)
if self.transform is not None:
image = self.transform(image=image)['image']
image = torch.from_numpy(image).unsqueeze(dim=0)
label = torch.tensor([self.labels[idx]]).float()
return image, label
####################
# Data Module
####################
class SETIDataModule(pl.LightningDataModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
# OPTIONAL, called only on 1 GPU/machine(for download or tokenize)
def prepare_data(self):
pass
# OPTIONAL, called for every GPU/machine
def setup(self, stage=None):
if stage == 'fit':
print('Not implemented')
elif stage == 'test':
test_df = pd.read_csv(os.path.join(self.conf.data_dir, "sample_submission.csv"))
test_df['dir'] = os.path.join(self.conf.data_dir, "test")
self.test_dataset = SETIDataset(test_df, transform=None, conf=self.conf)
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.conf.batch_size, num_workers=4, shuffle=True, pin_memory=True, drop_last=True)
def val_dataloader(self):
return DataLoader(self.valid_dataset, batch_size=self.conf.batch_size, num_workers=4, shuffle=False, pin_memory=True, drop_last=True)
def test_dataloader(self):
return DataLoader(self.test_dataset, batch_size=self.conf.batch_size, num_workers=4, shuffle=False, pin_memory=True, drop_last=False)
# ====================================================
# Inference function
# ====================================================
def inference(models, test_loader):
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
probs = []
with torch.no_grad():
for i, (images) in tk0:
images = images[0].cuda()
avg_preds = []
for model in models:
y_preds = model(images)/2.0
y_preds += model(vflip(images))/2.0
avg_preds.append(y_preds.sigmoid().to('cpu').numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
probs = np.concatenate(probs)
return probs
####################
# Train
####################
def main():
conf_cli = OmegaConf.from_cli()
conf = OmegaConf.merge(conf_base, conf_cli)
print(OmegaConf.to_yaml(conf))
seed_everything(2021)
# get model path
model_path = glob.glob(os.path.join(conf.model_dir, '*.ckpt'))
models = []
for ckpt in model_path:
m = timm.create_model(model_name=conf.model_name, num_classes=1, pretrained=False, in_chans=1)
m = load_pytorch_model(ckpt, m, ignore_suffix='model')
m.cuda()
m.eval()
models.append(m)
data_module = SETIDataModule(conf)
data_module.setup(stage='test')
test_dataset = data_module.test_dataset
test_loader = DataLoader(test_dataset, batch_size=conf.batch_size, num_workers=4, shuffle=False, pin_memory=True, drop_last=False)
predictions = inference(models, test_loader)
test = pd.read_csv(os.path.join(conf.data_dir, "sample_submission.csv"))
test['target'] = predictions
test[['id', 'target']].to_csv(os.path.join(conf.output_dir, conf.submission_fname), index=False)
print(test[['id', 'target']].head())
print(model_path)
if __name__ == "__main__":
main() | en | 0.371055 | #################### # Import Libraries #################### #################### # Utils #################### # remove `model.` Rotate a tensor image or a batch of tensor images 180 degrees. Input must be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`. Args: input (torch.Tensor): input tensor Returns: torch.Tensor: The rotated image tensor Horizontally flip a tensor image or a batch of tensor images. Input must be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`. Args: input (torch.Tensor): input tensor Returns: torch.Tensor: The horizontally flipped image tensor Vertically flip a tensor image or a batch of tensor images. Input must be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`. Args: input (torch.Tensor): input tensor Returns: torch.Tensor: The vertically flipped image tensor #################### # Config #################### #################### # Dataset #################### #################### # Data Module #################### # OPTIONAL, called only on 1 GPU/machine(for download or tokenize) # OPTIONAL, called for every GPU/machine # ==================================================== # Inference function # ==================================================== #################### # Train #################### # get model path | 2.121084 | 2 |
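The inference() function in the row above averages each model's logits over the original and the vertically flipped spectrogram before applying a sigmoid (a simple test-time augmentation). A minimal, self-contained sketch of that flip-averaging is shown below; DummyModel, the tensor shape and the batch size are illustrative assumptions, not the competition model.
import torch

def vflip(x: torch.Tensor) -> torch.Tensor:
    # flip along the height axis (second-to-last dimension), as in the snippet above
    return torch.flip(x, dims=[-2])

class DummyModel(torch.nn.Module):
    # stand-in for the timm EfficientNet used above
    def __init__(self):
        super().__init__()
        self.head = torch.nn.Linear(16 * 16, 1)
    def forward(self, x):
        return self.head(x.flatten(1))

model = DummyModel().eval()
images = torch.randn(4, 1, 16, 16)  # (batch, channels, height, width)
with torch.no_grad():
    logits = (model(images) + model(vflip(images))) / 2.0  # average the two views
    probs = logits.sigmoid()
print(probs.shape)  # torch.Size([4, 1])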
ros_ws/src/baxter_examples/scripts/ik_service_client.py | mesneym/Baxter-Arm-PP | 0 | 6632613 | <filename>ros_ws/src/baxter_examples/scripts/ik_service_client.py<gh_stars>0
version https://git-lfs.github.com/spec/v1
oid sha256:75d6721557de605e8ed3657e3ec24e2276bdcbae6bda0ead4ad8282ad183e806
size 5347
| <filename>ros_ws/src/baxter_examples/scripts/ik_service_client.py<gh_stars>0
version https://git-lfs.github.com/spec/v1
oid sha256:75d6721557de605e8ed3657e3ec24e2276bdcbae6bda0ead4ad8282ad183e806
size 5347
| none | 1 | 1.234999 | 1 |
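The content stored for this row is not the Baxter IK script itself but a Git LFS pointer: a three-field text stub (version, oid, size) that stands in for the real file. A hedged sketch of how such a pointer could be parsed is given below; parse_lfs_pointer is a hypothetical helper, not part of the repository.
def parse_lfs_pointer(text):
    # split each "key value" line of the pointer into a dict entry
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    algo, _, digest = fields["oid"].partition(":")
    return {"version": fields["version"], "oid_algo": algo,
            "oid": digest, "size": int(fields["size"])}

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:75d6721557de605e8ed3657e3ec24e2276bdcbae6bda0ead4ad8282ad183e806\n"
    "size 5347\n"
)
print(parse_lfs_pointer(pointer)["size"])  # 5347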
|
tree.py | hmble/tree | 0 | 6632614 | <filename>tree.py<gh_stars>0
import os
import sys
from collections import OrderedDict
class Tree:
def __init__(self):
self.dircount = 0
self.filecount = 0
self.dict_dir = []
self.dict_file = []
self.ignore = [".git", "AW_Dribble"]
def walk(self, path, prefix=""):
for entry in os.scandir(path):
if entry.is_dir():
self.dircount += 1
print(prefix + "|-- " + entry.name)
newstr = prefix + "| "
self.walk(entry.path, newstr)
else:
self.filecount += 1
print(prefix + "|-- " + entry.name)
def sorted_walk(self, path, prefix=""):
dirlist = OrderedDict()
filelist = []
for entry in os.scandir(path):
if entry.is_dir():
self.dircount += 1
dirlist[entry.name] = entry.path
else:
self.filecount += 1
filelist.append(entry.name)
fl = sorted(filelist)
for file in fl:
if file != fl[-1]:
print(prefix + "|-- " + file)
else:
print(prefix + "`-- " + file)
lt = (list(dirlist.keys()))
for key in dirlist.keys():
if (key in self.ignore):
continue
elif key != lt[-1]:
print(prefix + "|-- " + key)
newstr = prefix + "| "
self.sorted_walk(dirlist[key], newstr)
else:
print(prefix + "`-- " + key)
                newstr = prefix + "    "  # no vertical bar under the last entry
self.sorted_walk(dirlist[key], newstr)
directory = "."
# FIXME :
# when arguments passed with / at end `os.path.basename(path)
# prints nothings as basename is an empty string e.g. home/dir/
#
# TODO : Options to follow symlinks or not
# Currently no method for symlinks
if len(sys.argv) > 1:
directory = sys.argv[1]
print(directory)
else:
print(os.getcwd())
tree = Tree()
# tree.walk(directory)
tree.sorted_walk(directory)
#print("\ndirectories " + str(tree.dircount) + ", files " + str(tree.filecount))
| <filename>tree.py<gh_stars>0
import os
import sys
from collections import OrderedDict
class Tree:
def __init__(self):
self.dircount = 0
self.filecount = 0
self.dict_dir = []
self.dict_file = []
self.ignore = [".git", "AW_Dribble"]
def walk(self, path, prefix=""):
for entry in os.scandir(path):
if entry.is_dir():
self.dircount += 1
print(prefix + "|-- " + entry.name)
newstr = prefix + "| "
self.walk(entry.path, newstr)
else:
self.filecount += 1
print(prefix + "|-- " + entry.name)
def sorted_walk(self, path, prefix=""):
dirlist = OrderedDict()
filelist = []
for entry in os.scandir(path):
if entry.is_dir():
self.dircount += 1
dirlist[entry.name] = entry.path
else:
self.filecount += 1
filelist.append(entry.name)
fl = sorted(filelist)
for file in fl:
if file != fl[-1]:
print(prefix + "|-- " + file)
else:
print(prefix + "`-- " + file)
lt = (list(dirlist.keys()))
for key in dirlist.keys():
if (key in self.ignore):
continue
elif key != lt[-1]:
print(prefix + "|-- " + key)
newstr = prefix + "| "
self.sorted_walk(dirlist[key], newstr)
else:
print(prefix + "`-- " + key)
                newstr = prefix + "    "  # no vertical bar under the last entry
self.sorted_walk(dirlist[key], newstr)
directory = "."
# FIXME :
# when arguments passed with / at end `os.path.basename(path)
# prints nothings as basename is an empty string e.g. home/dir/
#
# TODO : Options to follow symlinks or not
# Currently no method for symlinks
if len(sys.argv) > 1:
directory = sys.argv[1]
print(directory)
else:
print(os.getcwd())
tree = Tree()
# tree.walk(directory)
tree.sorted_walk(directory)
#print("\ndirectories " + str(tree.dircount) + ", files " + str(tree.filecount))
| en | 0.615855 | # FIXME : # when arguments passed with / at end `os.path.basename(path) # prints nothings as basename is an empty string e.g. home/dir/ # # TODO : Options to follow symlinks or not # Currently no method for symlinks # tree.walk(directory) #print("\ndirectories " + str(tree.dircount) + ", files " + str(tree.filecount)) | 2.943474 | 3 |
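The FIXME in tree.py above notes that a trailing slash in the command-line argument makes os.path.basename return an empty string (e.g. home/dir/). A hedged sketch of one way to normalise the argument before printing is shown below; display_name is a hypothetical helper, not part of the script.
import os

def display_name(path):
    # collapse a trailing separator so basename still yields the directory name
    normalised = os.path.normpath(path)  # "home/dir/" -> "home/dir"
    return os.path.basename(normalised) or normalised

print(display_name("home/dir/"))  # dir
print(display_name("."))          # .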
neutron/extensions/flavors.py | glove747/liberty-neutron | 0 | 6632615 | # All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.api.v2 import resource_helper
from neutron import manager
from neutron.plugins.common import constants
FLAVORS = 'flavors'
SERVICE_PROFILES = 'service_profiles'
FLAVORS_PREFIX = ""
RESOURCE_ATTRIBUTE_MAP = {
FLAVORS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'service_type': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'is_visible': True},
'service_profiles': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'is_visible': True, 'default': []},
'enabled': {'allow_post': True, 'allow_put': True,
'validate': {'type:boolean': None},
'default': True,
'is_visible': True},
},
SERVICE_PROFILES: {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True,
'primary_key': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
# service_profile belong to one service type for now
#'service_types': {'allow_post': False, 'allow_put': False,
# 'is_visible': True},
'driver': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True,
'default': attr.ATTR_NOT_SPECIFIED},
'metainfo': {'allow_post': True, 'allow_put': True,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'is_visible': True},
'enabled': {'allow_post': True, 'allow_put': True,
'validate': {'type:boolean': None},
'is_visible': True, 'default': True},
},
}
SUB_RESOURCE_ATTRIBUTE_MAP = {
'service_profiles': {
'parent': {'collection_name': 'flavors',
'member_name': 'flavor'},
'parameters': {'id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string':
attr.TENANT_ID_MAX_LEN},
'is_visible': True}}
}
}
class Flavors(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron Service Flavors"
@classmethod
def get_alias(cls):
return "flavors"
@classmethod
def get_description(cls):
return "Service specification for advanced services"
@classmethod
def get_updated(cls):
return "2014-07-06T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
attr.PLURALS.update(plural_mappings)
resources = resource_helper.build_resource_info(
plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.FLAVORS)
plugin = manager.NeutronManager.get_service_plugins()[
constants.FLAVORS]
for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
# Special handling needed for sub-resources with 'y' ending
# (e.g. proxies -> proxy)
resource_name = collection_name[:-1]
parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')
params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(
'parameters')
controller = base.create_resource(collection_name, resource_name,
plugin, params,
allow_bulk=True,
parent=parent)
resource = extensions.ResourceExtension(
collection_name,
controller, parent,
path_prefix=FLAVORS_PREFIX,
attr_map=params)
resources.append(resource)
return resources
def update_attributes_map(self, attributes):
super(Flavors, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
| # All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.api.v2 import resource_helper
from neutron import manager
from neutron.plugins.common import constants
FLAVORS = 'flavors'
SERVICE_PROFILES = 'service_profiles'
FLAVORS_PREFIX = ""
RESOURCE_ATTRIBUTE_MAP = {
FLAVORS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'service_type': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'is_visible': True},
'service_profiles': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'is_visible': True, 'default': []},
'enabled': {'allow_post': True, 'allow_put': True,
'validate': {'type:boolean': None},
'default': True,
'is_visible': True},
},
SERVICE_PROFILES: {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True,
'primary_key': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
# service_profile belong to one service type for now
#'service_types': {'allow_post': False, 'allow_put': False,
# 'is_visible': True},
'driver': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True,
'default': attr.ATTR_NOT_SPECIFIED},
'metainfo': {'allow_post': True, 'allow_put': True,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'is_visible': True},
'enabled': {'allow_post': True, 'allow_put': True,
'validate': {'type:boolean': None},
'is_visible': True, 'default': True},
},
}
SUB_RESOURCE_ATTRIBUTE_MAP = {
'service_profiles': {
'parent': {'collection_name': 'flavors',
'member_name': 'flavor'},
'parameters': {'id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string':
attr.TENANT_ID_MAX_LEN},
'is_visible': True}}
}
}
class Flavors(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron Service Flavors"
@classmethod
def get_alias(cls):
return "flavors"
@classmethod
def get_description(cls):
return "Service specification for advanced services"
@classmethod
def get_updated(cls):
return "2014-07-06T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
attr.PLURALS.update(plural_mappings)
resources = resource_helper.build_resource_info(
plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.FLAVORS)
plugin = manager.NeutronManager.get_service_plugins()[
constants.FLAVORS]
for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
# Special handling needed for sub-resources with 'y' ending
# (e.g. proxies -> proxy)
resource_name = collection_name[:-1]
parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')
params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(
'parameters')
controller = base.create_resource(collection_name, resource_name,
plugin, params,
allow_bulk=True,
parent=parent)
resource = extensions.ResourceExtension(
collection_name,
controller, parent,
path_prefix=FLAVORS_PREFIX,
attr_map=params)
resources.append(resource)
return resources
def update_attributes_map(self, attributes):
super(Flavors, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
| en | 0.793099 | # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # service_profile belong to one service type for now #'service_types': {'allow_post': False, 'allow_put': False, # 'is_visible': True}, Returns Ext Resources. # Special handling needed for sub-resources with 'y' ending # (e.g. proxies -> proxy) | 1.246742 | 1 |
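get_resources() above feeds RESOURCE_ATTRIBUTE_MAP to resource_helper.build_plural_mappings before building controllers for each collection. The sketch below is only an assumption about what such a plural-to-singular mapping could look like for the two collections defined above; it is not the real Neutron helper.
def build_plural_mappings_sketch(special_mappings, resource_map):
    # naive singularisation: drop a trailing 's' unless an override is supplied
    mappings = dict(special_mappings)
    for collection in resource_map:
        mappings.setdefault(collection, collection[:-1])
    return mappings

print(build_plural_mappings_sketch({}, {'flavors': {}, 'service_profiles': {}}))
# {'flavors': 'flavor', 'service_profiles': 'service_profile'}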
30_Information_Retrieval/project/src/Inverted_Index/Lib/dataProcess_BySpark.py | Xinrihui/Data-Structure-and-Algrithms | 1 | 6632616 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
from collections import *
from numpy import *
import re
import json
from pyspark.sql import SparkSession
from functools import reduce
class Test:
def test1(self):
"""
不可见字符 的 打印 和 写入文本
:return:
"""
SOH=chr(0x01) # SOH
next_line_char=chr(0x0a) # 换行键(LF)
# print(next_line_char) # 根本看不到 ; 只能写到文本 中使用 notepad++ 查看
NUL=chr(0x00)
STX=chr(0x02)
EXT=chr(0x03)
        a='\x01' # 直接从文本 中 粘SOH 过来 (the pasted SOH is invisible; written here as an escape so ord(a) == 1)
print(ord(a)) # 输出 ASCII 码 为 1
# 换行符 不能直接从文本 粘贴过来,这样直接在 IDE中就换行了
line_str=next_line_char + SOH + NUL + STX + EXT
data_dir = '../data_test/'
test_file_dir = os.path.join(data_dir, 'invisible_characters.csv') #
self.encoding='utf-8'
with open(test_file_dir , "wb+") as f_test:
f_test.seek(0, 0) # 指向 切片文件的 首位
row_text = line_str
row_text_bytes = row_text.encode(self.encoding)
f_test.write(row_text_bytes)
def test2(self):
orgin = " '!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ "
orgin = " \'!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ "
print('[{0}]'.format(re.escape(orgin)))
class DataProcess:
"""
by XRH
date: 2020-06-14
利用 spark 做数据 处理
功能:
1.对 临时索引文件 tmp_index_spark.bin 按照 termid 进行排序
2. 在 大文件中 输出 满足 特定 条件的 行
2.1 读取 使用 0x01 作为分隔符 的 CSV ,并输出 字段数目不匹配的行
"""
def sort_by_termid(self):
"""
对 临时索引文件 tmp_index_spark.bin 按照 termid 进行排序
:return:
"""
spark = SparkSession.builder.appName("sort_by_termid").getOrCreate()
sc = spark.sparkContext
# nums = sc.parallelize([1, 2, 3, 4])
# print(nums.map(lambda x: x * x).collect())
# data_dir = '..\\data\\' # windows 下必须使用 '\\'
# tmp_index_file_dir = data_dir + 'tmp_index_spark.bin' # for spark
data_dir='../data/'
tmp_index_file_dir=os.path.join(data_dir, 'tmp_index_spark.bin')
sorted_tmp_index_file_dir=data_dir+'sorted_'+'tmp_index_spark.bin'
lines = sc.textFile(tmp_index_file_dir,8) # 对大文件 进行切片 sliceNum=8,否则报错
# 20/05/30 11:54:28 ERROR PythonRunner: This may have been caused by a prior exception:
# java.net.SocketException: Connection reset by peer: socket write error
# print(lines.first()) # 看第一条
# print(lines.take(10)) # 可以看 指定数目的记录
# for line in lines : #TypeError: 'RDD' object is not iterable
# print(line)
lines=lines.map(lambda line:line.split("\t"))
lines=lines.sortBy(lambda x: x[0])
#TODO: 过滤掉 term_id =0 ,因为 term_id==0 为标点符号
lines=lines.map(lambda line: line[0]+"\t"+line[1]) #采用 分隔符 区分两个 域
# lines.saveAsTextFile(sorted_tmp_index_file_dir) # 在文件夹下 保存 sliceNum 个切片文件
lines.coalesce(1, shuffle=True).saveAsTextFile(sorted_tmp_index_file_dir) # 在文件夹下 只有一个 切片
# lines.saveAsSingleTextFile(sorted_tmp_index_file_dir)
# list_b=[['1', '1'], ['0', '1'], ['2', '1'], ['3', '1'], ['4', '1'], ['5', '1'], ['6', '1'], ['7', '1'], ['8', '1'],
# ['9', '1']]
# lines = sc.parallelize(list_b)
# lines=lines.sortBy(lambda x: x[0])
# print(lines.take(10))
def __process_oneline(self,line, col_num):
"""
自定义 Map 中的 lambda 函数
处理 文件中的 每一行
:param line: 文件中的一行,已经是字符串了 (str)
:param col_num: 预设的字段数
:return:
"""
line_array = line.split("\x01")
length = len(line_array)
line_array.append(length)
res = None
if length != col_num: # 实际 字段数目 不符合 预设的 col_num
res = line_array
return res # 每一行 必须都 要有 返回
def __process_oneslice(self,lines_slice, col_num):
"""
自定义 mapPartitions 中的 lambda 函数
这个计算 过程被 分发给了 spark 的计算节点,
计算节点 使用本地的数据分片进行 计算
:param lines_slice: 文件的切片,其中有多行数据 (字符串数组)
:param col_num:
:return:
"""
res = []
for line in lines_slice:
# line 为字符串
line_array = line.split("\x01")
length = len(line_array)
line_array.append(length) # 记录 总的字段数目
if length != col_num: # 找到 字段数目 不符合 col_num 的
res.append(line + str(line_array))
return res
def find_bad_line(self):
"""
spark 读取 使用 0x01 作为分隔符 的 CSV ,并输出 字段数目不匹配的行
:return:
"""
spark = SparkSession.builder.appName("find_bad_line").getOrCreate()
sc = spark.sparkContext
data_dir = '../data_test/'
test_file_dir = os.path.join(data_dir, '20200614.csv')
result_file_dir = os.path.join(data_dir, '20200614-result.csv')
sliceNum = 2
lines = sc.textFile(test_file_dir, sliceNum) # 对大文件 进行切片 sliceNum=8,否则报错
# 20/05/30 11:54:28 ERROR PythonRunner: This may have been caused by a prior exception:
# java.net.SocketException: Connection reset by peer: socket write error
# print(lines.take(10))
col_num = 3
# linesByMap = lines.map(lambda line: self.__process_oneline(line, col_num))
# print(linesByMap.take(10)) # [None, ['1', ' abc \x03 ', ' 超哥 ', ' 666 ', 4], None]
linesByMapPartitions = lines.mapPartitions(lambda lines_slice: self.__process_oneslice(lines_slice, col_num))
# print(linesByMapPartitions.take(10))
# 分区合并
one_slice = linesByMapPartitions.coalesce(1, shuffle=True)
one_slice.saveAsTextFile(result_file_dir) # 先删除之前的文件,否则报错
class Join:
"""
by XRH
date: 2020-06-16
利用 spark 的基础算子 实现 常见的 join 算法
功能:
1. 朴素的 MapReduce 的 join
2. 基于 广播变量的 hash join
3.
"""
def common_join(self,table_a_dir,table_b_dir,table_dir):
"""
利用基本算子 实现 MapReduce 的 join
:param table_a:
:param table_b:
:return:
"""
spark = SparkSession.builder.appName("common_Join").getOrCreate()
sc = spark.sparkContext
sliceNum = 2
table_a = sc.textFile(table_a_dir, sliceNum) # 2个分区
table_b = sc.textFile(table_b_dir, sliceNum) # 2个分区
table_a = table_a.map(lambda line: line.split(','))
table_b = table_b.map(lambda line: line.split(','))
table_a = table_a.map(lambda line: (line[0], line[1:])) # 只能有 两个元素 ,第1个为 Key; 否则后面的 groupByKey() 报错
table_b = table_b.map(lambda line: (line[0], line[1:]))
table = table_a.union(table_b) # 合并后 分区的数目 也是 两个 RDD 的分区的和
# table.glom().collect() # 输出 各个分区 的元素 列表
# [[('1', ['a', '27']), ('2', ['b', '24']), ('3', ['c', '23'])],
# [('4', ['d', '21']), ('5', ['e', '22']), ('6', ['f', '20'])],
# [('1', ['male']), ('2', ['female'])],
# [('4', ['female']), ('5', ['male'])]]
# 可以看出一共有4个分区
#重新划分为2个分区, 默认采用 hash 分区, 因此 key 相同的会被 shuffle 到1个分区中
table = table.partitionBy(2)
# 1.此处原理与 MapReduce 不同, MapReduce 肯定会做shuffle
# 一般 1个hdfs 的block对应 1个map task, 在1个map task中:
# (1) 在环形缓冲区, 数据按照 分区+key 进行快速排序了
# (2) 环形缓冲区溢出到磁盘, 对每一个分区对应的多个溢出文件进行归并排序, 最后生成 分区文件, 一个分区对应一个文件
# 1个分区对应 1个reduce task, 在1个reduce task中:
# (1) 去拉取 map task 在磁盘上的, 我对应要处理的分区文件, 然后进行归并排序
# (2) 从归并排序后的文件中, 按顺序提取出 (key, key 对应的 value-list ) 输入给reduce 函数,
# 如果是两张表join, 则此步骤相当于完成了按照key的join 操作
# 2. 可以看出 spark 相较于 MapReduce ,操作更加灵活, 在spark 中shuffle 是可选的
# table.glom().collect()
# [[('1', ['a', '27']), ('4', ['d', '21']), ('1', ['male']), ('4', ['female'])],
# [('2', ['b', '24']),
# ('3', ['c', '23']),
# ('5', ['e', '22']),
# ('6', ['f', '20']),
# ('2', ['female']),
# ('5', ['male'])]]
# 可以看出一共有2个分区, 并且相同的 key 在同一分区
def process_oneslice(one_slice, col_num):
"""
对一个分区的处理
:param one_slice:
:param col_num:
:return:
"""
res = []
hash_table = {}
for line in one_slice:
key = line[0]
value = line[1]
if key not in hash_table:
hash_table[key] = value
else:
hash_table[key] = hash_table[key] + value
for key, value in hash_table.items():
if len(value) == col_num: # 这一行的 col 个数 匹配 说明 关联成功
res.append([key] + value)
return res
col_num = 3 # 最终表 除了 Key 之外 应该有 3 个列(字段)
table = table.mapPartitions(lambda one_slice: process_oneslice(one_slice, col_num))
# table.glom().collect()
table_one_slice = table.map(lambda line: ",".join(line)).coalesce(1, shuffle=True) # 输出为 一个切片
table_one_slice.saveAsTextFile(table_dir)
def hash_join(self,table_a_dir,table_b_dir,table_dir):
"""
利用 基本 算子 实现 hash join
:return:
"""
spark = SparkSession.builder.appName("hash_join").getOrCreate()
sc = spark.sparkContext
sliceNum = 2
table_a = sc.textFile(table_a_dir, sliceNum)
table_b = sc.textFile(table_b_dir, sliceNum)
table_a = table_a.map(lambda line: line.split(',')) # 大表
table_b = table_b.map(lambda line: line.split(',')) # 小表
table_a = table_a.map(lambda line: (line[0], line[1:])) # 只能有 两个元素 ,第1个为 Key; 否则后面的 groupByKey() 报错
table_b = table_b.map(lambda line: (line[0], line[1:]))
table_b = table_b.collect() # [('1', ['male']), ('2', ['female']), ('4', ['female']), ('5', ['male'])]
hash_table_b = {} # 把小表 做成 hash 表
for line in table_b:
hash_table_b[line[0]] = line[1][0]
# 把小表 作为 广播变量 分发到各个 计算节点上
broadcast_table_b = sc.broadcast(hash_table_b) # SPARK-5063: RDD 不能被广播
def process_oneslice(big_table_slice):
res = []
for line in big_table_slice:
key = line[0]
values = line[1]
                if key in broadcast_table_b.value:  # 通过 .value 读取广播变量的内容 (SPARK-5063)
                    res.append([key] + [broadcast_table_b.value[key]] + values)
return res
table = table_a.mapPartitions(lambda big_table_slice: process_oneslice(big_table_slice))
# table.collect()
table_one_slice = table.map(lambda line: ",".join(line)).coalesce(1, shuffle=True) # 输出为 一个切片
table_one_slice.saveAsTextFile(table_dir)
def shuffle_Hash_join(self):
"""
实现 一个 基于分区 的 Join
:return:
"""
spark = SparkSession.builder.appName("backet_map_join").getOrCreate()
sc = spark.sparkContext
#TODO: 如何 同时 操作 两个分区 中的数据, eg. 把一个 分区中的 数据 放入 内存中 做成 hash 表,与另一个分区 关联
if __name__ == '__main__':
sol = DataProcess()
# sol.find_bad_line()
Test = Test()
# Test.test2()
#--------------- join 函数 测试 -------------#
data_dir = '../data_test/'
table_a_dir = os.path.join(data_dir, 'table_A')
table_b_dir = os.path.join(data_dir, 'table_B')
table_dir = os.path.join(data_dir, 'table')
sol2=Join()
sol2.common_join(table_a_dir,table_b_dir,table_dir)
# sol2.hash_join(table_a_dir, table_b_dir, table_dir)
| #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
from collections import *
from numpy import *
import re
import json
from pyspark.sql import SparkSession
from functools import reduce
class Test:
def test1(self):
"""
不可见字符 的 打印 和 写入文本
:return:
"""
SOH=chr(0x01) # SOH
next_line_char=chr(0x0a) # 换行键(LF)
# print(next_line_char) # 根本看不到 ; 只能写到文本 中使用 notepad++ 查看
NUL=chr(0x00)
STX=chr(0x02)
EXT=chr(0x03)
        a='\x01' # 直接从文本 中 粘SOH 过来 (the pasted SOH is invisible; written here as an escape so ord(a) == 1)
print(ord(a)) # 输出 ASCII 码 为 1
# 换行符 不能直接从文本 粘贴过来,这样直接在 IDE中就换行了
line_str=next_line_char + SOH + NUL + STX + EXT
data_dir = '../data_test/'
test_file_dir = os.path.join(data_dir, 'invisible_characters.csv') #
self.encoding='utf-8'
with open(test_file_dir , "wb+") as f_test:
f_test.seek(0, 0) # 指向 切片文件的 首位
row_text = line_str
row_text_bytes = row_text.encode(self.encoding)
f_test.write(row_text_bytes)
def test2(self):
orgin = " '!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ "
orgin = " \'!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ "
print('[{0}]'.format(re.escape(orgin)))
class DataProcess:
"""
by XRH
date: 2020-06-14
利用 spark 做数据 处理
功能:
1.对 临时索引文件 tmp_index_spark.bin 按照 termid 进行排序
2. 在 大文件中 输出 满足 特定 条件的 行
2.1 读取 使用 0x01 作为分隔符 的 CSV ,并输出 字段数目不匹配的行
"""
def sort_by_termid(self):
"""
对 临时索引文件 tmp_index_spark.bin 按照 termid 进行排序
:return:
"""
spark = SparkSession.builder.appName("sort_by_termid").getOrCreate()
sc = spark.sparkContext
# nums = sc.parallelize([1, 2, 3, 4])
# print(nums.map(lambda x: x * x).collect())
# data_dir = '..\\data\\' # windows 下必须使用 '\\'
# tmp_index_file_dir = data_dir + 'tmp_index_spark.bin' # for spark
data_dir='../data/'
tmp_index_file_dir=os.path.join(data_dir, 'tmp_index_spark.bin')
sorted_tmp_index_file_dir=data_dir+'sorted_'+'tmp_index_spark.bin'
lines = sc.textFile(tmp_index_file_dir,8) # 对大文件 进行切片 sliceNum=8,否则报错
# 20/05/30 11:54:28 ERROR PythonRunner: This may have been caused by a prior exception:
# java.net.SocketException: Connection reset by peer: socket write error
# print(lines.first()) # 看第一条
# print(lines.take(10)) # 可以看 指定数目的记录
# for line in lines : #TypeError: 'RDD' object is not iterable
# print(line)
lines=lines.map(lambda line:line.split("\t"))
lines=lines.sortBy(lambda x: x[0])
#TODO: 过滤掉 term_id =0 ,因为 term_id==0 为标点符号
lines=lines.map(lambda line: line[0]+"\t"+line[1]) #采用 分隔符 区分两个 域
# lines.saveAsTextFile(sorted_tmp_index_file_dir) # 在文件夹下 保存 sliceNum 个切片文件
lines.coalesce(1, shuffle=True).saveAsTextFile(sorted_tmp_index_file_dir) # 在文件夹下 只有一个 切片
# lines.saveAsSingleTextFile(sorted_tmp_index_file_dir)
# list_b=[['1', '1'], ['0', '1'], ['2', '1'], ['3', '1'], ['4', '1'], ['5', '1'], ['6', '1'], ['7', '1'], ['8', '1'],
# ['9', '1']]
# lines = sc.parallelize(list_b)
# lines=lines.sortBy(lambda x: x[0])
# print(lines.take(10))
def __process_oneline(self,line, col_num):
"""
自定义 Map 中的 lambda 函数
处理 文件中的 每一行
:param line: 文件中的一行,已经是字符串了 (str)
:param col_num: 预设的字段数
:return:
"""
line_array = line.split("\x01")
length = len(line_array)
line_array.append(length)
res = None
if length != col_num: # 实际 字段数目 不符合 预设的 col_num
res = line_array
return res # 每一行 必须都 要有 返回
def __process_oneslice(self,lines_slice, col_num):
"""
自定义 mapPartitions 中的 lambda 函数
这个计算 过程被 分发给了 spark 的计算节点,
计算节点 使用本地的数据分片进行 计算
:param lines_slice: 文件的切片,其中有多行数据 (字符串数组)
:param col_num:
:return:
"""
res = []
for line in lines_slice:
# line 为字符串
line_array = line.split("\x01")
length = len(line_array)
line_array.append(length) # 记录 总的字段数目
if length != col_num: # 找到 字段数目 不符合 col_num 的
res.append(line + str(line_array))
return res
def find_bad_line(self):
"""
spark 读取 使用 0x01 作为分隔符 的 CSV ,并输出 字段数目不匹配的行
:return:
"""
spark = SparkSession.builder.appName("find_bad_line").getOrCreate()
sc = spark.sparkContext
data_dir = '../data_test/'
test_file_dir = os.path.join(data_dir, '20200614.csv')
result_file_dir = os.path.join(data_dir, '20200614-result.csv')
sliceNum = 2
lines = sc.textFile(test_file_dir, sliceNum) # 对大文件 进行切片 sliceNum=8,否则报错
# 20/05/30 11:54:28 ERROR PythonRunner: This may have been caused by a prior exception:
# java.net.SocketException: Connection reset by peer: socket write error
# print(lines.take(10))
col_num = 3
# linesByMap = lines.map(lambda line: self.__process_oneline(line, col_num))
# print(linesByMap.take(10)) # [None, ['1', ' abc \x03 ', ' 超哥 ', ' 666 ', 4], None]
linesByMapPartitions = lines.mapPartitions(lambda lines_slice: self.__process_oneslice(lines_slice, col_num))
# print(linesByMapPartitions.take(10))
# 分区合并
one_slice = linesByMapPartitions.coalesce(1, shuffle=True)
one_slice.saveAsTextFile(result_file_dir) # 先删除之前的文件,否则报错
class Join:
"""
by XRH
date: 2020-06-16
利用 spark 的基础算子 实现 常见的 join 算法
功能:
1. 朴素的 MapReduce 的 join
2. 基于 广播变量的 hash join
3.
"""
def common_join(self,table_a_dir,table_b_dir,table_dir):
"""
利用基本算子 实现 MapReduce 的 join
:param table_a:
:param table_b:
:return:
"""
spark = SparkSession.builder.appName("common_Join").getOrCreate()
sc = spark.sparkContext
sliceNum = 2
table_a = sc.textFile(table_a_dir, sliceNum) # 2个分区
table_b = sc.textFile(table_b_dir, sliceNum) # 2个分区
table_a = table_a.map(lambda line: line.split(','))
table_b = table_b.map(lambda line: line.split(','))
table_a = table_a.map(lambda line: (line[0], line[1:])) # 只能有 两个元素 ,第1个为 Key; 否则后面的 groupByKey() 报错
table_b = table_b.map(lambda line: (line[0], line[1:]))
table = table_a.union(table_b) # 合并后 分区的数目 也是 两个 RDD 的分区的和
# table.glom().collect() # 输出 各个分区 的元素 列表
# [[('1', ['a', '27']), ('2', ['b', '24']), ('3', ['c', '23'])],
# [('4', ['d', '21']), ('5', ['e', '22']), ('6', ['f', '20'])],
# [('1', ['male']), ('2', ['female'])],
# [('4', ['female']), ('5', ['male'])]]
# 可以看出一共有4个分区
#重新划分为2个分区, 默认采用 hash 分区, 因此 key 相同的会被 shuffle 到1个分区中
table = table.partitionBy(2)
# 1.此处原理与 MapReduce 不同, MapReduce 肯定会做shuffle
# 一般 1个hdfs 的block对应 1个map task, 在1个map task中:
# (1) 在环形缓冲区, 数据按照 分区+key 进行快速排序了
# (2) 环形缓冲区溢出到磁盘, 对每一个分区对应的多个溢出文件进行归并排序, 最后生成 分区文件, 一个分区对应一个文件
# 1个分区对应 1个reduce task, 在1个reduce task中:
# (1) 去拉取 map task 在磁盘上的, 我对应要处理的分区文件, 然后进行归并排序
# (2) 从归并排序后的文件中, 按顺序提取出 (key, key 对应的 value-list ) 输入给reduce 函数,
# 如果是两张表join, 则此步骤相当于完成了按照key的join 操作
# 2. 可以看出 spark 相较于 MapReduce ,操作更加灵活, 在spark 中shuffle 是可选的
# table.glom().collect()
# [[('1', ['a', '27']), ('4', ['d', '21']), ('1', ['male']), ('4', ['female'])],
# [('2', ['b', '24']),
# ('3', ['c', '23']),
# ('5', ['e', '22']),
# ('6', ['f', '20']),
# ('2', ['female']),
# ('5', ['male'])]]
# 可以看出一共有2个分区, 并且相同的 key 在同一分区
def process_oneslice(one_slice, col_num):
"""
对一个分区的处理
:param one_slice:
:param col_num:
:return:
"""
res = []
hash_table = {}
for line in one_slice:
key = line[0]
value = line[1]
if key not in hash_table:
hash_table[key] = value
else:
hash_table[key] = hash_table[key] + value
for key, value in hash_table.items():
if len(value) == col_num: # 这一行的 col 个数 匹配 说明 关联成功
res.append([key] + value)
return res
col_num = 3 # 最终表 除了 Key 之外 应该有 3 个列(字段)
table = table.mapPartitions(lambda one_slice: process_oneslice(one_slice, col_num))
# table.glom().collect()
table_one_slice = table.map(lambda line: ",".join(line)).coalesce(1, shuffle=True) # 输出为 一个切片
table_one_slice.saveAsTextFile(table_dir)
def hash_join(self,table_a_dir,table_b_dir,table_dir):
"""
利用 基本 算子 实现 hash join
:return:
"""
spark = SparkSession.builder.appName("hash_join").getOrCreate()
sc = spark.sparkContext
sliceNum = 2
table_a = sc.textFile(table_a_dir, sliceNum)
table_b = sc.textFile(table_b_dir, sliceNum)
table_a = table_a.map(lambda line: line.split(',')) # 大表
table_b = table_b.map(lambda line: line.split(',')) # 小表
table_a = table_a.map(lambda line: (line[0], line[1:])) # 只能有 两个元素 ,第1个为 Key; 否则后面的 groupByKey() 报错
table_b = table_b.map(lambda line: (line[0], line[1:]))
table_b = table_b.collect() # [('1', ['male']), ('2', ['female']), ('4', ['female']), ('5', ['male'])]
hash_table_b = {} # 把小表 做成 hash 表
for line in table_b:
hash_table_b[line[0]] = line[1][0]
# 把小表 作为 广播变量 分发到各个 计算节点上
broadcast_table_b = sc.broadcast(hash_table_b) # SPARK-5063: RDD 不能被广播
def process_oneslice(big_table_slice):
res = []
for line in big_table_slice:
key = line[0]
values = line[1]
                if key in broadcast_table_b.value:  # 通过 .value 读取广播变量的内容 (SPARK-5063)
                    res.append([key] + [broadcast_table_b.value[key]] + values)
return res
table = table_a.mapPartitions(lambda big_table_slice: process_oneslice(big_table_slice))
# table.collect()
table_one_slice = table.map(lambda line: ",".join(line)).coalesce(1, shuffle=True) # 输出为 一个切片
table_one_slice.saveAsTextFile(table_dir)
def shuffle_Hash_join(self):
"""
实现 一个 基于分区 的 Join
:return:
"""
spark = SparkSession.builder.appName("backet_map_join").getOrCreate()
sc = spark.sparkContext
#TODO: 如何 同时 操作 两个分区 中的数据, eg. 把一个 分区中的 数据 放入 内存中 做成 hash 表,与另一个分区 关联
if __name__ == '__main__':
sol = DataProcess()
# sol.find_bad_line()
Test = Test()
# Test.test2()
#--------------- join 函数 测试 -------------#
data_dir = '../data_test/'
table_a_dir = os.path.join(data_dir, 'table_A')
table_b_dir = os.path.join(data_dir, 'table_B')
table_dir = os.path.join(data_dir, 'table')
sol2=Join()
sol2.common_join(table_a_dir,table_b_dir,table_dir)
# sol2.hash_join(table_a_dir, table_b_dir, table_dir)
| zh | 0.873465 | #!/usr/bin/python # -*- coding: UTF-8 -*- 不可见字符 的 打印 和 写入文本 :return: # SOH # 换行键(LF) # print(next_line_char) # 根本看不到 ; 只能写到文本 中使用 notepad++ 查看 # 直接从文本 中 粘SOH 过来 # 输出 ASCII 码 为 1 # 换行符 不能直接从文本 粘贴过来,这样直接在 IDE中就换行了 # # 指向 切片文件的 首位 by XRH date: 2020-06-14 利用 spark 做数据 处理 功能: 1.对 临时索引文件 tmp_index_spark.bin 按照 termid 进行排序 2. 在 大文件中 输出 满足 特定 条件的 行 2.1 读取 使用 0x01 作为分隔符 的 CSV ,并输出 字段数目不匹配的行 对 临时索引文件 tmp_index_spark.bin 按照 termid 进行排序 :return: # nums = sc.parallelize([1, 2, 3, 4]) # print(nums.map(lambda x: x * x).collect()) # data_dir = '..\\data\\' # windows 下必须使用 '\\' # tmp_index_file_dir = data_dir + 'tmp_index_spark.bin' # for spark # 对大文件 进行切片 sliceNum=8,否则报错 # 20/05/30 11:54:28 ERROR PythonRunner: This may have been caused by a prior exception: # java.net.SocketException: Connection reset by peer: socket write error # print(lines.first()) # 看第一条 # print(lines.take(10)) # 可以看 指定数目的记录 # for line in lines : #TypeError: 'RDD' object is not iterable # print(line) #TODO: 过滤掉 term_id =0 ,因为 term_id==0 为标点符号 #采用 分隔符 区分两个 域 # lines.saveAsTextFile(sorted_tmp_index_file_dir) # 在文件夹下 保存 sliceNum 个切片文件 # 在文件夹下 只有一个 切片 # lines.saveAsSingleTextFile(sorted_tmp_index_file_dir) # list_b=[['1', '1'], ['0', '1'], ['2', '1'], ['3', '1'], ['4', '1'], ['5', '1'], ['6', '1'], ['7', '1'], ['8', '1'], # ['9', '1']] # lines = sc.parallelize(list_b) # lines=lines.sortBy(lambda x: x[0]) # print(lines.take(10)) 自定义 Map 中的 lambda 函数 处理 文件中的 每一行 :param line: 文件中的一行,已经是字符串了 (str) :param col_num: 预设的字段数 :return: # 实际 字段数目 不符合 预设的 col_num # 每一行 必须都 要有 返回 自定义 mapPartitions 中的 lambda 函数 这个计算 过程被 分发给了 spark 的计算节点, 计算节点 使用本地的数据分片进行 计算 :param lines_slice: 文件的切片,其中有多行数据 (字符串数组) :param col_num: :return: # line 为字符串 # 记录 总的字段数目 # 找到 字段数目 不符合 col_num 的 spark 读取 使用 0x01 作为分隔符 的 CSV ,并输出 字段数目不匹配的行 :return: # 对大文件 进行切片 sliceNum=8,否则报错 # 20/05/30 11:54:28 ERROR PythonRunner: This may have been caused by a prior exception: # java.net.SocketException: Connection reset by peer: socket write error # print(lines.take(10)) # linesByMap = lines.map(lambda line: self.__process_oneline(line, col_num)) # print(linesByMap.take(10)) # [None, ['1', ' abc \x03 ', ' 超哥 ', ' 666 ', 4], None] # print(linesByMapPartitions.take(10)) # 分区合并 # 先删除之前的文件,否则报错 by XRH date: 2020-06-16 利用 spark 的基础算子 实现 常见的 join 算法 功能: 1. 朴素的 MapReduce 的 join 2. 基于 广播变量的 hash join 3. 利用基本算子 实现 MapReduce 的 join :param table_a: :param table_b: :return: # 2个分区 # 2个分区 # 只能有 两个元素 ,第1个为 Key; 否则后面的 groupByKey() 报错 # 合并后 分区的数目 也是 两个 RDD 的分区的和 # table.glom().collect() # 输出 各个分区 的元素 列表 # [[('1', ['a', '27']), ('2', ['b', '24']), ('3', ['c', '23'])], # [('4', ['d', '21']), ('5', ['e', '22']), ('6', ['f', '20'])], # [('1', ['male']), ('2', ['female'])], # [('4', ['female']), ('5', ['male'])]] # 可以看出一共有4个分区 #重新划分为2个分区, 默认采用 hash 分区, 因此 key 相同的会被 shuffle 到1个分区中 # 1.此处原理与 MapReduce 不同, MapReduce 肯定会做shuffle # 一般 1个hdfs 的block对应 1个map task, 在1个map task中: # (1) 在环形缓冲区, 数据按照 分区+key 进行快速排序了 # (2) 环形缓冲区溢出到磁盘, 对每一个分区对应的多个溢出文件进行归并排序, 最后生成 分区文件, 一个分区对应一个文件 # 1个分区对应 1个reduce task, 在1个reduce task中: # (1) 去拉取 map task 在磁盘上的, 我对应要处理的分区文件, 然后进行归并排序 # (2) 从归并排序后的文件中, 按顺序提取出 (key, key 对应的 value-list ) 输入给reduce 函数, # 如果是两张表join, 则此步骤相当于完成了按照key的join 操作 # 2. 
可以看出 spark 相较于 MapReduce ,操作更加灵活, 在spark 中shuffle 是可选的 # table.glom().collect() # [[('1', ['a', '27']), ('4', ['d', '21']), ('1', ['male']), ('4', ['female'])], # [('2', ['b', '24']), # ('3', ['c', '23']), # ('5', ['e', '22']), # ('6', ['f', '20']), # ('2', ['female']), # ('5', ['male'])]] # 可以看出一共有2个分区, 并且相同的 key 在同一分区 对一个分区的处理 :param one_slice: :param col_num: :return: # 这一行的 col 个数 匹配 说明 关联成功 # 最终表 除了 Key 之外 应该有 3 个列(字段) # table.glom().collect() # 输出为 一个切片 利用 基本 算子 实现 hash join :return: # 大表 # 小表 # 只能有 两个元素 ,第1个为 Key; 否则后面的 groupByKey() 报错 # [('1', ['male']), ('2', ['female']), ('4', ['female']), ('5', ['male'])] # 把小表 做成 hash 表 # 把小表 作为 广播变量 分发到各个 计算节点上 # SPARK-5063: RDD 不能被广播 # table.collect() # 输出为 一个切片 实现 一个 基于分区 的 Join :return: #TODO: 如何 同时 操作 两个分区 中的数据, eg. 把一个 分区中的 数据 放入 内存中 做成 hash 表,与另一个分区 关联 # sol.find_bad_line() # Test.test2() #--------------- join 函数 测试 -------------# # sol2.hash_join(table_a_dir, table_b_dir, table_dir) | 2.778204 | 3 |
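hash_join() above ships the small table to the executors with sc.broadcast and, as the SPARK-5063 comment hints, the executors must read it through the broadcast's .value attribute. The hedged, local-mode sketch below isolates that pattern with toy data instead of the CSV files under ../data_test/; it assumes a working pyspark installation.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").appName("hash_join_sketch").getOrCreate()
sc = spark.sparkContext

big = sc.parallelize([("1", ["a", "27"]), ("2", ["b", "24"]), ("3", ["c", "23"])], 2)
small = {"1": "male", "2": "female"}   # small table built on the driver
bc_small = sc.broadcast(small)         # shipped once to every executor

def join_partition(rows):
    lookup = bc_small.value            # read the broadcast value on the executor side
    for key, values in rows:
        if key in lookup:
            yield [key, lookup[key]] + values

print(big.mapPartitions(join_partition).collect())
# [['1', 'male', 'a', '27'], ['2', 'female', 'b', '24']]
spark.stop()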
tripleo_ansible/ansible_plugins/modules/tripleo_container_config_scripts.py | beagles/tripleo-ansible | 22 | 6632617 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__metaclass__ = type
import os
import yaml
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = """
---
module: tripleo_container_config_scripts
author:
- "TripleO team"
version_added: '2.9'
short_description: Generate container config scripts
notes: []
description:
- It will generate the TripleO container config scripts.
requirements:
- None
options:
config_data:
description:
- Content of container_config_scripts.yaml file (must be YAML format)
type: dict
required: true
config_dir:
description:
- Directory where config scripts will be written.
type: str
default: /var/lib/container-config-scripts
"""
EXAMPLES = """
- name: Write container config scripts
tripleo_container_config_scripts:
config_data:
container_puppet_apply.sh:
content: "#!/bin/bash\npuppet apply"
mode: "0700"
config_dir: /var/lib/container-config-scripts
"""
def main():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
supports_check_mode=True,
)
results = dict(
changed=False
)
# parse args
args = module.params
# Set parameters
config_data = args['config_data']
config_dir = args['config_dir']
if not module.check_mode:
for path, config in config_data.items():
# this is specific to how the files are written in config-download
mode = config.get('mode', '0600')
config_path = os.path.join(config_dir, path)
with open(config_path, "w") as config_file:
config_file.write(config['content'])
os.chmod(config_path, int(mode, 8))
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__metaclass__ = type
import os
import yaml
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = """
---
module: tripleo_container_config_scripts
author:
- "TripleO team"
version_added: '2.9'
short_description: Generate container config scripts
notes: []
description:
- It will generate the TripleO container config scripts.
requirements:
- None
options:
config_data:
description:
- Content of container_config_scripts.yaml file (must be YAML format)
type: dict
required: true
config_dir:
description:
- Directory where config scripts will be written.
type: str
default: /var/lib/container-config-scripts
"""
EXAMPLES = """
- name: Write container config scripts
tripleo_container_config_scripts:
config_data:
container_puppet_apply.sh:
content: "#!/bin/bash\npuppet apply"
mode: "0700"
config_dir: /var/lib/container-config-scripts
"""
def main():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
supports_check_mode=True,
)
results = dict(
changed=False
)
# parse args
args = module.params
# Set parameters
config_data = args['config_data']
config_dir = args['config_dir']
if not module.check_mode:
for path, config in config_data.items():
# this is specific to how the files are written in config-download
mode = config.get('mode', '0600')
config_path = os.path.join(config_dir, path)
with open(config_path, "w") as config_file:
config_file.write(config['content'])
os.chmod(config_path, int(mode, 8))
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
| en | 0.684911 | #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. --- module: tripleo_container_config_scripts author: - "TripleO team" version_added: '2.9' short_description: Generate container config scripts notes: [] description: - It will generate the TripleO container config scripts. requirements: - None options: config_data: description: - Content of container_config_scripts.yaml file (must be YAML format) type: dict required: true config_dir: description: - Directory where config scripts will be written. type: str default: /var/lib/container-config-scripts - name: Write container config scripts tripleo_container_config_scripts: config_data: container_puppet_apply.sh: content: "#!/bin/bash\npuppet apply" mode: "0700" config_dir: /var/lib/container-config-scripts # parse args # Set parameters # this is specific to how the files are written in config-download | 2.032669 | 2 |
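The module above converts the octal "mode" string with int(mode, 8) before calling os.chmod on each rendered script. A small standalone sketch of that write-then-chmod step is shown below, using a temporary directory on a POSIX filesystem instead of /var/lib/container-config-scripts; the file name and content are made-up examples.
import os
import tempfile

config_data = {
    "container_puppet_apply.sh": {"content": "#!/bin/bash\necho hello\n", "mode": "0700"},
}

with tempfile.TemporaryDirectory() as config_dir:
    for path, config in config_data.items():
        mode = config.get("mode", "0600")
        config_path = os.path.join(config_dir, path)
        with open(config_path, "w") as config_file:
            config_file.write(config["content"])
        os.chmod(config_path, int(mode, 8))  # "0700" (octal string) -> 0o700
        print(oct(os.stat(config_path).st_mode & 0o777))  # 0o700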
rastercube/io.py | terrai/rastercube | 16 | 6632618 | """
IO helper functions that transparently deal with both local files (fs://) and
HDFS files (hdfs://)
"""
import os
import shutil
import numpy as np
import rastercube.utils as utils
import rastercube.hadoop.common as terrahdfs
def strip_uri_proto(uri, proto):
"""
Remove the protocol prefix in a URI. Turns
fs:///foo/bar
into
/foo/bar
"""
if uri.startswith(proto):
stripped = uri[len(proto):]
assert stripped.startswith('/'),\
"Relative path in proto-prefixed URI : %s" % uri
return stripped
else:
return uri
def fs_write(fname, data, hdfs_client=None):
"""
Write to local fs or HDFS based on fname uri
"""
if fname.startswith('hdfs://'):
fname = strip_uri_proto(fname, 'hdfs://')
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
# When writing fractions to HDFS, we might want to adapt the
# blocksize to match the file size to avoid fractionning and avoid
# using too much space
# http://grokbase.com/t/cloudera/cdh-user/133z6yj74d/how-to-write-a-file-with-custom-block-size-to-hdfs
# TODO: Not sure if this has a performance impact
# round up to MB and add 2MB to have a margin (on-disk size is not
# exactly equal to len(data) for some reason... block metadata ?)
blocksize_mb = int(np.ceil(len(data) / (1024 * 1024))) + 2
# minimum blocksize = 1MB - fully masked fracs are pretty small
blocksize = 1024 * 1024 * max(1, blocksize_mb)
with hdfs_client.write(fname, overwrite=True,
blocksize=blocksize) as writer:
writer.write(data)
# Verify write correctness by requesting file status and check
# that filesize < blocksize
stat = hdfs_client.status(fname)
assert stat['blockSize'] > stat['length'], "blockSize <= length for "\
" file %s" % fname
else:
fname = strip_uri_proto(fname, 'fs://')
outdir = os.path.dirname(fname)
utils.mkdir_p(outdir)
with open(fname, 'wb') as f:
f.write(data)
def fs_read(fname, hdfs_client=None):
"""
Read a local (fs://) or HDFS (hdfs://) file as a blob
"""
if fname.startswith('hdfs://'):
fname = strip_uri_proto(fname, 'hdfs://')
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
with hdfs_client.read(fname) as reader:
blob = reader.read()
return blob
else:
fname = strip_uri_proto(fname, 'fs://')
with open(fname, 'rb') as f:
blob = f.read()
return blob
def fs_delete(path, hdfs_client=None, recursive=False):
"""
Delete a local (fs://) or HDFS(hdfs://) file or directory, possibly
recursively
"""
if path.startswith('hdfs://'):
path = strip_uri_proto(path, 'hdfs://')
assert len(path) > 0
assert path != '/'
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
hdfs_client.delete(path, recursive=recursive)
else:
path = strip_uri_proto(path, 'fs://')
assert len(path) > 0
assert path != '/'
if recursive:
shutil.rmtree(path)
else:
if os.path.isdir(path):
os.rmdir(path)
else:
os.remove(path)
def fs_exists(fname, hdfs_client=None):
"""
Test if a file exists
"""
if fname.startswith('hdfs://'):
fname = strip_uri_proto(fname, 'hdfs://')
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
return hdfs_client.status(fname, strict=False) is not None
else:
fname = strip_uri_proto(fname, 'fs://')
return os.path.exists(fname)
def fs_list(dirname, hdfs_client=None):
"""
List a directory
"""
if dirname.startswith('hdfs://'):
dirname = strip_uri_proto(dirname, 'hdfs://')
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
return hdfs_client.list(dirname)
else:
dirname = strip_uri_proto(dirname, 'fs://')
return os.listdir(dirname)
| """
IO helper functions that transparently deal with both local files (fs://) and
HDFS files (hdfs://)
"""
import os
import shutil
import numpy as np
import rastercube.utils as utils
import rastercube.hadoop.common as terrahdfs
def strip_uri_proto(uri, proto):
"""
Remove the protocol prefix in a URI. Turns
fs:///foo/bar
into
/foo/bar
"""
if uri.startswith(proto):
stripped = uri[len(proto):]
assert stripped.startswith('/'),\
"Relative path in proto-prefixed URI : %s" % uri
return stripped
else:
return uri
def fs_write(fname, data, hdfs_client=None):
"""
Write to local fs or HDFS based on fname uri
"""
if fname.startswith('hdfs://'):
fname = strip_uri_proto(fname, 'hdfs://')
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
# When writing fractions to HDFS, we might want to adapt the
# blocksize to match the file size to avoid fractionning and avoid
# using too much space
# http://grokbase.com/t/cloudera/cdh-user/133z6yj74d/how-to-write-a-file-with-custom-block-size-to-hdfs
# TODO: Not sure if this has a performance impact
# round up to MB and add 2MB to have a margin (on-disk size is not
# exactly equal to len(data) for some reason... block metadata ?)
blocksize_mb = int(np.ceil(len(data) / (1024 * 1024))) + 2
# minimum blocksize = 1MB - fully masked fracs are pretty small
blocksize = 1024 * 1024 * max(1, blocksize_mb)
with hdfs_client.write(fname, overwrite=True,
blocksize=blocksize) as writer:
writer.write(data)
# Verify write correctness by requesting file status and check
# that filesize < blocksize
stat = hdfs_client.status(fname)
assert stat['blockSize'] > stat['length'], "blockSize <= length for "\
" file %s" % fname
else:
fname = strip_uri_proto(fname, 'fs://')
outdir = os.path.dirname(fname)
utils.mkdir_p(outdir)
with open(fname, 'wb') as f:
f.write(data)
def fs_read(fname, hdfs_client=None):
"""
Read a local (fs://) or HDFS (hdfs://) file as a blob
"""
if fname.startswith('hdfs://'):
fname = strip_uri_proto(fname, 'hdfs://')
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
with hdfs_client.read(fname) as reader:
blob = reader.read()
return blob
else:
fname = strip_uri_proto(fname, 'fs://')
with open(fname, 'rb') as f:
blob = f.read()
return blob
def fs_delete(path, hdfs_client=None, recursive=False):
"""
Delete a local (fs://) or HDFS(hdfs://) file or directory, possibly
recursively
"""
if path.startswith('hdfs://'):
path = strip_uri_proto(path, 'hdfs://')
assert len(path) > 0
assert path != '/'
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
hdfs_client.delete(path, recursive=recursive)
else:
path = strip_uri_proto(path, 'fs://')
assert len(path) > 0
assert path != '/'
if recursive:
shutil.rmtree(path)
else:
if os.path.isdir(path):
os.rmdir(path)
else:
os.remove(path)
def fs_exists(fname, hdfs_client=None):
"""
Test if a file exists
"""
if fname.startswith('hdfs://'):
fname = strip_uri_proto(fname, 'hdfs://')
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
return hdfs_client.status(fname, strict=False) is not None
else:
fname = strip_uri_proto(fname, 'fs://')
return os.path.exists(fname)
def fs_list(dirname, hdfs_client=None):
"""
List a directory
"""
if dirname.startswith('hdfs://'):
dirname = strip_uri_proto(dirname, 'hdfs://')
if hdfs_client is None:
hdfs_client = terrahdfs.hdfs_client()
return hdfs_client.list(dirname)
else:
dirname = strip_uri_proto(dirname, 'fs://')
return os.listdir(dirname)
| en | 0.819081 | IO helper functions that transparently deal with both loca files (fs://) and HDFS files (hdfs://) Remove the protocol prefix in a URI. Turns fs:///foo/bar into /foo/bar Write to local fs or HDFS based on fname uri # When writing fractions to HDFS, we might want to adapt the # blocksize to match the file size to avoid fractionning and avoid # using too much space # http://grokbase.com/t/cloudera/cdh-user/133z6yj74d/how-to-write-a-file-with-custom-block-size-to-hdfs # TODO: Not sure if this has a performance impact # round up to MB and add 2MB to have a margin (on-disk size is not # exactly equal to len(data) for some reason... block metadata ?) # minimum blocksize = 1MB - fully masked fracs are pretty small # Verify write correctness by requesting file status and check # that filesize < blocksize Read a local (fs://) or HDFS (hdfs://) file as a blob Delete a local (fs://) or HDFS(hdfs://) file or directory, possibly recursively Test if a file exists List a directory | 2.484745 | 2 |
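fs_write() above picks an HDFS block size just above the payload: the byte count is rounded up to whole MiB, a 2 MiB margin is added, and the result is floored at 1 MiB. The sketch below isolates that arithmetic so the rule is easy to check; it only assumes numpy.
import numpy as np

def hdfs_blocksize(num_bytes):
    # round up to whole MiB, add a 2 MiB margin, never go below 1 MiB
    blocksize_mb = int(np.ceil(num_bytes / (1024 * 1024))) + 2
    return 1024 * 1024 * max(1, blocksize_mb)

print(hdfs_blocksize(0))                # 2097152  (2 MiB even for an empty blob)
print(hdfs_blocksize(5 * 1024 * 1024))  # 7340032  (5 MiB payload -> 7 MiB block)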
maya/custom_nodes_python/retargetSolver/CapsuleLink.py | JeromeEippers/python_rnd_collection | 0 | 6632619 | <reponame>JeromeEippers/python_rnd_collection
import mtypes as t
class CapsuleLink(object):
def __init__(self, distance, ratioA, ratioB, normalA, normalB, relativePq):
self.distance = distance
self.ratioA = ratioA
self.ratioB = ratioB
self.normalA = normalA
self.normalB = normalB
self.relativePq = relativePq
def __repr__(self):
return """CapsuleLink(
distance={},
ratioA={},
ratioB={},
normalA={},
normalB={},
relativePq={}
)""".format(self.distance, self.ratioA, self.ratioB, self.normalA, self.normalB, self.relativePq)
@classmethod
def gather(cls, capsuleA, capsuleB):
return cls(*(capsuleA.distance(capsuleB)))
def solve(self, capsuleA, capsuleB, weight=1.0, ABRatio=0.0, AOrientationRatio=1.0):
A = t.PosQuat(capsuleA.globalSurfacePosition(self.ratioA, self.normalA), capsuleA.pq.q)
B = t.PosQuat(capsuleB.globalSurfacePosition(self.ratioB, self.normalB), capsuleB.pq.q)
resultA = capsuleA.pq.copy()
resultB = capsuleB.pq.copy()
#compute the target pq
target = A * self.relativePq
localATarget = capsuleA.pq.inverse() * target
localB = capsuleB.pq.inverse() * B
if ABRatio < 0.999 :
#compute how we will move capsuleB so B matches the target
resultB = target * (B.inverse() * capsuleB.pq)
#if the ratio is not 1.0 we will move B a little then compute the motion we have to do on A to reach also the target
if ABRatio > 0.001 :
resultB = t.PosQuat.lerp( capsuleB.pq, resultB, 1.0-ABRatio )
if ABRatio > 0.001 :
#compute how we will move primA so that target matches bPQ
goalB = (resultB * localB)
#check if we want to move only in translation or not
#in that case we change the goalB (to reach) to have the same orientation as what the target is already, so no rotation will happen
if AOrientationRatio < 0.999:
goalB = (resultB * localB)
goalB.q = t.Quaternion.lerp(target.q, goalB.q, AOrientationRatio)
resultA = goalB * ( target.inverse() * capsuleA.pq )
            #check that primA has been moved completely and not only in translation
#otherwise we move back the primB to make sure we are solving the constraint
if AOrientationRatio < 0.999:
resultB = (resultA * localATarget) * ( B.inverse() * capsuleB.pq )
#solve weights
resultA = t.PosQuat.lerp( capsuleA.pq, resultA, weight )
resultB = t.PosQuat.lerp( capsuleB.pq, resultB, weight )
return resultA, resultB
| import mtypes as t
class CapsuleLink(object):
def __init__(self, distance, ratioA, ratioB, normalA, normalB, relativePq):
self.distance = distance
self.ratioA = ratioA
self.ratioB = ratioB
self.normalA = normalA
self.normalB = normalB
self.relativePq = relativePq
def __repr__(self):
return """CapsuleLink(
distance={},
ratioA={},
ratioB={},
normalA={},
normalB={},
relativePq={}
)""".format(self.distance, self.ratioA, self.ratioB, self.normalA, self.normalB, self.relativePq)
@classmethod
def gather(cls, capsuleA, capsuleB):
return cls(*(capsuleA.distance(capsuleB)))
def solve(self, capsuleA, capsuleB, weight=1.0, ABRatio=0.0, AOrientationRatio=1.0):
A = t.PosQuat(capsuleA.globalSurfacePosition(self.ratioA, self.normalA), capsuleA.pq.q)
B = t.PosQuat(capsuleB.globalSurfacePosition(self.ratioB, self.normalB), capsuleB.pq.q)
resultA = capsuleA.pq.copy()
resultB = capsuleB.pq.copy()
#compute the target pq
target = A * self.relativePq
localATarget = capsuleA.pq.inverse() * target
localB = capsuleB.pq.inverse() * B
if ABRatio < 0.999 :
#compute how we will move capsuleB so B matches the target
resultB = target * (B.inverse() * capsuleB.pq)
#if the ratio is not 1.0 we will move B a little then compute the motion we have to do on A to reach also the target
if ABRatio > 0.001 :
resultB = t.PosQuat.lerp( capsuleB.pq, resultB, 1.0-ABRatio )
if ABRatio > 0.001 :
#compute how we will move primA so that target matches bPQ
goalB = (resultB * localB)
#check if we want to move only in translation or not
#in that case we change the goalB (to reach) to have the same orientation as what the target is already, so no rotation will happen
if AOrientationRatio < 0.999:
goalB = (resultB * localB)
goalB.q = t.Quaternion.lerp(target.q, goalB.q, AOrientationRatio)
resultA = goalB * ( target.inverse() * capsuleA.pq )
            #check that primA has been moved completely and not only in translation
#otherwise we move back the primB to make sure we are solving the constraint
if AOrientationRatio < 0.999:
resultB = (resultA * localATarget) * ( B.inverse() * capsuleB.pq )
#solve weights
resultA = t.PosQuat.lerp( capsuleA.pq, resultA, weight )
resultB = t.PosQuat.lerp( capsuleB.pq, resultB, weight )
return resultA, resultB | en | 0.854876 | CapsuleLink( distance={}, ratioA={}, ratioB={}, normalA={}, normalB={}, relativePq={} ) #compute the target pq #compute how we will move capsuleB so B matches the target #if the ratio is not 1.0 we will move B a little then compute the motion we have to do on A to reach also the target #compute how we will move primA so that target matches bPQ #check if we want to move only in translation or not #in that case we change the goalB (to reach) to have the same orientation as what the target is already, so no rotation will happen #check that primA has been moved completly and not only on translation #otherwise we move back the primB to make sure we are solving the constraint #solve weights | 2.569821 | 3 |
rest.py | zoomdbz/rest | 0 | 6632620 | <filename>rest.py
#!/usr/bin/env python3
#MIT License
#Copyright (C) 2019 <EMAIL>
#https://github.com/plasticuproject/rest
#Thanks to mikesz81 for concept and nbulischeck for code review
#linpeas.sh script added by Dividesbyzer0
from termcolor import cprint
import subprocess
import paramiko
import argparse
import pathlib
import re
badpacks = ('centos','debian','ubuntu','redhat','addon','agent','apps','base','bin','bsd','cache','check','client','command',
'common','configuration','control','core','cron','data','database','dev','editor','events','extras','family','file',
'files','form','ftp','generic','gnu','headers','http','info','installation','kernel','legacy','linux','load','manager',
'message','module','monitor','net','network','one','open','patch','path','plugin','plugins','release','router','secure',
'security','server','ssl','software','standard','support','system','team','text','the','theme','time','toolkit','tools',
'unix','update','user','utility','viewer','web','wifi','windows','wireless')
ssh_errors = (paramiko.ssh_exception.AuthenticationException,
paramiko.ssh_exception.BadAuthenticationType,
paramiko.ssh_exception.BadHostKeyException,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.PartialAuthentication,
paramiko.ssh_exception.PasswordRequiredException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.SSHException)
def info():
#print tool information
cprint('\nRemote Exploit Scan Tool', 'red', attrs=['bold'])
cprint('Remotely scan Linux system packages via SSH.\n', attrs=['bold'])
print('Use SSH credentials to remotely scan linux system')
print('packages for known exploits in Exploit-DB and run')
print('basic enumeration scripts.\n')
def get_args():
# parse arguments
parser = argparse.ArgumentParser(description=info())
parser.add_argument('host', type=str, metavar='hostname', help='hostname or IP address of remote machine')
parser.add_argument('user', type=str, metavar='username', help='username used to login to host')
parser.add_argument('-n', type=int, metavar='port_number', nargs='?', help='port number (default is 22)', default=22)
parser.add_argument('-p', type=str, metavar='password', help='password for user')
parser.add_argument('-k', type=str, metavar='key_file', help='location of RSA or DSA Key file')
parser.add_argument('-ss', action='store_true', help='run package list against searchsploit database')
parser.add_argument('-le', action='store_true', help='run LinEnum.sh and return LE_report')
parser.add_argument('-t', action='store_true', help='add thorough switch to -le LinEnum.sh')
parser.add_argument('-lp', action='store_true', help='run linpeas.sh and return LP_report')
parser.add_argument('-ps', action='store_true', help='run pspy64 or pspy32 with defaults and return pspy_out')
args = parser.parse_args()
return args
def check_searchsploit():
# checks if searchsploit is installed
usr_path = pathlib.Path('/usr/bin/searchsploit')
if usr_path.is_file() == False:
cprint('\n[*]Please install searchsploit to continue.[*]\n', 'red')
quit()
def transfer(ssh, lin_enum, lin_enum_t, linpeas, pspy):
# downloads list of installed packages
sftp = ssh.open_sftp()
ssh.exec_command('dpkg -l > packages.txt')
local_path = pathlib.Path.cwd() / 'packages.txt'
sftp.get('packages.txt', local_path)
ssh.exec_command('rm packages.txt')
format_dpkg_file()
if local_path.stat().st_size == 0:
ssh.exec_command('rpm -qa > packages.txt')
sftp.get('packages.txt', local_path)
ssh.exec_command('rm packages.txt')
format_rpm_file()
cprint('[*]Downloading package list...[*]', 'green')
    if lin_enum == True:
        run_lin_enum(ssh, lin_enum_t)
    if linpeas == True:
        run_linpeas(ssh)
    if pspy == True:
        run_pspy(ssh)
ssh.close()
def sftp_exists(sftp, path):
# check if report file is present
try:
sftp.stat(path)
return True
except FileNotFoundError:
return False
def run_lin_enum(ssh, lin_enum_t):
# run LinEnum.sh on remote machine
cprint('[*]Running LinEnum.sh.[*]', 'green')
cprint('[*]This may take a few minutes...[*]', 'yellow')
sftp = ssh.open_sftp()
script = pathlib.Path.cwd() / 'scripts/LinEnum.sh'
sftp.put(script, '/tmp/LinEnum.sh')
transport = ssh.get_transport()
channel = transport.open_session()
command = 'chmod +x /tmp/LinEnum.sh && /tmp/./LinEnum.sh -r /tmp/LE_report'
command_t = command + ' -t'
if sftp_exists(sftp, '/tmp/LE_report') == True:
ssh.exec_command('rm /tmp/LE_report')
if lin_enum_t == False:
channel.exec_command(command)
elif lin_enum_t == True:
channel.exec_command(command_t)
report = pathlib.Path.cwd() / 'LE_report'
finished = '### SCAN COMPLETE ###'
running = True
while running:
if sftp_exists(sftp, '/tmp/LE_report') == True:
ssh.exec_command('cp /tmp/LE_report /tmp/LE_report_test')
remote_file = sftp.open('/tmp/LE_report_test', 'r')
for line in remote_file:
if finished in line:
running = False
cprint('[*]Downloading LinEnum.sh LE_report...[*]', 'green')
sftp.get('/tmp/LE_report', report)
ssh.exec_command('rm /tmp/LE_report')
ssh.exec_command('rm /tmp/LE_report_test')
ssh.exec_command('rm /tmp/LinEnum.sh')
def run_linpeas(ssh):
# run linpeas.sh on remote machine
cprint('[*]Running linpeas.sh.[*]', 'green')
cprint('[*]This may take a few minutes...[*]', 'yellow')
sftp = ssh.open_sftp()
script = pathlib.Path.cwd() / 'scripts/linpeas.sh'
sftp.put(script, '/tmp/linpeas.sh')
transport = ssh.get_transport()
channel = transport.open_session()
command = 'chmod +x /tmp/linpeas.sh && /tmp/./linpeas.sh > /tmp/LP_report'
if sftp_exists(sftp, '/tmp/LP_report') == True:
ssh.exec_command('rm /tmp/LP_report')
report = pathlib.Path.cwd() / 'LP_report.txt'
finished = 'write-output $output'
running = True
while running:
if sftp_exists(sftp, '/tmp/LP_report') == True:
ssh.exec_command('cp /tmp/LP_report /tmp/LP_report_test')
remote_file = sftp.open('/tmp/LP_report_test', 'r')
for line in remote_file:
if finished in line:
running = False
cprint('[*]Downloading linpeas.sh LP_report...[*]', 'green')
sftp.get('/tmp/LP_report', report)
ssh.exec_command('rm /tmp/LP_report')
ssh.exec_command('rm /tmp/LP_report_test')
ssh.exec_command('rm /tmp/linpeas.sh')
def run_pspy(ssh):
# run pspy on remote machine
stdin, stdout, stderr = ssh.exec_command('uname -p')
arch_check = stdout.readline()
if arch_check == 'x86_64\n':
cprint('[*]Running pspy64 for 2 minutes...[*]', 'green')
script = pathlib.Path.cwd() / 'scripts/pspy64'
else:
cprint('[*]Running pspy32 for 2 minutes...[*]', 'green')
script = pathlib.Path.cwd() / 'scripts/pspy32'
sftp = ssh.open_sftp()
sftp.put(script, '/tmp/pspy')
command = 'chmod +x /tmp/pspy && timeout 30 /tmp/./pspy'
stdin, stdout, stderr = ssh.exec_command(command)
for line in iter(stdout.readline, ''):
print(line, end='')
with open('pspy_out', 'a') as outfile:
outfile.write(line)
ssh.exec_command('rm /tmp/pspy')
cprint('[*]Saving pspy_out...[*]', 'green')
def password_connect(hostname, user, secret, port_num, lin_enum, lin_enum_t, linpeas, pspy):
# connects to remote machine via ssh with user/pass combo
cprint('[*]Connecting to {} as {}...[*]'.format(hostname, user), 'green')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=user, password=<PASSWORD>, port=port_num)
transfer(ssh, lin_enum, lin_enum_t, linpeas, pspy)
def key_file_connect(hostname, user, port_num, secret, key_file, lin_enum, lin_enum_t, linpeas, pspy):
    # connects to remote machine via ssh with private keyfile and downloads list of installed packages
cprint('[*]Connecting to {} as {}...[*]'.format(hostname, user), 'green')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=user, password=<PASSWORD>, port=port_num, key_filename=key_file)
transfer(ssh, lin_enum, lin_enum_t, linpeas, pspy)
def format_dpkg_file():
# format packages.txt file for use in searchsploit
packages = []
first_field = 1
with open('packages.txt', 'r') as f:
packs = f.read().split('\n')
for line in packs:
if line[:2] == 'ii':
fields = line.split()
if len(fields) < 2 + first_field:
continue
search = fields[first_field].find(':')
if search != -1:
soft_name = clean(fields[first_field][:search])
else:
soft_name = clean(fields[first_field])
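            # cut the version string at the first '-', '+' or '~' marker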
search = re.search(r"-|\+|~", fields[first_field + 1])
if search:
soft_version = fields[first_field + 1][:search.span()[0]]
else:
soft_version = fields[first_field + 1]
search = soft_version.find(':')
if search != -1:
soft_version = soft_version[search + 1:]
soft_version = clean_version_string(soft_version)
if not soft_name or not soft_version:
continue
if '-' in soft_name:
for sub_package in soft_name.split('-'):
if len(sub_package)>2 and '.' not in sub_package and sub_package not in badpacks:
name_version = sub_package +' ' + soft_version
else:
if soft_name not in badpacks:
name_version = soft_name + ' ' + soft_version
packages.append(name_version)
path = pathlib.Path.cwd() / 'packages.txt'
path.unlink()
with open('packages.txt', 'a') as f:
for pack in packages:
f.write(pack + '\n')
def format_rpm_file():
#format packages.txt file for use in searchsploit
packages = []
with open('packages.txt', 'r') as f:
packs = f.read().split('\n')
for line in packs:
fields = '.'.join(line.split('.')[:-2]).split('-')
if len(fields) < 2:
continue
soft_name = clean('-'.join(fields[:-2]))
soft_version = clean_version_string(fields[-2])
if not soft_name or not soft_version:
continue
if '-' in soft_name:
for sub_package in soft_name.split('-'):
if len(sub_package)> 2 and '.' not in sub_package and sub_package not in badpacks:
name_version = sub_package + ' ' + soft_version
else:
if soft_name not in badpacks:
name_version = soft_name + ' ' + soft_version
packages.append(name_version)
path = pathlib.Path.cwd() / 'packages.txt'
path.unlink()
with open('packages.txt', 'a') as f:
for pack in packages:
f.write(pack + '\n')
def clean(soft_name):
# clean package name from common strings
for badword in badpacks:
soft_name = re.sub(r'-' + badword, '', soft_name)
return soft_name
def clean_version_string(version_string):
# eliminate invalid characters and last dot from version string
search = re.search(r'[^0-9.]', version_string)
if search:
result = version_string[:search.span()[0]]
else:
result = version_string
if len(result) > 0 and result[-1] == '.':
result = result[:-1]
return result
def searchsploit():
    # checks every package in packages.txt against the searchsploit database, saves the results to file and prints them to screen
cprint('[*]Checking packages against Searchsploit Database...[*]', 'green')
cprint('[*]Please be patient, this may take a few minutes...[*]', 'yellow')
no_result = 'Exploits: No Result\nShellcodes: No Result\n'
packs = []
with open('packages.txt', 'r') as f:
packages = f.read().split('\n')
for package in packages:
res = subprocess.run(['searchsploit', package], capture_output=True)
output = res.stdout.decode('utf-8')
if output != no_result:
print(output)
packs.append(output)
cprint('[*]Writing results to exploits.txt...[*]', 'green')
with open('exploits.txt', 'a') as exploits:
for pack in packs:
exploits.write(pack)
def clean_old(lin_enum, linpeas, pspy, ss):
# removes files from past runs
path = pathlib.Path.cwd() / 'packages.txt'
if ss == True:
if path.is_file():
path.unlink()
path = pathlib.Path.cwd() / 'exploits.txt'
if path.is_file():
path.unlink()
path = pathlib.Path.cwd() / 'LE_report'
if lin_enum == True:
if path.is_file():
path.unlink()
path = pathlib.Path.cwd() / 'LP_report'
if linpeas == True:
if path.is_file():
path.unlink()
path = pathlib.Path.cwd() / 'pspy_out'
if pspy == True:
if path.is_file():
path.unlink()
def main():
# run program
try:
args = get_args()
try:
if args.k == None:
clean_old(args.le, args.lp, args.ps, args.ss)
password_connect(args.host, args.user, args.p, args.n, args.le, args.t, args.lp, args.ps)
elif args.k != None:
                clean_old(args.le, args.lp, args.ps, args.ss)
                key_file_connect(args.host, args.user, args.n, args.p, args.k, args.le, args.t, args.lp, args.ps)
except ssh_errors as e:
print(e)
cprint('[*]Could not connect to {}.[*]'.format(args.host), 'red')
quit()
if args.ss == True:
check_searchsploit()
searchsploit()
cprint('[*]Done[*]', 'green')
except KeyboardInterrupt:
print('\n')
quit()
if __name__ == '__main__':
main()
| <filename>rest.py
#!/usr/bin/env python3
#MIT License
#Copyright (C) 2019 <EMAIL>
#https://github.com/plasticuproject/rest
#Thanks to mikesz81 for concept and nbulischeck for code review
#linpeas.sh script added by Dividesbyzer0
from termcolor import cprint
import subprocess
import paramiko
import argparse
import pathlib
import re
badpacks = ('centos','debian','ubuntu','redhat','addon','agent','apps','base','bin','bsd','cache','check','client','command',
'common','configuration','control','core','cron','data','database','dev','editor','events','extras','family','file',
'files','form','ftp','generic','gnu','headers','http','info','installation','kernel','legacy','linux','load','manager',
'message','module','monitor','net','network','one','open','patch','path','plugin','plugins','release','router','secure',
'security','server','ssl','software','standard','support','system','team','text','the','theme','time','toolkit','tools',
'unix','update','user','utility','viewer','web','wifi','windows','wireless')
ssh_errors = (paramiko.ssh_exception.AuthenticationException,
paramiko.ssh_exception.BadAuthenticationType,
paramiko.ssh_exception.BadHostKeyException,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.PartialAuthentication,
paramiko.ssh_exception.PasswordRequiredException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.SSHException)
def info():
#print tool information
cprint('\nRemote Exploit Scan Tool', 'red', attrs=['bold'])
cprint('Remotely scan Linux system packages via SSH.\n', attrs=['bold'])
print('Use SSH credentials to remotely scan linux system')
print('packages for known exploits in Exploit-DB and run')
print('basic enumeration scripts.\n')
def get_args():
# parse arguments
parser = argparse.ArgumentParser(description=info())
parser.add_argument('host', type=str, metavar='hostname', help='hostname or IP address of remote machine')
parser.add_argument('user', type=str, metavar='username', help='username used to login to host')
parser.add_argument('-n', type=int, metavar='port_number', nargs='?', help='port number (default is 22)', default=22)
parser.add_argument('-p', type=str, metavar='password', help='password for user')
parser.add_argument('-k', type=str, metavar='key_file', help='location of RSA or DSA Key file')
parser.add_argument('-ss', action='store_true', help='run package list against searchsploit database')
parser.add_argument('-le', action='store_true', help='run LinEnum.sh and return LE_report')
parser.add_argument('-t', action='store_true', help='add thorough switch to -le LinEnum.sh')
parser.add_argument('-lp', action='store_true', help='run linpeas.sh and return LP_report')
parser.add_argument('-ps', action='store_true', help='run pspy64 or pspy32 with defaults and return pspy_out')
args = parser.parse_args()
return args
def check_searchsploit():
# checks if searchsploit is installed
usr_path = pathlib.Path('/usr/bin/searchsploit')
if usr_path.is_file() == False:
cprint('\n[*]Please install searchsploit to continue.[*]\n', 'red')
quit()
def transfer(ssh, lin_enum, lin_enum_t, linpeas, pspy):
# downloads list of installed packages
sftp = ssh.open_sftp()
ssh.exec_command('dpkg -l > packages.txt')
local_path = pathlib.Path.cwd() / 'packages.txt'
sftp.get('packages.txt', local_path)
ssh.exec_command('rm packages.txt')
format_dpkg_file()
if local_path.stat().st_size == 0:
ssh.exec_command('rpm -qa > packages.txt')
sftp.get('packages.txt', local_path)
ssh.exec_command('rm packages.txt')
format_rpm_file()
cprint('[*]Downloading package list...[*]', 'green')
    if lin_enum == True:
        run_lin_enum(ssh, lin_enum_t)
    if linpeas == True:
        run_linpeas(ssh)
    if pspy == True:
        run_pspy(ssh)
ssh.close()
def sftp_exists(sftp, path):
# check if report file is present
try:
sftp.stat(path)
return True
except FileNotFoundError:
return False
def run_lin_enum(ssh, lin_enum_t):
# run LinEnum.sh on remote machine
cprint('[*]Running LinEnum.sh.[*]', 'green')
cprint('[*]This may take a few minutes...[*]', 'yellow')
sftp = ssh.open_sftp()
script = pathlib.Path.cwd() / 'scripts/LinEnum.sh'
sftp.put(script, '/tmp/LinEnum.sh')
transport = ssh.get_transport()
channel = transport.open_session()
command = 'chmod +x /tmp/LinEnum.sh && /tmp/./LinEnum.sh -r /tmp/LE_report'
command_t = command + ' -t'
if sftp_exists(sftp, '/tmp/LE_report') == True:
ssh.exec_command('rm /tmp/LE_report')
if lin_enum_t == False:
channel.exec_command(command)
elif lin_enum_t == True:
channel.exec_command(command_t)
report = pathlib.Path.cwd() / 'LE_report'
finished = '### SCAN COMPLETE ###'
running = True
while running:
if sftp_exists(sftp, '/tmp/LE_report') == True:
ssh.exec_command('cp /tmp/LE_report /tmp/LE_report_test')
remote_file = sftp.open('/tmp/LE_report_test', 'r')
for line in remote_file:
if finished in line:
running = False
cprint('[*]Downloading LinEnum.sh LE_report...[*]', 'green')
sftp.get('/tmp/LE_report', report)
ssh.exec_command('rm /tmp/LE_report')
ssh.exec_command('rm /tmp/LE_report_test')
ssh.exec_command('rm /tmp/LinEnum.sh')
def run_linpeas(ssh):
# run linpeas.sh on remote machine
cprint('[*]Running linpeas.sh.[*]', 'green')
cprint('[*]This may take a few minutes...[*]', 'yellow')
sftp = ssh.open_sftp()
script = pathlib.Path.cwd() / 'scripts/linpeas.sh'
sftp.put(script, '/tmp/linpeas.sh')
transport = ssh.get_transport()
channel = transport.open_session()
command = 'chmod +x /tmp/linpeas.sh && /tmp/./linpeas.sh > /tmp/LP_report'
if sftp_exists(sftp, '/tmp/LP_report') == True:
ssh.exec_command('rm /tmp/LP_report')
report = pathlib.Path.cwd() / 'LP_report.txt'
finished = 'write-output $output'
running = True
while running:
if sftp_exists(sftp, '/tmp/LP_report') == True:
ssh.exec_command('cp /tmp/LP_report /tmp/LP_report_test')
remote_file = sftp.open('/tmp/LP_report_test', 'r')
for line in remote_file:
if finished in line:
running = False
cprint('[*]Downloading linpeas.sh LP_report...[*]', 'green')
sftp.get('/tmp/LP_report', report)
ssh.exec_command('rm /tmp/LP_report')
ssh.exec_command('rm /tmp/LP_report_test')
ssh.exec_command('rm /tmp/linpeas.sh')
def run_pspy(ssh):
# run pspy on remote machine
stdin, stdout, stderr = ssh.exec_command('uname -p')
arch_check = stdout.readline()
if arch_check == 'x86_64\n':
cprint('[*]Running pspy64 for 2 minutes...[*]', 'green')
script = pathlib.Path.cwd() / 'scripts/pspy64'
else:
cprint('[*]Running pspy32 for 2 minutes...[*]', 'green')
script = pathlib.Path.cwd() / 'scripts/pspy32'
sftp = ssh.open_sftp()
sftp.put(script, '/tmp/pspy')
command = 'chmod +x /tmp/pspy && timeout 30 /tmp/./pspy'
stdin, stdout, stderr = ssh.exec_command(command)
for line in iter(stdout.readline, ''):
print(line, end='')
with open('pspy_out', 'a') as outfile:
outfile.write(line)
ssh.exec_command('rm /tmp/pspy')
cprint('[*]Saving pspy_out...[*]', 'green')
def password_connect(hostname, user, secret, port_num, lin_enum, lin_enum_t, linpeas, pspy):
# connects to remote machine via ssh with user/pass combo
cprint('[*]Connecting to {} as {}...[*]'.format(hostname, user), 'green')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=user, password=<PASSWORD>, port=port_num)
transfer(ssh, lin_enum, lin_enum_t, linpeas, pspy)
def key_file_connect(hostname, user, port_num, secret, key_file, lin_enum, lin_enum_t, linpeas, pspy):
    # connects to remote machine via ssh with private keyfile and downloads list of installed packages
cprint('[*]Connecting to {} as {}...[*]'.format(hostname, user), 'green')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=user, password=<PASSWORD>, port=port_num, key_filename=key_file)
transfer(ssh, lin_enum, lin_enum_t, linpeas, pspy)
def format_dpkg_file():
# format packages.txt file for use in searchsploit
packages = []
first_field = 1
with open('packages.txt', 'r') as f:
packs = f.read().split('\n')
for line in packs:
if line[:2] == 'ii':
fields = line.split()
if len(fields) < 2 + first_field:
continue
search = fields[first_field].find(':')
if search != -1:
soft_name = clean(fields[first_field][:search])
else:
soft_name = clean(fields[first_field])
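            # cut the version string at the first '-', '+' or '~' marker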
search = re.search(r"-|\+|~", fields[first_field + 1])
if search:
soft_version = fields[first_field + 1][:search.span()[0]]
else:
soft_version = fields[first_field + 1]
search = soft_version.find(':')
if search != -1:
soft_version = soft_version[search + 1:]
soft_version = clean_version_string(soft_version)
if not soft_name or not soft_version:
continue
if '-' in soft_name:
for sub_package in soft_name.split('-'):
if len(sub_package)>2 and '.' not in sub_package and sub_package not in badpacks:
name_version = sub_package +' ' + soft_version
else:
if soft_name not in badpacks:
name_version = soft_name + ' ' + soft_version
packages.append(name_version)
path = pathlib.Path.cwd() / 'packages.txt'
path.unlink()
with open('packages.txt', 'a') as f:
for pack in packages:
f.write(pack + '\n')
def format_rpm_file():
#format packages.txt file for use in searchsploit
packages = []
with open('packages.txt', 'r') as f:
packs = f.read().split('\n')
for line in packs:
fields = '.'.join(line.split('.')[:-2]).split('-')
if len(fields) < 2:
continue
soft_name = clean('-'.join(fields[:-2]))
soft_version = clean_version_string(fields[-2])
if not soft_name or not soft_version:
continue
if '-' in soft_name:
for sub_package in soft_name.split('-'):
if len(sub_package)> 2 and '.' not in sub_package and sub_package not in badpacks:
name_version = sub_package + ' ' + soft_version
else:
if soft_name not in badpacks:
name_version = soft_name + ' ' + soft_version
packages.append(name_version)
path = pathlib.Path.cwd() / 'packages.txt'
path.unlink()
with open('packages.txt', 'a') as f:
for pack in packages:
f.write(pack + '\n')
def clean(soft_name):
# clean package name from common strings
for badword in badpacks:
soft_name = re.sub(r'-' + badword, '', soft_name)
return soft_name
def clean_version_string(version_string):
# eliminate invalid characters and last dot from version string
search = re.search(r'[^0-9.]', version_string)
if search:
result = version_string[:search.span()[0]]
else:
result = version_string
if len(result) > 0 and result[-1] == '.':
result = result[:-1]
return result
def searchsploit():
    # checks every package in packages.txt against the searchsploit database, saves the results to file and prints them to screen
cprint('[*]Checking packages against Searchsploit Database...[*]', 'green')
cprint('[*]Please be patient, this may take a few minutes...[*]', 'yellow')
no_result = 'Exploits: No Result\nShellcodes: No Result\n'
packs = []
with open('packages.txt', 'r') as f:
packages = f.read().split('\n')
for package in packages:
res = subprocess.run(['searchsploit', package], capture_output=True)
output = res.stdout.decode('utf-8')
if output != no_result:
print(output)
packs.append(output)
cprint('[*]Writing results to exploits.txt...[*]', 'green')
with open('exploits.txt', 'a') as exploits:
for pack in packs:
exploits.write(pack)
def clean_old(lin_enum, linpeas, pspy, ss):
# removes files from past runs
path = pathlib.Path.cwd() / 'packages.txt'
if ss == True:
if path.is_file():
path.unlink()
path = pathlib.Path.cwd() / 'exploits.txt'
if path.is_file():
path.unlink()
path = pathlib.Path.cwd() / 'LE_report'
if lin_enum == True:
if path.is_file():
path.unlink()
path = pathlib.Path.cwd() / 'LP_report'
if linpeas == True:
if path.is_file():
path.unlink()
path = pathlib.Path.cwd() / 'pspy_out'
if pspy == True:
if path.is_file():
path.unlink()
def main():
# run program
try:
args = get_args()
try:
if args.k == None:
clean_old(args.le, args.lp, args.ps, args.ss)
password_connect(args.host, args.user, args.p, args.n, args.le, args.t, args.lp, args.ps)
elif args.k != None:
                clean_old(args.le, args.lp, args.ps, args.ss)
                key_file_connect(args.host, args.user, args.n, args.p, args.k, args.le, args.t, args.lp, args.ps)
except ssh_errors as e:
print(e)
cprint('[*]Could not connect to {}.[*]'.format(args.host), 'red')
quit()
if args.ss == True:
check_searchsploit()
searchsploit()
cprint('[*]Done[*]', 'green')
except KeyboardInterrupt:
print('\n')
quit()
if __name__ == '__main__':
main()
| en | 0.790029 | #!/usr/bin/env python3 #MIT License #Copyright (C) 2019 <EMAIL> #https://github.com/plasticuproject/rest #Thanks to mikesz81 for concept and nbulischeck for code review #linpeas.sh script added by Dividesbyzer0 #print tool information # parse arguments # checks if searchsploit is installed # downloads list of installed packages # check if report file is present # run LinEnum.sh on remote machine ## SCAN COMPLETE ###' # run linpeas.sh on remote machine # run pspy on remote machine # connects to remote machine via ssh with user/pass combo # connects to remote machine via ssh with private keyfile and downloads list of instaled packages # format packages.txt file for use in searchsploit #format packages.txt file for use in searchsploit # clean package name from common strings # eliminate invalid characters and last dot from version string # checks every package in pacakages.txt against searchsploit database, saves them to file and prints to screen # removes files from past runs # run program | 1.800968 | 2 |
q_network.py | timokau/task-placement | 1 | 6632621 | <gh_stars>1-10
# EncodeProcessDecode model is based on the graph_nets demo
# https://github.com/deepmind/graph_nets/blob/6f33ee4244ebe016b4d6296dd3eb99625fd9f3af/graph_nets/demos/models.py
"""Graph Q-Network"""
from functools import partial
from graph_nets import modules
from graph_nets import utils_tf
import sonnet as snt
import tensorflow as tf
from tf_util import ragged_boolean_mask
# The abstract sonnet _build function has a (*args, **kwargs) argument
# list, so we can pass whatever we want.
# pylint: disable=arguments-differ
def make_mlp_model(latent_size, num_layers):
"""Multilayer Perceptron followed by layer norm, parameters not
shared"""
return snt.Sequential(
[
# relu activation
snt.nets.MLP(
output_sizes=[latent_size] * num_layers, activate_final=True
),
# normalize to mean 0, sd 1
snt.LayerNorm(),
]
)
class LinearGraphIndependent(snt.AbstractModule):
"""GraphIndependent with linear edge, node, and global models"""
def __init__(
self,
edge_output_size=0,
node_output_size=0,
global_output_size=0,
name="LinearGraphIndependent",
):
super(LinearGraphIndependent, self).__init__(name=name)
edge_fn = lambda: snt.Linear(edge_output_size, name="edge_output")
node_fn = lambda: snt.Linear(node_output_size, name="node_output")
global_fn = lambda: snt.Linear(
global_output_size, name="global_output"
)
with self._enter_variable_scope():
self._network = modules.GraphIndependent(
edge_model_fn=edge_fn,
node_model_fn=node_fn,
global_model_fn=global_fn,
)
def _build(self, inputs):
return self._network(inputs)
class MLPGraphIndependent(snt.AbstractModule):
"""GraphIndependent with MLP edge, node, and global models"""
def __init__(
self,
# for simplicity, all layers have the same size and the edge,
# node and global models use the same structure
latent_size,
num_layers,
name="MLPGraphIndependent",
):
super(MLPGraphIndependent, self).__init__(name=name)
model_fn = partial(
make_mlp_model, latent_size=latent_size, num_layers=num_layers
)
with self._enter_variable_scope():
self._network = modules.GraphIndependent(
edge_model_fn=model_fn,
node_model_fn=model_fn,
global_model_fn=model_fn,
)
def _build(self, inputs):
return self._network(inputs)
class MLPGraphNetwork(snt.AbstractModule):
"""GraphNetwork with MLP edge, node, and global models"""
def __init__(
self,
# for simplicity, all layers have the same size and the edge,
# node and global models use the same structure
latent_size,
num_layers,
name="MLPGraphNetwork",
):
super(MLPGraphNetwork, self).__init__(name=name)
model_fn = partial(
make_mlp_model, latent_size=latent_size, num_layers=num_layers
)
with self._enter_variable_scope():
self._network = modules.GraphNetwork(
edge_model_fn=model_fn,
node_model_fn=model_fn,
global_model_fn=model_fn,
)
def _build(self, inputs):
return self._network(inputs)
class EncodeProcessDecode(snt.AbstractModule):
"""Full encode-process-decode model
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge,
node, and global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing
(message-passing) steps. The input to the Core is the
concatenation of the Encoder's output and the previous output of
the Core (labeled "Hidden(t)" below, where "t" is the processing
step).
- A "Decoder" graph net, which independently decodes the edge, node,
and global attributes (does not compute relations etc.), on each
message-passing step.
Hidden(t) Hidden(t+1)
| ^
*---------* | *------* | *---------*
| | | | | | | |
Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t)
| |---->| | | |
*---------* *------* *---------*
"""
def __init__(
self,
edge_output_size,
node_output_size,
global_output_size,
# for simplicity, all layers have the same size and all MLPs use
# the same structure
latent_size,
num_layers,
name="EncodeProcessDecode",
):
super(EncodeProcessDecode, self).__init__(name=name)
self._encoder = MLPGraphIndependent(latent_size, num_layers)
self._core = MLPGraphNetwork(latent_size, num_layers)
self._decoder = MLPGraphIndependent(latent_size, num_layers)
self._output_transform = LinearGraphIndependent(
edge_output_size=edge_output_size,
node_output_size=node_output_size,
global_output_size=global_output_size,
)
def _build(self, input_op, num_processing_steps):
latent = self._encoder(input_op) # hidden(t)
latent0 = latent
output_ops = []
for _ in range(num_processing_steps):
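            # skip connection: feed the original encoding together with the
            # current latent state into the core at every processing step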
core_input = utils_tf.concat([latent0, latent], axis=1)
latent = self._core(core_input)
decoded_op = self._decoder(latent)
output_ops.append(self._output_transform(decoded_op))
return output_ops
class EdgeQNetwork(snt.AbstractModule):
"""Takes an input_graph, returns q-values.
graph_nets based model that takes an input graph and returns a
(variable length) vector of q-values corresponding to the edges in
the input graph that represent valid actions (according to the
boolean edge attribute in first position)"""
def __init__(
self,
latent_size,
num_layers,
num_processing_steps,
edge_filter_idx,
ignore_first_edge_features,
name="edge_q_network",
):
self._latent_size = latent_size
self._num_layers = num_layers
self._num_processing_steps = num_processing_steps
self._edge_filter_idx = edge_filter_idx
self._ignore_first_edge_features = ignore_first_edge_features
super(EdgeQNetwork, self).__init__(name=name)
def _build(self, graph_tuple):
model = EncodeProcessDecode(
edge_output_size=1, # edge output is the Q-value
global_output_size=0,
node_output_size=0,
latent_size=self._latent_size,
num_layers=self._num_layers,
)
# edges is 2d tensor of all edges in all graphs
# ignore some columns for learning, for example possible bit and
# edge id
learn_graph_tuple = graph_tuple.map(
lambda edges: tf.slice(
edges, [0, self._ignore_first_edge_features], [-1, -1]
),
fields=["edges"],
)
out = model(learn_graph_tuple, self._num_processing_steps)[-1]
q_vals = tf.cast(tf.reshape(out.edges, [-1]), tf.float32)
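        # group the flat per-edge Q-values back into one row per input graph,
        # using each graph's edge count as the row length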
ragged_q_vals = tf.RaggedTensor.from_row_lengths(
q_vals, tf.cast(out.n_edge, tf.int64)
)
def edge_is_possible_action(edge):
possible = edge[self._edge_filter_idx]
return tf.math.equal(possible, 1)
viable_actions_mask = tf.map_fn(
edge_is_possible_action, graph_tuple.edges, dtype=tf.bool
)
ragged_mask = tf.RaggedTensor.from_row_lengths(
viable_actions_mask, tf.cast(graph_tuple.n_edge, tf.int64)
)
result = ragged_boolean_mask(ragged_q_vals, ragged_mask)
return result.to_tensor(default_value=tf.float32.min)
| # EncodeProcessDecode model is based on the graph_nets demo
# https://github.com/deepmind/graph_nets/blob/6f33ee4244ebe016b4d6296dd3eb99625fd9f3af/graph_nets/demos/models.py
"""Graph Q-Network"""
from functools import partial
from graph_nets import modules
from graph_nets import utils_tf
import sonnet as snt
import tensorflow as tf
from tf_util import ragged_boolean_mask
# The abstract sonnet _build function has a (*args, **kwargs) argument
# list, so we can pass whatever we want.
# pylint: disable=arguments-differ
def make_mlp_model(latent_size, num_layers):
"""Multilayer Perceptron followed by layer norm, parameters not
shared"""
return snt.Sequential(
[
# relu activation
snt.nets.MLP(
output_sizes=[latent_size] * num_layers, activate_final=True
),
# normalize to mean 0, sd 1
snt.LayerNorm(),
]
)
class LinearGraphIndependent(snt.AbstractModule):
"""GraphIndependent with linear edge, node, and global models"""
def __init__(
self,
edge_output_size=0,
node_output_size=0,
global_output_size=0,
name="LinearGraphIndependent",
):
super(LinearGraphIndependent, self).__init__(name=name)
edge_fn = lambda: snt.Linear(edge_output_size, name="edge_output")
node_fn = lambda: snt.Linear(node_output_size, name="node_output")
global_fn = lambda: snt.Linear(
global_output_size, name="global_output"
)
with self._enter_variable_scope():
self._network = modules.GraphIndependent(
edge_model_fn=edge_fn,
node_model_fn=node_fn,
global_model_fn=global_fn,
)
def _build(self, inputs):
return self._network(inputs)
class MLPGraphIndependent(snt.AbstractModule):
"""GraphIndependent with MLP edge, node, and global models"""
def __init__(
self,
# for simplicity, all layers have the same size and the edge,
# node and global models use the same structure
latent_size,
num_layers,
name="MLPGraphIndependent",
):
super(MLPGraphIndependent, self).__init__(name=name)
model_fn = partial(
make_mlp_model, latent_size=latent_size, num_layers=num_layers
)
with self._enter_variable_scope():
self._network = modules.GraphIndependent(
edge_model_fn=model_fn,
node_model_fn=model_fn,
global_model_fn=model_fn,
)
def _build(self, inputs):
return self._network(inputs)
class MLPGraphNetwork(snt.AbstractModule):
"""GraphNetwork with MLP edge, node, and global models"""
def __init__(
self,
# for simplicity, all layers have the same size and the edge,
# node and global models use the same structure
latent_size,
num_layers,
name="MLPGraphNetwork",
):
super(MLPGraphNetwork, self).__init__(name=name)
model_fn = partial(
make_mlp_model, latent_size=latent_size, num_layers=num_layers
)
with self._enter_variable_scope():
self._network = modules.GraphNetwork(
edge_model_fn=model_fn,
node_model_fn=model_fn,
global_model_fn=model_fn,
)
def _build(self, inputs):
return self._network(inputs)
class EncodeProcessDecode(snt.AbstractModule):
"""Full encode-process-decode model
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge,
node, and global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing
(message-passing) steps. The input to the Core is the
concatenation of the Encoder's output and the previous output of
the Core (labeled "Hidden(t)" below, where "t" is the processing
step).
- A "Decoder" graph net, which independently decodes the edge, node,
and global attributes (does not compute relations etc.), on each
message-passing step.
Hidden(t) Hidden(t+1)
| ^
*---------* | *------* | *---------*
| | | | | | | |
Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t)
| |---->| | | |
*---------* *------* *---------*
"""
def __init__(
self,
edge_output_size,
node_output_size,
global_output_size,
# for simplicity, all layers have the same size and all MLPs use
# the same structure
latent_size,
num_layers,
name="EncodeProcessDecode",
):
super(EncodeProcessDecode, self).__init__(name=name)
self._encoder = MLPGraphIndependent(latent_size, num_layers)
self._core = MLPGraphNetwork(latent_size, num_layers)
self._decoder = MLPGraphIndependent(latent_size, num_layers)
self._output_transform = LinearGraphIndependent(
edge_output_size=edge_output_size,
node_output_size=node_output_size,
global_output_size=global_output_size,
)
def _build(self, input_op, num_processing_steps):
latent = self._encoder(input_op) # hidden(t)
latent0 = latent
output_ops = []
for _ in range(num_processing_steps):
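            # skip connection: feed the original encoding together with the
            # current latent state into the core at every processing step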
core_input = utils_tf.concat([latent0, latent], axis=1)
latent = self._core(core_input)
decoded_op = self._decoder(latent)
output_ops.append(self._output_transform(decoded_op))
return output_ops
class EdgeQNetwork(snt.AbstractModule):
"""Takes an input_graph, returns q-values.
graph_nets based model that takes an input graph and returns a
(variable length) vector of q-values corresponding to the edges in
the input graph that represent valid actions (according to the
boolean edge attribute in first position)"""
def __init__(
self,
latent_size,
num_layers,
num_processing_steps,
edge_filter_idx,
ignore_first_edge_features,
name="edge_q_network",
):
self._latent_size = latent_size
self._num_layers = num_layers
self._num_processing_steps = num_processing_steps
self._edge_filter_idx = edge_filter_idx
self._ignore_first_edge_features = ignore_first_edge_features
super(EdgeQNetwork, self).__init__(name=name)
def _build(self, graph_tuple):
model = EncodeProcessDecode(
edge_output_size=1, # edge output is the Q-value
global_output_size=0,
node_output_size=0,
latent_size=self._latent_size,
num_layers=self._num_layers,
)
# edges is 2d tensor of all edges in all graphs
# ignore some columns for learning, for example possible bit and
# edge id
learn_graph_tuple = graph_tuple.map(
lambda edges: tf.slice(
edges, [0, self._ignore_first_edge_features], [-1, -1]
),
fields=["edges"],
)
out = model(learn_graph_tuple, self._num_processing_steps)[-1]
q_vals = tf.cast(tf.reshape(out.edges, [-1]), tf.float32)
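        # group the flat per-edge Q-values back into one row per input graph,
        # using each graph's edge count as the row length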
ragged_q_vals = tf.RaggedTensor.from_row_lengths(
q_vals, tf.cast(out.n_edge, tf.int64)
)
def edge_is_possible_action(edge):
possible = edge[self._edge_filter_idx]
return tf.math.equal(possible, 1)
viable_actions_mask = tf.map_fn(
edge_is_possible_action, graph_tuple.edges, dtype=tf.bool
)
ragged_mask = tf.RaggedTensor.from_row_lengths(
viable_actions_mask, tf.cast(graph_tuple.n_edge, tf.int64)
)
result = ragged_boolean_mask(ragged_q_vals, ragged_mask)
return result.to_tensor(default_value=tf.float32.min) | en | 0.772672 | # EncodeProcessDecode model is based on the graph_nets demo # https://github.com/deepmind/graph_nets/blob/6f33ee4244ebe016b4d6296dd3eb99625fd9f3af/graph_nets/demos/models.py Graph Q-Network # The abstract sonnet _build function has a (*args, **kwargs) argument # list, so we can pass whatever we want. # pylint: disable=arguments-differ Multilayer Perceptron followed by layer norm, parameters not shared # relu activation # normalize to mean 0, sd 1 GraphIndependent with linear edge, node, and global models GraphIndependent with MLP edge, node, and global models # for simplicity, all layers have the same size and the edge, # node and global models use the same structure GraphNetwork with MLP edge, node, and global models # for simplicity, all layers have the same size and the edge, # node and global models use the same structure Full encode-process-decode model The model we explore includes three components: - An "Encoder" graph net, which independently encodes the edge, node, and global attributes (does not compute relations etc.). - A "Core" graph net, which performs N rounds of processing (message-passing) steps. The input to the Core is the concatenation of the Encoder's output and the previous output of the Core (labeled "Hidden(t)" below, where "t" is the processing step). - A "Decoder" graph net, which independently decodes the edge, node, and global attributes (does not compute relations etc.), on each message-passing step. Hidden(t) Hidden(t+1) | ^ *---------* | *------* | *---------* | | | | | | | | Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t) | |---->| | | | *---------* *------* *---------* # for simplicity, all layers have the same size and all MLPs use # the same structure # hidden(t) Takes an input_graph, returns q-values. graph_nets based model that takes an input graph and returns a (variable length) vector of q-values corresponding to the edges in the input graph that represent valid actions (according to the boolean edge attribute in first position) # edge output is the Q-value # edges is 2d tensor of all edges in all graphs # ignore some columns for learning, for example possible bit and # edge id | 2.495373 | 2 |
display.py | egurnick/Sorting-Algorithms-Visualizer | 0 | 6632622 | <filename>display.py<gh_stars>0
import pygame
from sys import exit
from math import ceil
# Initialize pygame modules
pygame.init()
# Display settings
windowSize = (900, 500)
screen = pygame.display.set_mode(windowSize)
pygame.display.set_caption('Sorting Algorithms Visualizer')
# Font
baseFont = pygame.font.SysFont('Arial', 24)
# Used Colors
grey = (100, 100, 100)
green = (150, 255, 150)
white = (250, 250, 250)
red = (255, 50, 50)
black = (0, 0, 0)
blue = (50, 50, 255)
class InputBox:
class TextBox:
def __init__(self, label, color, rect):
self.isActive = False
self.text = ''
self.rect = pygame.Rect(rect)
self.label = baseFont.render(label, True, color)
def draw(self, color, width):
xPos = self.rect.x
yPos = self.rect.y
surface = baseFont.render(self.text, True, color)
screen.blit(self.label, (xPos+(self.rect.w - self.label.get_width())/2, yPos - 32))
pygame.draw.rect(screen, color, self.rect, 3)
screen.blit(surface, (xPos + 10, yPos + 10))
self.rect.w = max(surface.get_width() + 20, width)
def write(self, wEvent):
if wEvent.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
else:
self.text += wEvent.unicode
class SliderBox:
def __init__(self, name, rect):
self.isActive = False
self.rect = pygame.Rect(rect)
self.name = name
self.value = self.rect.x+6
def draw(self, color):
xPos = self.rect.x
yPos = self.rect.y
# Draw the label
label = baseFont.render(self.name + ' (%dms)' % (self.value - xPos - 6), True, color)
screen.blit(label, (xPos+(self.rect.w - label.get_width())/2, yPos - 32))
# Draw the rect button
pygame.draw.rect(screen, color, self.rect, 3)
# Draw central line
pygame.draw.line(screen, color, (xPos+6, yPos+25), (xPos+self.rect.w-6, yPos+25), 2)
# Draw bar control
pygame.draw.line(screen, color, (self.value, yPos+5), (self.value, yPos+45), 12)
def write(self):
x = pygame.mouse.get_pos()[0]
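            # clamp the handle position to the slider track bounds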
if x <= self.rect.x+6:
self.value = self.rect.x+6
elif x >= self.rect.w+self.rect.x-6:
self.value = self.rect.w+self.rect.x-6
else:
self.value = x
# Input Boxes
sizeBox = InputBox.TextBox("Size", grey, (30, 440, 50, 50))
delayBox = InputBox.SliderBox("Delay", (105, 440, 112, 50))
algorithmBox = InputBox.TextBox("Algorithm", grey, (242, 440, 112, 50))
# Button
playButton = pygame.image.load('data/playButton.png')
stopButton = pygame.image.load('data/stopButton.png')
button_rect = playButton.get_rect()
button_rect.center = (415, 460)
# Global Variables
numBars = 0
delay = 0
toDraw = True
button = playButton
def drawBars(array, redBar1, redBar2, blueBar1, blueBar2):
"""Draw the bars and control their colors"""
for num in range(numBars):
if num in [redBar1, redBar2]:
color = red
elif num in [blueBar1, blueBar2]:
color = blue
else:
color = grey
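        # each bar spans an equal share of the 900px-wide window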
bar_width = 900/numBars
pygame.draw.rect(screen, color, [num * bar_width, 400 - array[num], ceil(bar_width), array[num]])
def drawBottomMenu():
"""Draw the menu below the bars"""
sizeBox.draw(grey, 50)
delayBox.draw(grey)
algorithmBox.draw(grey, 100)
screen.blit(button, (390, 435))
def drawInterface(array, redBar1, redBar2, blueBar1, blueBar2):
"""Draw all the interface"""
screen.fill(white)
drawBars(array, redBar1, redBar2, blueBar1, blueBar2)
drawBottomMenu()
pygame.display.update()
def handleDrawing(array, redBar1, redBar2, blueBar1, blueBar2):
global toDraw
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
elif event.type == pygame.MOUSEBUTTONDOWN:
if button_rect.collidepoint(event.pos):
toDraw = False
if toDraw:
drawInterface(array, redBar1, redBar2, blueBar1, blueBar2)
pygame.time.wait(delay)
| <filename>display.py<gh_stars>0
import pygame
from sys import exit
from math import ceil
# Initialize pygame modules
pygame.init()
# Display settings
windowSize = (900, 500)
screen = pygame.display.set_mode(windowSize)
pygame.display.set_caption('Sorting Algorithms Visualizer')
# Font
baseFont = pygame.font.SysFont('Arial', 24)
# Used Colors
grey = (100, 100, 100)
green = (150, 255, 150)
white = (250, 250, 250)
red = (255, 50, 50)
black = (0, 0, 0)
blue = (50, 50, 255)
class InputBox:
class TextBox:
def __init__(self, label, color, rect):
self.isActive = False
self.text = ''
self.rect = pygame.Rect(rect)
self.label = baseFont.render(label, True, color)
def draw(self, color, width):
xPos = self.rect.x
yPos = self.rect.y
surface = baseFont.render(self.text, True, color)
screen.blit(self.label, (xPos+(self.rect.w - self.label.get_width())/2, yPos - 32))
pygame.draw.rect(screen, color, self.rect, 3)
screen.blit(surface, (xPos + 10, yPos + 10))
self.rect.w = max(surface.get_width() + 20, width)
def write(self, wEvent):
if wEvent.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
else:
self.text += wEvent.unicode
class SliderBox:
def __init__(self, name, rect):
self.isActive = False
self.rect = pygame.Rect(rect)
self.name = name
self.value = self.rect.x+6
def draw(self, color):
xPos = self.rect.x
yPos = self.rect.y
# Draw the label
label = baseFont.render(self.name + ' (%dms)' % (self.value - xPos - 6), True, color)
screen.blit(label, (xPos+(self.rect.w - label.get_width())/2, yPos - 32))
# Draw the rect button
pygame.draw.rect(screen, color, self.rect, 3)
# Draw central line
pygame.draw.line(screen, color, (xPos+6, yPos+25), (xPos+self.rect.w-6, yPos+25), 2)
# Draw bar control
pygame.draw.line(screen, color, (self.value, yPos+5), (self.value, yPos+45), 12)
def write(self):
x = pygame.mouse.get_pos()[0]
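            # clamp the handle position to the slider track bounds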
if x <= self.rect.x+6:
self.value = self.rect.x+6
elif x >= self.rect.w+self.rect.x-6:
self.value = self.rect.w+self.rect.x-6
else:
self.value = x
# Input Boxes
sizeBox = InputBox.TextBox("Size", grey, (30, 440, 50, 50))
delayBox = InputBox.SliderBox("Delay", (105, 440, 112, 50))
algorithmBox = InputBox.TextBox("Algorithm", grey, (242, 440, 112, 50))
# Button
playButton = pygame.image.load('data/playButton.png')
stopButton = pygame.image.load('data/stopButton.png')
button_rect = playButton.get_rect()
button_rect.center = (415, 460)
# Global Variables
numBars = 0
delay = 0
toDraw = True
button = playButton
def drawBars(array, redBar1, redBar2, blueBar1, blueBar2):
"""Draw the bars and control their colors"""
for num in range(numBars):
if num in [redBar1, redBar2]:
color = red
elif num in [blueBar1, blueBar2]:
color = blue
else:
color = grey
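        # each bar spans an equal share of the 900px-wide window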
bar_width = 900/numBars
pygame.draw.rect(screen, color, [num * bar_width, 400 - array[num], ceil(bar_width), array[num]])
def drawBottomMenu():
"""Draw the menu below the bars"""
sizeBox.draw(grey, 50)
delayBox.draw(grey)
algorithmBox.draw(grey, 100)
screen.blit(button, (390, 435))
def drawInterface(array, redBar1, redBar2, blueBar1, blueBar2):
"""Draw all the interface"""
screen.fill(white)
drawBars(array, redBar1, redBar2, blueBar1, blueBar2)
drawBottomMenu()
pygame.display.update()
def handleDrawing(array, redBar1, redBar2, blueBar1, blueBar2):
global toDraw
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
elif event.type == pygame.MOUSEBUTTONDOWN:
if button_rect.collidepoint(event.pos):
toDraw = False
if toDraw:
drawInterface(array, redBar1, redBar2, blueBar1, blueBar2)
pygame.time.wait(delay)
| en | 0.609971 | # Initialize pygame modules # Display settings # Font # Used Colors # Draw the label # Draw the rect button # Draw central line # Draw bar control # Input Boxes # Button # Global Variables Draw the bars and control their colors Draw the menu below the bars Draw all the interface | 2.97669 | 3 |
arbol-ordinales-jp.py | andresvillamayor/Ejemplos_ML | 0 | 6632623 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 9 09:23:19 2020
Decision Tree
@author: andyvillamayor
"""
#The tree is built automatically by DecisionTreeClassifier
#Parameters = entropy criterion, maximum depth = 6
#libraries
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.tree import export_graphviz
import pydot
from sklearn import preprocessing
#read the data
data= pd.read_csv('creditos.csv',sep =',')
#check the data
print(data.head())
#validate the dataframe
print(pd.isnull(data).sum()) #tipo cartera has 3777 missing NaN values
#compute the age (data check)
data['fechaHora'] = pd.to_datetime(data['fechaHora'])
data['nacimiento'] = pd.to_datetime(data['nacimiento'])
data['edad'] = ((data['fechaHora']-data['nacimiento'])/np.timedelta64(1,'Y')).astype(int)
#the edad (age) column is the last column of the dataframe
# select features and target, discard post-approval variables
# using hold-out
df1 = data.iloc[:,2:3]
df2 = data.iloc[:,83:84]
df3 = data.iloc[:,4:68]
df4 = data.iloc[:,82:83]
# Combine into a single filtered dataframe
df = pd.concat([df1,df2,df3,df4], axis=1)
print (df.shape) # number of rows and columns
#check the changes in the variable explorer
#Encode the data with LabelEncoder()
#Encode each categorical variable with its own encoder instead of using replace
#Nationality
var_nacionalidad = preprocessing.LabelEncoder()
df["nacionalidad"] = var_nacionalidad.fit_transform(df["nacionalidad"])
#Sex (M,F)
var_sexo = preprocessing.LabelEncoder()
df["sexo"] = var_sexo.fit_transform(df["sexo"].astype(str))
#Marital status (S,C)
var_estcivil = preprocessing.LabelEncoder()
df["est_civil"] = var_estcivil.fit_transform(df["est_civil"].astype(str))
# Job position (employee)
var_ocupcargo = preprocessing.LabelEncoder()
df["ocup_cargo"] = var_ocupcargo.fit_transform(df["ocup_cargo"].astype(str))
#New or recurring client (N/R)
var_cliente = preprocessing.LabelEncoder()
df["cliente_nuevo_o_recurrente"] = var_cliente.fit_transform(df["cliente_nuevo_o_recurrente"])
#Whether the client has a Visa Classic card
var_tienevisa = preprocessing.LabelEncoder()
df["tiene_visa_classic"] = var_tienevisa.fit_transform(df["tiene_visa_classic"])
#whether the client has a Visa Gold card
var_tienevisagold= preprocessing.LabelEncoder()
df["tiene_visa_gold"] = var_tienevisagold.fit_transform(df["tiene_visa_gold"])
#has a Mastercard Gold
var_mastercardgold = preprocessing.LabelEncoder()
df["tiene_mc_gold"] = var_mastercardgold.fit_transform(df["tiene_mc_gold"])
#has an FC card
var_tienefc = preprocessing.LabelEncoder()
df["tiene_fc"] = var_tienefc.fit_transform(df["tiene_fc"])
#has a Mastercard Classic
var_mastercardclasica = preprocessing.LabelEncoder()
df["tiene_mc_classic"] = var_mastercardclasica.fit_transform(df["tiene_mc_classic"])
#score band (Faja)
var_faja = preprocessing.LabelEncoder()
df["respuesta_iconf_faja_score"] = var_faja.fit_transform(df["respuesta_iconf_faja_score"].astype(str))
#target: resultadoFinal
var_resultadofinal = preprocessing.LabelEncoder()
df["resultadoFinal"] = var_resultadofinal.fit_transform(df["resultadoFinal"])
########
# split the dataset into train (70%) and test (30%)
X = df.iloc[:,0:66 ]
y = df['resultadoFinal']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# Train a decision tree using entropy and maximum depth 6
clf = DecisionTreeClassifier(criterion="entropy",min_samples_leaf=50,max_depth=6)
clf = clf.fit(X_train,y_train)
# Predecir con datos de test
y_pred = clf.predict(X_test)
print('-----------------------------------------------------------------------------------------')
# Accuracy: (tp+tn)/n - donde n es la cantidad de FP+FN+TP+TN
print("Accuracy - Acertividad y Ecxactitud de las muestras:",metrics.accuracy_score(y_test, y_pred))
print(metrics.precision_recall_fscore_support(y_test, y_pred, average=None))
print('-----------------------------------------------------------------------------------------')
print (
'Se entreno el modelo con 30% de datos 0.3 en el test_size'
' usando entropia con max de profundidad de 6'
' Este modelo esta inclinado hacia los creditos malos con un 88 % '
)
print('----------------------------')
print('Calcular matriz de confusion')
print('----------------------------')
#metrics.confusion_matrix(y_test, y_pred)
print(pd.crosstab(y_test, y_pred,
rownames=['actual'],
colnames=['pred'], margins=False, margins_name="Total")
)
# Obtener importancia de variables y vertificar variables mas relevantes
print('----------------------------')
print('Importancia de Variables')
print('Variables mas relevantes')
print('----------------------------')
fi = pd.DataFrame(zip(X.columns,clf.feature_importances_), columns=['feature','importance'])
print(fi[fi['importance'] > 0.0].sort_values(by=['importance'], ascending=False))
# cargar exportador de grafos y funcion de llamada a sistema
export_graphviz(clf, out_file="creditos.dot",
filled=True, rounded=True,
special_characters=True, feature_names = X.columns,class_names = ['BIEN','MAL'])
(graph,) = pydot.graph_from_dot_file('creditos.dot')
graph.write_png('creditos.png')
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 9 09:23:19 2020
Arbol de Desiciones
@author: andyvillamayor
"""
#El calulo del arbol esta hecho automaticamente por DecisionTreeClassifier
#Parametros = entropia valor maximo de desglose = 6
#librerias
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.tree import export_graphviz
import pydot
from sklearn import preprocessing
#lectura de datos
data= pd.read_csv('creditos.csv',sep =',')
#verificacion de los datos
print(data.head())
#validacion del dataframe
print(pd.isnull(data).sum()) #tipo cartera tiene 3777 valores faltantes Nan
#calular los la edad verificacion de datos
data['fechaHora'] = pd.to_datetime(data['fechaHora'])
data['nacimiento'] = pd.to_datetime(data['nacimiento'])
data['edad'] = ((data['fechaHora']-data['nacimiento'])/np.timedelta64(1,'Y')).astype(int)
#columna edad esta en el ultimo lugar del dataframe
# seleccionar variables y target, descartar variables pos aprobación
# utilizando Hold out
df1 = data.iloc[:,2:3]
df2 = data.iloc[:,83:84]
df3 = data.iloc[:,4:68]
df4 = data.iloc[:,82:83]
# # Unificar en un dataframe filtrado
df = pd.concat([df1,df2,df3,df4], axis=1)
print (df.shape) # cantidad de filas y columnas
#verificar en el explorador de variables los cambios
#Modificar los datos LabelEncoder()
#Codificar cada variable categorica con su propio encoder sin utilizar el replace
#Nacionalidad
var_nacionalidad = preprocessing.LabelEncoder()
df["nacionalidad"] = var_nacionalidad.fit_transform(df["nacionalidad"])
#Sexo (M,F)
var_sexo = preprocessing.LabelEncoder()
df["sexo"] = var_sexo.fit_transform(df["sexo"].astype(str))
#Estado Civil(S,C)
var_estcivil = preprocessing.LabelEncoder()
df["est_civil"] = var_estcivil.fit_transform(df["est_civil"].astype(str))
# Cargo que ocupa (Empleado)
var_ocupcargo = preprocessing.LabelEncoder()
df["ocup_cargo"] = var_ocupcargo.fit_transform(df["ocup_cargo"].astype(str))
#Cliente Nuevo o Recurrente (N/R)
var_cliente = preprocessing.LabelEncoder()
df["cliente_nuevo_o_recurrente"] = var_cliente.fit_transform(df["cliente_nuevo_o_recurrente"])
#Ver si tiene tarjeta Visa Clasica
var_tienevisa = preprocessing.LabelEncoder()
df["tiene_visa_classic"] = var_tienevisa.fit_transform(df["tiene_visa_classic"])
#ver si tiene visa Gold
var_tienevisagold= preprocessing.LabelEncoder()
df["tiene_visa_gold"] = var_tienevisagold.fit_transform(df["tiene_visa_gold"])
#tiene mastercard gold
var_mastercardgold = preprocessing.LabelEncoder()
df["tiene_mc_gold"] = var_mastercardgold.fit_transform(df["tiene_mc_gold"])
#tiene fc
var_tienefc = preprocessing.LabelEncoder()
df["tiene_fc"] = var_tienefc.fit_transform(df["tiene_fc"])
#tiene mastercard clasica
var_mastercardclasica = preprocessing.LabelEncoder()
df["tiene_mc_classic"] = var_mastercardclasica.fit_transform(df["tiene_mc_classic"])
#ver la Faja
var_faja = preprocessing.LabelEncoder()
df["respuesta_iconf_faja_score"] = var_faja.fit_transform(df["respuesta_iconf_faja_score"].astype(str))
#ver target Resultado final
var_resultadofinal = preprocessing.LabelEncoder()
df["resultadoFinal"] = var_resultadofinal.fit_transform(df["resultadoFinal"])
########
# split dataset en train (70%) y test (30%)
X = df.iloc[:,0:66 ]
y = df['resultadoFinal']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# Entrenar decision tree usando entropia y profundidad maxima 6
clf = DecisionTreeClassifier(criterion="entropy",min_samples_leaf=50,max_depth=6)
clf = clf.fit(X_train,y_train)
# Predecir con datos de test
y_pred = clf.predict(X_test)
print('-----------------------------------------------------------------------------------------')
# Accuracy: (tp+tn)/n - donde n es la cantidad de FP+FN+TP+TN
print("Accuracy - Acertividad y Ecxactitud de las muestras:",metrics.accuracy_score(y_test, y_pred))
print(metrics.precision_recall_fscore_support(y_test, y_pred, average=None))
print('-----------------------------------------------------------------------------------------')
print (
'Se entreno el modelo con 30% de datos 0.3 en el test_size'
' usando entropia con max de profundidad de 6'
' Este modelo esta inclinado hacia los creditos malos con un 88 % '
)
print('----------------------------')
print('Calcular matriz de confusion')
print('----------------------------')
#metrics.confusion_matrix(y_test, y_pred)
print(pd.crosstab(y_test, y_pred,
rownames=['actual'],
colnames=['pred'], margins=False, margins_name="Total")
)
# Obtener importancia de variables y vertificar variables mas relevantes
print('----------------------------')
print('Importancia de Variables')
print('Variables mas relevantes')
print('----------------------------')
fi = pd.DataFrame(zip(X.columns,clf.feature_importances_), columns=['feature','importance'])
print(fi[fi['importance'] > 0.0].sort_values(by=['importance'], ascending=False))
# cargar exportador de grafos y funcion de llamada a sistema
export_graphviz(clf, out_file="creditos.dot",
filled=True, rounded=True,
special_characters=True, feature_names = X.columns,class_names = ['BIEN','MAL'])
(graph,) = pydot.graph_from_dot_file('creditos.dot')
graph.write_png('creditos.png')
| es | 0.745231 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Sat Jun 9 09:23:19 2020 Arbol de Desiciones @author: andyvillamayor #El calulo del arbol esta hecho automaticamente por DecisionTreeClassifier #Parametros = entropia valor maximo de desglose = 6 #librerias #lectura de datos #verificacion de los datos #validacion del dataframe #tipo cartera tiene 3777 valores faltantes Nan #calular los la edad verificacion de datos #columna edad esta en el ultimo lugar del dataframe # seleccionar variables y target, descartar variables pos aprobación # utilizando Hold out # # Unificar en un dataframe filtrado # cantidad de filas y columnas #verificar en el explorador de variables los cambios #Modificar los datos LabelEncoder() #Codificar cada variable categorica con su propio encoder sin utilizar el replace #Nacionalidad #Sexo (M,F) #Estado Civil(S,C) # Cargo que ocupa (Empleado) #Cliente Nuevo o Recurrente (N/R) #Ver si tiene tarjeta Visa Clasica #ver si tiene visa Gold #tiene mastercard gold #tiene fc #tiene mastercard clasica #ver la Faja #ver target Resultado final ######## # split dataset en train (70%) y test (30%) # Entrenar decision tree usando entropia y profundidad maxima 6 # Predecir con datos de test # Accuracy: (tp+tn)/n - donde n es la cantidad de FP+FN+TP+TN #metrics.confusion_matrix(y_test, y_pred) # Obtener importancia de variables y vertificar variables mas relevantes # cargar exportador de grafos y funcion de llamada a sistema | 3.030308 | 3 |
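A minimal, runnable sketch of the encode, split, train and evaluate pattern used in the script above, on a tiny synthetic frame (the column names and values here are illustrative, not taken from creditos.csv):

import pandas as pd
from sklearn import metrics, preprocessing
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# Illustrative stand-in for the real dataframe.
toy = pd.DataFrame({
    'sexo': ['M', 'F', 'F', 'M', 'M', 'F', 'M', 'F'],
    'edad': [25, 40, 31, 52, 23, 36, 47, 29],
    'resultadoFinal': ['BIEN', 'MAL', 'BIEN', 'MAL', 'MAL', 'BIEN', 'BIEN', 'MAL'],
})
# One LabelEncoder per categorical column, as in the script above.
toy['sexo'] = preprocessing.LabelEncoder().fit_transform(toy['sexo'])
toy['resultadoFinal'] = preprocessing.LabelEncoder().fit_transform(toy['resultadoFinal'])

X_train, X_test, y_train, y_test = train_test_split(
    toy[['sexo', 'edad']], toy['resultadoFinal'], test_size=0.3, random_state=1)
clf = DecisionTreeClassifier(criterion='entropy', max_depth=2).fit(X_train, y_train)
print(metrics.accuracy_score(y_test, clf.predict(X_test)))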
test/optimizer/test_jsonscan.py | robes/chisel | 1 | 6632624 | """Unit tests for the JSONDataExtant operator.
"""
import unittest
import deriva.chisel.optimizer as _opt
payload = [
{
'RID': 1,
'property_1': 'hello'
},
{
'RID': 2,
'property_1': 'world'
}
]
class TestJSONScan (unittest.TestCase):
"""Basic tests for JSONDataExtant operator."""
def setUp(self):
self._plan = _opt.JSONDataExtant(input_filename=None, json_content=None, object_payload=payload, key_regex=None)
def tearDown(self):
self._plan = None
def test_logical_planner(self):
self.assertIsNotNone(_opt.logical_planner(self._plan))
def test_physical_planner(self):
lp = _opt.logical_planner(self._plan)
self.assertIsNotNone(_opt.physical_planner(lp))
def test_has_description(self):
lp = _opt.logical_planner(self._plan)
pp = _opt.physical_planner(lp)
self.assertIsNotNone(pp.description, 'description is None')
def test_has_key(self):
lp = _opt.logical_planner(self._plan)
pp = _opt.physical_planner(lp)
self.assertGreaterEqual(len(pp.description['keys']), 1, 'does not have a key definition')
def test_can_iterate_rows(self):
lp = _opt.logical_planner(self._plan)
pp = _opt.physical_planner(lp)
count_rows = 0
for row in pp:
count_rows += 1
self.assertIn('RID', row, 'could not find RID in row')
self.assertIn('property_1', row, 'could not find property_1 in row')
self.assertEqual(count_rows, len(payload), 'could not iterate all rows')
if __name__ == '__main__':
unittest.main()
| """Unit tests for the JSONDataExtant operator.
"""
import unittest
import deriva.chisel.optimizer as _opt
payload = [
{
'RID': 1,
'property_1': 'hello'
},
{
'RID': 2,
'property_1': 'world'
}
]
class TestJSONScan (unittest.TestCase):
"""Basic tests for JSONDataExtant operator."""
def setUp(self):
self._plan = _opt.JSONDataExtant(input_filename=None, json_content=None, object_payload=payload, key_regex=None)
def tearDown(self):
self._plan = None
def test_logical_planner(self):
self.assertIsNotNone(_opt.logical_planner(self._plan))
def test_physical_planner(self):
lp = _opt.logical_planner(self._plan)
self.assertIsNotNone(_opt.physical_planner(lp))
def test_has_description(self):
lp = _opt.logical_planner(self._plan)
pp = _opt.physical_planner(lp)
self.assertIsNotNone(pp.description, 'description is None')
def test_has_key(self):
lp = _opt.logical_planner(self._plan)
pp = _opt.physical_planner(lp)
self.assertGreaterEqual(len(pp.description['keys']), 1, 'does not have a key definition')
def test_can_iterate_rows(self):
lp = _opt.logical_planner(self._plan)
pp = _opt.physical_planner(lp)
count_rows = 0
for row in pp:
count_rows += 1
self.assertIn('RID', row, 'could not find RID in row')
self.assertIn('property_1', row, 'could not find property_1 in row')
self.assertEqual(count_rows, len(payload), 'could not iterate all rows')
if __name__ == '__main__':
unittest.main()
| en | 0.711233 | Unit tests for the JSONDataExtant operator. Basic tests for JSONDataExtant operator. | 3.013136 | 3 |
evdev/eventio.py | alexbprofit/python-evdev | 231 | 6632625 | <gh_stars>100-1000
# encoding: utf-8
import os
import fcntl
import select
import functools
from evdev import _input, _uinput, ecodes, util
from evdev.events import InputEvent
#--------------------------------------------------------------------------
class EvdevError(Exception):
pass
class EventIO(object):
'''
Base class for reading and writing input events.
This class is used by :class:`InputDevice` and :class:`UInput`.
- On :class:`InputDevice`, it is used for reading user-generated events (e.g.
key presses, mouse movements) and writing feedback events (e.g. leds,
beeps).
- On :class:`UInput`, it is used for writing user-generated events (e.g.
key presses, mouse movements) and reading feedback events (e.g. leds,
beeps).
'''
def fileno(self):
'''
Return the file descriptor to the open event device. This makes
it possible to pass instances directly to :func:`select.select()` and
:class:`asyncore.file_dispatcher`.
'''
return self.fd
def read_loop(self):
'''
Enter an endless :func:`select.select()` loop that yields input events.
'''
while True:
r, w, x = select.select([self.fd], [], [])
for event in self.read():
yield event
def read_one(self):
'''
Read and return a single input event as an instance of
:class:`InputEvent <evdev.events.InputEvent>`.
Return ``None`` if there are no pending input events.
'''
# event -> (sec, usec, type, code, val)
event = _input.device_read(self.fd)
if event:
return InputEvent(*event)
def read(self):
'''
Read multiple input events from device. Return a generator object that
yields :class:`InputEvent <evdev.events.InputEvent>` instances. Raises
`BlockingIOError` if there are no available events at the moment.
'''
# events -> [(sec, usec, type, code, val), ...]
events = _input.device_read_many(self.fd)
for event in events:
yield InputEvent(*event)
def need_write(func):
'''
Decorator that raises :class:`EvdevError` if there is no write access to the
input device.
'''
@functools.wraps(func)
def wrapper(*args):
fd = args[0].fd
if fcntl.fcntl(fd, fcntl.F_GETFL) & os.O_RDWR:
return func(*args)
msg = 'no write access to device "%s"' % args[0].path
raise EvdevError(msg)
return wrapper
def write_event(self, event):
'''
Inject an input event into the input subsystem. Events are
queued until a synchronization event is received.
Arguments
---------
event: InputEvent
InputEvent instance or an object with an ``event`` attribute
(:class:`KeyEvent <evdev.events.KeyEvent>`, :class:`RelEvent
<evdev.events.RelEvent>` etc).
Example
-------
>>> ev = InputEvent(1334414993, 274296, ecodes.EV_KEY, ecodes.KEY_A, 1)
>>> ui.write_event(ev)
'''
if hasattr(event, 'event'):
event = event.event
self.write(event.type, event.code, event.value)
@need_write
def write(self, etype, code, value):
'''
Inject an input event into the input subsystem. Events are
queued until a synchronization event is received.
Arguments
---------
etype
event type (e.g. ``EV_KEY``).
code
event code (e.g. ``KEY_A``).
value
event value (e.g. 0 1 2 - depends on event type).
Example
---------
>>> ui.write(e.EV_KEY, e.KEY_A, 1) # key A - down
>>> ui.write(e.EV_KEY, e.KEY_A, 0) # key A - up
'''
_uinput.write(self.fd, etype, code, value)
def close(self):
pass
| # encoding: utf-8
import os
import fcntl
import select
import functools
from evdev import _input, _uinput, ecodes, util
from evdev.events import InputEvent
#--------------------------------------------------------------------------
class EvdevError(Exception):
pass
class EventIO(object):
'''
Base class for reading and writing input events.
This class is used by :class:`InputDevice` and :class:`UInput`.
- On :class:`InputDevice`, it is used for reading user-generated events (e.g.
key presses, mouse movements) and writing feedback events (e.g. leds,
beeps).
- On :class:`UInput`, it is used for writing user-generated events (e.g.
key presses, mouse movements) and reading feedback events (e.g. leds,
beeps).
'''
def fileno(self):
'''
Return the file descriptor to the open event device. This makes
it possible to pass instances directly to :func:`select.select()` and
:class:`asyncore.file_dispatcher`.
'''
return self.fd
def read_loop(self):
'''
Enter an endless :func:`select.select()` loop that yields input events.
'''
while True:
r, w, x = select.select([self.fd], [], [])
for event in self.read():
yield event
def read_one(self):
'''
Read and return a single input event as an instance of
:class:`InputEvent <evdev.events.InputEvent>`.
Return ``None`` if there are no pending input events.
'''
# event -> (sec, usec, type, code, val)
event = _input.device_read(self.fd)
if event:
return InputEvent(*event)
def read(self):
'''
Read multiple input events from device. Return a generator object that
yields :class:`InputEvent <evdev.events.InputEvent>` instances. Raises
`BlockingIOError` if there are no available events at the moment.
'''
# events -> [(sec, usec, type, code, val), ...]
events = _input.device_read_many(self.fd)
for event in events:
yield InputEvent(*event)
def need_write(func):
'''
Decorator that raises :class:`EvdevError` if there is no write access to the
input device.
'''
@functools.wraps(func)
def wrapper(*args):
fd = args[0].fd
if fcntl.fcntl(fd, fcntl.F_GETFL) & os.O_RDWR:
return func(*args)
msg = 'no write access to device "%s"' % args[0].path
raise EvdevError(msg)
return wrapper
def write_event(self, event):
'''
Inject an input event into the input subsystem. Events are
queued until a synchronization event is received.
Arguments
---------
event: InputEvent
InputEvent instance or an object with an ``event`` attribute
(:class:`KeyEvent <evdev.events.KeyEvent>`, :class:`RelEvent
<evdev.events.RelEvent>` etc).
Example
-------
>>> ev = InputEvent(1334414993, 274296, ecodes.EV_KEY, ecodes.KEY_A, 1)
>>> ui.write_event(ev)
'''
if hasattr(event, 'event'):
event = event.event
self.write(event.type, event.code, event.value)
@need_write
def write(self, etype, code, value):
'''
Inject an input event into the input subsystem. Events are
queued until a synchronization event is received.
Arguments
---------
etype
event type (e.g. ``EV_KEY``).
code
event code (e.g. ``KEY_A``).
value
event value (e.g. 0 1 2 - depends on event type).
Example
---------
>>> ui.write(e.EV_KEY, e.KEY_A, 1) # key A - down
>>> ui.write(e.EV_KEY, e.KEY_A, 0) # key A - up
'''
_uinput.write(self.fd, etype, code, value)
def close(self):
pass | en | 0.683059 | # encoding: utf-8 #-------------------------------------------------------------------------- Base class for reading and writing input events. This class is used by :class:`InputDevice` and :class:`UInput`. - On, :class:`InputDevice` it used for reading user-generated events (e.g. key presses, mouse movements) and writing feedback events (e.g. leds, beeps). - On, :class:`UInput` it used for writing user-generated events (e.g. key presses, mouse movements) and reading feedback events (e.g. leds, beeps). Return the file descriptor to the open event device. This makes it possible to pass instances directly to :func:`select.select()` and :class:`asyncore.file_dispatcher`. Enter an endless :func:`select.select()` loop that yields input events. Read and return a single input event as an instance of :class:`InputEvent <evdev.events.InputEvent>`. Return ``None`` if there are no pending input events. # event -> (sec, usec, type, code, val) Read multiple input events from device. Return a generator object that yields :class:`InputEvent <evdev.events.InputEvent>` instances. Raises `BlockingIOError` if there are no available events at the moment. # events -> [(sec, usec, type, code, val), ...] Decorator that raises :class:`EvdevError` if there is no write access to the input device. Inject an input event into the input subsystem. Events are queued until a synchronization event is received. Arguments --------- event: InputEvent InputEvent instance or an object with an ``event`` attribute (:class:`KeyEvent <evdev.events.KeyEvent>`, :class:`RelEvent <evdev.events.RelEvent>` etc). Example ------- >>> ev = InputEvent(1334414993, 274296, ecodes.EV_KEY, ecodes.KEY_A, 1) >>> ui.write_event(ev) Inject an input event into the input subsystem. Events are queued until a synchronization event is received. Arguments --------- etype event type (e.g. ``EV_KEY``). code event code (e.g. ``KEY_A``). value event value (e.g. 0 1 2 - depends on event type). Example --------- >>> ui.write(e.EV_KEY, e.KEY_A, 1) # key A - down >>> ui.write(e.EV_KEY, e.KEY_A, 0) # key A - up | 2.635843 | 3 |
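A hedged usage sketch of the read/write API documented in EventIO above, using the public evdev package. The device node path is an assumption, and both sides need the usual /dev/input and /dev/uinput permissions.

from evdev import InputDevice, UInput, categorize, ecodes

dev = InputDevice('/dev/input/event0')    # assumed device node
for event in dev.read_loop():             # the select() loop from EventIO.read_loop()
    if event.type == ecodes.EV_KEY:
        print(categorize(event))
        break

ui = UInput()                             # writing goes through the same EventIO.write()
ui.write(ecodes.EV_KEY, ecodes.KEY_A, 1)  # key A down
ui.write(ecodes.EV_KEY, ecodes.KEY_A, 0)  # key A up
ui.syn()                                  # flush the queued events
ui.close()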
azure-mgmt-redis/azure/mgmt/redis/models/redis_reboot_parameters.py | CharaD7/azure-sdk-for-python | 0 | 6632626 | <filename>azure-mgmt-redis/azure/mgmt/redis/models/redis_reboot_parameters.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RedisRebootParameters(Model):
"""Specifies which redis node(s) to reboot.
:param reboot_type: Which redis node(s) to reboot. Depending on this
value data loss is possible. Possible values include: 'PrimaryNode',
'SecondaryNode', 'AllNodes'
:type reboot_type: str or :class:`RebootType
<azure.mgmt.redis.models.RebootType>`
:param shard_id: In case of cluster cache, this specifies shard id which
should be rebooted.
:type shard_id: int
"""
_validation = {
'reboot_type': {'required': True},
}
_attribute_map = {
'reboot_type': {'key': 'rebootType', 'type': 'str'},
'shard_id': {'key': 'shardId', 'type': 'int'},
}
def __init__(self, reboot_type, shard_id=None):
self.reboot_type = reboot_type
self.shard_id = shard_id
| <filename>azure-mgmt-redis/azure/mgmt/redis/models/redis_reboot_parameters.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RedisRebootParameters(Model):
"""Specifies which redis node(s) to reboot.
:param reboot_type: Which redis node(s) to reboot. Depending on this
value data loss is possible. Possible values include: 'PrimaryNode',
'SecondaryNode', 'AllNodes'
:type reboot_type: str or :class:`RebootType
<azure.mgmt.redis.models.RebootType>`
:param shard_id: In case of cluster cache, this specifies shard id which
should be rebooted.
:type shard_id: int
"""
_validation = {
'reboot_type': {'required': True},
}
_attribute_map = {
'reboot_type': {'key': 'rebootType', 'type': 'str'},
'shard_id': {'key': 'shardId', 'type': 'int'},
}
def __init__(self, reboot_type, shard_id=None):
self.reboot_type = reboot_type
self.shard_id = shard_id
| en | 0.532538 | # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- Specifies which redis node(s) to reboot. :param reboot_type: Which redis node(s) to reboot. Depending on this value data loss is possible. Possible values include: 'PrimaryNode', 'SecondaryNode', 'AllNodes' :type reboot_type: str or :class:`RebootType <azure.mgmt.redis.models.RebootType>` :param shard_id: In case of cluster cache, this specifies shard id which should be rebooted. :type shard_id: int | 2.040643 | 2 |
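A minimal construction sketch for the model above; the import path assumes the released azure-mgmt-redis package, where this class is exposed under azure.mgmt.redis.models.

from azure.mgmt.redis.models import RedisRebootParameters  # assumed packaging

params = RedisRebootParameters(reboot_type='PrimaryNode', shard_id=0)
print(params.reboot_type, params.shard_id)
# The _attribute_map above maps the Python name reboot_type to the wire name 'rebootType'.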
naeval/syntax/models/slovnet.py | sdspieg/naeval | 36 | 6632627 | <filename>naeval/syntax/models/slovnet.py
from naeval.const import SLOVNET, SLOVNET_BERT
from naeval.chop import chop
from ..markup import Token, Markup
from .base import post, ChunkModel
SLOVNET_CONTAINER_PORT = 8080
SLOVNET_IMAGE = 'natasha/slovnet-morph'
SLOVNET_BERT_IMAGE = 'natasha/slovnet-syntax-bert'
SLOVNET_URL = 'http://{host}:{port}/'
SLOVNET_CHUNK = 1000
def parse_tokens(items):
for item in items:
yield Token(
item['id'], item['text'],
item['head_id'], item['rel']
)
def parse_slovnet(data):
for item in data:
tokens = list(parse_tokens(item['tokens']))
yield Markup(tokens)
def call_slovnet(items, host, port):
url = SLOVNET_URL.format(
host=host,
port=port
)
response = post(url, json=items)
data = response.json()
return parse_slovnet(data)
def map_slovnet(items, host, port):
chunks = chop(items, SLOVNET_CHUNK)
for chunk in chunks:
yield from call_slovnet(chunk, host, port)
class SlovnetModel(ChunkModel):
name = SLOVNET
image = SLOVNET_IMAGE
container_port = SLOVNET_CONTAINER_PORT
def map(self, texts):
return map_slovnet(texts, self.host, self.port)
class SlovnetBERTModel(SlovnetModel):
name = SLOVNET_BERT
image = SLOVNET_BERT_IMAGE
| <filename>naeval/syntax/models/slovnet.py
from naeval.const import SLOVNET, SLOVNET_BERT
from naeval.chop import chop
from ..markup import Token, Markup
from .base import post, ChunkModel
SLOVNET_CONTAINER_PORT = 8080
SLOVNET_IMAGE = 'natasha/slovnet-morph'
SLOVNET_BERT_IMAGE = 'natasha/slovnet-syntax-bert'
SLOVNET_URL = 'http://{host}:{port}/'
SLOVNET_CHUNK = 1000
def parse_tokens(items):
for item in items:
yield Token(
item['id'], item['text'],
item['head_id'], item['rel']
)
def parse_slovnet(data):
for item in data:
tokens = list(parse_tokens(item['tokens']))
yield Markup(tokens)
def call_slovnet(items, host, port):
url = SLOVNET_URL.format(
host=host,
port=port
)
response = post(url, json=items)
data = response.json()
return parse_slovnet(data)
def map_slovnet(items, host, port):
chunks = chop(items, SLOVNET_CHUNK)
for chunk in chunks:
yield from call_slovnet(chunk, host, port)
class SlovnetModel(ChunkModel):
name = SLOVNET
image = SLOVNET_IMAGE
container_port = SLOVNET_CONTAINER_PORT
def map(self, texts):
return map_slovnet(texts, self.host, self.port)
class SlovnetBERTModel(SlovnetModel):
name = SLOVNET_BERT
image = SLOVNET_BERT_IMAGE
| none | 1 | 2.363212 | 2 |
|
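A sketch of the chunk-then-call batching used by map_slovnet above. chop() is re-implemented locally here as an assumption, since naeval's helper is not part of this file.

def chop(items, size):
    # Assumed behaviour of naeval.chop.chop: yield consecutive batches of `size` items.
    buf = []
    for item in items:
        buf.append(item)
        if len(buf) == size:
            yield buf
            buf = []
    if buf:
        yield buf

texts = ['Мама мыла раму.'] * 2500
for chunk in chop(texts, 1000):   # same batching as SLOVNET_CHUNK in map_slovnet()
    print(len(chunk))             # 1000, 1000, 500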
src/tests/test_consumer.py | jankatins/python-kafka-postgresql-project | 2 | 6632628 | <filename>src/tests/test_consumer.py
import pytest
from unittest.mock import MagicMock, Mock
import checkweb.consumer
import checkweb.check_event
def test_handle_event_dict():
# based on https://stackoverflow.com/questions/28850070/python-mocking-a-context-manager
# and a lot of trial and error...
pg_cursor_context_mock = Mock()
cursor_mock = Mock()
pg_cursor_context_mock.return_value.__enter__ = cursor_mock
pg_cursor_context_mock.return_value.__exit__ = Mock()
execute_mock = Mock()
cursor_mock.return_value.execute = execute_mock
checkweb.consumer.pg.postgres_cursor_context = pg_cursor_context_mock
check_event = dict(
timestamp=1,
url='http://www.whatever.de/',
response_time_seconds=1.2,
status_code=1,
found_regex_pattern=False,
exception_message=None,
version=1,
)
checkweb.consumer.handle_event_dict(check_event)
assert execute_mock.call_count == 1
assert 'found_regex_pattern' in execute_mock.call_args.args[0]
assert isinstance(execute_mock.call_args.args[1], dict)
assert execute_mock.call_args.args[1]['url'] == check_event['url']
def test_handle_event_dict_with_bad_event(capsys):
# based on https://stackoverflow.com/questions/28850070/python-mocking-a-context-manager
# and a lot of trial and error...
pg_cursor_context_mock = Mock()
checkweb.consumer.pg.postgres_cursor_context = pg_cursor_context_mock
check_event = dict(
timestamp=1,
url='http://www.whatever.de/',
response_time_seconds=1.2,
status_code=-1,
found_regex_pattern=False,
exception_message=None,
version=1,
)
checkweb.consumer.handle_event_dict(check_event)
captured = capsys.readouterr()
assert 'ignoring event:' in captured.out
assert pg_cursor_context_mock.call_count == 0
def test_migrate():
pg_cursor_context_mock = Mock()
cursor_mock = Mock()
pg_cursor_context_mock.return_value.__enter__ = cursor_mock
pg_cursor_context_mock.return_value.__exit__ = Mock()
execute_mock = Mock()
cursor_mock.return_value.execute = execute_mock
checkweb.consumer.pg.postgres_cursor_context = pg_cursor_context_mock
checkweb.consumer.migrate_db()
# the last migration, so needs adjustments with every new one
assert execute_mock.call_count == 2
assert 'IF NOT EXISTS version SMALLINT NOT NULL DEFAULT 0' in execute_mock.call_args.args[0]
def test_main():
checkweb.consumer.KafkaConsumer = MagicMock()
kafka_consumer_instance = checkweb.consumer.KafkaConsumer.return_value
event2_mock = MagicMock()
event2_mock.value = {3,4}
kafka_consumer_instance.__iter__.return_value = [MagicMock(), event2_mock]
checkweb.consumer.migrate_db = MagicMock()
checkweb.consumer.handle_event_dict = MagicMock()
checkweb.consumer.main()
assert checkweb.consumer.migrate_db.call_count == 1
assert checkweb.consumer.KafkaConsumer.call_count == 1
assert checkweb.consumer.handle_event_dict.call_count == 2
assert checkweb.consumer.handle_event_dict.call_args.args[0] == event2_mock.value
| <filename>src/tests/test_consumer.py
import pytest
from unittest.mock import MagicMock, Mock
import checkweb.consumer
import checkweb.check_event
def test_handle_event_dict():
# based on https://stackoverflow.com/questions/28850070/python-mocking-a-context-manager
# and a lot of trial and error...
pg_cursor_context_mock = Mock()
cursor_mock = Mock()
pg_cursor_context_mock.return_value.__enter__ = cursor_mock
pg_cursor_context_mock.return_value.__exit__ = Mock()
execute_mock = Mock()
cursor_mock.return_value.execute = execute_mock
checkweb.consumer.pg.postgres_cursor_context = pg_cursor_context_mock
check_event = dict(
timestamp=1,
url='http://www.whatever.de/',
response_time_seconds=1.2,
status_code=1,
found_regex_pattern=False,
exception_message=None,
version=1,
)
checkweb.consumer.handle_event_dict(check_event)
assert execute_mock.call_count == 1
assert 'found_regex_pattern' in execute_mock.call_args.args[0]
assert isinstance(execute_mock.call_args.args[1], dict)
assert execute_mock.call_args.args[1]['url'] == check_event['url']
def test_handle_event_dict_with_bad_event(capsys):
# based on https://stackoverflow.com/questions/28850070/python-mocking-a-context-manager
# and a lot of trial and error...
pg_cursor_context_mock = Mock()
checkweb.consumer.pg.postgres_cursor_context = pg_cursor_context_mock
check_event = dict(
timestamp=1,
url='http://www.whatever.de/',
response_time_seconds=1.2,
status_code=-1,
found_regex_pattern=False,
exception_message=None,
version=1,
)
checkweb.consumer.handle_event_dict(check_event)
captured = capsys.readouterr()
assert 'ignoring event:' in captured.out
assert pg_cursor_context_mock.call_count == 0
def test_migrate():
pg_cursor_context_mock = Mock()
cursor_mock = Mock()
pg_cursor_context_mock.return_value.__enter__ = cursor_mock
pg_cursor_context_mock.return_value.__exit__ = Mock()
execute_mock = Mock()
cursor_mock.return_value.execute = execute_mock
checkweb.consumer.pg.postgres_cursor_context = pg_cursor_context_mock
checkweb.consumer.migrate_db()
# the last migration, so needs adjustments with every new one
assert execute_mock.call_count == 2
assert 'IF NOT EXISTS version SMALLINT NOT NULL DEFAULT 0' in execute_mock.call_args.args[0]
def test_main():
checkweb.consumer.KafkaConsumer = MagicMock()
kafka_consumer_instance = checkweb.consumer.KafkaConsumer.return_value
event2_mock = MagicMock()
event2_mock.value = {3,4}
kafka_consumer_instance.__iter__.return_value = [MagicMock(), event2_mock]
checkweb.consumer.migrate_db = MagicMock()
checkweb.consumer.handle_event_dict = MagicMock()
checkweb.consumer.main()
assert checkweb.consumer.migrate_db.call_count == 1
assert checkweb.consumer.KafkaConsumer.call_count == 1
assert checkweb.consumer.handle_event_dict.call_count == 2
assert checkweb.consumer.handle_event_dict.call_args.args[0] == event2_mock.value
| en | 0.876287 | # based on https://stackoverflow.com/questions/28850070/python-mocking-a-context-manager # and a lot of trial and error... # based on https://stackoverflow.com/questions/28850070/python-mocking-a-context-manager # and a lot of trial and error... # the last migration, so needs adjustments with every new one | 2.41958 | 2 |
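The context-manager mocking trick referenced in the tests above (the Stack Overflow link), isolated as a standalone sketch: the mock assigned to __enter__ is called on entry, so the object bound by the with statement is its return_value.

from unittest.mock import Mock

cm_factory = Mock()                                  # stands in for pg.postgres_cursor_context
enter_mock = Mock()
cm_factory.return_value.__enter__ = enter_mock
cm_factory.return_value.__exit__ = Mock(return_value=False)

with cm_factory() as cursor:                         # cursor is enter_mock.return_value
    cursor.execute('SELECT 1')

assert enter_mock.return_value.execute.call_count == 1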
leetcode/M0714_Best_Time_to_Buy_and_Sell_Stock_with_Transaction_Fee.py | jjmoo/daily | 1 | 6632629 | class Solution(object):
def maxProfit(self, prices, fee):
"""
:type prices: List[int]
:type fee: int
:rtype: int
"""
if not prices:
return 0
sold, keep = 0, -1<<31
for price in prices:
last_sold = sold
sold = max(sold, keep + price - fee)
keep = max(keep, last_sold - price)
return sold
print(8, Solution().maxProfit([1, 3, 2, 8, 4, 9], fee = 2))
# You are given an array of integers prices, for which the i-th element is the price of a given stock on day i; and a non-negative integer fee representing a transaction fee.
# You may complete as many transactions as you like, but you need to pay the transaction fee for each transaction. You may not buy more than 1 share of a stock at a time (ie. you must sell the stock share before you buy again.)
# Return the maximum profit you can make.
# Example 1:
# Input: prices = [1, 3, 2, 8, 4, 9], fee = 2
# Output: 8
# Explanation: The maximum profit can be achieved by:
# Buying at prices[0] = 1
# Selling at prices[3] = 8
# Buying at prices[4] = 4
# Selling at prices[5] = 9
# The total profit is ((8 - 1) - 2) + ((9 - 4) - 2) = 8.
# Note:
# 0 < prices.length <= 50000.
# 0 < prices[i] < 50000.
# 0 <= fee < 50000.
# Source: LeetCode (LeetCode China)
# Link: https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee
# Copyright belongs to LeetCode Network. Commercial reprints require official authorization; non-commercial reprints must credit the source.
| class Solution(object):
def maxProfit(self, prices, fee):
"""
:type prices: List[int]
:type fee: int
:rtype: int
"""
if not prices:
return 0
sold, keep = 0, -1<<31
for price in prices:
last_sold = sold
sold = max(sold, keep + price - fee)
keep = max(keep, last_sold - price)
return sold
print(8, Solution().maxProfit([1, 3, 2, 8, 4, 9], fee = 2))
# You are given an array of integers prices, for which the i-th element is the price of a given stock on day i; and a non-negative integer fee representing a transaction fee.
# You may complete as many transactions as you like, but you need to pay the transaction fee for each transaction. You may not buy more than 1 share of a stock at a time (ie. you must sell the stock share before you buy again.)
# Return the maximum profit you can make.
# Example 1:
# Input: prices = [1, 3, 2, 8, 4, 9], fee = 2
# Output: 8
# Explanation: The maximum profit can be achieved by:
# Buying at prices[0] = 1
# Selling at prices[3] = 8
# Buying at prices[4] = 4
# Selling at prices[5] = 9
# The total profit is ((8 - 1) - 2) + ((9 - 4) - 2) = 8.
# Note:
# 0 < prices.length <= 50000.
# 0 < prices[i] < 50000.
# 0 <= fee < 50000.
# Source: LeetCode (LeetCode China)
# Link: https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee
# Copyright belongs to LeetCode Network. Commercial reprints require official authorization; non-commercial reprints must credit the source.
| en | 0.853912 | :type prices: List[int] :type fee: int :rtype: int # Your are given an array of integers prices, for which the i-th element is the price of a given stock on day i; and a non-negative integer fee representing a transaction fee. # You may complete as many transactions as you like, but you need to pay the transaction fee for each transaction. You may not buy more than 1 share of a stock at a time (ie. you must sell the stock share before you buy again.) # Return the maximum profit you can make. # Example 1: # Input: prices = [1, 3, 2, 8, 4, 9], fee = 2 # Output: 8 # Explanation: The maximum profit can be achieved by: # Buying at prices[0] = 1 # Selling at prices[3] = 8 # Buying at prices[4] = 4 # Selling at prices[5] = 9 # The total profit is ((8 - 1) - 2) + ((9 - 4) - 2) = 8. # Note: # 0 < prices.length <= 50000. # 0 < prices[i] < 50000. # 0 <= fee < 50000. # 来源:力扣(LeetCode) # 链接:https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee # 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。 | 3.673709 | 4 |
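A hand trace of the sold/keep recurrence above on the sample input makes it concrete: sold is the best profit while holding no stock, keep is the best profit while holding one share.

prices, fee = [1, 3, 2, 8, 4, 9], 2
sold, keep = 0, float('-inf')
for price in prices:
    # Both updates read the previous values, mirroring last_sold in the solution above.
    sold, keep = max(sold, keep + price - fee), max(keep, sold - price)
    print(price, sold, keep)
# Successive (price, sold, keep) values: 1 0 -1, 3 0 -1, 2 0 -1, 8 5 -1, 4 5 1, 9 8 1; the answer is sold == 8.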
lasto3dtiles/__init__.py | colspan/las-to-3d-tiles | 2 | 6632630 | <gh_stars>1-10
import lasto3dtiles.format
import lasto3dtiles.task
import lasto3dtiles.util
| import lasto3dtiles.format
import lasto3dtiles.task
import lasto3dtiles.util | none | 1 | 1.023813 | 1 |
|
portals/wwits/groups/service/wo_update_site_addr/models.py | jalanb/portals | 0 | 6632631 | <filename>portals/wwits/groups/service/wo_update_site_addr/models.py
from dataclasses import dataclass
@dataclass
class ParmModel:
UserID: str
Version: str
Env: str
Source: str
Session: int
OrderNum: str
Street: str
City: str
State: str
Zip: str
Country: str
CompanyName: str
EndCust: str
Address1: str
Address3: str
AbbrvState: str
TaxCode: str
SitePhone: str
SiteContact: str
SiteTitle: str
MapQstOvrd: bool
RC: int
ResultMsg: str
@dataclass
class WOUpdateSiteAddrModel:
Parms: ParmModel
| <filename>portals/wwits/groups/service/wo_update_site_addr/models.py
from dataclasses import dataclass
@dataclass
class ParmModel:
UserID: str
Version: str
Env: str
Source: str
Session: int
OrderNum: str
Street: str
City: str
State: str
Zip: str
Country: str
CompanyName: str
EndCust: str
Address1: str
Address3: str
AbbrvState: str
TaxCode: str
SitePhone: str
SiteContact: str
SiteTitle: str
MapQstOvrd: bool
RC: int
ResultMsg: str
@dataclass
class WOUpdateSiteAddrModel:
Parms: ParmModel
| none | 1 | 1.836916 | 2 |
|
ITcoach/DataAnalysis-master/day02/code/page48.py | ww35133634/chenxusheng | 0 | 6632632 | <gh_stars>0
# coding=utf-8
from matplotlib import pyplot as plt
from matplotlib import font_manager
interval = [0,5,10,15,20,25,30,35,40,45,60,90]
width = [5,5,5,5,5,5,5,5,5,15,30,60]
quantity = [836,2737,3723,3926,3596,1438,3273,642,824,613,215,47]
print(len(interval),len(width),len(quantity))
#设置图形大小
plt.figure(figsize=(20,8),dpi=80)
plt.bar(range(12),quantity,width=1)
#设置x轴的刻度
_x = [i-0.5 for i in range(13)]
_xtick_labels = interval+[150]
plt.xticks(_x,_xtick_labels)
plt.grid(alpha=0.4)
plt.show()
| # coding=utf-8
from matplotlib import pyplot as plt
from matplotlib import font_manager
interval = [0,5,10,15,20,25,30,35,40,45,60,90]
width = [5,5,5,5,5,5,5,5,5,15,30,60]
quantity = [836,2737,3723,3926,3596,1438,3273,642,824,613,215,47]
print(len(interval),len(width),len(quantity))
#设置图形大小
plt.figure(figsize=(20,8),dpi=80)
plt.bar(range(12),quantity,width=1)
#设置x轴的刻度
_x = [i-0.5 for i in range(13)]
_xtick_labels = interval+[150]
plt.xticks(_x,_xtick_labels)
plt.grid(alpha=0.4)
plt.show() | zh | 0.851315 | # coding=utf-8 #设置图形大小 #设置x轴的刻度 | 2.998125 | 3 |
measurepace/urls.py | kodecharlie/PeerPace | 0 | 6632633 | <filename>measurepace/urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^parameterize-performance-calculation/$', views.parameterize_performance_calculation, name='parameterize-performance-calculation'),
url(r'^identify-project$', views.identify_project, name='identify-project'),
url(r'^calculate-programmer-performance$', views.calculate_programmer_performance, name='calculate-programmer-performance'),
]
| <filename>measurepace/urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^parameterize-performance-calculation/$', views.parameterize_performance_calculation, name='parameterize-performance-calculation'),
url(r'^identify-project$', views.identify_project, name='identify-project'),
url(r'^calculate-programmer-performance$', views.calculate_programmer_performance, name='calculate-programmer-performance'),
]
| none | 1 | 1.711774 | 2 |
|
project/app/models/partner.py | rvaccari/sbgo | 0 | 6632634 | <filename>project/app/models/partner.py
from tortoise import fields
from app.models.base import BaseModel
class Partner(BaseModel):
partner_id = fields.CharField(max_length=255, unique=True)
name = fields.CharField(max_length=255)
| <filename>project/app/models/partner.py
from tortoise import fields
from app.models.base import BaseModel
class Partner(BaseModel):
partner_id = fields.CharField(max_length=255, unique=True)
name = fields.CharField(max_length=255)
| none | 1 | 2.433456 | 2 |
|
parsifal/activities/urls.py | michelav/parsifal | 1 | 6632635 | # coding: utf-8
from django.conf.urls import patterns, include, url
urlpatterns = patterns('parsifal.activities.views',
url(r'^follow/$', 'follow', name='follow'),
url(r'^unfollow/$', 'unfollow', name='unfollow'),
url(r'^update_followers_count/$', 'update_followers_count', name='update_followers_count'),
) | # coding: utf-8
from django.conf.urls import patterns, include, url
urlpatterns = patterns('parsifal.activities.views',
url(r'^follow/$', 'follow', name='follow'),
url(r'^unfollow/$', 'unfollow', name='unfollow'),
url(r'^update_followers_count/$', 'update_followers_count', name='update_followers_count'),
) | en | 0.833554 | # coding: utf-8 | 1.600509 | 2 |
mixins/models.py | rigelk/magiciendose | 2 | 6632636 | <reponame>rigelk/magiciendose<filename>mixins/models.py
from django.db import models
from pgeocode import Nominatim
nomi = Nominatim('fr')
class GeoCodeMixin(models.Model):
code_postal = models.PositiveSmallIntegerField(null=True)
code_commune_insee = models.PositiveSmallIntegerField(null=True)
@property
def departement(self):
return nomi.query_postal_code(self.code_postal).county_name
class Meta:
abstract = True
| from django.db import models
from pgeocode import Nominatim
nomi = Nominatim('fr')
class GeoCodeMixin(models.Model):
code_postal = models.PositiveSmallIntegerField(null=True)
code_commune_insee = models.PositiveSmallIntegerField(null=True)
@property
def departement(self):
return nomi.query_postal_code(self.code_postal).county_name
class Meta:
abstract = True | none | 1 | 2.261224 | 2 |
|
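The pgeocode lookup behind the departement property above, shown standalone; '75001' is just an example postal code, and the first call downloads the GeoNames data for France.

from pgeocode import Nominatim

nomi = Nominatim('fr')
record = nomi.query_postal_code('75001')       # returns a pandas Series
print(record.place_name, record.county_name)   # e.g. a Paris arrondissement, 'Paris'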
plugins/ctags_generator/ctags_generator.py | mohnjahoney/website_source | 13 | 6632637 | <filename>plugins/ctags_generator/ctags_generator.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import os
from pelican import signals
CTAGS_TEMPLATE = """{% for tag, articles in tags_articles %}
{% for article in articles %}
{{tag}}\t{{article}}\t0;"\ttag
{% endfor %}
{% endfor %}
"""
def generate_ctags(article_generator, writer):
tags_file_path = os.path.join(article_generator.path, "tags")
if article_generator.settings.get("WRITE_SELECTED"):
article_generator.settings["WRITE_SELECTED"].append(tags_file_path)
writer.output_path = article_generator.path
try:
writer.write_file(
"tags",
article_generator.env.from_string(CTAGS_TEMPLATE),
article_generator.context,
tags_articles=sorted(article_generator.tags.items()),
)
finally:
writer.output_path = article_generator.output_path
def register():
signals.article_writer_finalized.connect(generate_ctags)
| <filename>plugins/ctags_generator/ctags_generator.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import os
from pelican import signals
CTAGS_TEMPLATE = """{% for tag, articles in tags_articles %}
{% for article in articles %}
{{tag}}\t{{article}}\t0;"\ttag
{% endfor %}
{% endfor %}
"""
def generate_ctags(article_generator, writer):
tags_file_path = os.path.join(article_generator.path, "tags")
if article_generator.settings.get("WRITE_SELECTED"):
article_generator.settings["WRITE_SELECTED"].append(tags_file_path)
writer.output_path = article_generator.path
try:
writer.write_file(
"tags",
article_generator.env.from_string(CTAGS_TEMPLATE),
article_generator.context,
tags_articles=sorted(article_generator.tags.items()),
)
finally:
writer.output_path = article_generator.output_path
def register():
signals.article_writer_finalized.connect(generate_ctags)
| en | 0.31957 | # -*- coding: utf-8 -*- {% for tag, articles in tags_articles %} {% for article in articles %} {{tag}}\t{{article}}\t0;"\ttag {% endfor %} {% endfor %} | 2.2634 | 2 |
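A standalone render of a ctags-style template like CTAGS_TEMPLATE above, using plain Jinja2 instead of Pelican's article_generator.env (a sketch, not the plugin's actual call path):

from jinja2 import Environment

tpl = Environment().from_string(
    '{% for tag, articles in tags_articles %}'
    '{% for article in articles %}'
    '{{ tag }}\t{{ article }}\t0;"\ttag\n'
    '{% endfor %}{% endfor %}'
)
print(tpl.render(tags_articles=[('python', ['intro.md', 'tips.md'])]))
# Emits one tab-separated ctags line per (tag, article) pair.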
FPS/Libraries/mynodes/logicnode_definitions/value_seperate_quat.py | Simonrazer/ArmoryProjects | 1 | 6632638 | import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SeparateQuatNode(Node, ArmLogicTreeNode):
'''SeparateQuatNode'''
bl_idname = 'LNSeparateQuatNode'
bl_label = 'Separate Quat'
bl_icon = 'QUESTION'
def init(self, context):
self.inputs.new('NodeSocketVector', 'Quat')
self.outputs.new('NodeSocketFloat', 'X')
self.outputs.new('NodeSocketFloat', 'Y')
self.outputs.new('NodeSocketFloat', 'Z')
self.outputs.new('NodeSocketFloat', 'W')
self.outputs.new('NodeSocketVector', 'Euler')
add_node(SeparateQuatNode, category='Value')
| import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SeparateQuatNode(Node, ArmLogicTreeNode):
'''SeparateQuatNode'''
bl_idname = 'LNSeparateQuatNode'
bl_label = 'Separate Quat'
bl_icon = 'QUESTION'
def init(self, context):
self.inputs.new('NodeSocketVector', 'Quat')
self.outputs.new('NodeSocketFloat', 'X')
self.outputs.new('NodeSocketFloat', 'Y')
self.outputs.new('NodeSocketFloat', 'Z')
self.outputs.new('NodeSocketFloat', 'W')
self.outputs.new('NodeSocketVector', 'Euler')
add_node(SeparateQuatNode, category='Value')
| en | 0.178899 | SeparateQuatNode | 2.590006 | 3 |
tests/implementation_utils.py | btk15049/online-judge-tools | 2 | 6632639 | <reponame>btk15049/online-judge-tools<gh_stars>1-10
import bs4
import onlinejudge._implementation.logging as log
import onlinejudge._implementation.testcase_zipper
import onlinejudge._implementation.utils as utils
from onlinejudge.type import *
def get_handmade_sample_cases(self, *, html: str) -> List[TestCase]:
# parse
soup = bs4.BeautifulSoup(html, utils.html_parser)
samples = onlinejudge._implementation.testcase_zipper.SampleZipper()
for pre in soup.select('.sample pre'):
log.debug('pre %s', str(pre))
it = self._parse_sample_tag(pre)
if it is not None:
data, name = it
samples.add(data.encode(), name)
return samples.get()
| import bs4
import onlinejudge._implementation.logging as log
import onlinejudge._implementation.testcase_zipper
import onlinejudge._implementation.utils as utils
from onlinejudge.type import *
def get_handmade_sample_cases(self, *, html: str) -> List[TestCase]:
# parse
soup = bs4.BeautifulSoup(html, utils.html_parser)
samples = onlinejudge._implementation.testcase_zipper.SampleZipper()
for pre in soup.select('.sample pre'):
log.debug('pre %s', str(pre))
it = self._parse_sample_tag(pre)
if it is not None:
data, name = it
samples.add(data.encode(), name)
return samples.get() | none | 1 | 2.302789 | 2 |
|
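A minimal sketch of the bs4 selection pattern used above, run on an inline HTML snippet; the SampleZipper pairing is replaced by a plain list.

import bs4

html = '<div class="sample"><pre>1 2\n</pre><pre>3\n</pre></div>'
soup = bs4.BeautifulSoup(html, 'html.parser')
samples = [pre.get_text() for pre in soup.select('.sample pre')]
print(samples)   # ['1 2\n', '3\n']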
src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py | DiracResearch/grpc | 1 | 6632640 | <reponame>DiracResearch/grpc
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for the old '_low'."""
import threading
import time
import unittest
import six
from six.moves import queue
from grpc._adapter import _intermediary_low as _low
_STREAM_LENGTH = 300
_TIMEOUT = 5
_AFTER_DELAY = 2
_FUTURE = time.time() + 60 * 60 * 24
_BYTE_SEQUENCE = b'\abcdefghijklmnopqrstuvwxyz0123456789' * 200
_BYTE_SEQUENCE_SEQUENCE = tuple(
bytes(bytearray((row + column) % 256 for column in range(row)))
for row in range(_STREAM_LENGTH))
class LonelyClientTest(unittest.TestCase):
def testLonelyClient(self):
host = 'nosuchhostexists'
port = 54321
method = 'test method'
deadline = time.time() + _TIMEOUT
after_deadline = deadline + _AFTER_DELAY
metadata_tag = object()
finish_tag = object()
completion_queue = _low.CompletionQueue()
channel = _low.Channel('%s:%d' % (host, port), None)
client_call = _low.Call(channel, completion_queue, method, host, deadline)
client_call.invoke(completion_queue, metadata_tag, finish_tag)
first_event = completion_queue.get(after_deadline)
self.assertIsNotNone(first_event)
second_event = completion_queue.get(after_deadline)
self.assertIsNotNone(second_event)
kinds = [event.kind for event in (first_event, second_event)]
six.assertCountEqual(self,
(_low.Event.Kind.METADATA_ACCEPTED, _low.Event.Kind.FINISH),
kinds)
self.assertIsNone(completion_queue.get(after_deadline))
completion_queue.stop()
stop_event = completion_queue.get(_FUTURE)
self.assertEqual(_low.Event.Kind.STOP, stop_event.kind)
del client_call
del channel
del completion_queue
def _drive_completion_queue(completion_queue, event_queue):
while True:
event = completion_queue.get(_FUTURE)
if event.kind is _low.Event.Kind.STOP:
break
event_queue.put(event)
class EchoTest(unittest.TestCase):
def setUp(self):
self.host = 'localhost'
self.server_completion_queue = _low.CompletionQueue()
self.server = _low.Server(self.server_completion_queue)
port = self.server.add_http2_addr('[::]:0')
self.server.start()
self.server_events = queue.Queue()
self.server_completion_queue_thread = threading.Thread(
target=_drive_completion_queue,
args=(self.server_completion_queue, self.server_events))
self.server_completion_queue_thread.start()
self.client_completion_queue = _low.CompletionQueue()
self.channel = _low.Channel('%s:%d' % (self.host, port), None)
self.client_events = queue.Queue()
self.client_completion_queue_thread = threading.Thread(
target=_drive_completion_queue,
args=(self.client_completion_queue, self.client_events))
self.client_completion_queue_thread.start()
def tearDown(self):
self.server.stop()
self.server.cancel_all_calls()
self.server_completion_queue.stop()
self.client_completion_queue.stop()
self.server_completion_queue_thread.join()
self.client_completion_queue_thread.join()
del self.server
def _perform_echo_test(self, test_data):
method = 'test method'
details = 'test details'
server_leading_metadata_key = 'my_server_leading_key'
server_leading_metadata_value = 'my_server_leading_value'
server_trailing_metadata_key = 'my_server_trailing_key'
server_trailing_metadata_value = 'my_server_trailing_value'
client_metadata_key = 'my_client_key'
client_metadata_value = 'my_client_value'
server_leading_binary_metadata_key = 'my_server_leading_key-bin'
server_leading_binary_metadata_value = b'\0'*2047
server_trailing_binary_metadata_key = 'my_server_trailing_key-bin'
server_trailing_binary_metadata_value = b'\0'*2047
client_binary_metadata_key = 'my_client_key-bin'
client_binary_metadata_value = b'\0'*2047
deadline = _FUTURE
metadata_tag = object()
finish_tag = object()
write_tag = object()
complete_tag = object()
service_tag = object()
read_tag = object()
status_tag = object()
server_data = []
client_data = []
client_call = _low.Call(self.channel, self.client_completion_queue,
method, self.host, deadline)
client_call.add_metadata(client_metadata_key, client_metadata_value)
client_call.add_metadata(client_binary_metadata_key,
client_binary_metadata_value)
client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)
self.server.service(service_tag)
service_accepted = self.server_events.get()
self.assertIsNotNone(service_accepted)
self.assertIs(service_accepted.kind, _low.Event.Kind.SERVICE_ACCEPTED)
self.assertIs(service_accepted.tag, service_tag)
self.assertEqual(method, service_accepted.service_acceptance.method)
self.assertEqual(self.host, service_accepted.service_acceptance.host)
self.assertIsNotNone(service_accepted.service_acceptance.call)
metadata = dict(service_accepted.metadata)
self.assertIn(client_metadata_key, metadata)
self.assertEqual(client_metadata_value, metadata[client_metadata_key])
self.assertIn(client_binary_metadata_key, metadata)
self.assertEqual(client_binary_metadata_value,
metadata[client_binary_metadata_key])
server_call = service_accepted.service_acceptance.call
server_call.accept(self.server_completion_queue, finish_tag)
server_call.add_metadata(server_leading_metadata_key,
server_leading_metadata_value)
server_call.add_metadata(server_leading_binary_metadata_key,
server_leading_binary_metadata_value)
server_call.premetadata()
metadata_accepted = self.client_events.get()
self.assertIsNotNone(metadata_accepted)
self.assertEqual(_low.Event.Kind.METADATA_ACCEPTED, metadata_accepted.kind)
self.assertEqual(metadata_tag, metadata_accepted.tag)
metadata = dict(metadata_accepted.metadata)
self.assertIn(server_leading_metadata_key, metadata)
self.assertEqual(server_leading_metadata_value,
metadata[server_leading_metadata_key])
self.assertIn(server_leading_binary_metadata_key, metadata)
self.assertEqual(server_leading_binary_metadata_value,
metadata[server_leading_binary_metadata_key])
for datum in test_data:
client_call.write(datum, write_tag, _low.WriteFlags.WRITE_NO_COMPRESS)
write_accepted = self.client_events.get()
self.assertIsNotNone(write_accepted)
self.assertIs(write_accepted.kind, _low.Event.Kind.WRITE_ACCEPTED)
self.assertIs(write_accepted.tag, write_tag)
self.assertIs(write_accepted.write_accepted, True)
server_call.read(read_tag)
read_accepted = self.server_events.get()
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNotNone(read_accepted.bytes)
server_data.append(read_accepted.bytes)
server_call.write(read_accepted.bytes, write_tag, 0)
write_accepted = self.server_events.get()
self.assertIsNotNone(write_accepted)
self.assertEqual(_low.Event.Kind.WRITE_ACCEPTED, write_accepted.kind)
self.assertEqual(write_tag, write_accepted.tag)
self.assertTrue(write_accepted.write_accepted)
client_call.read(read_tag)
read_accepted = self.client_events.get()
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNotNone(read_accepted.bytes)
client_data.append(read_accepted.bytes)
client_call.complete(complete_tag)
complete_accepted = self.client_events.get()
self.assertIsNotNone(complete_accepted)
self.assertIs(complete_accepted.kind, _low.Event.Kind.COMPLETE_ACCEPTED)
self.assertIs(complete_accepted.tag, complete_tag)
self.assertIs(complete_accepted.complete_accepted, True)
server_call.read(read_tag)
read_accepted = self.server_events.get()
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNone(read_accepted.bytes)
server_call.add_metadata(server_trailing_metadata_key,
server_trailing_metadata_value)
server_call.add_metadata(server_trailing_binary_metadata_key,
server_trailing_binary_metadata_value)
server_call.status(_low.Status(_low.Code.OK, details), status_tag)
server_terminal_event_one = self.server_events.get()
server_terminal_event_two = self.server_events.get()
if server_terminal_event_one.kind == _low.Event.Kind.COMPLETE_ACCEPTED:
status_accepted = server_terminal_event_one
rpc_accepted = server_terminal_event_two
else:
status_accepted = server_terminal_event_two
rpc_accepted = server_terminal_event_one
self.assertIsNotNone(status_accepted)
self.assertIsNotNone(rpc_accepted)
self.assertEqual(_low.Event.Kind.COMPLETE_ACCEPTED, status_accepted.kind)
self.assertEqual(status_tag, status_accepted.tag)
self.assertTrue(status_accepted.complete_accepted)
self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
self.assertEqual(finish_tag, rpc_accepted.tag)
self.assertEqual(_low.Status(_low.Code.OK, ''), rpc_accepted.status)
client_call.read(read_tag)
client_terminal_event_one = self.client_events.get()
client_terminal_event_two = self.client_events.get()
if client_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
read_accepted = client_terminal_event_one
finish_accepted = client_terminal_event_two
else:
read_accepted = client_terminal_event_two
finish_accepted = client_terminal_event_one
self.assertIsNotNone(read_accepted)
self.assertIsNotNone(finish_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNone(read_accepted.bytes)
self.assertEqual(_low.Event.Kind.FINISH, finish_accepted.kind)
self.assertEqual(finish_tag, finish_accepted.tag)
self.assertEqual(_low.Status(_low.Code.OK, details), finish_accepted.status)
metadata = dict(finish_accepted.metadata)
self.assertIn(server_trailing_metadata_key, metadata)
self.assertEqual(server_trailing_metadata_value,
metadata[server_trailing_metadata_key])
self.assertIn(server_trailing_binary_metadata_key, metadata)
self.assertEqual(server_trailing_binary_metadata_value,
metadata[server_trailing_binary_metadata_key])
self.assertSetEqual(set(key for key, _ in finish_accepted.metadata),
set((server_trailing_metadata_key,
server_trailing_binary_metadata_key,)))
self.assertSequenceEqual(test_data, server_data)
self.assertSequenceEqual(test_data, client_data)
def testNoEcho(self):
self._perform_echo_test(())
def testOneByteEcho(self):
self._perform_echo_test([b'\x07'])
def testOneManyByteEcho(self):
self._perform_echo_test([_BYTE_SEQUENCE])
def testManyOneByteEchoes(self):
self._perform_echo_test(_BYTE_SEQUENCE)
def testManyManyByteEchoes(self):
self._perform_echo_test(_BYTE_SEQUENCE_SEQUENCE)
class CancellationTest(unittest.TestCase):
def setUp(self):
self.host = 'localhost'
self.server_completion_queue = _low.CompletionQueue()
self.server = _low.Server(self.server_completion_queue)
port = self.server.add_http2_addr('[::]:0')
self.server.start()
self.server_events = queue.Queue()
self.server_completion_queue_thread = threading.Thread(
target=_drive_completion_queue,
args=(self.server_completion_queue, self.server_events))
self.server_completion_queue_thread.start()
self.client_completion_queue = _low.CompletionQueue()
self.channel = _low.Channel('%s:%d' % (self.host, port), None)
self.client_events = queue.Queue()
self.client_completion_queue_thread = threading.Thread(
target=_drive_completion_queue,
args=(self.client_completion_queue, self.client_events))
self.client_completion_queue_thread.start()
def tearDown(self):
self.server.stop()
self.server.cancel_all_calls()
self.server_completion_queue.stop()
self.client_completion_queue.stop()
self.server_completion_queue_thread.join()
self.client_completion_queue_thread.join()
del self.server
def testCancellation(self):
method = 'test method'
deadline = _FUTURE
metadata_tag = object()
finish_tag = object()
write_tag = object()
service_tag = object()
read_tag = object()
test_data = _BYTE_SEQUENCE_SEQUENCE
server_data = []
client_data = []
client_call = _low.Call(self.channel, self.client_completion_queue,
method, self.host, deadline)
client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)
self.server.service(service_tag)
service_accepted = self.server_events.get()
server_call = service_accepted.service_acceptance.call
server_call.accept(self.server_completion_queue, finish_tag)
server_call.premetadata()
metadata_accepted = self.client_events.get()
self.assertIsNotNone(metadata_accepted)
for datum in test_data:
client_call.write(datum, write_tag, 0)
write_accepted = self.client_events.get()
server_call.read(read_tag)
read_accepted = self.server_events.get()
server_data.append(read_accepted.bytes)
server_call.write(read_accepted.bytes, write_tag, 0)
write_accepted = self.server_events.get()
self.assertIsNotNone(write_accepted)
client_call.read(read_tag)
read_accepted = self.client_events.get()
client_data.append(read_accepted.bytes)
client_call.cancel()
# cancel() is idempotent.
client_call.cancel()
client_call.cancel()
client_call.cancel()
server_call.read(read_tag)
server_terminal_event_one = self.server_events.get()
server_terminal_event_two = self.server_events.get()
if server_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
read_accepted = server_terminal_event_one
rpc_accepted = server_terminal_event_two
else:
read_accepted = server_terminal_event_two
rpc_accepted = server_terminal_event_one
self.assertIsNotNone(read_accepted)
self.assertIsNotNone(rpc_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertIsNone(read_accepted.bytes)
self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), rpc_accepted.status)
finish_event = self.client_events.get()
self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind)
self.assertEqual(_low.Status(_low.Code.CANCELLED, 'Cancelled'),
finish_event.status)
self.assertSequenceEqual(test_data, server_data)
self.assertSequenceEqual(test_data, client_data)
class ExpirationTest(unittest.TestCase):
@unittest.skip('TODO(nathaniel): Expiration test!')
def testExpiration(self):
pass
if __name__ == '__main__':
unittest.main(verbosity=2)
| # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for the old '_low'."""
import threading
import time
import unittest
import six
from six.moves import queue
from grpc._adapter import _intermediary_low as _low
_STREAM_LENGTH = 300
_TIMEOUT = 5
_AFTER_DELAY = 2
_FUTURE = time.time() + 60 * 60 * 24
_BYTE_SEQUENCE = b'\abcdefghijklmnopqrstuvwxyz0123456789' * 200
_BYTE_SEQUENCE_SEQUENCE = tuple(
bytes(bytearray((row + column) % 256 for column in range(row)))
for row in range(_STREAM_LENGTH))
class LonelyClientTest(unittest.TestCase):
def testLonelyClient(self):
host = 'nosuchhostexists'
port = 54321
method = 'test method'
deadline = time.time() + _TIMEOUT
after_deadline = deadline + _AFTER_DELAY
metadata_tag = object()
finish_tag = object()
completion_queue = _low.CompletionQueue()
channel = _low.Channel('%s:%d' % (host, port), None)
client_call = _low.Call(channel, completion_queue, method, host, deadline)
client_call.invoke(completion_queue, metadata_tag, finish_tag)
first_event = completion_queue.get(after_deadline)
self.assertIsNotNone(first_event)
second_event = completion_queue.get(after_deadline)
self.assertIsNotNone(second_event)
kinds = [event.kind for event in (first_event, second_event)]
six.assertCountEqual(self,
(_low.Event.Kind.METADATA_ACCEPTED, _low.Event.Kind.FINISH),
kinds)
self.assertIsNone(completion_queue.get(after_deadline))
completion_queue.stop()
stop_event = completion_queue.get(_FUTURE)
self.assertEqual(_low.Event.Kind.STOP, stop_event.kind)
del client_call
del channel
del completion_queue
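# Helper: pump events off a completion queue into a plain Python queue on a background thread,
# stopping once the completion queue emits a STOP event.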
def _drive_completion_queue(completion_queue, event_queue):
while True:
event = completion_queue.get(_FUTURE)
if event.kind is _low.Event.Kind.STOP:
break
event_queue.put(event)
class EchoTest(unittest.TestCase):
def setUp(self):
self.host = 'localhost'
self.server_completion_queue = _low.CompletionQueue()
self.server = _low.Server(self.server_completion_queue)
port = self.server.add_http2_addr('[::]:0')
self.server.start()
self.server_events = queue.Queue()
self.server_completion_queue_thread = threading.Thread(
target=_drive_completion_queue,
args=(self.server_completion_queue, self.server_events))
self.server_completion_queue_thread.start()
self.client_completion_queue = _low.CompletionQueue()
self.channel = _low.Channel('%s:%d' % (self.host, port), None)
self.client_events = queue.Queue()
self.client_completion_queue_thread = threading.Thread(
target=_drive_completion_queue,
args=(self.client_completion_queue, self.client_events))
self.client_completion_queue_thread.start()
def tearDown(self):
self.server.stop()
self.server.cancel_all_calls()
self.server_completion_queue.stop()
self.client_completion_queue.stop()
self.server_completion_queue_thread.join()
self.client_completion_queue_thread.join()
del self.server
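  # Drives a full client/server echo RPC over the low-level adapter, asserting on metadata,
  # payload echoes, and final status at each step of the exchange.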
def _perform_echo_test(self, test_data):
method = 'test method'
details = 'test details'
server_leading_metadata_key = 'my_server_leading_key'
server_leading_metadata_value = 'my_server_leading_value'
server_trailing_metadata_key = 'my_server_trailing_key'
server_trailing_metadata_value = 'my_server_trailing_value'
client_metadata_key = 'my_client_key'
client_metadata_value = 'my_client_value'
server_leading_binary_metadata_key = 'my_server_leading_key-bin'
server_leading_binary_metadata_value = b'\0'*2047
server_trailing_binary_metadata_key = 'my_server_trailing_key-bin'
server_trailing_binary_metadata_value = b'\0'*2047
client_binary_metadata_key = 'my_client_key-bin'
client_binary_metadata_value = b'\0'*2047
deadline = _FUTURE
metadata_tag = object()
finish_tag = object()
write_tag = object()
complete_tag = object()
service_tag = object()
read_tag = object()
status_tag = object()
server_data = []
client_data = []
client_call = _low.Call(self.channel, self.client_completion_queue,
method, self.host, deadline)
client_call.add_metadata(client_metadata_key, client_metadata_value)
client_call.add_metadata(client_binary_metadata_key,
client_binary_metadata_value)
client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)
self.server.service(service_tag)
service_accepted = self.server_events.get()
self.assertIsNotNone(service_accepted)
self.assertIs(service_accepted.kind, _low.Event.Kind.SERVICE_ACCEPTED)
self.assertIs(service_accepted.tag, service_tag)
self.assertEqual(method, service_accepted.service_acceptance.method)
self.assertEqual(self.host, service_accepted.service_acceptance.host)
self.assertIsNotNone(service_accepted.service_acceptance.call)
metadata = dict(service_accepted.metadata)
self.assertIn(client_metadata_key, metadata)
self.assertEqual(client_metadata_value, metadata[client_metadata_key])
self.assertIn(client_binary_metadata_key, metadata)
self.assertEqual(client_binary_metadata_value,
metadata[client_binary_metadata_key])
server_call = service_accepted.service_acceptance.call
server_call.accept(self.server_completion_queue, finish_tag)
server_call.add_metadata(server_leading_metadata_key,
server_leading_metadata_value)
server_call.add_metadata(server_leading_binary_metadata_key,
server_leading_binary_metadata_value)
server_call.premetadata()
metadata_accepted = self.client_events.get()
self.assertIsNotNone(metadata_accepted)
self.assertEqual(_low.Event.Kind.METADATA_ACCEPTED, metadata_accepted.kind)
self.assertEqual(metadata_tag, metadata_accepted.tag)
metadata = dict(metadata_accepted.metadata)
self.assertIn(server_leading_metadata_key, metadata)
self.assertEqual(server_leading_metadata_value,
metadata[server_leading_metadata_key])
self.assertIn(server_leading_binary_metadata_key, metadata)
self.assertEqual(server_leading_binary_metadata_value,
metadata[server_leading_binary_metadata_key])
for datum in test_data:
client_call.write(datum, write_tag, _low.WriteFlags.WRITE_NO_COMPRESS)
write_accepted = self.client_events.get()
self.assertIsNotNone(write_accepted)
self.assertIs(write_accepted.kind, _low.Event.Kind.WRITE_ACCEPTED)
self.assertIs(write_accepted.tag, write_tag)
self.assertIs(write_accepted.write_accepted, True)
server_call.read(read_tag)
read_accepted = self.server_events.get()
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNotNone(read_accepted.bytes)
server_data.append(read_accepted.bytes)
server_call.write(read_accepted.bytes, write_tag, 0)
write_accepted = self.server_events.get()
self.assertIsNotNone(write_accepted)
self.assertEqual(_low.Event.Kind.WRITE_ACCEPTED, write_accepted.kind)
self.assertEqual(write_tag, write_accepted.tag)
self.assertTrue(write_accepted.write_accepted)
client_call.read(read_tag)
read_accepted = self.client_events.get()
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNotNone(read_accepted.bytes)
client_data.append(read_accepted.bytes)
client_call.complete(complete_tag)
complete_accepted = self.client_events.get()
self.assertIsNotNone(complete_accepted)
self.assertIs(complete_accepted.kind, _low.Event.Kind.COMPLETE_ACCEPTED)
self.assertIs(complete_accepted.tag, complete_tag)
self.assertIs(complete_accepted.complete_accepted, True)
server_call.read(read_tag)
read_accepted = self.server_events.get()
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNone(read_accepted.bytes)
server_call.add_metadata(server_trailing_metadata_key,
server_trailing_metadata_value)
server_call.add_metadata(server_trailing_binary_metadata_key,
server_trailing_binary_metadata_value)
server_call.status(_low.Status(_low.Code.OK, details), status_tag)
server_terminal_event_one = self.server_events.get()
server_terminal_event_two = self.server_events.get()
if server_terminal_event_one.kind == _low.Event.Kind.COMPLETE_ACCEPTED:
status_accepted = server_terminal_event_one
rpc_accepted = server_terminal_event_two
else:
status_accepted = server_terminal_event_two
rpc_accepted = server_terminal_event_one
self.assertIsNotNone(status_accepted)
self.assertIsNotNone(rpc_accepted)
self.assertEqual(_low.Event.Kind.COMPLETE_ACCEPTED, status_accepted.kind)
self.assertEqual(status_tag, status_accepted.tag)
self.assertTrue(status_accepted.complete_accepted)
self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
self.assertEqual(finish_tag, rpc_accepted.tag)
self.assertEqual(_low.Status(_low.Code.OK, ''), rpc_accepted.status)
client_call.read(read_tag)
client_terminal_event_one = self.client_events.get()
client_terminal_event_two = self.client_events.get()
if client_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
read_accepted = client_terminal_event_one
finish_accepted = client_terminal_event_two
else:
read_accepted = client_terminal_event_two
finish_accepted = client_terminal_event_one
self.assertIsNotNone(read_accepted)
self.assertIsNotNone(finish_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNone(read_accepted.bytes)
self.assertEqual(_low.Event.Kind.FINISH, finish_accepted.kind)
self.assertEqual(finish_tag, finish_accepted.tag)
self.assertEqual(_low.Status(_low.Code.OK, details), finish_accepted.status)
metadata = dict(finish_accepted.metadata)
self.assertIn(server_trailing_metadata_key, metadata)
self.assertEqual(server_trailing_metadata_value,
metadata[server_trailing_metadata_key])
self.assertIn(server_trailing_binary_metadata_key, metadata)
self.assertEqual(server_trailing_binary_metadata_value,
metadata[server_trailing_binary_metadata_key])
self.assertSetEqual(set(key for key, _ in finish_accepted.metadata),
set((server_trailing_metadata_key,
server_trailing_binary_metadata_key,)))
self.assertSequenceEqual(test_data, server_data)
self.assertSequenceEqual(test_data, client_data)
def testNoEcho(self):
self._perform_echo_test(())
def testOneByteEcho(self):
self._perform_echo_test([b'\x07'])
def testOneManyByteEcho(self):
self._perform_echo_test([_BYTE_SEQUENCE])
def testManyOneByteEchoes(self):
self._perform_echo_test(_BYTE_SEQUENCE)
def testManyManyByteEchoes(self):
self._perform_echo_test(_BYTE_SEQUENCE_SEQUENCE)
class CancellationTest(unittest.TestCase):
def setUp(self):
self.host = 'localhost'
self.server_completion_queue = _low.CompletionQueue()
self.server = _low.Server(self.server_completion_queue)
port = self.server.add_http2_addr('[::]:0')
self.server.start()
self.server_events = queue.Queue()
self.server_completion_queue_thread = threading.Thread(
target=_drive_completion_queue,
args=(self.server_completion_queue, self.server_events))
self.server_completion_queue_thread.start()
self.client_completion_queue = _low.CompletionQueue()
self.channel = _low.Channel('%s:%d' % (self.host, port), None)
self.client_events = queue.Queue()
self.client_completion_queue_thread = threading.Thread(
target=_drive_completion_queue,
args=(self.client_completion_queue, self.client_events))
self.client_completion_queue_thread.start()
def tearDown(self):
self.server.stop()
self.server.cancel_all_calls()
self.server_completion_queue.stop()
self.client_completion_queue.stop()
self.server_completion_queue_thread.join()
self.client_completion_queue_thread.join()
del self.server
def testCancellation(self):
method = 'test method'
deadline = _FUTURE
metadata_tag = object()
finish_tag = object()
write_tag = object()
service_tag = object()
read_tag = object()
test_data = _BYTE_SEQUENCE_SEQUENCE
server_data = []
client_data = []
client_call = _low.Call(self.channel, self.client_completion_queue,
method, self.host, deadline)
client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)
self.server.service(service_tag)
service_accepted = self.server_events.get()
server_call = service_accepted.service_acceptance.call
server_call.accept(self.server_completion_queue, finish_tag)
server_call.premetadata()
metadata_accepted = self.client_events.get()
self.assertIsNotNone(metadata_accepted)
for datum in test_data:
client_call.write(datum, write_tag, 0)
write_accepted = self.client_events.get()
server_call.read(read_tag)
read_accepted = self.server_events.get()
server_data.append(read_accepted.bytes)
server_call.write(read_accepted.bytes, write_tag, 0)
write_accepted = self.server_events.get()
self.assertIsNotNone(write_accepted)
client_call.read(read_tag)
read_accepted = self.client_events.get()
client_data.append(read_accepted.bytes)
client_call.cancel()
# cancel() is idempotent.
client_call.cancel()
client_call.cancel()
client_call.cancel()
server_call.read(read_tag)
server_terminal_event_one = self.server_events.get()
server_terminal_event_two = self.server_events.get()
if server_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
read_accepted = server_terminal_event_one
rpc_accepted = server_terminal_event_two
else:
read_accepted = server_terminal_event_two
rpc_accepted = server_terminal_event_one
self.assertIsNotNone(read_accepted)
self.assertIsNotNone(rpc_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertIsNone(read_accepted.bytes)
self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), rpc_accepted.status)
finish_event = self.client_events.get()
self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind)
self.assertEqual(_low.Status(_low.Code.CANCELLED, 'Cancelled'),
finish_event.status)
self.assertSequenceEqual(test_data, server_data)
self.assertSequenceEqual(test_data, client_data)
class ExpirationTest(unittest.TestCase):
@unittest.skip('TODO(nathaniel): Expiration test!')
def testExpiration(self):
pass
if __name__ == '__main__':
unittest.main(verbosity=2) | en | 0.719406 | # Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Tests for the old '_low'. # cancel() is idempotent. | 1.413204 | 1 |
crawler.py | DeenisLan/python_crawler | 0 | 6632641 | <reponame>DeenisLan/python_crawler<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import requests
from bs4 import BeautifulSoup
from urllib.request import Request
from urllib.request import urlopen
import os
import sys
import pandas as pd
from datetime import date
today = date.today()
d = today.strftime("%Y-%b-%d")
raw_request = Request('https://stock.wespai.com/rate110')
raw_request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0')
raw_request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
resp = urlopen(raw_request)
raw_html = resp.read()
soup = BeautifulSoup(raw_html,"html.parser")
tables = soup.findAll("table")
list_header = []
header = soup.find_all("table")[0].find("tr")
past_request = Request('https://stock.wespai.com/rate109')
past_request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0')
past_request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
resppast = urlopen(past_request)
past_html = resppast.read()
souppast = BeautifulSoup(past_html,"html.parser")
pasttables = souppast.findAll("table")
for items in header:
try:
list_header.append(items.get_text())
except:
continue
tableMatrix = []
for table in tables:
    # Extract the text of every cell in each data row of the table (the header row is skipped)
list_of_rows = []
for row in table.findAll('tr')[1:]:
list_of_cells = []
for cell in row.findAll('td'):
text = cell.text.replace(' ', '')
list_of_cells.append(text)
list_of_rows.append(list_of_cells)
tableMatrix = list_of_rows
# Build a DataFrame from the rate110 table rows
dataFrame = pd.DataFrame(data = tableMatrix, columns = list_header)
tableMatrix = []
for table in pasttables:
list_of_rows = []
for row in table.findAll('tr')[1:]:
list_of_cells = []
for cell in row.findAll('td'):
text = cell.text.replace(' ', '')
list_of_cells.append(text)
if list_of_cells[0] not in dataFrame['代號'].tolist():
list_of_rows.append(list_of_cells)
tableMatrix = list_of_rows
# Build a second DataFrame from the rate109 rows whose 代號 (code) does not appear in the rate110 DataFrame
newDataFrame = pd.DataFrame(data = tableMatrix, columns = list_header)
# print(newDataFrame)
# Write the result to a dated CSV file on the Desktop
newDataFrame.to_csv('~/Desktop/'+d+'.csv', encoding='utf_8_sig') | #!/usr/bin/env python
# coding: utf-8
# In[7]:
import requests
from bs4 import BeautifulSoup
from urllib.request import Request
from urllib.request import urlopen
import os
import sys
import pandas as pd
from datetime import date
today = date.today()
d = today.strftime("%Y-%b-%d")
raw_request = Request('https://stock.wespai.com/rate110')
raw_request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0')
raw_request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
resp = urlopen(raw_request)
raw_html = resp.read()
soup = BeautifulSoup(raw_html,"html.parser")
tables = soup.findAll("table")
list_header = []
header = soup.find_all("table")[0].find("tr")
past_request = Request('https://stock.wespai.com/rate109')
past_request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0')
past_request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
resppast = urlopen(past_request)
past_html = resppast.read()
souppast = BeautifulSoup(past_html,"html.parser")
pasttables = souppast.findAll("table")
for items in header:
try:
list_header.append(items.get_text())
except:
continue
tableMatrix = []
for table in tables:
#Here you can do whatever you want with the data! You can findAll table row headers, etc...
list_of_rows = []
for row in table.findAll('tr')[1:]:
list_of_cells = []
for cell in row.findAll('td'):
text = cell.text.replace(' ', '')
list_of_cells.append(text)
list_of_rows.append(list_of_cells)
tableMatrix = list_of_rows
# DataFrame
dataFrame = pd.DataFrame(data = tableMatrix, columns = list_header)
tableMatrix = []
for table in pasttables:
list_of_rows = []
for row in table.findAll('tr')[1:]:
list_of_cells = []
for cell in row.findAll('td'):
text = cell.text.replace(' ', '')
list_of_cells.append(text)
if list_of_cells[0] not in dataFrame['代號'].tolist():
list_of_rows.append(list_of_cells)
tableMatrix = list_of_rows
# Converting Pandas DataFrame
# DataFrame
newDataFrame = pd.DataFrame(data = tableMatrix, columns = list_header)
# print(newDataFrame)
# into CSV file
newDataFrame.to_csv('~/Desktop/'+d+'.csv', encoding='utf_8_sig') | en | 0.651871 | #!/usr/bin/env python # coding: utf-8 # In[7]: #Here you can do whatever you want with the data! You can findAll table row headers, etc... # DataFrame # Converting Pandas DataFrame # DataFrame # print(newDataFrame) # into CSV file | 2.991289 | 3 |
qasrl/pipelines/afirst_pipeline_sequential.py | gililior/qasrl-modeling | 1 | 6632642 | # completely ridiculous hack to import stuff properly. somebody save me from myself
import importlib
from allennlp.common.util import import_submodules
importlib.invalidate_caches()
import sys
sys.path.append(".")
import_submodules("qasrl")
from typing import List, Iterator, Optional, Set
import torch, os, json, tarfile, argparse, uuid, shutil
import gzip
import sys
from tqdm import tqdm
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.common.util import lazy_groups_of
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.util import JsonDict, sanitize
from allennlp.common.util import get_spacy_model
from allennlp.data import Instance
from allennlp.data.dataset import Batch
from allennlp.data.fields import ListField, SpanField
from allennlp.nn.util import move_to_device
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import JsonDict, Predictor
from qasrl.common.span import Span
from qasrl.data.dataset_readers import QasrlReader
from qasrl.data.util import read_lines, get_verb_fields
from qasrl.models.span import SpanModel
from qasrl.models.span_to_question import SpanToQuestionModel
from qasrl.util.archival_utils import load_archive_from_folder
span_minimum_threshold_default = 0.3
question_minimum_threshold_default = 0.01
question_beam_size_default = 20
# Modifies original pipeline to output a hierarchical representation of the beam (span -> question).
class AFirstPipelineSequential():
"""
Span -> question pipeline. Outputs a sentence per line as a JSON object.
The output format for each sentence is:
{
"sentenceId": String,
"sentenceTokens": List[String],
"verbs": List[{
"verbIndex": Int,
"verbInflectedForms": { "stem": String, ... } # same as QA-SRL verb inflected forms
"beam": Beam
}]
}
where the sentenceId, tokens, and indices of each verb (predicate) are taken from the input
(which is expected to have these in the QA-SRL Bank 2.0 format). Inflected forms are only
included in the output if they are found in the input (as they will be when running directly on
the QA-SRL Bank). The "Beam" object contains the model predictions, and is formatted as follows:
Beam: List[{
"span": [Int, Int], # [beginning (inclusive), end (exclusive)] formatted as a 2-element list
"spanProb": Float, # the probability assigned to the span by the span detection model
"questions": List[{
"questionSlots": QuestionSlots, # JSON object with question slots, in QA-SRL Bank format
"questionProb": Float # probability assigned to the question by the question gen model
}]
}]
The beam will contain an entry for every span that receives a probability above the
`span_minimum_threshold` argument as well as any spans that were already provided in the input
JSON. The "questions" field for each span will contain an entry for every question output during
beam search. The beam has a size cutoff of `question_beam_size` and a probability cutoff of
`question_minimum_threshold`.
"""
def __init__(self,
span_model: SpanModel,
span_model_dataset_reader: QasrlReader,
span_to_question_model: SpanToQuestionModel,
span_to_question_model_dataset_reader: QasrlReader,
span_minimum_threshold: float = span_minimum_threshold_default,
question_minimum_threshold: float = question_minimum_threshold_default,
question_beam_size: int = question_beam_size_default) -> None:
self._span_model = span_model
self._span_model_dataset_reader = span_model_dataset_reader
self._span_to_question_model = span_to_question_model
self._span_to_question_model_dataset_reader = span_to_question_model_dataset_reader
self._span_minimum_threshold = span_minimum_threshold
self._question_minimum_threshold = question_minimum_threshold
self._question_beam_size = question_beam_size
# if there are no spans found in the input, that's fine; just don't add any required ones
def _get_verb_spans_for_sentence(self, inputs: JsonDict) -> List[Set[Span]]:
verb_spans = []
verb_entries = [v for _, v in inputs["verbEntries"].items()]
verb_entries = sorted(verb_entries, key = lambda v: v["verbIndex"])
for verb in verb_entries:
spans = []
if "questionLabels" in verb:
for _, question_label in verb["questionLabels"].items():
for aj in question_label["answerJudgments"]:
if aj["isValid"]:
for s in aj["spans"]:
spans.append(Span(s[0], s[1] - 1))
if "argumentSpans" in verb:
for s in verb["argumentSpans"]:
spans.append(Span(s[0], s[1] - 1))
verb_spans.append(set(spans))
return verb_spans
def predict(self, inputs: JsonDict) -> JsonDict:
# produce different sets of instances to account for
# the possibility of different token indexers as well as different vocabularies
span_verb_instances = list(self._span_model_dataset_reader.sentence_json_to_instances(inputs, verbs_only = True))
span_to_question_verb_instances = list(self._span_to_question_model_dataset_reader.sentence_json_to_instances(inputs, verbs_only = True))
verb_spans = self._get_verb_spans_for_sentence(inputs)
# get spans and ensure same order
verb_dicts = []
if len(span_verb_instances) > 0:
span_outputs = self._span_model.forward_on_instances(span_verb_instances)
for (verb_instance, span_output, ref_spans) in zip(span_to_question_verb_instances, span_outputs, verb_spans):
beam = []
scored_spans = [
(s, p)
for s, p in span_output["spans"]
if p >= self._span_minimum_threshold or s in ref_spans # always include reference spans
]
span_fields = [SpanField(span.start(), span.end(), verb_instance["text"]) for span, _ in scored_spans]
if len(span_fields) > 0:
verb_instance.index_fields(self._span_to_question_model.vocab)
verb_instance.add_field("answer_spans", ListField(span_fields), self._span_to_question_model.vocab)
qgen_input_tensors = move_to_device(
Batch([verb_instance]).as_tensor_dict(),
self._span_to_question_model._get_prediction_device())
question_beams = self._span_to_question_model.beam_decode(
text = qgen_input_tensors["text"],
predicate_indicator = qgen_input_tensors["predicate_indicator"],
predicate_index = qgen_input_tensors["predicate_index"],
answer_spans = qgen_input_tensors["answer_spans"],
max_beam_size = self._question_beam_size,
min_beam_probability = self._question_minimum_threshold)
for (span, span_prob), (_, slot_values, question_probs) in zip(scored_spans, question_beams):
scored_questions = []
for i in range(len(question_probs)):
question_slots = {
slot_name: slot_values[slot_name][i]
for slot_name in self._span_to_question_model.get_slot_names()
}
scored_questions.append({
"questionSlots": question_slots,
"questionProb": question_probs[i]
})
beam.append({
"span": [span.start(), span.end() + 1],
"spanProb": span_prob,
"questions": scored_questions
})
verb_entry = {"verbIndex": verb_instance["metadata"]["verb_index"]}
if "verb_inflected_forms" in verb_instance["metadata"]:
verb_entry["verbInflectedForms"] = verb_instance["metadata"]["verb_inflected_forms"]
verb_entry["beam"] = beam
verb_dicts.append(verb_entry)
return {
"sentenceId": inputs["sentenceId"],
"sentenceTokens": inputs["sentenceTokens"],
"verbs": verb_dicts
}
def main(span_model_path: str,
span_to_question_model_path: str,
cuda_device: int,
input_file: str,
output_file: str,
span_min_prob: float,
question_min_prob: float,
question_beam_size: int) -> None:
check_for_gpu(cuda_device)
span_model_archive = load_archive(
span_model_path,
cuda_device = cuda_device,
overrides = '{ "model": { "span_selector": {"span_decoding_threshold": 0.00} } }',
# weights_file = os.path.join(span_model_path, "best.th")
)
# override span detection threshold to be low enough so we can reasonably approximate bad spans
# as having probability 0.
span_to_question_model_archive = load_archive(
span_to_question_model_path,
cuda_device = cuda_device,
# weights_file = os.path.join(span_to_question_model_path, "best.th")
)
span_model_dataset_reader_params = span_model_archive.config["dataset_reader"].duplicate()
span_model_dataset_reader_params["qasrl_filter"]["allow_all"] = True
span_to_question_model_dataset_reader_params = span_to_question_model_archive.config["dataset_reader"].duplicate()
span_to_question_model_dataset_reader_params["qasrl_filter"]["allow_all"] = True
pipeline = AFirstPipelineSequential(
span_model = span_model_archive.model,
span_model_dataset_reader = DatasetReader.from_params(span_model_dataset_reader_params),
span_to_question_model = span_to_question_model_archive.model,
span_to_question_model_dataset_reader = DatasetReader.from_params(span_to_question_model_dataset_reader_params),
span_minimum_threshold = span_min_prob,
question_minimum_threshold = question_min_prob,
question_beam_size = question_beam_size)
if output_file is None:
for line in tqdm(read_lines(cached_path(input_file))):
input_json = json.loads(line)
output_json = pipeline.predict(input_json)
print(json.dumps(output_json))
elif output_file.endswith('.gz'):
with gzip.open(output_file, 'wt') as f:
for line in tqdm(read_lines(cached_path(input_file))):
input_json = json.loads(line)
output_json = pipeline.predict(input_json)
f.write(json.dumps(output_json))
f.write('\n')
else:
with open(output_file, 'w', encoding = 'utf8') as out:
for line in tqdm(read_lines(cached_path(input_file))):
input_json = json.loads(line)
output_json = pipeline.predict(input_json)
print(json.dumps(output_json), file = out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Run the answer-first pipeline")
parser.add_argument('--span', type=str, help = "Path to span detector model serialization dir.")
parser.add_argument('--span_to_question', type=str, help = "Path to span-to-question generator serialization dir.")
parser.add_argument('--cuda_device', type=int, default=-1)
parser.add_argument('--input_file', type=str)
parser.add_argument('--output_file', type=str, default = None)
parser.add_argument('--span_min_prob', type=float, default = span_minimum_threshold_default)
parser.add_argument('--question_min_prob', type=float, default = question_minimum_threshold_default)
parser.add_argument('--question_beam_size', type=int, default = question_beam_size_default)
args = parser.parse_args()
main(span_model_path = args.span,
span_to_question_model_path = args.span_to_question,
cuda_device = args.cuda_device,
input_file = args.input_file,
output_file = args.output_file,
span_min_prob = args.span_min_prob,
question_min_prob = args.question_min_prob,
question_beam_size = args.question_beam_size)
| # completely ridiculous hack to import stuff properly. somebody save me from myself
import importlib
from allennlp.common.util import import_submodules
importlib.invalidate_caches()
import sys
sys.path.append(".")
import_submodules("qasrl")
from typing import List, Iterator, Optional, Set
import torch, os, json, tarfile, argparse, uuid, shutil
import gzip
import sys
from tqdm import tqdm
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.common.util import lazy_groups_of
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.util import JsonDict, sanitize
from allennlp.common.util import get_spacy_model
from allennlp.data import Instance
from allennlp.data.dataset import Batch
from allennlp.data.fields import ListField, SpanField
from allennlp.nn.util import move_to_device
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import JsonDict, Predictor
from qasrl.common.span import Span
from qasrl.data.dataset_readers import QasrlReader
from qasrl.data.util import read_lines, get_verb_fields
from qasrl.models.span import SpanModel
from qasrl.models.span_to_question import SpanToQuestionModel
from qasrl.util.archival_utils import load_archive_from_folder
span_minimum_threshold_default = 0.3
question_minimum_threshold_default = 0.01
question_beam_size_default = 20
# Modifies original pipeline to output a hierarchical representation of the beam (span -> question).
class AFirstPipelineSequential():
"""
Span -> question pipeline. Outputs a sentence per line as a JSON object.
The output format for each sentence is:
{
"sentenceId": String,
"sentenceTokens": List[String],
"verbs": List[{
"verbIndex": Int,
"verbInflectedForms": { "stem": String, ... } # same as QA-SRL verb inflected forms
"beam": Beam
}]
}
where the sentenceId, tokens, and indices of each verb (predicate) are taken from the input
(which is expected to have these in the QA-SRL Bank 2.0 format). Inflected forms are only
included in the output if they are found in the input (as they will be when running directly on
the QA-SRL Bank). The "Beam" object contains the model predictions, and is formatted as follows:
Beam: List[{
"span": [Int, Int], # [beginning (inclusive), end (exclusive)] formatted as a 2-element list
"spanProb": Float, # the probability assigned to the span by the span detection model
"questions": List[{
"questionSlots": QuestionSlots, # JSON object with question slots, in QA-SRL Bank format
"questionProb": Float # probability assigned to the question by the question gen model
}]
}]
The beam will contain an entry for every span that receives a probability above the
`span_minimum_threshold` argument as well as any spans that were already provided in the input
JSON. The "questions" field for each span will contain an entry for every question output during
beam search. The beam has a size cutoff of `question_beam_size` and a probability cutoff of
`question_minimum_threshold`.
"""
def __init__(self,
span_model: SpanModel,
span_model_dataset_reader: QasrlReader,
span_to_question_model: SpanToQuestionModel,
span_to_question_model_dataset_reader: QasrlReader,
span_minimum_threshold: float = span_minimum_threshold_default,
question_minimum_threshold: float = question_minimum_threshold_default,
question_beam_size: int = question_beam_size_default) -> None:
self._span_model = span_model
self._span_model_dataset_reader = span_model_dataset_reader
self._span_to_question_model = span_to_question_model
self._span_to_question_model_dataset_reader = span_to_question_model_dataset_reader
self._span_minimum_threshold = span_minimum_threshold
self._question_minimum_threshold = question_minimum_threshold
self._question_beam_size = question_beam_size
# if there are no spans found in the input, that's fine; just don't add any required ones
def _get_verb_spans_for_sentence(self, inputs: JsonDict) -> List[Set[Span]]:
verb_spans = []
verb_entries = [v for _, v in inputs["verbEntries"].items()]
verb_entries = sorted(verb_entries, key = lambda v: v["verbIndex"])
for verb in verb_entries:
spans = []
if "questionLabels" in verb:
for _, question_label in verb["questionLabels"].items():
for aj in question_label["answerJudgments"]:
if aj["isValid"]:
for s in aj["spans"]:
spans.append(Span(s[0], s[1] - 1))
if "argumentSpans" in verb:
for s in verb["argumentSpans"]:
spans.append(Span(s[0], s[1] - 1))
verb_spans.append(set(spans))
return verb_spans
def predict(self, inputs: JsonDict) -> JsonDict:
# produce different sets of instances to account for
# the possibility of different token indexers as well as different vocabularies
span_verb_instances = list(self._span_model_dataset_reader.sentence_json_to_instances(inputs, verbs_only = True))
span_to_question_verb_instances = list(self._span_to_question_model_dataset_reader.sentence_json_to_instances(inputs, verbs_only = True))
verb_spans = self._get_verb_spans_for_sentence(inputs)
# get spans and ensure same order
verb_dicts = []
if len(span_verb_instances) > 0:
span_outputs = self._span_model.forward_on_instances(span_verb_instances)
for (verb_instance, span_output, ref_spans) in zip(span_to_question_verb_instances, span_outputs, verb_spans):
beam = []
scored_spans = [
(s, p)
for s, p in span_output["spans"]
if p >= self._span_minimum_threshold or s in ref_spans # always include reference spans
]
span_fields = [SpanField(span.start(), span.end(), verb_instance["text"]) for span, _ in scored_spans]
if len(span_fields) > 0:
verb_instance.index_fields(self._span_to_question_model.vocab)
verb_instance.add_field("answer_spans", ListField(span_fields), self._span_to_question_model.vocab)
qgen_input_tensors = move_to_device(
Batch([verb_instance]).as_tensor_dict(),
self._span_to_question_model._get_prediction_device())
question_beams = self._span_to_question_model.beam_decode(
text = qgen_input_tensors["text"],
predicate_indicator = qgen_input_tensors["predicate_indicator"],
predicate_index = qgen_input_tensors["predicate_index"],
answer_spans = qgen_input_tensors["answer_spans"],
max_beam_size = self._question_beam_size,
min_beam_probability = self._question_minimum_threshold)
for (span, span_prob), (_, slot_values, question_probs) in zip(scored_spans, question_beams):
scored_questions = []
for i in range(len(question_probs)):
question_slots = {
slot_name: slot_values[slot_name][i]
for slot_name in self._span_to_question_model.get_slot_names()
}
scored_questions.append({
"questionSlots": question_slots,
"questionProb": question_probs[i]
})
beam.append({
"span": [span.start(), span.end() + 1],
"spanProb": span_prob,
"questions": scored_questions
})
verb_entry = {"verbIndex": verb_instance["metadata"]["verb_index"]}
if "verb_inflected_forms" in verb_instance["metadata"]:
verb_entry["verbInflectedForms"] = verb_instance["metadata"]["verb_inflected_forms"]
verb_entry["beam"] = beam
verb_dicts.append(verb_entry)
return {
"sentenceId": inputs["sentenceId"],
"sentenceTokens": inputs["sentenceTokens"],
"verbs": verb_dicts
}
def main(span_model_path: str,
span_to_question_model_path: str,
cuda_device: int,
input_file: str,
output_file: str,
span_min_prob: float,
question_min_prob: float,
question_beam_size: int) -> None:
check_for_gpu(cuda_device)
span_model_archive = load_archive(
span_model_path,
cuda_device = cuda_device,
overrides = '{ "model": { "span_selector": {"span_decoding_threshold": 0.00} } }',
# weights_file = os.path.join(span_model_path, "best.th")
)
# override span detection threshold to be low enough so we can reasonably approximate bad spans
# as having probability 0.
span_to_question_model_archive = load_archive(
span_to_question_model_path,
cuda_device = cuda_device,
# weights_file = os.path.join(span_to_question_model_path, "best.th")
)
span_model_dataset_reader_params = span_model_archive.config["dataset_reader"].duplicate()
span_model_dataset_reader_params["qasrl_filter"]["allow_all"] = True
span_to_question_model_dataset_reader_params = span_to_question_model_archive.config["dataset_reader"].duplicate()
span_to_question_model_dataset_reader_params["qasrl_filter"]["allow_all"] = True
pipeline = AFirstPipelineSequential(
span_model = span_model_archive.model,
span_model_dataset_reader = DatasetReader.from_params(span_model_dataset_reader_params),
span_to_question_model = span_to_question_model_archive.model,
span_to_question_model_dataset_reader = DatasetReader.from_params(span_to_question_model_dataset_reader_params),
span_minimum_threshold = span_min_prob,
question_minimum_threshold = question_min_prob,
question_beam_size = question_beam_size)
if output_file is None:
for line in tqdm(read_lines(cached_path(input_file))):
input_json = json.loads(line)
output_json = pipeline.predict(input_json)
print(json.dumps(output_json))
elif output_file.endswith('.gz'):
with gzip.open(output_file, 'wt') as f:
for line in tqdm(read_lines(cached_path(input_file))):
input_json = json.loads(line)
output_json = pipeline.predict(input_json)
f.write(json.dumps(output_json))
f.write('\n')
else:
with open(output_file, 'w', encoding = 'utf8') as out:
for line in tqdm(read_lines(cached_path(input_file))):
input_json = json.loads(line)
output_json = pipeline.predict(input_json)
print(json.dumps(output_json), file = out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Run the answer-first pipeline")
parser.add_argument('--span', type=str, help = "Path to span detector model serialization dir.")
parser.add_argument('--span_to_question', type=str, help = "Path to span-to-question generator serialization dir.")
parser.add_argument('--cuda_device', type=int, default=-1)
parser.add_argument('--input_file', type=str)
parser.add_argument('--output_file', type=str, default = None)
parser.add_argument('--span_min_prob', type=float, default = span_minimum_threshold_default)
parser.add_argument('--question_min_prob', type=float, default = question_minimum_threshold_default)
parser.add_argument('--question_beam_size', type=int, default = question_beam_size_default)
args = parser.parse_args()
main(span_model_path = args.span,
span_to_question_model_path = args.span_to_question,
cuda_device = args.cuda_device,
input_file = args.input_file,
output_file = args.output_file,
span_min_prob = args.span_min_prob,
question_min_prob = args.question_min_prob,
question_beam_size = args.question_beam_size)
| en | 0.899541 | # completely ridiculous hack to import stuff properly. somebody save me from myself # Modifies original pipeline to output a hierarchical representation of the beam (span -> question). Span -> question pipeline. Outputs a sentence per line as a JSON object. The output format for each sentence is: { "sentenceId": String, "sentenceTokens": List[String], "verbs": List[{ "verbIndex": Int, "verbInflectedForms": { "stem": String, ... } # same as QA-SRL verb inflected forms "beam": Beam }] } where the sentenceId, tokens, and indices of each verb (predicate) are taken from the input (which is expected to have these in the QA-SRL Bank 2.0 format). Inflected forms are only included in the output if they are found in the input (as they will be when running directly on the QA-SRL Bank). The "Beam" object contains the model predictions, and is formatted as follows: Beam: List[{ "span": [Int, Int], # [beginning (inclusive), end (exclusive)] formatted as a 2-element list "spanProb": Float, # the probability assigned to the span by the span detection model "questions": List[{ "questionSlots": QuestionSlots, # JSON object with question slots, in QA-SRL Bank format "questionProb": Float # probability assigned to the question by the question gen model }] }] The beam will contain an entry for every span that receives a probability above the `span_minimum_threshold` argument as well as any spans that were already provided in the input JSON. The "questions" field for each span will contain an entry for every question output during beam search. The beam has a size cutoff of `question_beam_size` and a probability cutoff of `question_minimum_threshold`. # if there are no spans found in the input, that's fine; just don't add any required ones # produce different sets of instances to account for # the possibility of different token indexers as well as different vocabularies # get spans and ensure same order # always include reference spans # weights_file = os.path.join(span_model_path, "best.th") # override span detection threshold to be low enough so we can reasonably approximate bad spans # as having probability 0. # weights_file = os.path.join(span_to_question_model_path, "best.th") | 1.970061 | 2 |
Leetcode/0121. Best Time to Buy and Sell Stock/0121.py | Next-Gen-UI/Code-Dynamics | 0 | 6632643 | class Solution:
def maxProfit(self, prices: List[int]) -> int:
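    # Single pass: holdOne tracks the best value of holding a share bought so far (i.e. -min price),
    # and sellOne tracks the best profit from selling at or before the current price.
    # O(n) time, O(1) extra space.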
sellOne = 0
holdOne = -math.inf
for price in prices:
sellOne = max(sellOne, holdOne + price)
holdOne = max(holdOne, -price)
return sellOne
| class Solution:
def maxProfit(self, prices: List[int]) -> int:
sellOne = 0
holdOne = -math.inf
for price in prices:
sellOne = max(sellOne, holdOne + price)
holdOne = max(holdOne, -price)
return sellOne
| none | 1 | 3.213969 | 3 |
|
urduhack/conll/tests/__init__.py | cinfotech94/urduhackk | 252 | 6632644 | # coding: utf8
"""Conll test cases"""
| # coding: utf8
"""Conll test cases"""
| ca | 0.394952 | # coding: utf8 Conll test cases | 1.001009 | 1 |
iceworm/engine/tests/test_dual.py | wrmsr0/iceworm | 0 | 6632645 | <gh_stars>0
from omnibus import check
from .. import connectors as ctrs
from .. import elements as els
def test_dual():
elements = els.ElementSet.of([
ctrs.impls.dual.DualConnector.Config(),
])
connectors = ctrs.ConnectorSet.of(elements.get_type_set(ctrs.Connector.Config))
conn = check.isinstance(connectors['dual'], ctrs.impls.dual.DualConnector).connect()
rows = list(conn.create_row_source('select * from dual').produce_rows())
assert rows == [{'dummy': 'x'}]
| from omnibus import check
from .. import connectors as ctrs
from .. import elements as els
def test_dual():
elements = els.ElementSet.of([
ctrs.impls.dual.DualConnector.Config(),
])
connectors = ctrs.ConnectorSet.of(elements.get_type_set(ctrs.Connector.Config))
conn = check.isinstance(connectors['dual'], ctrs.impls.dual.DualConnector).connect()
rows = list(conn.create_row_source('select * from dual').produce_rows())
assert rows == [{'dummy': 'x'}] | none | 1 | 2.151895 | 2 |
|
tests/test_attribute_copy.py | iAndriy/marshmallow_dataclass | 342 | 6632646 | <gh_stars>100-1000
import dataclasses
import unittest
import marshmallow.validate
from marshmallow_dataclass import class_schema
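# These tests pin down which class attributes class_schema() carries over onto the generated
# marshmallow schema: marshmallow-specific hooks (the Meta class, @validates_schema methods)
# should be copied, while ordinary user-defined methods and properties should not.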
class TestAttributesCopy(unittest.TestCase):
def test_meta_class_copied(self):
@dataclasses.dataclass
class Anything:
class Meta:
pass
schema = class_schema(Anything)
self.assertEqual(schema.Meta, Anything.Meta)
def test_validates_schema_copied(self):
@dataclasses.dataclass
class Anything:
@marshmallow.validates_schema
def validates_schema(self, *args, **kwargs):
pass
schema = class_schema(Anything)
self.assertIn("validates_schema", dir(schema))
def test_custom_method_not_copied(self):
@dataclasses.dataclass
class Anything:
def custom_method(self):
pass
schema = class_schema(Anything)
self.assertNotIn("custom_method", dir(schema))
def test_custom_property_not_copied(self):
@dataclasses.dataclass
class Anything:
@property
def custom_property(self):
return 42
schema = class_schema(Anything)
self.assertNotIn("custom_property", dir(schema))
if __name__ == "__main__":
unittest.main()
| import dataclasses
import unittest
import marshmallow.validate
from marshmallow_dataclass import class_schema
class TestAttributesCopy(unittest.TestCase):
def test_meta_class_copied(self):
@dataclasses.dataclass
class Anything:
class Meta:
pass
schema = class_schema(Anything)
self.assertEqual(schema.Meta, Anything.Meta)
def test_validates_schema_copied(self):
@dataclasses.dataclass
class Anything:
@marshmallow.validates_schema
def validates_schema(self, *args, **kwargs):
pass
schema = class_schema(Anything)
self.assertIn("validates_schema", dir(schema))
def test_custom_method_not_copied(self):
@dataclasses.dataclass
class Anything:
def custom_method(self):
pass
schema = class_schema(Anything)
self.assertNotIn("custom_method", dir(schema))
def test_custom_property_not_copied(self):
@dataclasses.dataclass
class Anything:
@property
def custom_property(self):
return 42
schema = class_schema(Anything)
self.assertNotIn("custom_property", dir(schema))
if __name__ == "__main__":
unittest.main() | none | 1 | 2.589873 | 3 |
|
messagebird/voice_recording.py | smadivad/sreemessage | 57 | 6632647 | <filename>messagebird/voice_recording.py
from messagebird.base import Base
class VoiceRecording(Base):
def __init__(self):
self.id = None
self.format = None
self.type = None
self.legId = None
self.status = None
self.duration = None
self._createdDatetime = None
self._updatedDatetime = None
self._links = None
@property
def createdDatetime(self):
return self._createdDatetime
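    # The setter below is named after the raw API field ("createdAt"), which appears intentional:
    # it lets Base.load() map the API payload onto the createdDatetime property. The same pattern
    # is used for updatedAt/updatedDatetime further down.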
@createdDatetime.setter
def createdAt(self, value):
if value is not None:
self._createdDatetime = self.value_to_time(value, '%Y-%m-%dT%H:%M:%SZ')
@property
def updatedDatetime(self):
return self._updatedDatetime
@updatedDatetime.setter
def updatedAt(self, value):
if value is not None:
self._updatedDatetime = self.value_to_time(value, '%Y-%m-%dT%H:%M:%SZ')
def __str__(self):
return "\n".join([
'recording id : %s' % self.id,
'format : %s' % self.format,
'type : %s' % self.type,
'leg id : %s' % self.legId,
'status : %s' % self.status,
'duration : %s' % self.duration,
'created date time : %s' % self._createdDatetime,
'updated date time : %s' % self._updatedDatetime,
'links : %s' % self._links
])
class VoiceRecordingsList(Base):
def __init__(self):
self._items = None
@property
def data(self):
return self._items
@data.setter
def data(self, value):
if isinstance(value, list):
self._items = []
for item in value:
self._items.append(VoiceRecording().load(item))
def __str__(self):
item_ids = []
if self._items is not None:
for recording_item in self._items:
item_ids.append(recording_item.id)
return "\n".join([
'items IDs : %s' % item_ids,
'count : %s' % len(item_ids)
])
| <filename>messagebird/voice_recording.py
from messagebird.base import Base
class VoiceRecording(Base):
def __init__(self):
self.id = None
self.format = None
self.type = None
self.legId = None
self.status = None
self.duration = None
self._createdDatetime = None
self._updatedDatetime = None
self._links = None
@property
def createdDatetime(self):
return self._createdDatetime
@createdDatetime.setter
def createdAt(self, value):
if value is not None:
self._createdDatetime = self.value_to_time(value, '%Y-%m-%dT%H:%M:%SZ')
@property
def updatedDatetime(self):
return self._updatedDatetime
@updatedDatetime.setter
def updatedAt(self, value):
if value is not None:
self._updatedDatetime = self.value_to_time(value, '%Y-%m-%dT%H:%M:%SZ')
def __str__(self):
return "\n".join([
'recording id : %s' % self.id,
'format : %s' % self.format,
'type : %s' % self.type,
'leg id : %s' % self.legId,
'status : %s' % self.status,
'duration : %s' % self.duration,
'created date time : %s' % self._createdDatetime,
'updated date time : %s' % self._updatedDatetime,
'links : %s' % self._links
])
class VoiceRecordingsList(Base):
def __init__(self):
self._items = None
@property
def data(self):
return self._items
@data.setter
def data(self, value):
if isinstance(value, list):
self._items = []
for item in value:
self._items.append(VoiceRecording().load(item))
def __str__(self):
item_ids = []
if self._items is not None:
for recording_item in self._items:
item_ids.append(recording_item.id)
return "\n".join([
'items IDs : %s' % item_ids,
'count : %s' % len(item_ids)
])
| none | 1 | 2.668937 | 3 |
|
tests/settings.py | jdolter/django-tagulous | 0 | 6632648 | """
Django settings for test project.
"""
import os
import re
import django
testenv = re.sub(
r"[^a-zA-Z0-9]",
"_",
os.environ.get("TOXENV", "_".join(str(v) for v in django.VERSION)),
)
tests_migration_module_name = "migrations_{}".format(testenv)
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.sessions",
"django.contrib.contenttypes",
"django.contrib.messages",
"tagulous",
"tests",
"tests.tagulous_tests_app",
"tests.tagulous_tests_app2",
"tests.tagulousTestsApp3",
"tests.tagulous_tests_migration",
]
MIDDLEWARE = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
SECRET_KEY = "secret"
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
ROOT_URLCONF = "tests.tagulous_tests_app.urls"
SERIALIZATION_MODULES = {
"xml": "tagulous.serializers.xml_serializer",
"json": "tagulous.serializers.json",
"python": "tagulous.serializers.python",
}
# If pyyaml is installed, add to serialisers
try:
import yaml # noqa
except ImportError:
pass
else:
SERIALIZATION_MODULES["yaml"] = "tagulous.serializers.pyyaml"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.request",
],
},
}
]
MIGRATION_MODULES = {
"tagulous_tests_migration": "tests.tagulous_tests_migration.{}".format(
tests_migration_module_name
),
}
TAGULOUS_NAME_MAX_LENGTH = 191
# Build database settings
DATABASE = {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
engine = os.environ.get("DATABASE_ENGINE")
if engine:
if engine == "postgresql":
DATABASE["ENGINE"] = "django.db.backends.postgresql_psycopg2"
DATABASE["HOST"] = "localhost"
elif engine == "mysql":
DATABASE["ENGINE"] = "django.db.backends.mysql"
# Make sure test DB is going to be UTF8
DATABASE_TEST = {"TEST": {"CHARSET": "utf8", "COLLATION": "utf8_general_ci"}}
DATABASE.update(DATABASE_TEST)
else:
raise ValueError("Unknown database engine")
DATABASE["NAME"] = os.environ.get("DATABASE_NAME", "test_tagulous_%s" % testenv)
for key in ["USER", "PASSWORD", "HOST", "PORT"]:
if "DATABASE_" + key in os.environ:
DATABASE[key] = os.environ["DATABASE_" + key]
DATABASES = {"default": DATABASE, "test": DATABASE}
# Make sure the django migration loader will find MIGRATION_MODULES
# This will be cleaned away after each test
tests_migration_path = os.path.join(
os.path.dirname(__file__),
"tagulous_tests_migration",
)
if not os.path.isdir(tests_migration_path):
raise ValueError("tests.tagulous_tests_migration not found")
tests_migration_module_path = os.path.join(
tests_migration_path,
tests_migration_module_name,
)
if not os.path.exists(tests_migration_module_path):
os.mkdir(tests_migration_module_path)
tests_migration_module_init = os.path.join(
tests_migration_module_path,
"__init__.py",
)
if not os.path.exists(tests_migration_module_init):
open(tests_migration_module_init, "a").close()
| """
Django settings for test project.
"""
import os
import re
import django
testenv = re.sub(
r"[^a-zA-Z0-9]",
"_",
os.environ.get("TOXENV", "_".join(str(v) for v in django.VERSION)),
)
tests_migration_module_name = "migrations_{}".format(testenv)
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.sessions",
"django.contrib.contenttypes",
"django.contrib.messages",
"tagulous",
"tests",
"tests.tagulous_tests_app",
"tests.tagulous_tests_app2",
"tests.tagulousTestsApp3",
"tests.tagulous_tests_migration",
]
MIDDLEWARE = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
SECRET_KEY = "secret"
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
ROOT_URLCONF = "tests.tagulous_tests_app.urls"
SERIALIZATION_MODULES = {
"xml": "tagulous.serializers.xml_serializer",
"json": "tagulous.serializers.json",
"python": "tagulous.serializers.python",
}
# If pyyaml is installed, add to serialisers
try:
import yaml # noqa
except ImportError:
pass
else:
SERIALIZATION_MODULES["yaml"] = "tagulous.serializers.pyyaml"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.request",
],
},
}
]
MIGRATION_MODULES = {
"tagulous_tests_migration": "tests.tagulous_tests_migration.{}".format(
tests_migration_module_name
),
}
TAGULOUS_NAME_MAX_LENGTH = 191
# Build database settings
DATABASE = {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
engine = os.environ.get("DATABASE_ENGINE")
if engine:
if engine == "postgresql":
DATABASE["ENGINE"] = "django.db.backends.postgresql_psycopg2"
DATABASE["HOST"] = "localhost"
elif engine == "mysql":
DATABASE["ENGINE"] = "django.db.backends.mysql"
# Make sure test DB is going to be UTF8
DATABASE_TEST = {"TEST": {"CHARSET": "utf8", "COLLATION": "utf8_general_ci"}}
DATABASE.update(DATABASE_TEST)
else:
raise ValueError("Unknown database engine")
DATABASE["NAME"] = os.environ.get("DATABASE_NAME", "test_tagulous_%s" % testenv)
for key in ["USER", "PASSWORD", "HOST", "PORT"]:
if "DATABASE_" + key in os.environ:
DATABASE[key] = os.environ["DATABASE_" + key]
DATABASES = {"default": DATABASE, "test": DATABASE}
# Make sure the django migration loader will find MIGRATION_MODULES
# This will be cleaned away after each test
tests_migration_path = os.path.join(
os.path.dirname(__file__),
"tagulous_tests_migration",
)
if not os.path.isdir(tests_migration_path):
raise ValueError("tests.tagulous_tests_migration not found")
tests_migration_module_path = os.path.join(
tests_migration_path,
tests_migration_module_name,
)
if not os.path.exists(tests_migration_module_path):
os.mkdir(tests_migration_module_path)
tests_migration_module_init = os.path.join(
tests_migration_module_path,
"__init__.py",
)
if not os.path.exists(tests_migration_module_init):
open(tests_migration_module_init, "a").close()
| en | 0.810623 | Django settings for test project. # If pyyaml is installed, add to serialisers # noqa # Build database settings # Make sure test DB is going to be UTF8 # Make sure the django migration loader will find MIGRATION_MODULES # This will be cleaned away after each test | 2.097349 | 2 |
examples/Draw.py | nodedge/pyqtgraph | 1 | 6632649 | # -*- coding: utf-8 -*-
"""
Demonstrate ability of ImageItem to be used as a canvas for painting with
the mouse.
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
app = pg.mkQApp("Draw Example")
## Create window with GraphicsView widget
w = pg.GraphicsView()
w.show()
w.resize(800,800)
w.setWindowTitle('pyqtgraph example: Draw')
view = pg.ViewBox()
w.setCentralItem(view)
## lock the aspect ratio
view.setAspectLocked(True)
## Create image item
img = pg.ImageItem(np.zeros((200,200)))
view.addItem(img)
## Set initial view bounds
view.setRange(QtCore.QRectF(0, 0, 200, 200))
## start drawing with 3x3 brush
kern = np.array([
[0.0, 0.5, 0.0],
[0.5, 1.0, 0.5],
[0.0, 0.5, 0.0]
])
img.setDrawKernel(kern, mask=kern, center=(1,1), mode='add')
img.setLevels([0, 10])
if __name__ == '__main__':
pg.mkQApp().exec_()
| # -*- coding: utf-8 -*-
"""
Demonstrate ability of ImageItem to be used as a canvas for painting with
the mouse.
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
app = pg.mkQApp("Draw Example")
## Create window with GraphicsView widget
w = pg.GraphicsView()
w.show()
w.resize(800,800)
w.setWindowTitle('pyqtgraph example: Draw')
view = pg.ViewBox()
w.setCentralItem(view)
## lock the aspect ratio
view.setAspectLocked(True)
## Create image item
img = pg.ImageItem(np.zeros((200,200)))
view.addItem(img)
## Set initial view bounds
view.setRange(QtCore.QRectF(0, 0, 200, 200))
## start drawing with 3x3 brush
kern = np.array([
[0.0, 0.5, 0.0],
[0.5, 1.0, 0.5],
[0.0, 0.5, 0.0]
])
img.setDrawKernel(kern, mask=kern, center=(1,1), mode='add')
img.setLevels([0, 10])
if __name__ == '__main__':
pg.mkQApp().exec_()
| en | 0.847118 | # -*- coding: utf-8 -*- Demonstrate ability of ImageItem to be used as a canvas for painting with the mouse. ## Add path to library (just for examples; you do not need this) ## Create window with GraphicsView widget ## lock the aspect ratio ## Create image item ## Set initial view bounds ## start drawing with 3x3 brush | 3.013595 | 3 |
Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/purearrays/ex08_add_one_example.py | Kreijeck/learning | 0 | 6632650 | <reponame>Kreijeck/learning<filename>Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/purearrays/ex08_add_one_example.py
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by <NAME>
import numpy as np
from ch06_arrays.solutions.ex08_add_one import add_one
def main():
# Wrap result in numpy array
result = add_one(np.array([1, 3, 2, 4]))
result_as_array = np.array(result)
print(result) # [1, 3, 2, 5]
print(result_as_array) # [1 3 2 5]
print(add_one(np.array([1, 4, 8, 9]))) # [1, 4, 9, 0]
print(add_one(np.array([9, 9, 9, 9]))) # [1, 0, 0, 0, 0]
if __name__ == "__main__":
main()
| # Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by <NAME>
import numpy as np
from ch06_arrays.solutions.ex08_add_one import add_one
def main():
# Wrap result in numpy array
result = add_one(np.array([1, 3, 2, 4]))
result_as_array = np.array(result)
print(result) # [1, 3, 2, 5]
print(result_as_array) # [1 3 2 5]
print(add_one(np.array([1, 4, 8, 9]))) # [1, 4, 9, 0]
print(add_one(np.array([9, 9, 9, 9]))) # [1, 0, 0, 0, 0]
if __name__ == "__main__":
main() | en | 0.377414 | # Beispielprogramm für das Buch "Python Challenge" # # Copyright 2020 by <NAME> # Wrap result in numpy array # [1, 3, 2, 5] # [1 3 2 5] # [1, 4, 9, 0] # [1, 0, 0, 0, 0] | 3.790612 | 4 |