Dataset schema (one record per source file). The rows that follow list these 29 fields in order; `content` holds the raw file and the last three columns are per-file text statistics.

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1–1.03M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3–239 |
| max_stars_repo_name | string | length 5–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | sequence | length 1–10 |
| max_stars_count | int64 | 1–191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3–239 |
| max_issues_repo_name | string | length 5–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | sequence | length 1–10 |
| max_issues_count | int64 | 1–67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3–239 |
| max_forks_repo_name | string | length 5–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | sequence | length 1–10 |
| max_forks_count | int64 | 1–105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 1–1.03M |
| avg_line_length | float64 | 1–958k |
| max_line_length | int64 | 1–1.03M |
| alphanum_fraction | float64 | 0–1 |
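The records below are raw rows of this table, with the `content` cell holding the full source file. A minimal sketch of loading and filtering a dump with this schema; the parquet file name and storage format are assumptions, and only the column names come from the schema above:

```python
import pandas as pd

# Assumption: the records are stored as a single parquet file with the columns above.
df = pd.read_parquet("data.parquet")

# Keep small Python files whose text is mostly alphanumeric.
mask = (df["ext"] == "py") & (df["size"] < 50_000) & (df["alphanum_fraction"] > 0.5)
cols = ["hexsha", "max_stars_repo_name", "max_stars_repo_path", "max_stars_count", "content"]
for _, row in df.loc[mask, cols].head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], len(row["content"]))
```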
793f0e1647881b1e34f40a457f912611bda2613c | 1,913 | py | Python | contrib/devtools/check-doc.py | cxccoin/cxc | afef248557cb315a99453a0622d502c79528ca57 | ["MIT"] | null | null | null | contrib/devtools/check-doc.py | cxccoin/cxc | afef248557cb315a99453a0622d502c79528ca57 | ["MIT"] | null | null | null | contrib/devtools/check-doc.py | cxccoin/cxc | afef248557cb315a99453a0622d502c79528ca57 | ["MIT"] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizekoipayamount'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
docd = check_output(CMD_GREP_DOCS, shell=True)
args_used = set(re.findall(REGEX_ARG,used))
args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print "Args used : %s" % len(args_used)
print "Args documented : %s" % len(args_docd)
print "Args undocumented: %s" % len(args_need_doc)
print args_need_doc
print "Args unknown : %s" % len(args_unknown)
print args_unknown
exit(len(args_need_doc))
if __name__ == "__main__":
main()
| 42.511111 | 301 | 0.690538 |
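check-doc.py above boils down to two regular-expression scans over the source tree followed by a set difference. A self-contained sketch of that comparison, with the patterns copied from the script and two made-up C++ lines standing in for the `git grep` output:

```python
import re

REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')

used_line = 'if (GetBoolArg("-daemon", false)) {'                                # arg read in code
doc_line = 'strUsage += HelpMessageOpt("-daemon", _("Run in the background"));'  # arg documented

args_used = set(REGEX_ARG.findall(used_line))   # {'-daemon'}
args_docd = set(REGEX_DOC.findall(doc_line))    # {'-daemon'}
print(args_used.difference(args_docd))          # set(): nothing undocumented
```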
793f0e538dac32c7bd2067f089e31a80fb30376c | 1,827 | py | Python | src/my/kadenze/lesson1/GaussianTF.py | AlfredNeverKog/BrainCarya | 2ee065a1fbface0c993c3ddfd0ca2cea236bbad5 | ["MIT"] | null | null | null | src/my/kadenze/lesson1/GaussianTF.py | AlfredNeverKog/BrainCarya | 2ee065a1fbface0c993c3ddfd0ca2cea236bbad5 | ["MIT"] | null | null | null | src/my/kadenze/lesson1/GaussianTF.py | AlfredNeverKog/BrainCarya | 2ee065a1fbface0c993c3ddfd0ca2cea236bbad5 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
sess = tf.InteractiveSession()  # interactive session so Tensor.eval() works without passing a session explicitly
sigma = 3.0
mean = 0
def gaus(x):
y = (1 / (sigma * tf.sqrt(2.0 * 3.14))) * tf.exp(tf.neg((tf.pow(x - mean, 2.0)) / (2 * tf.pow(sigma, 2.0))))
return y
def geus2d():
x = tf.linspace(-5.0,5.0,3)
y = gaus(x)
plt.plot(x.eval(), y.eval())
plt.show()
def gaus3d():
x = tf.linspace(-5.0, 5.0, 150)
y = gaus(x)
size = x.get_shape().as_list()[0]
gaus2d = tf.matmul(tf.reshape(y, [size, 1]), tf.reshape(y, [1, size]))
plt.imshow(gaus2d.eval())
plt.show()
def animation():
from matplotlib import animation
import random
fig = plt.figure()
ax = plt.axes()
line = ax.imshow([[]])
def animate(size):
global mean
        print(size, mean)
size = 300
mean += ((random.random() / 5) * (-1.0 if random.random() > .5 else 1.0))
x = tf.linspace(-5.0, 5.0, size + 1)
y = (1 / (sigma * tf.sqrt(2.0 * 3.14))) * tf.exp(tf.neg((tf.pow(x - mean, 2.0)) / (2 * tf.pow(sigma, 2.0))))
size = x.get_shape().as_list()[0]
gaus2d = tf.matmul(tf.reshape(y, [size, 1]), tf.reshape(y, [1, size]))
val = gaus2d.eval()
return ax.imshow(val),
"""
animate quality
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=60, interval=1, blit=True)
anim.save('gausian_quality.mp4', fps=3, extra_args=['-vcodec', 'libx264'])
plt.show()
sigma = 1.0
mean = 0.0
"""
"""
animate(5)
anim = animation.FuncAnimation(fig, animate,
frames=20, interval=1, blit=True)
anim.save('gausian_move.mp4', fps=5, extra_args=['-vcodec', 'libx264'])
plt.show()
"""
gaus3d()
| 26.867647 | 116 | 0.539683 |
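The 2-D kernel in `gaus3d` is just the outer product of the 1-D Gaussian with itself, which is what the `tf.matmul` of the two reshaped tensors computes. The same construction in plain NumPy, with the same arbitrary sigma and grid:

```python
import numpy as np

sigma, mean = 3.0, 0.0
x = np.linspace(-5.0, 5.0, 150)
y = np.exp(-((x - mean) ** 2) / (2.0 * sigma ** 2)) / (sigma * np.sqrt(2.0 * np.pi))

gaus2d = np.outer(y, y)   # equivalent to matmul of the (150, 1) and (1, 150) reshapes
print(gaus2d.shape)       # (150, 150)
```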
793f0e538efd2fecfd7d60e59f1f5d83e9acedf4 | 31,307 | py | Python | nltk/parse/transitionparser.py | oplatek/nltk | 7216fb5aac79153c14015a5a234d34c2327c3188 | ["Apache-2.0"] | null | null | null | nltk/parse/transitionparser.py | oplatek/nltk | 7216fb5aac79153c14015a5a234d34c2327c3188 | ["Apache-2.0"] | null | null | null | nltk/parse/transitionparser.py | oplatek/nltk | 7216fb5aac79153c14015a5a234d34c2327c3188 | ["Apache-2.0"] | null | null | null |
# Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers
#
# Author: Long Duong <[email protected]>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import tempfile
import pickle
from os import remove
from copy import deepcopy
from operator import itemgetter
try:
from scipy import sparse
from numpy import array
from sklearn.datasets import load_svmlight_file
from sklearn import svm
except ImportError:
pass
from nltk.parse import ParserI, DependencyGraph, DependencyEvaluator
class Configuration(object):
"""
Class for holding configuration which is the partial analysis of the input sentence.
The transition based parser aims at finding set of operators that transfer the initial
configuration to the terminal configuration.
The configuration includes:
- Stack: for storing partially proceeded words
- Buffer: for storing remaining input words
- Set of arcs: for storing partially built dependency tree
This class also provides a method to represent a configuration as list of features.
"""
def __init__(self, dep_graph):
"""
:param dep_graph: the representation of an input in the form of dependency graph.
:type dep_graph: DependencyGraph where the dependencies are not specified.
"""
# dep_graph.nodes contain list of token for a sentence
self.stack = [0] # The root element
self.buffer = range(
1, len(
dep_graph.nodes)) # The rest is in the buffer
self.arcs = [] # empty set of arc
self._tokens = dep_graph.nodes
self._max_address = len(self.buffer)
def __str__(self):
return 'Stack : ' + \
str(self.stack) + ' Buffer : ' + str(self.buffer) + ' Arcs : ' + str(self.arcs)
def _check_informative(self, feat, flag=False):
"""
Check whether a feature is informative
The flag control whether "_" is informative or not
"""
if feat is None:
return False
if feat == '':
return False
if flag is False:
if feat == '_':
return False
return True
def extract_features(self):
"""
Extract the set of features for the current configuration. Implement standard features as described in
Table 3.2 (page 31) in the Dependency Parsing book by Sandra Kubler, Ryan McDonald, Joakim Nivre.
Please note that these features are very basic.
:return: list(str)
"""
result = []
# Todo : can come up with more complicated features set for better
# performance.
if len(self.stack) > 0:
# Stack 0
stack_idx0 = self.stack[len(self.stack) - 1]
token = self._tokens[stack_idx0]
if self._check_informative(token['word'], True):
result.append('STK_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('STK_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('STK_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('STK_0_FEATS_' + feat)
# Stack 1
if len(self.stack) > 1:
stack_idx1 = self.stack[len(self.stack) - 2]
token = self._tokens[stack_idx1]
if self._check_informative(token['tag']):
result.append('STK_1_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == stack_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('STK_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('STK_0_RDEP_' + dep_right_most)
# Check Buffered 0
if len(self.buffer) > 0:
# Buffer 0
buffer_idx0 = self.buffer[0]
token = self._tokens[buffer_idx0]
if self._check_informative(token['word'], True):
result.append('BUF_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('BUF_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('BUF_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('BUF_0_FEATS_' + feat)
# Buffer 1
if len(self.buffer) > 1:
buffer_idx1 = self.buffer[1]
token = self._tokens[buffer_idx1]
if self._check_informative(token['word'], True):
result.append('BUF_1_FORM_' + token['word'])
if self._check_informative(token['tag']):
result.append('BUF_1_POS_' + token['tag'])
if len(self.buffer) > 2:
buffer_idx2 = self.buffer[2]
token = self._tokens[buffer_idx2]
if self._check_informative(token['tag']):
result.append('BUF_2_POS_' + token['tag'])
if len(self.buffer) > 3:
buffer_idx3 = self.buffer[3]
token = self._tokens[buffer_idx3]
if self._check_informative(token['tag']):
result.append('BUF_3_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == buffer_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('BUF_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('BUF_0_RDEP_' + dep_right_most)
return result
class Transition(object):
"""
This class defines a set of transition which is applied to a configuration to get another configuration
Note that for different parsing algorithm, the transition is different.
"""
# Define set of transitions
LEFT_ARC = 'LEFTARC'
RIGHT_ARC = 'RIGHTARC'
SHIFT = 'SHIFT'
REDUCE = 'REDUCE'
def __init__(self, alg_option):
"""
:param alg_option: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
:type alg_option: str
"""
self._algo = alg_option
if alg_option not in [
TransitionParser.ARC_STANDARD,
TransitionParser.ARC_EAGER]:
raise ValueError(" Currently we only support %s and %s " %
(TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER))
def left_arc(self, conf, relation):
"""
Note that the algorithm for left-arc is quite similar except for precondition for both arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if conf.buffer[0] == 0:
# here is the Root element
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = True
if self._algo == TransitionParser.ARC_EAGER:
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = False
if flag:
conf.stack.pop()
idx_wj = conf.buffer[0]
conf.arcs.append((idx_wj, relation, idx_wi))
else:
return -1
def right_arc(self, conf, relation):
"""
Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if self._algo == TransitionParser.ARC_STANDARD:
idx_wi = conf.stack.pop()
idx_wj = conf.buffer[0]
conf.buffer[0] = idx_wi
conf.arcs.append((idx_wi, relation, idx_wj))
else: # arc-eager
idx_wi = conf.stack[len(conf.stack) - 1]
idx_wj = conf.buffer.pop(0)
conf.stack.append(idx_wj)
conf.arcs.append((idx_wi, relation, idx_wj))
def reduce(self, conf):
"""
Note that the algorithm for reduce is only available for arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if self._algo != TransitionParser.ARC_EAGER:
return -1
if len(conf.stack) <= 0:
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = False
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = True
if flag:
conf.stack.pop() # reduce it
else:
return -1
def shift(self, conf):
"""
Note that the algorithm for shift is the SAME for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if len(conf.buffer) <= 0:
return -1
idx_wi = conf.buffer.pop(0)
conf.stack.append(idx_wi)
class TransitionParser(ParserI):
"""
Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager"
"""
ARC_STANDARD = 'arc-standard'
ARC_EAGER = 'arc-eager'
def __init__(self, algorithm):
"""
:param algorithm: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
:type algorithm: str
"""
if not(algorithm in [self.ARC_STANDARD, self.ARC_EAGER]):
raise ValueError(" Currently we only support %s and %s " %
(self.ARC_STANDARD, self.ARC_EAGER))
self._algorithm = algorithm
self._dictionary = {}
self._transition = {}
self._match_transition = {}
def _get_dep_relation(self, idx_parent, idx_child, depgraph):
p_node = depgraph.nodes[idx_parent]
c_node = depgraph.nodes[idx_child]
if c_node['word'] is None:
return None # Root word
if c_node['head'] == p_node['address']:
return c_node['rel']
else:
return None
def _convert_to_binary_features(self, features):
"""
:param features: list of feature string which is needed to convert to binary features
:type features: list(str)
:return : string of binary features in libsvm format which is 'featureID:value' pairs
"""
unsorted_result = []
for feature in features:
self._dictionary.setdefault(feature, len(self._dictionary))
unsorted_result.append(self._dictionary[feature])
# Default value of each feature is 1.0
return ' '.join(str(featureID) + ':1.0' for featureID in sorted(unsorted_result))
def _is_projective(self, depgraph):
arc_list = []
for key in depgraph.nodes:
node = depgraph.nodes[key]
if 'head' in node:
childIdx = node['address']
parentIdx = node['head']
arc_list.append((parentIdx, childIdx))
for (parentIdx, childIdx) in arc_list:
# Ensure that childIdx < parentIdx
if childIdx > parentIdx:
temp = childIdx
childIdx = parentIdx
parentIdx = temp
for k in range(childIdx + 1, parentIdx):
for m in range(len(depgraph.nodes)):
if (m < childIdx) or (m > parentIdx):
if (k, m) in arc_list:
return False
if (m, k) in arc_list:
return False
return True
def _write_to_file(self, key, binary_features, input_file):
"""
write the binary features to input file and update the transition dictionary
"""
self._transition.setdefault(key, len(self._transition) + 1)
self._match_transition[self._transition[key]] = key
input_str = str(self._transition[key]) + ' ' + binary_features + '\n'
input_file.write(input_str.encode('utf-8'))
def _create_training_examples_arc_std(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
Reference : Page 32, Chapter 3. Dependency Parsing by Sandra Kubler, Ryan McDonald and Joakim Nivre (2009)
"""
operation = Transition(self.ARC_STANDARD)
count_proj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
count_proj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
precondition = True
# Get the max-index of buffer
maxID = conf._max_address
for w in range(maxID + 1):
if w != b0:
relw = self._get_dep_relation(b0, w, depgraph)
if relw is not None:
if (b0, relw, w) not in conf.arcs:
precondition = False
if precondition:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(
key,
binary_features,
input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(count_proj))
return training_seq
def _create_training_examples_arc_eager(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Yoav Goldberg and Joakim Nivre
"""
operation = Transition(self.ARC_EAGER)
countProj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
countProj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# reduce operation
flag = False
for k in range(s0):
if self._get_dep_relation(k, b0, depgraph) is not None:
flag = True
if self._get_dep_relation(b0, k, depgraph) is not None:
flag = True
if flag:
key = Transition.REDUCE
self._write_to_file(key, binary_features, input_file)
operation.reduce(conf)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(countProj))
return training_seq
def train(self, depgraphs, modelfile):
"""
:param depgraphs : list of DependencyGraph as the training data
:type depgraphs : DependencyGraph
:param modelfile : file name to save the trained model
:type modelfile : str
"""
try:
input_file = tempfile.NamedTemporaryFile(
prefix='transition_parse.train',
dir=tempfile.gettempdir(),
delete=False)
if self._algorithm == self.ARC_STANDARD:
self._create_training_examples_arc_std(depgraphs, input_file)
else:
self._create_training_examples_arc_eager(depgraphs, input_file)
input_file.close()
# Using the temporary file to train the libsvm classifier
x_train, y_train = load_svmlight_file(input_file.name)
# The parameter is set according to the paper:
# Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
# Todo : because of probability = True => very slow due to
# cross-validation. Need to improve the speed here
model = svm.SVC(
kernel='poly',
degree=2,
coef0=0,
gamma=0.2,
C=0.5,
verbose=True,
probability=True)
model.fit(x_train, y_train)
# Save the model to file name (as pickle)
pickle.dump(model, open(modelfile, 'wb'))
finally:
remove(input_file.name)
def parse(self, depgraphs, modelFile):
"""
:param depgraphs: the list of test sentence, each sentence is represented as a dependency graph where the 'head' information is dummy
:type depgraphs: list(DependencyGraph)
:param modelfile: the model file
:type modelfile: str
:return: list (DependencyGraph) with the 'head' and 'rel' information
"""
result = []
# First load the model
model = pickle.load(open(modelFile, 'rb'))
operation = Transition(self._algorithm)
for depgraph in depgraphs:
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
features = conf.extract_features()
col = []
row = []
data = []
for feature in features:
if feature in self._dictionary:
col.append(self._dictionary[feature])
row.append(0)
data.append(1.0)
np_col = array(sorted(col)) # NB : index must be sorted
np_row = array(row)
np_data = array(data)
x_test = sparse.csr_matrix((np_data, (np_row, np_col)), shape=(1, len(self._dictionary)))
# It's best to use decision function as follow BUT it's not supported yet for sparse SVM
# Using decision funcion to build the votes array
#dec_func = model.decision_function(x_test)[0]
#votes = {}
#k = 0
# for i in range(len(model.classes_)):
# for j in range(i+1, len(model.classes_)):
# #if dec_func[k] > 0:
# votes.setdefault(i,0)
# votes[i] +=1
# else:
# votes.setdefault(j,0)
# votes[j] +=1
# k +=1
# Sort votes according to the values
#sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True)
# We will use predict_proba instead of decision_function
prob_dict = {}
pred_prob = model.predict_proba(x_test)[0]
for i in range(len(pred_prob)):
prob_dict[i] = pred_prob[i]
sorted_Prob = sorted(
prob_dict.items(),
key=itemgetter(1),
reverse=True)
# Note that SHIFT is always a valid operation
for (y_pred_idx, confidence) in sorted_Prob:
#y_pred = model.predict(x_test)[0]
# From the prediction match to the operation
y_pred = model.classes_[y_pred_idx]
if y_pred in self._match_transition:
strTransition = self._match_transition[y_pred]
baseTransition = strTransition.split(":")[0]
if baseTransition == Transition.LEFT_ARC:
if operation.left_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.RIGHT_ARC:
if operation.right_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.REDUCE:
if operation.reduce(conf) != -1:
break
elif baseTransition == Transition.SHIFT:
if operation.shift(conf) != -1:
break
else:
raise ValueError("The predicted transition is not recognized, expected errors")
# Finish with operations build the dependency graph from Conf.arcs
new_depgraph = deepcopy(depgraph)
for key in new_depgraph.nodes:
node = new_depgraph.nodes[key]
node['rel'] = ''
# With the default, all the token depend on the Root
node['head'] = 0
for (head, rel, child) in conf.arcs:
c_node = new_depgraph.nodes[child]
c_node['head'] = head
c_node['rel'] = rel
result.append(new_depgraph)
return result
def demo():
"""
>>> from nltk.parse import DependencyGraph, DependencyEvaluator
>>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition
>>> gold_sent = DependencyGraph(\"""
... Economic JJ 2 ATT
... news NN 3 SBJ
... has VBD 0 ROOT
... little JJ 5 ATT
... effect NN 3 OBJ
... on IN 5 ATT
... financial JJ 8 ATT
... markets NNS 6 PC
... . . 3 PU
... \""")
>>> conf = Configuration(gold_sent)
###################### Check the Initial Feature ########################
>>> print(conf.extract_features())
[u'STK_0_POS_TOP', u'BUF_0_FORM_Economic', u'BUF_0_LEMMA_Economic', u'BUF_0_POS_JJ', u'BUF_1_FORM_news', u'BUF_1_POS_NN', u'BUF_2_POS_VBD', u'BUF_3_POS_JJ']
###################### Check The Transition #######################
Check the Initialized Configuration
>>> print(conf)
Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : []
A. Do some transition checks for ARC-STANDARD
>>> operation = Transition('arc-standard')
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.left_arc(conf,"SBJ")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
Middle Configuration and Features Check
>>> print(conf)
Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)]
>>> print(conf.extract_features())
[u'STK_0_FORM_on', u'STK_0_LEMMA_on', u'STK_0_POS_IN', u'STK_1_POS_NN', u'BUF_0_FORM_markets', u'BUF_0_LEMMA_markets', u'BUF_0_POS_NNS', u'BUF_1_FORM_.', u'BUF_1_POS_.', 'BUF_0_LDEP_ATT']
>>> operation.right_arc(conf, "PC")
>>> operation.right_arc(conf, "ATT")
>>> operation.right_arc(conf, "OBJ")
>>> operation.shift(conf)
>>> operation.right_arc(conf, "PU")
>>> operation.right_arc(conf, "ROOT")
>>> operation.shift(conf)
Terminated Configuration Check
>>> print(conf)
Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)]
B. Do some transition checks for ARC-EAGER
>>> conf = Configuration(gold_sent)
>>> operation = Transition('arc-eager')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'SBJ')
>>> operation.right_arc(conf,'ROOT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'OBJ')
>>> operation.right_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'PC')
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.right_arc(conf,'PU')
>>> print(conf)
Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)]
###################### Check The Training Function #######################
A. Check the ARC-STANDARD training
>>> import tempfile
>>> import os
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False)
>>> parser_std = TransitionParser('arc-standard')
>>> parser_std._create_training_examples_arc_std([gold_sent], input_file)
Number of training examples : 1
Number of valid (projective) examples : 1
['SHIFT', u'LEFTARC:ATT', 'SHIFT', u'LEFTARC:SBJ', 'SHIFT', 'SHIFT', u'LEFTARC:ATT', 'SHIFT', 'SHIFT', 'SHIFT', u'LEFTARC:ATT', u'RIGHTARC:PC', u'RIGHTARC:ATT', u'RIGHTARC:OBJ', 'SHIFT', u'RIGHTARC:PU', u'RIGHTARC:ROOT', 'SHIFT']
>>> parser_std.train([gold_sent],'temp.arcstd.model')
Number of training examples : 1
Number of valid (projective) examples : 1
...
>>> remove(input_file.name)
B. Check the ARC-EAGER training
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False)
>>> parser_eager = TransitionParser('arc-eager')
>>> parser_eager._create_training_examples_arc_eager([gold_sent], input_file)
Number of training examples : 1
Number of valid (projective) examples : 1
['SHIFT', u'LEFTARC:ATT', 'SHIFT', u'LEFTARC:SBJ', u'RIGHTARC:ROOT', 'SHIFT', u'LEFTARC:ATT', u'RIGHTARC:OBJ', u'RIGHTARC:ATT', 'SHIFT', u'LEFTARC:ATT', u'RIGHTARC:PC', 'REDUCE', 'REDUCE', 'REDUCE', u'RIGHTARC:PU']
>>> parser_eager.train([gold_sent],'temp.arceager.model')
Number of training examples : 1
Number of valid (projective) examples : 1
...
>>> remove(input_file.name)
###################### Check The Parsing Function ########################
A. Check the ARC-STANDARD parser
>>> result = parser_std.parse([gold_sent], 'temp.arcstd.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> print(de.eval())
(0.125, 0.0)
B. Check the ARC-EAGER parser
>>> result = parser_eager.parse([gold_sent], 'temp.arceager.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> print(de.eval())
(0.0, 0.0)
Note that result is very poor because of only one training example.
"""
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
| 40.553109 | 233 | 0.544032 |
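The doctest above exercises the transitions on one hand-built graph. As a rough usage sketch on real data, assuming NLTK's `dependency_treebank` corpus has been downloaded and scipy/scikit-learn are installed (the model file name and tree counts are arbitrary, and training is slow because of `probability=True`):

```python
from nltk.corpus import dependency_treebank
from nltk.parse import DependencyEvaluator
from nltk.parse.transitionparser import TransitionParser

graphs = dependency_treebank.parsed_sents()[:30]   # small training set for illustration

parser = TransitionParser('arc-standard')
parser.train(graphs, 'arc_standard.model')

parsed = parser.parse(graphs[:10], 'arc_standard.model')
las, uas = DependencyEvaluator(parsed, graphs[:10]).eval()
print(las, uas)
```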
793f0e5c2d0d697eb55583645c21f6e4f7579522 | 13,089 | py | Python | wand/sequence.py | Harmon758/wand | 84bd4a20a315b76b467d5c62ffabc4f7577e9008 | [
"MIT"
] | null | null | null | wand/sequence.py | Harmon758/wand | 84bd4a20a315b76b467d5c62ffabc4f7577e9008 | [
"MIT"
] | null | null | null | wand/sequence.py | Harmon758/wand | 84bd4a20a315b76b467d5c62ffabc4f7577e9008 | [
"MIT"
] | null | null | null | """:mod:`wand.sequence` --- Sequences
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.3.0
"""
import contextlib
import ctypes
import numbers
from .api import libmagick, library
from .compat import abc, binary, xrange
from .image import BaseImage, ImageProperty
from .version import MAGICK_VERSION_INFO
__all__ = 'Sequence', 'SingleImage'
class Sequence(ImageProperty, abc.MutableSequence):
"""The list-like object that contains every :class:`SingleImage`
in the :class:`~wand.image.Image` container. It implements
:class:`collections.abc.Sequence` protocol.
.. versionadded:: 0.3.0
"""
def __init__(self, image):
super(Sequence, self).__init__(image)
self.instances = []
def __del__(self):
for instance in self.instances:
if instance is not None:
instance.c_resource = None
@property
def current_index(self):
"""(:class:`numbers.Integral`) The current index of
its internal iterator.
.. note::
It's only for internal use.
"""
return library.MagickGetIteratorIndex(self.image.wand)
@current_index.setter
def current_index(self, index):
library.MagickSetIteratorIndex(self.image.wand, index)
@contextlib.contextmanager
def index_context(self, index):
"""Scoped setter of :attr:`current_index`. Should be
used for :keyword:`with` statement e.g.::
with image.sequence.index_context(3):
print(image.size)
.. note::
It's only for internal use.
"""
index = self.validate_position(index)
tmp_idx = self.current_index
self.current_index = index
yield index
self.current_index = tmp_idx
def __len__(self):
return library.MagickGetNumberImages(self.image.wand)
def validate_position(self, index):
if not isinstance(index, numbers.Integral):
raise TypeError('index must be integer, not ' + repr(index))
length = len(self)
if index >= length or index < -length:
raise IndexError(
'out of index: {0} (total: {1})'.format(index, length)
)
if index < 0:
index += length
return index
def validate_slice(self, slice_, as_range=False):
if not (slice_.step is None or slice_.step == 1):
raise ValueError('slicing with step is unsupported')
length = len(self)
if slice_.start is None:
start = 0
elif slice_.start < 0:
start = length + slice_.start
else:
start = slice_.start
start = min(length, start)
if slice_.stop is None:
stop = 0
elif slice_.stop < 0:
stop = length + slice_.stop
else:
stop = slice_.stop
stop = min(length, stop or length)
return xrange(start, stop) if as_range else slice(start, stop, None)
def __getitem__(self, index):
if isinstance(index, slice):
slice_ = self.validate_slice(index)
return [self[i] for i in xrange(slice_.start, slice_.stop)]
index = self.validate_position(index)
instances = self.instances
instances_length = len(instances)
if index < instances_length:
instance = instances[index]
if (instance is not None and
getattr(instance, 'c_resource', None) is not None):
return instance
else:
number_to_extend = index - instances_length + 1
instances.extend(None for _ in xrange(number_to_extend))
wand = self.image.wand
tmp_idx = library.MagickGetIteratorIndex(wand)
library.MagickSetIteratorIndex(wand, index)
image = library.GetImageFromMagickWand(wand)
exc = libmagick.AcquireExceptionInfo()
single_image = libmagick.CloneImages(image, binary(str(index)), exc)
libmagick.DestroyExceptionInfo(exc)
single_wand = library.NewMagickWandFromImage(single_image)
single_image = libmagick.DestroyImage(single_image)
library.MagickSetIteratorIndex(wand, tmp_idx)
instance = SingleImage(single_wand, self.image, image)
self.instances[index] = instance
return instance
def __setitem__(self, index, image):
if isinstance(index, slice):
tmp_idx = self.current_index
slice_ = self.validate_slice(index)
del self[slice_]
self.extend(image, offset=slice_.start)
self.current_index = tmp_idx
else:
if not isinstance(image, BaseImage):
raise TypeError('image must be an instance of wand.image.'
'BaseImage, not ' + repr(image))
with self.index_context(index) as index:
library.MagickRemoveImage(self.image.wand)
library.MagickAddImage(self.image.wand, image.wand)
def __delitem__(self, index):
if isinstance(index, slice):
range_ = self.validate_slice(index, as_range=True)
for i in reversed(range_):
del self[i]
else:
with self.index_context(index) as index:
library.MagickRemoveImage(self.image.wand)
if index < len(self.instances):
del self.instances[index]
def insert(self, index, image):
try:
index = self.validate_position(index)
except IndexError:
index = len(self)
if not isinstance(image, BaseImage):
raise TypeError('image must be an instance of wand.image.'
'BaseImage, not ' + repr(image))
if not self:
library.MagickAddImage(self.image.wand, image.wand)
elif index == 0:
tmp_idx = self.current_index
self_wand = self.image.wand
wand = image.sequence[0].wand
try:
# Prepending image into the list using MagickSetFirstIterator()
# and MagickAddImage() had not worked properly, but was fixed
# since 6.7.6-0 (rev7106).
if MAGICK_VERSION_INFO >= (6, 7, 6, 0):
library.MagickSetFirstIterator(self_wand)
library.MagickAddImage(self_wand, wand)
else: # pragma: no cover
self.current_index = 0
library.MagickAddImage(self_wand,
self.image.sequence[0].wand)
self.current_index = 0
library.MagickAddImage(self_wand, wand)
self.current_index = 0
library.MagickRemoveImage(self_wand)
finally:
self.current_index = tmp_idx
else:
with self.index_context(index - 1):
library.MagickAddImage(self.image.wand, image.sequence[0].wand)
self.instances.insert(index, None)
def append(self, image):
if not isinstance(image, BaseImage):
raise TypeError('image must be an instance of wand.image.'
'BaseImage, not ' + repr(image))
wand = self.image.wand
tmp_idx = self.current_index
try:
library.MagickSetLastIterator(wand)
library.MagickAddImage(wand, image.sequence[0].wand)
finally:
self.current_index = tmp_idx
self.instances.append(None)
def extend(self, images, offset=None):
tmp_idx = self.current_index
wand = self.image.wand
length = 0
try:
if offset is None:
library.MagickSetLastIterator(self.image.wand)
else:
if offset == 0:
images = iter(images)
self.insert(0, next(images))
offset += 1
self.current_index = offset - 1
if isinstance(images, type(self)):
library.MagickAddImage(wand, images.image.wand)
length = len(images)
else:
delta = 1 if MAGICK_VERSION_INFO >= (6, 7, 6, 0) else 2
for image in images:
if not isinstance(image, BaseImage):
raise TypeError(
'images must consist of only instances of '
'wand.image.BaseImage, not ' + repr(image)
)
else:
library.MagickAddImage(wand, image.sequence[0].wand)
self.instances = []
if offset is None:
library.MagickSetLastIterator(self.image.wand)
else:
self.current_index += delta
length += 1
finally:
self.current_index = tmp_idx
null_list = [None] * length
if offset is None:
self.instances[offset:] = null_list
else:
self.instances[offset:offset] = null_list
def _repr_png_(self): # pragma: no cover
library.MagickResetIterator(self.image.wand)
repr_wand = library.MagickAppendImages(self.image.wand, 1)
length = ctypes.c_size_t()
blob_p = library.MagickGetImagesBlob(repr_wand,
ctypes.byref(length))
if blob_p and length.value:
blob = ctypes.string_at(blob_p, length.value)
library.MagickRelinquishMemory(blob_p)
return blob
else:
return None
class SingleImage(BaseImage):
"""Each single image in :class:`~wand.image.Image` container.
For example, it can be a frame of GIF animation.
Note that all changes on single images are invisible to their
containers unless they are altered within a ``with ...`` context manager.
with Image(filename='animation.gif') as container:
with container.sequence[0] as frame:
frame.negate()
.. versionadded:: 0.3.0
.. versionchanged:: 0.5.1
Only sync changes of a :class:`SingleImage` when exiting a ``with ...``
context. Not when parent :class:`~wand.image.Image` closes.
"""
#: (:class:`wand.image.Image`) The container image.
container = None
def __init__(self, wand, container, c_original_resource):
super(SingleImage, self).__init__(wand)
self.container = container
self.c_original_resource = c_original_resource
self._delay = None
@property
def sequence(self):
return self,
@property
def index(self):
"""(:class:`numbers.Integral`) The index of the single image in
the :attr:`container` image.
"""
wand = self.container.wand
library.MagickResetIterator(wand)
image = library.GetImageFromMagickWand(wand)
i = 0
while self.c_original_resource != image and image:
image = libmagick.GetNextImageInList(image)
i += 1
assert image
assert self.c_original_resource == image
return i
@property
def delay(self):
"""(:class:`numbers.Integral`) The delay to pause before display
the next image (in the :attr:`~wand.image.BaseImage.sequence` of
its :attr:`container`). It's hundredths of a second.
"""
if self._delay is None:
container = self.container
with container.sequence.index_context(self.index):
self._delay = library.MagickGetImageDelay(container.wand)
return self._delay
@delay.setter
def delay(self, delay):
if not isinstance(delay, numbers.Integral):
raise TypeError('delay must be an integer, not ' + repr(delay))
elif delay < 0:
raise ValueError('delay cannot be less than zero')
container = self.container
with container.sequence.index_context(self.index):
library.MagickSetImageDelay(container.wand, delay)
self._delay = delay
def _sync_container_sequence(self):
"""If instances was flagged as :attr:`dirty` by any manipulation
methods, then this instance will overwrite :attr:`container` internal
version at :attr:`index`.
.. versionadded:: 0.5.1
"""
if self.dirty:
self.container.sequence[self.index] = self
self.dirty = False # Reset dirty flag
def __exit__(self, type_, value, traceback):
self._sync_container_sequence()
super(SingleImage, self).__exit__(type_, value, traceback)
def __repr__(self):
cls = type(self)
if getattr(self, 'c_resource', None) is None:
return '<{0}.{1}: (closed)>'.format(cls.__module__, cls.__name__)
return '<{0}.{1}: {2} ({3}x{4})>'.format(
cls.__module__, cls.__name__,
self.signature[:7], self.width, self.height
)
| 36.358333 | 79 | 0.577813 |
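A short usage sketch for the sequence API above; the GIF paths are placeholders and ImageMagick plus the wand bindings are assumed to be installed:

```python
from wand.image import Image

with Image(filename='animation.gif') as img:
    print(len(img.sequence))                 # number of frames
    for frame in img.sequence:
        print(frame.index, frame.delay)      # frame position and delay (1/100 s units)
    with img.sequence[0] as first:
        first.delay = 50                     # edits sync back when the context exits
    img.save(filename='animation_slow_start.gif')
```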
793f0eb02919f318cc40fb7e889280e3a7f56dce | 14,171 | py | Python | circews/classes/label_gen.py | ratschlab/circEWS | b2b1f00dac4f5d46856a2c7abe2ca4f12d4c612d | [
"MIT"
] | 34 | 2020-03-17T16:42:00.000Z | 2022-03-29T15:53:24.000Z | circews/classes/label_gen.py | ranxiao/circEWS | 1e52880c268f8f763bbc16763131634ffc217153 | [
"MIT"
] | 3 | 2020-07-30T22:37:10.000Z | 2021-08-10T00:02:30.000Z | circews/classes/label_gen.py | ranxiao/circEWS | 1e52880c268f8f763bbc16763131634ffc217153 | [
"MIT"
] | 14 | 2020-04-22T01:13:54.000Z | 2021-11-27T20:23:41.000Z | '''
Class wrapper for label generation, transforming an input data-frame with endpoints and
an input data-frame with imputed data to a Pandas data-frame
with labels
'''
import os
import sys
import os.path
import ipdb
import numpy as np
import scipy as sp
import pandas as pd
import circews.functions.labels as bern_labels
class AllLabel:
'''
Annotate all labels jointly, including full WorseState and WorseStateSoft labels, multi-class
classification labels, regression-time-to-event labels, and smaller component labels
that refer to conditions on MAP, Lactate and the medications.
'''
def __init__(self, lhours, rhours, dataset=None):
self.abs_datetime_key="AbsDatetime"
self.rel_datetime_key="RelDatetime"
self.patient_id_key="PatientID"
self.lhours=lhours
self.rhours=rhours
self.label_key="AllLabels{}To{}Hours".format(self.lhours, self.rhours)
self.grid_step_seconds=300.0
self.dataset=dataset
def transform(self, df_pat, df_endpoint, pid=None):
abs_time_col=df_pat[self.abs_datetime_key]
rel_time_col=df_pat[self.rel_datetime_key]
patient_col=df_pat[self.patient_id_key]
if df_pat.shape[0]==0 or df_endpoint.shape[0]==0:
print("WARNING: Patient {} has no impute data, skipping...".format(pid), flush=True)
return None
df_endpoint.set_index(keys="Datetime", inplace=True, verify_integrity=True)
try:
if self.dataset=="bern":
df_endpoint=df_endpoint.reindex(index=df_pat.AbsDatetime,method="nearest")
elif self.dataset=="mimic":
df_endpoint=df_endpoint.reindex(index=df_pat.AbsDatetime,method="ffill")
except:
print("WARNING: Issue when re-indexing frame of patient: {}".format(pid), flush=True)
return None
endpoint_status_arr=np.array(df_endpoint.endpoint_status)
unique_status=np.unique(endpoint_status_arr)
for status in unique_status:
assert(status in ["unknown","event 0","event 1", "event 2", "event 3",
"maybe 1","maybe 2", "maybe 3","probably not 1", "probably not 2", "probably not 3"])
lactate_above_ts=np.array(df_endpoint.lactate_above_threshold,dtype=np.float)
map_below_ts=np.array(df_endpoint.MAP_below_threshold,dtype=np.float)
l1_present=np.array(df_endpoint.level1_drugs_present,dtype=np.float)
l2_present=np.array(df_endpoint.level2_drugs_present,dtype=np.float)
l3_present=np.array(df_endpoint.level3_drugs_present,dtype=np.float)
worse_state_arr=bern_labels.future_worse_state(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Joint (A|D|E)
worse_state_soft_arr=bern_labels.future_worse_state_soft(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Joint (B|C|D|E)
from_0_arr=bern_labels.future_worse_state_from_0(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate A
from_0_soft_arr=bern_labels.future_worse_state_soft_from_0(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate B
from_probably_not_arr=bern_labels.future_worse_state_from_pn(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate C
from_1_arr=bern_labels.future_worse_state_from_1(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate D
from_2_arr=bern_labels.future_worse_state_from_2(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate E;
        from_1_or_2_arr=bern_labels.future_worse_state_from_1_or_2(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Joint (D|E)
lactate_any_arr=bern_labels.any_positive_transition(lactate_above_ts, self.lhours, self.rhours, self.grid_step_seconds)
map_any_arr=bern_labels.any_positive_transition(map_below_ts, self.lhours, self.rhours, self.grid_step_seconds)
l1_drugs_any_arr=bern_labels.any_positive_transition(l1_present, self.lhours, self.rhours, self.grid_step_seconds)
l2_drugs_any_arr=bern_labels.any_positive_transition(l2_present, self.lhours, self.rhours, self.grid_step_seconds)
l3_drugs_any_arr=bern_labels.any_positive_transition(l3_present, self.lhours, self.rhours, self.grid_step_seconds)
time_to_worse_state_binned_arr=bern_labels.time_to_worse_state_binned(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds)
time_to_worse_state_arr=bern_labels.time_to_worse_state(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds)
output_df_dict={}
output_df_dict[self.abs_datetime_key]=abs_time_col
output_df_dict[self.rel_datetime_key]=rel_time_col
output_df_dict[self.patient_id_key]=patient_col
output_df_dict["WorseState{}To{}Hours".format(self.lhours, self.rhours)]=worse_state_arr
output_df_dict["WorseStateSoft{}To{}Hours".format(self.lhours, self.rhours)]=worse_state_soft_arr
output_df_dict["WorseStateFromZero{}To{}Hours".format(self.lhours, self.rhours)]=from_0_arr
output_df_dict["WorseStateSoftFromZero{}To{}Hours".format(self.lhours, self.rhours)]=from_0_soft_arr
output_df_dict["WorseStateFromPn{}To{}Hours".format(self.lhours, self.rhours)]=from_probably_not_arr
output_df_dict["WorseStateFromOne{}To{}Hours".format(self.lhours, self.rhours)]=from_1_arr
output_df_dict["WorseStateFromTwo{}To{}Hours".format(self.lhours, self.rhours)]=from_2_arr
output_df_dict["WorseStateFromOneOrTwo{}To{}Hours".format(self.lhours, self.rhours)]=from_1_or_2_arr
output_df_dict["LactateAboveTs{}To{}Hours".format(self.lhours, self.rhours)]=lactate_any_arr
output_df_dict["MAPBelowTs{}To{}Hours".format(self.lhours, self.rhours)]=map_any_arr
output_df_dict["L1Drugs{}To{}Hours".format(self.lhours, self.rhours)]=l1_drugs_any_arr
output_df_dict["L2Drugs{}To{}Hours".format(self.lhours, self.rhours)]=l2_drugs_any_arr
output_df_dict["L3Drugs{}To{}Hours".format(self.lhours, self.rhours)]=l3_drugs_any_arr
output_df_dict["TimeToWorseState{}To{}Hours".format(self.lhours, self.rhours)]=time_to_worse_state_arr
output_df_dict["TimeToWorseStateBinned{}To{}Hours".format(self.lhours, self.rhours)]=time_to_worse_state_binned_arr
output_df=pd.DataFrame(output_df_dict)
return output_df
class DeteriorationLabel:
def __init__(self,lhours,rhours):
self.abs_datetime_key="AbsDatetime"
self.rel_datetime_key="RelDatetime"
self.patient_id_key="PatientID"
self.lhours=lhours
self.rhours=rhours
self.label_key="Deterioration_{}To{}Hours".format(self.lhours,self.rhours)
self.grid_step_seconds=300.0
def transform(self, df_pat, df_endpoint, pid=None):
abs_time_col=df_pat[self.abs_datetime_key]
rel_time_col=df_pat[self.rel_datetime_key]
patient_col=df_pat[self.patient_id_key]
## Patient has no information in the imputed table or the endpoints (SHOULD NOT HAPPEN)
if df_pat.shape[0]==0 or df_endpoint.shape[0]==0:
print("WARNING: Patient {} has no impute data, skipping...".format(pid),flush=True)
return None
df_endpoint.set_index(keys="Datetime",inplace=True,verify_integrity=True)
# Re-index the endpoint to the grid of the imputed data.
try:
df_endpoint=df_endpoint.reindex(index=df_pat.AbsDatetime,method="nearest")
except :
print("WARNING: Issue when re-indexing frame of patient {}".format(pid),flush=True)
return None
event1_arr=np.array(df_endpoint.event1)
event2_arr=np.array(df_endpoint.event2)
event3_arr=np.array(df_endpoint.event3)
maybe1_arr=np.array(df_endpoint.maybe_event1)
maybe2_arr=np.array(df_endpoint.maybe_event2)
maybe3_arr=np.array(df_endpoint.maybe_event3)
not1_arr=np.array(df_endpoint.probably_not_event1)
not2_arr=np.array(df_endpoint.probably_not_event2)
not3_arr=np.array(df_endpoint.probably_not_event3)
# Any deterioration, does not require that the exact downward takes place in the forward horizon, but only if there
# is some more severe endpoint in the period
if self.lhours==0:
label_arr=bern_labels.future_worse_state(event1_arr, event2_arr, event3_arr,maybe1_arr, maybe2_arr, maybe3_arr, self.lhours, self.rhours, self.grid_step_seconds)
else:
label_arr=bern_labels.future_deterioration(event1_arr, event2_arr, event3_arr,maybe1_arr, maybe2_arr, maybe3_arr, self.lhours, self.rhours, self.grid_step_seconds)
output_df_dict={}
output_df_dict[self.abs_datetime_key]=abs_time_col
output_df_dict[self.rel_datetime_key]=rel_time_col
output_df_dict[self.patient_id_key]=patient_col
output_df_dict[self.label_key]=label_arr
output_df=pd.DataFrame(output_df_dict)
return output_df
class WorseStateLabel:
def __init__(self, lhours, rhours):
self.abs_datetime_key="AbsDatetime"
self.rel_datetime_key="RelDatetime"
self.patient_id_key="PatientID"
self.lhours=lhours
self.rhours=rhours
self.label_key="WorseState_{}To{}Hours".format(float(self.lhours),float(self.rhours))
self.grid_step_seconds=300.0
def transform(self, df_pat, df_endpoint,pid=None):
abs_time_col=df_pat[self.abs_datetime_key]
rel_time_col=df_pat[self.rel_datetime_key]
patient_col=df_pat[self.patient_id_key]
## Patient has no information in the imputed table or the endpoints (SHOULD NOT HAPPEN)
if df_pat.shape[0]==0 or df_endpoint.shape[0]==0:
print("WARNING: Patient {} has no impute data, skipping...".format(pid),flush=True)
return None
df_endpoint.set_index(keys="Datetime",inplace=True,verify_integrity=True)
# Re-index the endpoint to the grid of the imputed data.
try:
df_endpoint=df_endpoint.reindex(index=df_pat.AbsDatetime,method="nearest")
except :
print("WARNING: Issue when re-indexing frame of patient {}".format(pid),flush=True)
return None
event1_arr=np.array(df_endpoint.event1)
event2_arr=np.array(df_endpoint.event2)
event3_arr=np.array(df_endpoint.event3)
maybe1_arr=np.array(df_endpoint.maybe_event1)
maybe2_arr=np.array(df_endpoint.maybe_event2)
maybe3_arr=np.array(df_endpoint.maybe_event3)
not1_arr=np.array(df_endpoint.probably_not_event1)
not2_arr=np.array(df_endpoint.probably_not_event2)
not3_arr=np.array(df_endpoint.probably_not_event3)
# Any deterioration, does not require that the exact downward takes place in the forward horizon, but only if there
# is some more severe endpoint in the period
label_arr=bern_labels.future_worse_state(event1_arr, event2_arr, event3_arr, maybe1_arr, maybe2_arr, maybe3_arr, not1_arr, not2_arr,
not3_arr, self.lhours, self.rhours, self.grid_step_seconds)
output_df_dict={}
output_df_dict[self.abs_datetime_key]=abs_time_col
output_df_dict[self.rel_datetime_key]=rel_time_col
output_df_dict[self.patient_id_key]=patient_col
output_df_dict[self.label_key]=label_arr
output_df=pd.DataFrame(output_df_dict)
return output_df
class WorseStateSoftLabel:
def __init__(self, lhours, rhours):
self.abs_datetime_key="AbsDatetime"
self.rel_datetime_key="RelDatetime"
self.patient_id_key="PatientID"
self.lhours=lhours
self.rhours=rhours
self.label_key="WorseState_{}To{}Hours".format(float(self.lhours),float(self.rhours))
self.grid_step_seconds=300.0
def transform(self, df_pat, df_endpoint,pid=None):
abs_time_col=df_pat[self.abs_datetime_key]
rel_time_col=df_pat[self.rel_datetime_key]
patient_col=df_pat[self.patient_id_key]
## Patient has no information in the imputed table or the endpoints (SHOULD NOT HAPPEN)
if df_pat.shape[0]==0 or df_endpoint.shape[0]==0:
print("WARNING: Patient {} has no impute data, skipping...".format(pid),flush=True)
return None
df_endpoint.set_index(keys="Datetime",inplace=True,verify_integrity=True)
# Re-index the endpoint to the grid of the imputed data.
try:
df_endpoint=df_endpoint.reindex(index=df_pat.AbsDatetime,method="nearest")
except :
print("WARNING: Issue when re-indexing frame of patient {}".format(pid),flush=True)
return None
event1_arr=np.array(df_endpoint.event1)
event2_arr=np.array(df_endpoint.event2)
event3_arr=np.array(df_endpoint.event3)
maybe1_arr=np.array(df_endpoint.maybe_event1)
maybe2_arr=np.array(df_endpoint.maybe_event2)
maybe3_arr=np.array(df_endpoint.maybe_event3)
not1_arr=np.array(df_endpoint.probably_not_event1)
not2_arr=np.array(df_endpoint.probably_not_event2)
not3_arr=np.array(df_endpoint.probably_not_event3)
# Any deterioration, does not require that the exact downward takes place in the forward horizon, but only if there
# is some more severe endpoint in the period
label_arr=bern_labels.future_worse_state_soft(event1_arr, event2_arr, event3_arr, maybe1_arr, maybe2_arr, maybe3_arr, not1_arr, not2_arr,
not3_arr, self.lhours, self.rhours, self.grid_step_seconds)
output_df_dict={}
output_df_dict[self.abs_datetime_key]=abs_time_col
output_df_dict[self.rel_datetime_key]=rel_time_col
output_df_dict[self.patient_id_key]=patient_col
output_df_dict[self.label_key]=label_arr
output_df=pd.DataFrame(output_df_dict)
return output_df
| 50.610714 | 175 | 0.71851 |
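The actual label computations are delegated to `circews.functions.labels`. Purely as an illustration of the look-ahead idea behind `future_worse_state`, and not the project's implementation, a toy version that treats the endpoint status as an integer severity level on a regular time grid:

```python
import numpy as np

def future_worse_state_toy(severity, lhours, rhours, grid_step_seconds=300.0):
    """1.0 if severity rises above its current level anywhere in [t+lhours, t+rhours),
    0.0 otherwise; NaN where the look-ahead window starts after the stay ends."""
    steps_per_hour = 3600.0 / grid_step_seconds
    lo, hi = int(lhours * steps_per_hour), int(rhours * steps_per_hour)
    labels = np.full(len(severity), np.nan)
    for t in range(len(severity)):
        start, stop = t + lo, min(t + hi, len(severity))
        if start < len(severity):
            labels[t] = float(np.any(severity[start:stop] > severity[t]))
    return labels

severity = np.array([0, 0, 1, 1, 2, 2, 0, 0])
print(future_worse_state_toy(severity, lhours=0.5, rhours=2.0, grid_step_seconds=1800.0))
```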
793f102c8b2e5840be88ab1fb3ea0d779a0aae37 | 729 | py | Python | django-rgd-3d/rgd_3d/tasks/jobs.py | Kitware/ResonantGeoData | 6d111cbe1d57df2cd230edcf4724f6e33471f5ff | ["Apache-2.0"] | 3 | 2020-03-10T14:47:07.000Z | 2020-05-05T16:55:27.000Z | django-rgd-3d/rgd_3d/tasks/jobs.py | Kitware/ResonantGeoData | 6d111cbe1d57df2cd230edcf4724f6e33471f5ff | ["Apache-2.0"] | 13 | 2020-04-14T14:36:06.000Z | 2020-05-07T15:03:42.000Z | django-rgd-3d/rgd_3d/tasks/jobs.py | Kitware/ResonantGeoData | 6d111cbe1d57df2cd230edcf4724f6e33471f5ff | ["Apache-2.0"] | 1 | 2020-03-03T15:47:52.000Z | 2020-03-03T15:47:52.000Z |
from celery import shared_task
from django.conf import settings
from rgd.tasks import helpers
@shared_task(time_limit=settings.CELERY_TASK_TIME_LIMIT)
def task_read_mesh_3d_file(pc_file_pk):
from rgd_3d.models import Mesh3D
from .etl import read_mesh_3d_file
pc_file = Mesh3D.objects.get(pk=pc_file_pk)
helpers._run_with_failure_reason(pc_file, read_mesh_3d_file, pc_file_pk)
@shared_task(time_limit=settings.CELERY_TASK_TIME_LIMIT)
def task_read_3d_tiles_file(tiles_3d_pk: int):
from rgd_3d.models import Tiles3D
from .etl import read_3d_tiles_tileset_json
tiles_3d = Tiles3D.objects.get(pk=tiles_3d_pk)
helpers._run_with_failure_reason(tiles_3d, read_3d_tiles_tileset_json, tiles_3d_pk)
| 30.375 | 87 | 0.81893 |
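These are ordinary Celery shared tasks, so callers queue them by primary key; a usage sketch with an illustrative pk value (requires a configured Django/Celery environment):

```python
from rgd_3d.tasks.jobs import task_read_mesh_3d_file

# .delay() is Celery's shortcut for .apply_async(); 42 stands in for a real Mesh3D pk.
task_read_mesh_3d_file.delay(42)
```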
793f114feffb6c289f02087c1bbc337882a29f15 | 21,057 | py | Python | test/commands/extended/get_inputs_test.py | plenarius/iota.lib.py | ac6167dadb8b60a64b33eeb9db755be32c7cef12 | ["MIT"] | 2 | 2018-02-21T12:04:41.000Z | 2018-04-01T18:56:18.000Z | test/commands/extended/get_inputs_test.py | plenarius/iota.lib.py | ac6167dadb8b60a64b33eeb9db755be32c7cef12 | ["MIT"] | null | null | null | test/commands/extended/get_inputs_test.py | plenarius/iota.lib.py | ac6167dadb8b60a64b33eeb9db755be32c7cef12 | ["MIT"] | 3 | 2018-02-19T09:35:44.000Z | 2018-04-01T19:16:26.000Z |
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
import filters as f
from filters.test import BaseFilterTestCase
from iota import Address, BadApiResponse, Iota, TransactionHash
from iota.adapter import MockAdapter
from iota.commands.extended.get_inputs import GetInputsCommand, \
GetInputsRequestFilter
from iota.crypto.types import Seed
from iota.filters import Trytes
from test import mock
class GetInputsRequestFilterTestCase(BaseFilterTestCase):
filter_type = GetInputsCommand(MockAdapter()).get_request_filter
skip_value_check = True
# noinspection SpellCheckingInspection
def setUp(self):
super(GetInputsRequestFilterTestCase, self).setUp()
# Define a few tryte sequences that we can re-use between tests.
self.seed = 'HELLOIOTA'
def test_pass_happy_path(self):
"""
Request is valid.
"""
request = {
# Raw trytes are extracted to match the IRI's JSON protocol.
'seed': self.seed,
'start': 0,
'stop': 10,
'threshold': 100,
}
filter_ = self._filter(request)
self.assertFilterPasses(filter_)
self.assertDictEqual(filter_.cleaned_data, request)
def test_pass_compatible_types(self):
"""
The request contains values that can be converted to the expected
types.
"""
filter_ = self._filter({
# ``seed`` can be any value that is convertible into an ASCII
# representation of a TryteString.
'seed': bytearray(self.seed.encode('ascii')),
# These values must still be integers, however.
'start': 42,
'stop': 86,
'threshold': 99,
})
self.assertFilterPasses(filter_)
self.assertDictEqual(
filter_.cleaned_data,
{
'seed': Seed(self.seed),
'start': 42,
'stop': 86,
'threshold': 99,
},
)
def test_pass_optional_parameters_excluded(self):
"""
The request contains only required parameters.
"""
filter_ = self._filter({
'seed': Seed(self.seed),
})
self.assertFilterPasses(filter_)
self.assertDictEqual(
filter_.cleaned_data,
{
'seed': Seed(self.seed),
'start': 0,
'stop': None,
'threshold': None,
}
)
def test_fail_empty_request(self):
"""
The request is empty.
"""
self.assertFilterErrors(
{},
{
'seed': [f.FilterMapper.CODE_MISSING_KEY],
},
)
def test_fail_unexpected_parameters(self):
"""
The request contains unexpected parameters.
"""
self.assertFilterErrors(
{
'seed': Seed(self.seed),
# Told you I did. Reckless is he. Now, matters are worse.
'foo': 'bar',
},
{
'foo': [f.FilterMapper.CODE_EXTRA_KEY],
},
)
def test_fail_seed_null(self):
"""
``seed`` is null.
"""
self.assertFilterErrors(
{
'seed': None,
},
{
'seed': [f.Required.CODE_EMPTY],
},
)
def test_fail_seed_wrong_type(self):
"""
``seed`` cannot be converted into a TryteString.
"""
self.assertFilterErrors(
{
'seed': 42,
},
{
'seed': [f.Type.CODE_WRONG_TYPE],
},
)
def test_fail_seed_malformed(self):
"""
``seed`` has the correct type, but it contains invalid characters.
"""
self.assertFilterErrors(
{
'seed': b'not valid; seeds can only contain uppercase and "9".',
},
{
'seed': [Trytes.CODE_NOT_TRYTES],
},
)
def test_fail_start_string(self):
"""
``start`` is a string.
"""
self.assertFilterErrors(
{
# Not valid; it must be an int.
'start': '0',
'seed': Seed(self.seed),
},
{
'start': [f.Type.CODE_WRONG_TYPE],
},
)
def test_fail_start_float(self):
"""
``start`` is a float.
"""
self.assertFilterErrors(
{
# Even with an empty fpart, floats are not valid.
# It's gotta be an int.
'start': 8.0,
'seed': Seed(self.seed),
},
{
'start': [f.Type.CODE_WRONG_TYPE],
},
)
def test_fail_start_too_small(self):
"""
``start`` is less than 0.
"""
self.assertFilterErrors(
{
'start': -1,
'seed': Seed(self.seed),
},
{
'start': [f.Min.CODE_TOO_SMALL],
},
)
def test_fail_stop_string(self):
"""
``stop`` is a string.
"""
self.assertFilterErrors(
{
# Not valid; it must be an int.
'stop': '0',
'seed': Seed(self.seed),
},
{
'stop': [f.Type.CODE_WRONG_TYPE],
},
)
def test_fail_stop_float(self):
"""
``stop`` is a float.
"""
self.assertFilterErrors(
{
# Even with an empty fpart, floats are not valid.
# It's gotta be an int.
'stop': 8.0,
'seed': Seed(self.seed),
},
{
'stop': [f.Type.CODE_WRONG_TYPE],
},
)
def test_fail_stop_too_small(self):
"""
``stop`` is less than 0.
"""
self.assertFilterErrors(
{
'stop': -1,
'seed': Seed(self.seed),
},
{
'stop': [f.Min.CODE_TOO_SMALL],
},
)
def test_fail_stop_occurs_before_start(self):
"""
``stop`` is less than ``start``.
"""
self.assertFilterErrors(
{
'start': 1,
'stop': 0,
'seed': Seed(self.seed),
},
{
'start': [GetInputsRequestFilter.CODE_INTERVAL_INVALID],
},
)
def test_fail_interval_too_large(self):
"""
The interval between ``start`` and ``stop`` exceeds the maximum allowed.
"""
self.assertFilterErrors(
{
'start': 0,
'stop': GetInputsRequestFilter.MAX_INTERVAL + 1,
'seed': Seed(self.seed),
},
{
'stop': [GetInputsRequestFilter.CODE_INTERVAL_TOO_BIG],
},
)
def test_fail_threshold_string(self):
"""
``threshold`` is a string.
"""
self.assertFilterErrors(
{
# Not valid; it must be an int.
'threshold': '0',
'seed': Seed(self.seed),
},
{
'threshold': [f.Type.CODE_WRONG_TYPE],
},
)
def test_fail_threshold_float(self):
"""
``threshold`` is a float.
"""
self.assertFilterErrors(
{
# Even with an empty fpart, floats are not valid.
# It's gotta be an int.
'threshold': 8.0,
'seed': Seed(self.seed),
},
{
'threshold': [f.Type.CODE_WRONG_TYPE],
},
)
def test_fail_threshold_too_small(self):
"""
``threshold`` is less than 0.
"""
self.assertFilterErrors(
{
'threshold': -1,
'seed': Seed(self.seed),
},
{
'threshold': [f.Min.CODE_TOO_SMALL],
},
)
class GetInputsCommandTestCase(TestCase):
# noinspection SpellCheckingInspection
def setUp(self):
super(GetInputsCommandTestCase, self).setUp()
self.adapter = MockAdapter()
self.command = GetInputsCommand(self.adapter)
# Define some valid tryte sequences that we can reuse between
# tests.
self.addy0 =\
Address(
trytes =
b'TESTVALUE9DONTUSEINPRODUCTION99999FIODSG'
b'IC9CCIFCNBTBDFIEHHE9RBAEVGK9JECCLCPBIINAX',
key_index = 0,
)
self.addy1 =\
Address(
trytes =
b'TESTVALUE9DONTUSEINPRODUCTION999999EPCNH'
b'MBTEH9KDVFMHHESDOBTFFACCGBFGACEDCDDCGICIL',
key_index = 1,
)
self.addy2 =\
Address(
trytes =
b'TESTVALUE9DONTUSEINPRODUCTION99999YDOHWF'
b'U9PFOFHGKFACCCBGDALGI9ZBEBABFAMBPDSEQ9XHJ',
key_index = 2,
)
def test_wireup(self):
"""
Verify that the command is wired up correctly.
"""
self.assertIsInstance(
Iota(self.adapter).getInputs,
GetInputsCommand,
)
def test_stop_threshold_met(self):
"""
``stop`` provided, balance meets ``threshold``.
"""
self.adapter.seed_response('getBalances', {
'balances': [42, 29],
})
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
mock_address_generator = mock.Mock(return_value=[self.addy0, self.addy1])
with mock.patch(
'iota.crypto.addresses.AddressGenerator.get_addresses',
mock_address_generator,
):
response = self.command(
seed = Seed.random(),
stop = 2,
threshold = 71,
)
self.assertEqual(response['totalBalance'], 71)
self.assertEqual(len(response['inputs']), 2)
input0 = response['inputs'][0]
self.assertIsInstance(input0, Address)
self.assertEqual(input0, self.addy0)
self.assertEqual(input0.balance, 42)
self.assertEqual(input0.key_index, 0)
input1 = response['inputs'][1]
self.assertIsInstance(input1, Address)
self.assertEqual(input1, self.addy1)
self.assertEqual(input1.balance, 29)
self.assertEqual(input1.key_index, 1)
def test_stop_threshold_not_met(self):
"""
``stop`` provided, balance does not meet ``threshold``.
"""
self.adapter.seed_response('getBalances', {
'balances': [42, 29],
})
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
mock_address_generator = mock.Mock(return_value=[self.addy0, self.addy1])
with mock.patch(
'iota.crypto.addresses.AddressGenerator.get_addresses',
mock_address_generator,
):
with self.assertRaises(BadApiResponse):
self.command(
seed = Seed.random(),
stop = 2,
threshold = 72,
)
def test_stop_threshold_zero(self):
"""
``stop`` provided, ``threshold`` is 0.
"""
# Note that the first address has a zero balance.
self.adapter.seed_response('getBalances', {
'balances': [0, 1],
})
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
mock_address_generator = mock.Mock(return_value=[self.addy0, self.addy1])
with mock.patch(
'iota.crypto.addresses.AddressGenerator.get_addresses',
mock_address_generator,
):
response = self.command(
seed = Seed.random(),
stop = 2,
threshold = 0,
)
self.assertEqual(response['totalBalance'], 1)
self.assertEqual(len(response['inputs']), 1)
# Address 0 was skipped because it has a zero balance.
input0 = response['inputs'][0]
self.assertIsInstance(input0, Address)
self.assertEqual(input0, self.addy1)
self.assertEqual(input0.balance, 1)
self.assertEqual(input0.key_index, 1)
def test_stop_no_threshold(self):
"""
``stop`` provided, no ``threshold``.
"""
self.adapter.seed_response('getBalances', {
'balances': [42, 29],
})
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
mock_address_generator = mock.Mock(return_value=[self.addy0, self.addy1])
with mock.patch(
'iota.crypto.addresses.AddressGenerator.get_addresses',
mock_address_generator,
):
response = self.command(
seed = Seed.random(),
start = 0,
stop = 2,
)
self.assertEqual(response['totalBalance'], 71)
self.assertEqual(len(response['inputs']), 2)
input0 = response['inputs'][0]
self.assertIsInstance(input0, Address)
self.assertEqual(input0, self.addy0)
self.assertEqual(input0.balance, 42)
self.assertEqual(input0.key_index, 0)
input1 = response['inputs'][1]
self.assertIsInstance(input1, Address)
self.assertEqual(input1, self.addy1)
self.assertEqual(input1.balance, 29)
self.assertEqual(input1.key_index, 1)
def test_no_stop_threshold_met(self):
"""
No ``stop`` provided, balance meets ``threshold``.
"""
self.adapter.seed_response('getBalances', {
'balances': [42, 29],
})
# ``getInputs`` uses ``findTransactions`` to identify unused
# addresses.
# noinspection SpellCheckingInspection
self.adapter.seed_response('findTransactions', {
'hashes': [
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999WBL9KD'
b'EIZDMEDFPEYDIIA9LEMEUCC9MFPBY9TEVCUGSEGGN'
),
],
})
# noinspection SpellCheckingInspection
self.adapter.seed_response('findTransactions', {
'hashes': [
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999YFXGOD'
b'GISBJAX9PDJIRDMDV9DCRDCAEG9FN9KECCBDDFZ9H'
),
],
})
self.adapter.seed_response('findTransactions', {
'hashes': [],
})
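    # Three responses are seeded because the scan continues until
    # ``findTransactions`` returns no hashes: the first two addresses look
    # "used", and the empty response marks the third as unused, ending the scan.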
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
# noinspection PyUnusedLocal
def mock_address_generator(ag, start, step=1):
for addy in [self.addy0, self.addy1, self.addy2][start::step]:
yield addy
# When ``stop`` is None, the command uses a generator internally.
with mock.patch(
'iota.crypto.addresses.AddressGenerator.create_iterator',
mock_address_generator,
):
response = self.command(
seed = Seed.random(),
threshold = 71,
)
self.assertEqual(response['totalBalance'], 71)
self.assertEqual(len(response['inputs']), 2)
input0 = response['inputs'][0]
self.assertIsInstance(input0, Address)
self.assertEqual(input0, self.addy0)
self.assertEqual(input0.balance, 42)
self.assertEqual(input0.key_index, 0)
input1 = response['inputs'][1]
self.assertIsInstance(input1, Address)
self.assertEqual(input1, self.addy1)
self.assertEqual(input1.balance, 29)
self.assertEqual(input1.key_index, 1)
def test_no_stop_threshold_not_met(self):
"""
No ``stop`` provided, balance does not meet ``threshold``.
"""
self.adapter.seed_response('getBalances', {
'balances': [42, 29, 0],
})
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
# noinspection PyUnusedLocal
def mock_address_generator(ag, start, step=1):
for addy in [self.addy0, self.addy1, self.addy2][start::step]:
yield addy
# When ``stop`` is None, the command uses a generator internally.
with mock.patch(
'iota.crypto.addresses.AddressGenerator.create_iterator',
mock_address_generator,
):
with self.assertRaises(BadApiResponse):
self.command(
seed = Seed.random(),
threshold = 72,
)
def test_no_stop_threshold_zero(self):
"""
No ``stop`` provided, ``threshold`` is 0.
"""
# Note that the first address has a zero balance.
self.adapter.seed_response('getBalances', {
'balances': [0, 1],
})
# ``getInputs`` uses ``findTransactions`` to identify unused
# addresses.
# noinspection SpellCheckingInspection
self.adapter.seed_response('findTransactions', {
'hashes': [
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999WBL9KD'
b'EIZDMEDFPEYDIIA9LEMEUCC9MFPBY9TEVCUGSEGGN'
),
],
})
# noinspection SpellCheckingInspection
self.adapter.seed_response('findTransactions', {
'hashes': [
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999YFXGOD'
b'GISBJAX9PDJIRDMDV9DCRDCAEG9FN9KECCBDDFZ9H'
),
],
})
self.adapter.seed_response('findTransactions', {
'hashes': [],
})
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
# noinspection PyUnusedLocal
def mock_address_generator(ag, start, step=1):
for addy in [self.addy0, self.addy1, self.addy2][start::step]:
yield addy
# When ``stop`` is None, the command uses a generator internally.
with mock.patch(
'iota.crypto.addresses.AddressGenerator.create_iterator',
mock_address_generator,
):
response = self.command(
seed = Seed.random(),
threshold = 0,
)
self.assertEqual(response['totalBalance'], 1)
self.assertEqual(len(response['inputs']), 1)
# Because the first address had a zero balance, it was skipped.
input0 = response['inputs'][0]
self.assertIsInstance(input0, Address)
self.assertEqual(input0, self.addy1)
self.assertEqual(input0.balance, 1)
self.assertEqual(input0.key_index, 1)
def test_no_stop_no_threshold(self):
"""
No ``stop`` provided, no ``threshold``.
"""
self.adapter.seed_response('getBalances', {
'balances': [42, 29],
})
# ``getInputs`` uses ``findTransactions`` to identify unused
# addresses.
# noinspection SpellCheckingInspection
self.adapter.seed_response('findTransactions', {
'hashes': [
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999WBL9KD'
b'EIZDMEDFPEYDIIA9LEMEUCC9MFPBY9TEVCUGSEGGN'
),
],
})
# noinspection SpellCheckingInspection
self.adapter.seed_response('findTransactions', {
'hashes': [
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999YFXGOD'
b'GISBJAX9PDJIRDMDV9DCRDCAEG9FN9KECCBDDFZ9H'
),
],
})
self.adapter.seed_response('findTransactions', {
'hashes': [],
})
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
# noinspection PyUnusedLocal
def mock_address_generator(ag, start, step=1):
for addy in [self.addy0, self.addy1, self.addy2][start::step]:
yield addy
# When ``stop`` is None, the command uses a generator internally.
with mock.patch(
'iota.crypto.addresses.AddressGenerator.create_iterator',
mock_address_generator,
):
response = self.command(
seed = Seed.random(),
)
self.assertEqual(response['totalBalance'], 71)
self.assertEqual(len(response['inputs']), 2)
input0 = response['inputs'][0]
self.assertIsInstance(input0, Address)
self.assertEqual(input0, self.addy0)
self.assertEqual(input0.balance, 42)
self.assertEqual(input0.key_index, 0)
input1 = response['inputs'][1]
self.assertIsInstance(input1, Address)
self.assertEqual(input1, self.addy1)
self.assertEqual(input1.balance, 29)
self.assertEqual(input1.key_index, 1)
def test_start(self):
"""
Using ``start`` to offset the key range.
"""
self.adapter.seed_response('getBalances', {
'balances': [86],
})
# ``getInputs`` uses ``findTransactions`` to identify unused
# addresses.
# noinspection SpellCheckingInspection
self.adapter.seed_response('findTransactions', {
'hashes': [
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999YFXGOD'
b'GISBJAX9PDJIRDMDV9DCRDCAEG9FN9KECCBDDFZ9H'
),
],
})
self.adapter.seed_response('findTransactions', {
'hashes': [],
})
# To keep the unit test nice and speedy, we will mock the address
# generator. We already have plenty of unit tests for that
# functionality, so we can get away with mocking it here.
# noinspection PyUnusedLocal
def mock_address_generator(ag, start, step=1):
# If ``start`` has the wrong value, return garbage to make the
# test asplode.
for addy in [None, self.addy1, self.addy2][start::step]:
yield addy
# When ``stop`` is None, the command uses a generator internally.
with mock.patch(
'iota.crypto.addresses.AddressGenerator.create_iterator',
mock_address_generator,
):
response = self.command(
seed = Seed.random(),
start = 1,
)
self.assertEqual(response['totalBalance'], 86)
self.assertEqual(len(response['inputs']), 1)
input0 = response['inputs'][0]
self.assertIsInstance(input0, Address)
self.assertEqual(input0, self.addy1)
self.assertEqual(input0.balance, 86)
self.assertEqual(input0.key_index, 1)
| 25.64799 | 77 | 0.615662 |
793f11a323c838c9c22bb59415e95e956ce554ee | 3,679 | py | Python | lino/sandbox/bcss/test2.py | NewRGB/lino | 43799e42107169ff173d3b8bc0324d5773471499 | [
"BSD-2-Clause"
] | 1 | 2019-11-13T19:38:50.000Z | 2019-11-13T19:38:50.000Z | lino/sandbox/bcss/test2.py | khchine5/lino | 64f7ca9c9b83459b5b9f26174e5e3c26a137459d | [
"BSD-2-Clause"
] | null | null | null | lino/sandbox/bcss/test2.py | khchine5/lino | 64f7ca9c9b83459b5b9f26174e5e3c26a137459d | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
u"""
Send a SOAP request to the :term: `BCSS` server
using Dave Kuhlmann's :term:`generateDS` (version 2.6a).
Continued from :mod:`lino.sandbox.bcss.test`.
File :file:`SSDNRequest.py` has been modified manually
for <any> support according to Dave's instructions.
When running this script you need to set your
DJANGO_SETTINGS_MODULE environment variable
which points to a :xfile:`settings.py`
that defines your :attr:`lino.Lino.bcss_user_params`.
Since this script doesn't actually perform any connection,
the `bcss_user_params` may contain fictive values.
But they must exist.
"""
import sys
from cStringIO import StringIO
import logging
logger = logging.getLogger(__name__)
from SOAPpy import WSDL
import SSDNRequest
#~ import PerformInvestigation
from django.conf import settings
#~ The SOAP Envelope element is the root element of a SOAP message.
#~ http://www.w3schools.com/soap/soap_envelope.asp
SOAP_ENVELOPE = """
<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope
xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body>
<xmlString xmlns="http://ksz-bcss.fgov.be/connectors/WebServiceConnector">
<![CDATA[%s]]>
</xmlString>
</soap:Body>
</soap:Envelope>
"""
class Service:
# ,*args,**kw):
def __init__(self, name, requestClass, targetNamespace, doc):
self.name = name
self.requestClass = requestClass
self.targetNamespace = targetNamespace
self.doc = doc
#~ self.args = args
#~ self.kw = kw
def instantiate(self, *args, **kw):
return self.requestClass(*args, **kw)
from PerformInvestigation import PerformInvestigationRequest
SERVICES = []
SERVICES.append(
Service(
'OCMWCPASPerformInvestigation',
PerformInvestigationRequest,
'http://www.ksz-bcss.fgov.be/XSD/SSDN/OCMW_CPAS/PerformInvestigation',
u"""
    Retrieval of information from the National Register and the BCSS
    for the purposes of the social enquiry (legal data, household
    composition, address history, search in the waiting register).
Instance parameters:
- SocialSecurityUser (string)
- DataGroups : The possible types of information that can be obtained.
If not specified, all available information is returned
"""
))
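# ``run_request(0, ...)`` (see ``simple_test`` below) instantiates this
# service's ``PerformInvestigationRequest`` and wraps it in the SSDN request
# envelope before rendering the SOAP message.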
def req2str(req):
f = StringIO()
req.export(f, 0)
s = f.getvalue()
f.close()
return s
def run_request(serviceId, *args, **kw):
srv = SERVICES[serviceId]
srvReq = srv.instantiate(*args, **kw)
user = SSDNRequest.AuthorizedUserType(**settings.SITE.bcss_user_params)
service = SSDNRequest.ServiceRequestType(
ServiceId=srv.name,
Version='20090409',
any_=srvReq)
msg = SSDNRequest.RequestMessageType(
Reference='123456789',
TimeRequest='20110921T105230')
context = SSDNRequest.RequestContextType(
AuthorizedUser=user,
Message=msg)
req = SSDNRequest.SSDNRequest(
RequestContext=context,
ServiceRequest=[service])
requestXML = SOAP_ENVELOPE % req2str(req)
print requestXML
if False:
logger.info("Going to send request:\n%s", requestXML)
proxy = WSDL.Proxy(wsdl_url)
#~ proxy.soapproxy.config.dumpSOAPOut = 1
#~ proxy.soapproxy.config.dumpSOAPIn = 1
m = proxy.methods['sendXML']
response = m(requestXML)
logger.info("Got response:\n%s", response)
def simple_test():
run_request(0, SocialSecurityUser='36806010010')
if __name__ == '__main__':
simple_test()
| 27.661654 | 78 | 0.686056 |
793f11c62f9f5f0a4337cd125a7bf924a6ddd631 | 46,402 | py | Python | pandas/tests/io/excel/test_writers.py | quanghm/pandas | 4441b3361998168a9f3621aae067a9d5b050516d | [
"BSD-3-Clause"
] | 1 | 2019-12-04T21:29:15.000Z | 2019-12-04T21:29:15.000Z | pandas/tests/io/excel/test_writers.py | quanghm/pandas | 4441b3361998168a9f3621aae067a9d5b050516d | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/io/excel/test_writers.py | quanghm/pandas | 4441b3361998168a9f3621aae067a9d5b050516d | [
"BSD-3-Clause"
] | null | null | null | from datetime import date, datetime, timedelta
from functools import partial
from io import BytesIO
import os
import numpy as np
import pytest
from pandas.compat import PY36
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, get_option, set_option
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
from pandas.io.excel import (
ExcelFile,
ExcelWriter,
_OpenpyxlWriter,
_XlsxWriter,
_XlwtWriter,
register_writer,
)
@pytest.fixture
def path(ext):
"""
Fixture to open file for use in each test case.
"""
with ensure_clean(ext) as file_path:
yield file_path
@pytest.fixture
def set_engine(engine, ext):
"""
Fixture to set engine for use in each test case.
Rather than requiring `engine=...` to be provided explicitly as an
argument in each test, this fixture sets a global option to dictate
which engine should be used to write Excel files. After executing
the test it rolls back said change to the global option.
"""
option_name = "io.excel.{ext}.writer".format(ext=ext.strip("."))
prev_engine = get_option(option_name)
set_option(option_name, engine)
yield
set_option(option_name, prev_engine) # Roll back option change
@td.skip_if_no("xlrd")
@pytest.mark.parametrize("ext", [".xls", ".xlsx", ".xlsm"])
class TestRoundTrip:
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize(
"header,expected",
[(None, DataFrame([np.nan] * 4)), (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))],
)
def test_read_one_empty_col_no_header(self, ext, header, expected):
# xref gh-12292
filename = "no_header"
df = pd.DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]])
with ensure_clean(ext) as path:
df.to_excel(path, filename, index=False, header=False)
result = pd.read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize(
"header,expected",
[(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))],
)
def test_read_one_empty_col_with_header(self, ext, header, expected):
filename = "with_header"
df = pd.DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]])
with ensure_clean(ext) as path:
df.to_excel(path, "with_header", index=False, header=True)
result = pd.read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
@td.skip_if_no("openpyxl")
@td.skip_if_no("xlwt")
def test_set_column_names_in_parameter(self, ext):
# GH 12870 : pass down column names associated with
# keyword argument names
refdf = pd.DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"])
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as writer:
refdf.to_excel(writer, "Data_no_head", header=False, index=False)
refdf.to_excel(writer, "Data_with_head", index=False)
refdf.columns = ["A", "B"]
with ExcelFile(pth) as reader:
xlsdf_no_head = pd.read_excel(
reader, "Data_no_head", header=None, names=["A", "B"]
)
xlsdf_with_head = pd.read_excel(
reader, "Data_with_head", index_col=None, names=["A", "B"]
)
tm.assert_frame_equal(xlsdf_no_head, refdf)
tm.assert_frame_equal(xlsdf_with_head, refdf)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
def test_creating_and_reading_multiple_sheets(self, ext):
# see gh-9450
#
# Test reading multiple sheets, from a runtime
# created Excel file with multiple sheets.
def tdf(col_sheet_name):
d, i = [11, 22, 33], [1, 2, 3]
return DataFrame(d, i, columns=[col_sheet_name])
sheets = ["AAA", "BBB", "CCC"]
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets, dfs))
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in dfs.items():
df.to_excel(ew, sheetname)
dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0)
for s in sheets:
tm.assert_frame_equal(dfs[s], dfs_returned[s])
@td.skip_if_no("xlsxwriter")
def test_read_excel_multiindex_empty_level(self, ext):
# see gh-12453
with ensure_clean(ext) as path:
df = DataFrame(
{
("One", "x"): {0: 1},
("Two", "X"): {0: 3},
("Two", "Y"): {0: 7},
("Zero", ""): {0: 0},
}
)
expected = DataFrame(
{
("One", "x"): {0: 1},
("Two", "X"): {0: 3},
("Two", "Y"): {0: 7},
("Zero", "Unnamed: 4_level_1"): {0: 0},
}
)
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
df = pd.DataFrame(
{
("Beg", ""): {0: 0},
("Middle", "x"): {0: 1},
("Tail", "X"): {0: 3},
("Tail", "Y"): {0: 7},
}
)
expected = pd.DataFrame(
{
("Beg", "Unnamed: 1_level_1"): {0: 0},
("Middle", "x"): {0: 1},
("Tail", "X"): {0: 3},
("Tail", "Y"): {0: 7},
}
)
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
@td.skip_if_no("xlsxwriter")
@pytest.mark.parametrize("c_idx_names", [True, False])
@pytest.mark.parametrize("r_idx_names", [True, False])
@pytest.mark.parametrize("c_idx_levels", [1, 3])
@pytest.mark.parametrize("r_idx_levels", [1, 3])
def test_excel_multindex_roundtrip(
self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels
):
# see gh-4679
with ensure_clean(ext) as pth:
if c_idx_levels == 1 and c_idx_names:
pytest.skip(
"Column index name cannot be serialized unless it's a MultiIndex"
)
# Empty name case is currently read in as
# unnamed levels, not Nones.
check_names = r_idx_names or r_idx_levels <= 1
df = mkdf(5, 5, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels)
df.to_excel(pth)
act = pd.read_excel(
pth,
index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)),
)
tm.assert_frame_equal(df, act, check_names=check_names)
df.iloc[0, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(
pth,
index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)),
)
tm.assert_frame_equal(df, act, check_names=check_names)
df.iloc[-1, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(
pth,
index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)),
)
tm.assert_frame_equal(df, act, check_names=check_names)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
def test_read_excel_parse_dates(self, ext):
# see gh-11544, gh-12051
df = DataFrame(
{"col": [1, 2, 3], "date_strings": pd.date_range("2012-01-01", periods=3)}
)
df2 = df.copy()
df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
with ensure_clean(ext) as pth:
df2.to_excel(pth)
res = pd.read_excel(pth, index_col=0)
tm.assert_frame_equal(df2, res)
res = pd.read_excel(pth, parse_dates=["date_strings"], index_col=0)
tm.assert_frame_equal(df, res)
date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
res = pd.read_excel(
pth, parse_dates=["date_strings"], date_parser=date_parser, index_col=0
)
tm.assert_frame_equal(df, res)
@td.skip_if_no("xlrd")
@pytest.mark.parametrize(
"engine,ext",
[
pytest.param("openpyxl", ".xlsx", marks=td.skip_if_no("openpyxl")),
pytest.param("openpyxl", ".xlsm", marks=td.skip_if_no("openpyxl")),
pytest.param("xlwt", ".xls", marks=td.skip_if_no("xlwt")),
pytest.param("xlsxwriter", ".xlsx", marks=td.skip_if_no("xlsxwriter")),
],
)
@pytest.mark.usefixtures("set_engine")
class TestExcelWriter:
def test_excel_sheet_size(self, path):
# GH 26080
breaking_row_count = 2 ** 20 + 1
breaking_col_count = 2 ** 14 + 1
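        # 2 ** 20 rows (1,048,576) and 2 ** 14 columns (16,384) are the .xlsx
        # sheet-size limits, so one past each should make to_excel raise.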
# purposely using two arrays to prevent memory issues while testing
row_arr = np.zeros(shape=(breaking_row_count, 1))
col_arr = np.zeros(shape=(1, breaking_col_count))
row_df = pd.DataFrame(row_arr)
col_df = pd.DataFrame(col_arr)
msg = "sheet is too large"
with pytest.raises(ValueError, match=msg):
row_df.to_excel(path)
with pytest.raises(ValueError, match=msg):
col_df.to_excel(path)
def test_excel_sheet_by_name_raise(self, path):
import xlrd
gt = DataFrame(np.random.randn(10, 2))
gt.to_excel(path)
xl = ExcelFile(path)
df = pd.read_excel(xl, 0, index_col=0)
tm.assert_frame_equal(gt, df)
with pytest.raises(xlrd.XLRDError):
pd.read_excel(xl, "0")
def test_excel_writer_context_manager(self, frame, path):
with ExcelWriter(path) as writer:
frame.to_excel(writer, "Data1")
frame2 = frame.copy()
frame2.columns = frame.columns[::-1]
frame2.to_excel(writer, "Data2")
with ExcelFile(path) as reader:
found_df = pd.read_excel(reader, "Data1", index_col=0)
found_df2 = pd.read_excel(reader, "Data2", index_col=0)
tm.assert_frame_equal(found_df, frame)
tm.assert_frame_equal(found_df2, frame2)
def test_roundtrip(self, frame, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
# test roundtrip
frame.to_excel(path, "test1")
recons = pd.read_excel(path, "test1", index_col=0)
tm.assert_frame_equal(frame, recons)
frame.to_excel(path, "test1", index=False)
recons = pd.read_excel(path, "test1", index_col=None)
recons.index = frame.index
tm.assert_frame_equal(frame, recons)
frame.to_excel(path, "test1", na_rep="NA")
recons = pd.read_excel(path, "test1", index_col=0, na_values=["NA"])
tm.assert_frame_equal(frame, recons)
# GH 3611
frame.to_excel(path, "test1", na_rep="88")
recons = pd.read_excel(path, "test1", index_col=0, na_values=["88"])
tm.assert_frame_equal(frame, recons)
frame.to_excel(path, "test1", na_rep="88")
recons = pd.read_excel(path, "test1", index_col=0, na_values=[88, 88.0])
tm.assert_frame_equal(frame, recons)
# GH 6573
frame.to_excel(path, "Sheet1")
recons = pd.read_excel(path, index_col=0)
tm.assert_frame_equal(frame, recons)
frame.to_excel(path, "0")
recons = pd.read_excel(path, index_col=0)
tm.assert_frame_equal(frame, recons)
# GH 8825 Pandas Series should provide to_excel method
s = frame["A"]
s.to_excel(path)
recons = pd.read_excel(path, index_col=0)
tm.assert_frame_equal(s.to_frame(), recons)
def test_mixed(self, frame, path):
mixed_frame = frame.copy()
mixed_frame["foo"] = "bar"
mixed_frame.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(mixed_frame, recons)
def test_ts_frame(self, tsframe, path):
df = tsframe
df.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self, frame, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
@pytest.mark.parametrize("np_type", [np.int8, np.int16, np.int32, np.int64])
def test_int_types(self, np_type, path):
# Test np.int values read come back as int
# (rather than float which is Excel's format).
df = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np_type)
df.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
int_frame = df.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = pd.read_excel(path, "test1", index_col=0)
tm.assert_frame_equal(int_frame, recons2)
# Test with convert_float=False comes back as float.
float_frame = df.astype(float)
recons = pd.read_excel(path, "test1", convert_float=False, index_col=0)
tm.assert_frame_equal(
recons, float_frame, check_index_type=False, check_column_type=False
)
@pytest.mark.parametrize("np_type", [np.float16, np.float32, np.float64])
def test_float_types(self, np_type, path):
# Test np.float values read come back as float.
df = DataFrame(np.random.random_sample(10), dtype=np_type)
df.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0).astype(np_type)
tm.assert_frame_equal(df, recons, check_dtype=False)
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, np_type, path):
# Test np.bool values round-trip (cast back to np_type for comparison).
df = DataFrame([1, 0, True, False], dtype=np_type)
df.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0).astype(np_type)
tm.assert_frame_equal(df, recons)
def test_inf_roundtrip(self, path):
df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
df.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(df, recons)
def test_sheets(self, frame, tsframe, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
# Test writing to separate sheets
writer = ExcelWriter(path)
frame.to_excel(writer, "test1")
tsframe.to_excel(writer, "test2")
writer.save()
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(frame, recons)
recons = pd.read_excel(reader, "test2", index_col=0)
tm.assert_frame_equal(tsframe, recons)
assert 2 == len(reader.sheet_names)
assert "test1" == reader.sheet_names[0]
assert "test2" == reader.sheet_names[1]
def test_colaliases(self, frame, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
frame.to_excel(path, "test1", header=col_aliases)
reader = ExcelFile(path)
rs = pd.read_excel(reader, "test1", index_col=0)
xp = frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_roundtrip_indexlabels(self, merge_cells, frame, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
# test index_label
df = DataFrame(np.random.randn(10, 2)) >= 0
df.to_excel(path, "test1", index_label=["test"], merge_cells=merge_cells)
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0).astype(np.int64)
df.index.names = ["test"]
assert df.index.names == recons.index.names
df = DataFrame(np.random.randn(10, 2)) >= 0
df.to_excel(
path,
"test1",
index_label=["test", "dummy", "dummy2"],
merge_cells=merge_cells,
)
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0).astype(np.int64)
df.index.names = ["test"]
assert df.index.names == recons.index.names
df = DataFrame(np.random.randn(10, 2)) >= 0
df.to_excel(path, "test1", index_label="test", merge_cells=merge_cells)
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0).astype(np.int64)
df.index.names = ["test"]
tm.assert_frame_equal(df, recons.astype(bool))
frame.to_excel(
path,
"test1",
columns=["A", "B", "C", "D"],
index=False,
merge_cells=merge_cells,
)
# take 'A' and 'B' as indexes (same row as cols 'C', 'D')
df = frame.copy()
df = df.set_index(["A", "B"])
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=[0, 1])
tm.assert_frame_equal(df, recons, check_less_precise=True)
def test_excel_roundtrip_indexname(self, merge_cells, path):
df = DataFrame(np.random.randn(10, 4))
df.index.name = "foo"
df.to_excel(path, merge_cells=merge_cells)
xf = ExcelFile(path)
result = pd.read_excel(xf, xf.sheet_names[0], index_col=0)
tm.assert_frame_equal(result, df)
assert result.index.name == "foo"
def test_excel_roundtrip_datetime(self, merge_cells, tsframe, path):
# datetime.date, not sure what to test here exactly
tsf = tsframe.copy()
tsf.index = [x.date() for x in tsframe.index]
tsf.to_excel(path, "test1", merge_cells=merge_cells)
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(tsframe, recons)
def test_excel_date_datetime_format(self, engine, ext, path):
# see gh-4133
#
# Excel output format strings
df = DataFrame(
[
[date(2014, 1, 31), date(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
],
index=["DATE", "DATETIME"],
columns=["X", "Y"],
)
df_expected = DataFrame(
[
[datetime(2014, 1, 31), datetime(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
],
index=["DATE", "DATETIME"],
columns=["X", "Y"],
)
with ensure_clean(ext) as filename2:
writer1 = ExcelWriter(path)
writer2 = ExcelWriter(
filename2,
date_format="DD.MM.YYYY",
datetime_format="DD.MM.YYYY HH-MM-SS",
)
df.to_excel(writer1, "test1")
df.to_excel(writer2, "test1")
writer1.close()
writer2.close()
reader1 = ExcelFile(path)
reader2 = ExcelFile(filename2)
rs1 = pd.read_excel(reader1, "test1", index_col=0)
rs2 = pd.read_excel(reader2, "test1", index_col=0)
tm.assert_frame_equal(rs1, rs2)
# Since the reader returns a datetime object for dates,
# we need to use df_expected to check the result.
tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_interval_no_labels(self, path):
# see gh-19242
#
# Test writing Interval without labels.
df = DataFrame(np.random.randint(-10, 10, size=(20, 1)), dtype=np.int64)
expected = df.copy()
df["new"] = pd.cut(df[0], 10)
expected["new"] = pd.cut(expected[0], 10).astype(str)
df.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_interval_labels(self, path):
# see gh-19242
#
# Test writing Interval with labels.
df = DataFrame(np.random.randint(-10, 10, size=(20, 1)), dtype=np.int64)
expected = df.copy()
intervals = pd.cut(
df[0], 10, labels=["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"]
)
df["new"] = intervals
expected["new"] = pd.Series(list(intervals))
df.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_timedelta(self, path):
# see gh-19242, gh-9155
#
# Test writing timedelta to xls.
df = DataFrame(
np.random.randint(-10, 10, size=(20, 1)), columns=["A"], dtype=np.int64
)
expected = df.copy()
df["new"] = df["A"].apply(lambda x: timedelta(seconds=x))
expected["new"] = expected["A"].apply(
lambda x: timedelta(seconds=x).total_seconds() / float(86400)
)
df.to_excel(path, "test1")
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_periodindex(self, tsframe, path):
xp = tsframe.resample("M", kind="period").mean()
xp.to_excel(path, "sht1")
reader = ExcelFile(path)
rs = pd.read_excel(reader, "sht1", index_col=0)
tm.assert_frame_equal(xp, rs.to_period("M"))
def test_to_excel_multiindex(self, merge_cells, frame, path):
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", columns=["A", "B"])
# round trip
frame.to_excel(path, "test1", merge_cells=merge_cells)
reader = ExcelFile(path)
df = pd.read_excel(reader, "test1", index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# GH13511
def test_to_excel_multiindex_nan_label(self, merge_cells, path):
df = pd.DataFrame(
{"A": [None, 2, 3], "B": [10, 20, 30], "C": np.random.sample(3)}
)
df = df.set_index(["A", "B"])
df.to_excel(path, merge_cells=merge_cells)
df1 = pd.read_excel(path, index_col=[0, 1])
tm.assert_frame_equal(df, df1)
# Test for Issue 11328. If column indices are integers, make
# sure they are handled correctly for either setting of
# merge_cells
def test_to_excel_multiindex_cols(self, merge_cells, frame, path):
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)])
frame.columns = new_cols_index
header = [0, 1]
if not merge_cells:
header = 0
# round trip
frame.to_excel(path, "test1", merge_cells=merge_cells)
reader = ExcelFile(path)
df = pd.read_excel(reader, "test1", header=header, index_col=[0, 1])
if not merge_cells:
fm = frame.columns.format(sparsify=False, adjoin=False, names=False)
frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
tm.assert_frame_equal(frame, df)
def test_to_excel_multiindex_dates(self, merge_cells, tsframe, path):
# try multiindex with dates
new_index = [tsframe.index, np.arange(len(tsframe.index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.index.names = ["time", "foo"]
tsframe.to_excel(path, "test1", merge_cells=merge_cells)
reader = ExcelFile(path)
recons = pd.read_excel(reader, "test1", index_col=[0, 1])
tm.assert_frame_equal(tsframe, recons)
assert recons.index.names == ("time", "foo")
def test_to_excel_multiindex_no_write_index(self, path):
# Test writing and re-reading a MI without the index. GH 5616.
# Initial non-MI frame.
frame1 = DataFrame({"a": [10, 20], "b": [30, 40], "c": [50, 60]})
# Add a MI.
frame2 = frame1.copy()
multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
frame2.index = multi_index
# Write out to Excel without the index.
frame2.to_excel(path, "test1", index=False)
# Read it back in.
reader = ExcelFile(path)
frame3 = pd.read_excel(reader, "test1")
# Test that it is the same as the initial frame.
tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self, path):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
df.to_excel(path, "test1", float_format="%.2f")
reader = ExcelFile(path)
result = pd.read_excel(reader, "test1", index_col=0)
expected = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(result, expected)
def test_to_excel_output_encoding(self, ext):
# Avoid mixed inferred_type.
df = DataFrame(
[["\u0192", "\u0193", "\u0194"], ["\u0195", "\u0196", "\u0197"]],
index=["A\u0192", "B"],
columns=["X\u0193", "Y", "Z"],
)
with ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
df.to_excel(filename, sheet_name="TestSheet", encoding="utf8")
result = pd.read_excel(filename, "TestSheet", encoding="utf8", index_col=0)
tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self, ext, path):
with ensure_clean("\u0192u." + ext) as filename:
try:
f = open(filename, "wb")
except UnicodeEncodeError:
pytest.skip("No unicode file names on this system")
else:
f.close()
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
df.to_excel(filename, "test1", float_format="%.2f")
reader = ExcelFile(filename)
result = pd.read_excel(reader, "test1", index_col=0)
expected = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(result, expected)
# def test_to_excel_header_styling_xls(self, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# pytest.skip
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# assert ["test1"] == wbk.sheet_names()
# ws = wbk.sheet_by_name('test1')
# assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# assert 1 == font[cell_xf.font_index].bold
# assert 1 == cell_xf.border.top_line_style
# assert 1 == cell_xf.border.right_line_style
# assert 1 == cell_xf.border.bottom_line_style
# assert 1 == cell_xf.border.left_line_style
# assert 2 == cell_xf.alignment.hor_align
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# pytest.skip
# if openpyxl.__version__ < '1.6.1':
# pytest.skip
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# assert ["test1"] == wbk.get_sheet_names()
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# assert cell.style.font.bold
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.top.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.right.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.bottom.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.left.border_style)
# assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# assert ws.cell(maddr).merged
# os.remove(filename)
@pytest.mark.parametrize("use_headers", [True, False])
@pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
@pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
def test_excel_010_hemstring(
self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path
):
def roundtrip(data, header=True, parser_hdr=0, index=True):
data.to_excel(path, header=header, merge_cells=merge_cells, index=index)
xf = ExcelFile(path)
return pd.read_excel(xf, xf.sheet_names[0], header=parser_hdr)
# Basic test.
parser_header = 0 if use_headers else None
res = roundtrip(DataFrame([0]), use_headers, parser_header)
assert res.shape == (1, 2)
assert res.iloc[0, 0] is not np.nan
# More complex tests with multi-index.
nrows = 5
ncols = 3
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of gh-2370 until sorted out in 0.11
df = mkdf(
nrows, ncols, r_idx_nlevels=r_idx_nlevels, c_idx_nlevels=c_idx_nlevels
)
# This if will be removed once multi-column Excel writing
# is implemented. For now fixing gh-9794.
if c_idx_nlevels > 1:
with pytest.raises(NotImplementedError):
roundtrip(df, use_headers, index=False)
else:
res = roundtrip(df, use_headers)
if use_headers:
assert res.shape == (nrows, ncols + r_idx_nlevels)
else:
# First row taken as columns.
assert res.shape == (nrows - 1, ncols + r_idx_nlevels)
# No NaNs.
for r in range(len(res.index)):
for c in range(len(res.columns)):
assert res.iloc[r, c] is not np.nan
def test_duplicated_columns(self, path):
# see gh-5235
df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B"])
df.to_excel(path, "test1")
expected = DataFrame(
[[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B.1"]
)
# By default, we mangle.
result = pd.read_excel(path, "test1", index_col=0)
tm.assert_frame_equal(result, expected)
# Explicitly, we pass in the parameter.
result = pd.read_excel(path, "test1", index_col=0, mangle_dupe_cols=True)
tm.assert_frame_equal(result, expected)
# see gh-11007, gh-10970
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"])
df.to_excel(path, "test1")
result = pd.read_excel(path, "test1", index_col=0)
expected = DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A.1", "B.1"]
)
tm.assert_frame_equal(result, expected)
# see gh-10982
df.to_excel(path, "test1", index=False, header=False)
result = pd.read_excel(path, "test1", header=None)
expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
tm.assert_frame_equal(result, expected)
msg = "Setting mangle_dupe_cols=False is not supported yet"
with pytest.raises(ValueError, match=msg):
pd.read_excel(path, "test1", header=None, mangle_dupe_cols=False)
def test_swapped_columns(self, path):
# Test for issue #5427.
write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})
write_frame.to_excel(path, "test1", columns=["B", "A"])
read_frame = pd.read_excel(path, "test1", header=0)
tm.assert_series_equal(write_frame["A"], read_frame["A"])
tm.assert_series_equal(write_frame["B"], read_frame["B"])
def test_invalid_columns(self, path):
# see gh-10982
write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
write_frame.to_excel(path, "test1", columns=["B", "C"])
expected = write_frame.reindex(columns=["B", "C"])
read_frame = pd.read_excel(path, "test1", index_col=0)
tm.assert_frame_equal(expected, read_frame)
with pytest.raises(
KeyError, match="'passes columns are not ALL present dataframe'"
):
write_frame.to_excel(path, "test1", columns=["C", "D"])
def test_comment_arg(self, path):
# see gh-18735
#
# Test the comment argument functionality to pd.read_excel.
# Create file to read in.
df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
df.to_excel(path, "test_c")
# Read file without comment arg.
result1 = pd.read_excel(path, "test_c", index_col=0)
result1.iloc[1, 0] = None
result1.iloc[1, 1] = None
result1.iloc[2, 1] = None
result2 = pd.read_excel(path, "test_c", comment="#", index_col=0)
tm.assert_frame_equal(result1, result2)
def test_comment_default(self, path):
# Re issue #18735
# Test the comment argument default to pd.read_excel
# Create file to read in
df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
df.to_excel(path, "test_c")
# Read file with default and explicit comment=None
result1 = pd.read_excel(path, "test_c")
result2 = pd.read_excel(path, "test_c", comment=None)
tm.assert_frame_equal(result1, result2)
def test_comment_used(self, path):
# see gh-18735
#
# Test the comment argument is working as expected when used.
# Create file to read in.
df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
df.to_excel(path, "test_c")
# Test read_frame_comment against manually produced expected output.
expected = DataFrame({"A": ["one", None, "one"], "B": ["two", None, None]})
result = pd.read_excel(path, "test_c", comment="#", index_col=0)
tm.assert_frame_equal(result, expected)
def test_comment_empty_line(self, path):
# Re issue #18735
# Test that pd.read_excel ignores commented lines at the end of file
df = DataFrame({"a": ["1", "#2"], "b": ["2", "3"]})
df.to_excel(path, index=False)
# Test that all-comment lines at EoF are ignored
expected = DataFrame({"a": [1], "b": [2]})
result = pd.read_excel(path, comment="#")
tm.assert_frame_equal(result, expected)
def test_datetimes(self, path):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
datetimes = [
datetime(2013, 1, 13, 1, 2, 3),
datetime(2013, 1, 13, 2, 45, 56),
datetime(2013, 1, 13, 4, 29, 49),
datetime(2013, 1, 13, 6, 13, 42),
datetime(2013, 1, 13, 7, 57, 35),
datetime(2013, 1, 13, 9, 41, 28),
datetime(2013, 1, 13, 11, 25, 21),
datetime(2013, 1, 13, 13, 9, 14),
datetime(2013, 1, 13, 14, 53, 7),
datetime(2013, 1, 13, 16, 37, 0),
datetime(2013, 1, 13, 18, 20, 52),
]
write_frame = DataFrame({"A": datetimes})
write_frame.to_excel(path, "Sheet1")
read_frame = pd.read_excel(path, "Sheet1", header=0)
tm.assert_series_equal(write_frame["A"], read_frame["A"])
def test_bytes_io(self, engine):
# see gh-7074
bio = BytesIO()
df = DataFrame(np.random.randn(10, 2))
# Pass engine explicitly, as there is no file path to infer from.
writer = ExcelWriter(bio, engine=engine)
df.to_excel(writer)
writer.save()
bio.seek(0)
reread_df = pd.read_excel(bio, index_col=0)
tm.assert_frame_equal(df, reread_df)
def test_write_lists_dict(self, path):
# see gh-8188.
df = DataFrame(
{
"mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
"numeric": [1, 2, 3.0],
"str": ["apple", "banana", "cherry"],
}
)
df.to_excel(path, "Sheet1")
read = pd.read_excel(path, "Sheet1", header=0, index_col=0)
expected = df.copy()
expected.mixed = expected.mixed.apply(str)
expected.numeric = expected.numeric.astype("int64")
tm.assert_frame_equal(read, expected)
def test_true_and_false_value_options(self, path):
# see gh-13347
df = pd.DataFrame([["foo", "bar"]], columns=["col1", "col2"])
expected = df.replace({"foo": True, "bar": False})
df.to_excel(path)
read_frame = pd.read_excel(
path, true_values=["foo"], false_values=["bar"], index_col=0
)
tm.assert_frame_equal(read_frame, expected)
def test_freeze_panes(self, path):
# see gh-15160
expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
expected.to_excel(path, "Sheet1", freeze_panes=(1, 1))
result = pd.read_excel(path, index_col=0)
tm.assert_frame_equal(result, expected)
def test_path_path_lib(self, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
result = tm.round_trip_pathlib(writer, reader, path="foo.{ext}".format(ext=ext))
tm.assert_frame_equal(result, df)
def test_path_local_path(self, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
result = tm.round_trip_pathlib(writer, reader, path="foo.{ext}".format(ext=ext))
tm.assert_frame_equal(result, df)
def test_merged_cell_custom_objects(self, merge_cells, path):
# see GH-27006
mi = MultiIndex.from_tuples(
[
(pd.Period("2018"), pd.Period("2018Q1")),
(pd.Period("2018"), pd.Period("2018Q2")),
]
)
expected = DataFrame(np.ones((2, 2)), columns=mi)
expected.to_excel(path)
result = pd.read_excel(path, header=[0, 1], index_col=0, convert_float=False)
# need to convert PeriodIndexes to standard Indexes for assert equal
expected.columns.set_levels(
[[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]],
level=[0, 1],
inplace=True,
)
expected.index = expected.index.astype(np.float64)
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("dtype", [None, object])
def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path):
# GH 27008, GH 7056
tz = tz_aware_fixture
data = pd.Timestamp("2019", tz=tz)
df = DataFrame([data], dtype=dtype)
with pytest.raises(ValueError, match="Excel does not support"):
df.to_excel(path)
data = data.to_pydatetime()
df = DataFrame([data], dtype=dtype)
with pytest.raises(ValueError, match="Excel does not support"):
df.to_excel(path)
class TestExcelWriterEngineTests:
@pytest.mark.parametrize(
"klass,ext",
[
pytest.param(_XlsxWriter, ".xlsx", marks=td.skip_if_no("xlsxwriter")),
pytest.param(_OpenpyxlWriter, ".xlsx", marks=td.skip_if_no("openpyxl")),
pytest.param(_XlwtWriter, ".xls", marks=td.skip_if_no("xlwt")),
],
)
def test_ExcelWriter_dispatch(self, klass, ext):
with ensure_clean(ext) as path:
writer = ExcelWriter(path)
if ext == ".xlsx" and td.safe_import("xlsxwriter"):
# xlsxwriter has preference over openpyxl if both installed
assert isinstance(writer, _XlsxWriter)
else:
assert isinstance(writer, klass)
def test_ExcelWriter_dispatch_raises(self):
with pytest.raises(ValueError, match="No engine"):
ExcelWriter("nothing")
def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
called_save = []
called_write_cells = []
class DummyClass(ExcelWriter):
called_save = False
called_write_cells = False
supported_extensions = ["xlsx", "xls"]
engine = "dummy"
def save(self):
called_save.append(True)
def write_cells(self, *args, **kwargs):
called_write_cells.append(True)
def check_called(func):
func()
assert len(called_save) >= 1
assert len(called_write_cells) >= 1
del called_save[:]
del called_write_cells[:]
with pd.option_context("io.excel.xlsx.writer", "dummy"):
register_writer(DummyClass)
writer = ExcelWriter("something.xlsx")
assert isinstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
check_called(lambda: df.to_excel("something.xlsx"))
check_called(lambda: df.to_excel("something.xls", engine="dummy"))
@td.skip_if_no("xlrd")
@td.skip_if_no("openpyxl")
@pytest.mark.skipif(not PY36, reason="requires fspath")
class TestFSPath:
def test_excelfile_fspath(self):
with tm.ensure_clean("foo.xlsx") as path:
df = DataFrame({"A": [1, 2]})
df.to_excel(path)
xl = ExcelFile(path)
result = os.fspath(xl)
assert result == path
def test_excelwriter_fspath(self):
with tm.ensure_clean("foo.xlsx") as path:
writer = ExcelWriter(path)
assert os.fspath(writer) == str(path)
| 36.251563 | 88 | 0.569329 |
793f134743c6e9f10011a85535c64afbd76dafc4 | 9,429 | py | Python | models/layers/mesh_pool.py | KathaRies/MeshCNN | be73736890c49d175cde80830a28352946e23611 | [
"MIT"
] | null | null | null | models/layers/mesh_pool.py | KathaRies/MeshCNN | be73736890c49d175cde80830a28352946e23611 | [
"MIT"
] | null | null | null | models/layers/mesh_pool.py | KathaRies/MeshCNN | be73736890c49d175cde80830a28352946e23611 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from threading import Thread
from MeshCNN.models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
class MeshPool(nn.Module):
def __init__(self, target, multi_thread=False):
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = None
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
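    # Overview: ``forward`` pools each mesh in the batch down to
    # ``__out_target`` edges.  ``__pool_main`` pops edge ids from a heap built
    # by ``__build_queue`` over the edge features and collapses them one by
    # one; ``__pool_edge`` skips collapses that touch a boundary or would
    # break the one-ring, and ``MeshUnion`` tracks which original edges were
    # merged so ``rebuild_features`` can produce the pooled feature tensor.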
def forward(self, fe, meshes):
self.__updated_fe = [[] for _ in range(len(meshes))]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
# iterate over batch
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(
Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
out_features = torch.cat(self.__updated_fe).view(
len(meshes), -1, self.__out_target)
return out_features
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
queue = self.__build_queue(
self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)
# recycle = []
# last_queue_len = len(queue)
last_count = mesh.edges_count + 1
        mask = np.ones(mesh.edges_count, dtype=bool)  # builtin bool: the np.bool alias was removed in newer NumPy
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
i = 0
while mesh.edges_count > self.__out_target:
i += 1
value, edge_id = heappop(queue)
edge_id = int(edge_id)
if mask[edge_id]:
self.__pool_edge(mesh, edge_id, mask, edge_groups)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(
self.__fe[mesh_index], mask, self.__out_target)
self.__updated_fe[mesh_index] = fe
def __pool_edge(self, mesh, edge_id, mask, edge_groups):
if self.has_boundaries(mesh, edge_id):
return False
elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\
and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
and self.__is_one_ring_valid(mesh, edge_id):
self.__merge_edges[0] = self.__pool_side(
mesh, edge_id, mask, edge_groups, 0)
self.__merge_edges[1] = self.__pool_side(
mesh, edge_id, mask, edge_groups, 2)
mesh.merge_vertices(edge_id)
mask[edge_id] = False
MeshPool.__remove_group(mesh, edge_groups, edge_id)
mesh.edges_count -= 1
return True
else:
return False
def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
if mesh.edges_count <= self.__out_target:
return False
invalid_edges = MeshPool.__get_invalids(
mesh, edge_id, edge_groups, side)
while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
if mesh.edges_count <= self.__out_target:
return False
if self.has_boundaries(mesh, edge_id):
return False
invalid_edges = self.__get_invalids(
mesh, edge_id, edge_groups, side)
return True
@staticmethod
def has_boundaries(mesh, edge_id):
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __is_one_ring_valid(mesh, edge_id):
v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))
v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 2
def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
self.__redirect_edges(mesh, key_a, side_a - side_a %
2, other_keys_b[0], mesh.sides[key_b, other_side_b])
self.__redirect_edges(mesh, key_a, side_a - side_a %
2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])
MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)
MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)
mask[key_b] = False
MeshPool.__remove_group(mesh, edge_groups, key_b)
mesh.remove_edge(key_b)
mesh.edges_count -= 1
return key_a
@staticmethod
def __get_invalids(mesh, edge_id, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
if len(shared_items) == 0:
return []
else:
assert (len(shared_items) == 2)
middle_edge = other_keys_a[shared_items[0]]
update_key_a = other_keys_a[1 - shared_items[0]]
update_key_b = other_keys_b[1 - shared_items[1]]
update_side_a = mesh.sides[key_a,
other_side_a + 1 - shared_items[0]]
update_side_b = mesh.sides[key_b,
other_side_b + 1 - shared_items[1]]
MeshPool.__redirect_edges(
mesh, edge_id, side, update_key_a, update_side_a)
MeshPool.__redirect_edges(
mesh, edge_id, side + 1, update_key_b, update_side_b)
MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(
update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))
MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a)
MeshPool.__union_groups(
mesh, edge_groups, middle_edge, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b)
MeshPool.__union_groups(
mesh, edge_groups, middle_edge, update_key_b)
return [key_a, key_b, middle_edge]
@staticmethod
def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
mesh.gemm_edges[edge_a_key, side_a] = edge_b_key
mesh.gemm_edges[edge_b_key, side_b] = edge_a_key
mesh.sides[edge_a_key, side_a] = side_b
mesh.sides[edge_b_key, side_b] = side_a
@staticmethod
def __get_shared_items(list_a, list_b):
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_other_side(side):
return side + 1 - 2 * (side % 2)
@staticmethod
def __get_face_info(mesh, edge_id, side):
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
other_side_a = (side_a - (side_a % 2) + 2) % 4
other_side_b = (side_b - (side_b % 2) + 2) % 4
other_keys_a = [mesh.gemm_edges[key_a, other_side_a],
mesh.gemm_edges[key_a, other_side_a + 1]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b],
mesh.gemm_edges[key_b, other_side_b + 1]]
return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
@staticmethod
def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
vertex = set(mesh.edges[invalid_edges[0]])
for edge_key in invalid_edges:
vertex &= set(mesh.edges[edge_key])
mask[edge_key] = False
MeshPool.__remove_group(mesh, edge_groups, edge_key)
mesh.edges_count -= 3
vertex = list(vertex)
assert(len(vertex) == 1)
mesh.remove_vertex(vertex[0])
def __build_queue(self, features, edges_count):
# delete edges with smallest norm
squared_magnitude = torch.sum(features * features, 0)
if squared_magnitude.shape[-1] != 1:
squared_magnitude = squared_magnitude.unsqueeze(-1)
edge_ids = torch.arange(
edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
@staticmethod
def __union_groups(mesh, edge_groups, source, target):
edge_groups.union(source, target)
mesh.union_groups(source, target)
@staticmethod
def __remove_group(mesh, edge_groups, index):
edge_groups.remove_group(index)
mesh.remove_group(index)
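# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal sketch of how MeshPool is typically wired into an edge-based encoder.
# `fe` (a batch x channels x edges feature tensor) and `meshes` (the matching list of
# Mesh objects from the MeshCNN data loader) are assumed inputs, not defined here.
#
#   pool = MeshPool(target=600)      # collapse edges until 600 remain per mesh
#   fe_pooled = pool(fe, meshes)     # -> tensor of shape (batch, channels, 600)
#
# Internally each call builds a min-heap over squared edge-feature norms
# (__build_queue) and pops/collapses edges until mesh.edges_count == target,
# so `target` must not exceed the edge count of the smallest mesh in the batch.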
| 42.09375 | 99 | 0.616608 |
793f14cc4fbfc1d4bf66467bdeab0c9602c83876 | 8,488 | py | Python | python/plugin/grad_shafranov.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | ["MIT"] | 1 | 2021-10-21T17:15:26.000Z | 2021-10-21T17:15:26.000Z | python/plugin/grad_shafranov.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | ["MIT"] | null | null | null | python/plugin/grad_shafranov.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
import numpy as np
from sympy import *
from sympy.matrices import *
from scipy.optimize import root
from matplotlib import pyplot as plt
from scipy.integrate import quad
#from poisson_nonlin import poisson_picard as PDE_picard
#__all__ = ['genPoints', 'genFigure', 'genDomain', 'picard', 'picardTwoGrids', 'testcase']
sqrt = np.sqrt
abs = np.abs; sin = np.sin ; cos = np.cos ; exp = np.exp ; sqrt = np.sqrt
pi = np.pi; atan = np.arctan2 ; cosh = np.cosh
sech = lambda x: 1./cosh(x)
# ------------------------------------------------------
# ... generate the boundary in the clockwise sense
def genPoints(R0=1., eps=0.32, k=1.7, d=0.33, mbnd=1500, m=500 \
# needed for the newton algo to converge
, rmin=0.67, rmax=1.33, delta=0.05 \
, dd = 1.e-3 \
, PLOT=False \
):
print ("eps, k, d = ", eps, k, d)
if (R0==1.) and (eps==0.32):
d1, d2, d3 = [0.0753850296600659, -0.206294962187880, -0.0314337072805334]
else:
d1, d2, d3 = compute_ds(eps, k, d)
print ("d1, d2, d3 = ", d1, d2, d3)
psi = lambda r,z: r**4/8 + d1 + d2 * r**2 + d3 * (r**4 - 4 * (r*z)**2)
psidr = lambda r,z: 2*d2*r + d3*(4*r**3 - 8*r*z**2) + r**3/2
psidz = lambda r,z: -8*d3*r**2*z
# .....................................
rgrid = list(np.linspace(rmin, rmin+delta, mbnd)[:-1])
rgrid += list(np.linspace(rmin+delta, rmax-delta, m))
rgrid += list(np.linspace(rmax-delta, rmax, mbnd)[1:])
rgrid = np.array(rgrid)
zgrid = np.zeros_like(rgrid)
# ...
from pigasus.utils.impeqpy import impeqpy
import pigasus.utils.impeqpy as impe
# print "============================"
# print impe.__file__
# print "============================"
level = 0.0
imp=impeqpy(tol=1.e-9, maxniter = 300, verbose=False)
imp.solve2Dx(psi,psidz,level,rgrid,zgrid)
list_r = [R0-eps] ; list_z = [0.]
for (r,z) in zip(rgrid, zgrid):
if (not np.isnan(r)) and (not np.isnan(z)):
list_r.append(r) ; list_z.append(z)
list_r.append(R0+eps) ; list_z.append(0.)
# ...
# .....................................
# .....................................
# def Z2(R):
# v = 0.
# v += d1/(4.*d3) * 1./R**2
# v += d2/(4.*d3)
# v += (1./8 + d3) / (4.*d3) * R**2
# return v
#
# def Z_plus(R):
# return np.sqrt(Z2(R))
#
# def Z_minus(R):
# return - np.sqrt(Z2(R))
#
# sol = root(Z2, rmin, jac=False)
# Rmin = sol.x
#
# sol = root(Z2, rmax, jac=False)
# Rmax = sol.x
#
# def dZdR(R):
# gamma = (1./8 + d3) / (4.*d3)
# alpha = d1/(4.*d3)
# Z = Z_plus(R)
# v = gamma * R - alpha / R**3
# v /= Z
# return v
#
# def measure(R):
# meas = dZdR(R)**2
# # meas += 1.
# return meas
#
# def density(ti,tj):
# return quad(measure, ti, tj)
#
# def adaptive_mesh(n, xmin, xmax, amin, amax):
# R = np.linspace(xmin, xmax, n)
# D = []
# for a,b in zip(R[:-1], R[1:]):
# D.append(density(a,b)[0])
#
# D = np.asarray(D)
# m_total = D.sum()
#
# M = np.zeros(n-1)
# for i in range(0,n-1):
# v = D[0:i]
# M[i] = v.sum() / m_total
#
# Rnew = (amax-amin)*M+amin
# return Rnew
#
# R = []
# R += list(adaptive_mesh(mbnd, Rmin+dd, Rmin*(1.+delta), Rmin, Rmin*(1.+delta)))
# R += list(adaptive_mesh(m, Rmin*(1.+delta), Rmax*(1.-delta), Rmin*(1.+delta), Rmax*(1.-delta)))
# R += list(adaptive_mesh(mbnd, Rmax*(1.-delta), Rmax-dd, Rmax*(1.-delta), Rmax))
# R = np.array(R)
#
# Z = Z_plus(R)
# R = np.array([Rmin] + list(R) + [Rmax])
# Z = np.array([ 0.] + list(Z) + [ 0.])
#
# list_r = R
# list_z = Z
# .....................................
# ... y < 0 part
rgrid = np.array(list_r); zgrid = np.array(list_z)
_rgrid = rgrid[::-1]
_zgrid = -zgrid[::-1]
# ...
# ...
if PLOT:
import matplotlib.pyplot as plt
plt.plot(rgrid, zgrid, '.b')
plt.plot(_rgrid, _zgrid, '.k')
tx = np.linspace(0.6,1.4, 300)
ty = np.linspace(-0.6,0.6, 300)
x,y = np.meshgrid(tx,ty)
u = psi(x,y)
levels = np.linspace(-0.04, 0.0, 100)
CS = plt.contourf(x,y,u, levels)
plt.colorbar()
plt.show()
genFigure(d=[d1,d2,d3], origin="upper")
# ...
r = list(rgrid) + list(_rgrid)
z = list(zgrid) + list(_zgrid)
return r,z
# ...
# ------------------------------------------------------
# ------------------------------------------------------
def genFigure(d=None, origin="upper"):
#origin = 'lower'
if d is None:
d1, d2, d3 = [ 0.07538503, -0.20629496, -0.03143371]
else:
d1,d2,d3=d[0:3]
import matplotlib.pyplot as plt
# ITER and ASDEX-Upgrade
tx = np.linspace(0.6,1.4, 300)
ty = np.linspace(-0.6,0.6, 300)
levels = np.linspace(-0.04, 0.0, 100)
# # JET
# levels = np.linspace(-0.045, 0., 100)
# tx = np.linspace(0.6,1.4, 300)
# ty = np.linspace(-0.65,0.65, 300)
x,y = np.meshgrid(tx,ty)
psi = lambda r,z: r**4/8 + d1 + d2 * r**2 + d3 * (r**4 - 4 * (r*z)**2)
u = psi(x,y)
#plt.contourf(x,y,u) ; plt.colorbar(); plt.show()
CS = plt.contourf(x,y,u, levels
# , colors = ('r', 'g', 'b') \
# , origin=origin \
# , extend='both' \
)
plt.colorbar()
    # note: the 'hold' keyword (deprecated in matplotlib 2.0 and later removed) is dropped here
    CS2 = plt.contour(CS, levels=CS.levels[::10],
                      colors='k',
                      origin=origin,
                      linewidths=(1,))
plt.show()
# plt.pcolor(x,y,u, vmin=-0.04, vmax=0.01) ; plt.colorbar() ; plt.show()
# ------------------------------------------------------
# ------------------------------------------------------
class genDomain(object):
def __init__(self, R0=1, eps=0.32, mbnd=1500, m=500 \
# needed for the newton algo to converge
, rmin=0.67, rmax=1.33, delta=0.05 \
, PLOT=False):
r,z = genPoints(R0=R0, eps=eps, mbnd=mbnd, m=m \
, rmin=rmin, rmax=rmax, delta=delta \
, PLOT=PLOT\
)
self.boundary = [r,z]
# ------------------------------------------------------
def compute_ds(epsilon, kappa, delta):
# ... OLD VERSION
# from scipy import matrix
# from scipy.linalg import inv
# M = np.zeros((3,3))
# M[:,0] = 1.
# M[0,1] = (1+eps)**2
# M[1,1] = (1-eps)**2
# M[2,1] = (1-d*eps)**2
# M[0,2] = (1+eps)**4
# M[1,2] = (1-eps)**4
# M[2,2] = (1-d*eps)**4 - 4 * ( (1-d*eps)*k*eps )**2
# Y = np.zeros(3)
# Y[0] = -(1./8) * (1+eps)**4
# Y[1] = -(1./8) * (1-eps)**4
# Y[2] = -(1./8) * (1-d*eps)**4
# A = matrix(M)
# invA = inv(A)
# X = invA.dot(Y)
# ...
def compute_M():
e = Symbol('e')
k = Symbol('k')
d = Symbol('d')
A = Matrix([ [1, (1+e)**2, (1+e)**4] \
, [1, (1-e)**2, (1-e)**4] \
, [1,(1-d*e)**2,(1-d*e)**4 - 4.*((1.-d*e)*k*e)**2] \
])
Ainv = A.inv()
M = lambdify((e,k,d), Ainv)
return M
M = compute_M()
Y = np.zeros(3)
Y[0] = -(1./8) * (1+epsilon)**4
Y[1] = -(1./8) * (1-epsilon)**4
Y[2] = -(1./8) * (1-delta * epsilon)**4
D= M(epsilon, kappa, delta).dot(Y)
d1 = D[0,0]
d2 = D[0,1]
d3 = D[0,2]
return d1, d2, d3
class testcase(object):
def __init__(self, TEST):
initTEST = getattr(self, 'initTEST%d' % TEST)
initTEST()
def initTEST1(self):
"""
ITER relevant parameters
d1, d2, d3 = [ 0.0753850296600659 -0.206294962187880 -0.0314337072805334]
"""
d1, d2, d3 = compute_ds(eps=0.32, k=1.7, d=0.33)
# ...
F = lambda psi,x,y : x**2
psi = lambda r,z: r**4/8 + d1 + d2 * r**2 + d3 * (r**4 - 4 * (r*z)**2)
# ...
self.F = F
#if __name__ == '__main__':
# from caid.cad_geometry import square
# from matplotlib import pylab as plt
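# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal sketch, assuming the ITER-like defaults used above, of how the helpers
# fit together: compute_ds solves the 3x3 system for the psi coefficients and
# genPoints/genDomain sample the psi = 0 boundary curve.
#
#   d1, d2, d3 = compute_ds(0.32, 1.7, 0.33)            # epsilon, kappa, delta
#   r, z = genPoints(R0=1., eps=0.32, k=1.7, d=0.33)    # boundary point lists
#   domain = genDomain(R0=1., eps=0.32)                 # boundary kept in domain.boundary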
| 28.38796 | 100 | 0.441447 |
793f15eaf6f3343a2197c422e4d07ee2591e0b19 | 2,797 | py | Python | setup.py | dougPhilips/python-seleniumpm | 4ddff760cd4486bfd48efdb77e33fb4574dc0e5d | ["Apache-2.0"] | 2 | 2020-01-13T14:41:08.000Z | 2020-01-29T10:21:04.000Z | setup.py | dougPhilips/python-seleniumpm | 4ddff760cd4486bfd48efdb77e33fb4574dc0e5d | ["Apache-2.0"] | 1 | 2018-05-29T14:47:55.000Z | 2018-05-29T14:47:55.000Z | setup.py | dougPhilips/python-seleniumpm | 4ddff760cd4486bfd48efdb77e33fb4574dc0e5d | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import re
import sys
from codecs import open
# from distutils.core import setup
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = ['seleniumpm', 'seleniumpm.webelements', 'seleniumpm.examples', 'seleniumpm.examples.widgets']
requires = ['selenium~=2.53.6']
test_requirements = ['pytest>=2.8.0', 'pytest-httpbin==0.0.7', 'pytest-cov']
with open('seleniumpm/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
readme_file = 'README.rst'
with open(readme_file, 'r', 'utf-8') as f:
readme = f.read()
with open('HISTORY.rst', 'r', 'utf-8') as f:
history = f.read()
setup(
name='seleniumpm',
version=version,
description='Selenium Pagemodel implementation for Python.',
long_description=readme + "\n\n" + history,
author='Peter Salas',
author_email='[email protected]',
url='https://github.com/gradeawarrior/python-seleniumpm',
packages=packages,
package_data={'': ['LICENSE', 'NOTICE'], 'seleniumpm': ['*.pem']},
package_dir={'seleniumpm': 'seleniumpm'},
include_package_data=True,
install_requires=requires,
license='Apache 2.0',
keywords=['testing', 'seleniumpm', 'selenium', 'pagemodel', 'pageobjectmodel'],
zip_safe=False,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
),
cmdclass={'test': PyTest},
tests_require=test_requirements,
)
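# --- Illustrative usage note (added for clarity; not part of the original file) ---
# The PyTest command registered via cmdclass above lets the suite run through
# setuptools; the "-k smoke" expression below is only a placeholder:
#
#   python setup.py test --pytest-args="-k smoke"
#
# --pytest-args (defined in PyTest.user_options) is forwarded to pytest.main().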
| 31.426966 | 105 | 0.643547 |
793f166fb7139249a2431c93cf6b711a8cf1984c | 4,726 | py | Python | awswrangler/_utils.py | mbeacom/aws-data-wrangler | 2e018ec3d0d2eaf073f759be65cfd75ee2bdce10 | ["Apache-2.0"] | null | null | null | awswrangler/_utils.py | mbeacom/aws-data-wrangler | 2e018ec3d0d2eaf073f759be65cfd75ee2bdce10 | ["Apache-2.0"] | null | null | null | awswrangler/_utils.py | mbeacom/aws-data-wrangler | 2e018ec3d0d2eaf073f759be65cfd75ee2bdce10 | ["Apache-2.0"] | null | null | null |
"""Internal (private) Utilities Module."""
import logging
import math
import os
from typing import Any, Dict, Generator, List, Optional, Tuple
import boto3 # type: ignore
import botocore.config # type: ignore
import numpy as np # type: ignore
import psycopg2 # type: ignore
import s3fs # type: ignore
logger: logging.Logger = logging.getLogger(__name__)
def ensure_session(session: Optional[boto3.Session] = None) -> boto3.Session:
"""Ensure that a valid boto3.Session will be returned."""
if session is not None:
return session
return boto3.Session()
def client(service_name: str, session: Optional[boto3.Session] = None) -> boto3.client:
"""Create a valid boto3.client."""
return ensure_session(session=session).client(
service_name=service_name, use_ssl=True, config=botocore.config.Config(retries={"max_attempts": 15})
)
def parse_path(path: str) -> Tuple[str, str]:
"""Split a full S3 path in bucket and key strings.
's3://bucket/key' -> ('bucket', 'key')
Parameters
----------
path : str
S3 path (e.g. s3://bucket/key).
Returns
-------
Tuple[str, str]
Tuple of bucket and key strings
Examples
--------
>>> from awswrangler._utils import parse_path
>>> bucket, key = parse_path('s3://bucket/key')
"""
parts = path.replace("s3://", "").split("/", 1)
bucket: str = parts[0]
key: str = ""
if len(parts) == 2:
key = key if parts[1] is None else parts[1]
return bucket, key
def ensure_cpu_count(use_threads: bool = True) -> int:
"""Get the number of cpu cores to be used.
Note
----
In case of `use_threads=True` the number of process that could be spawned will be get from os.cpu_count().
Parameters
----------
use_threads : bool
True to enable multi-core utilization, False to disable.
Returns
-------
int
Number of cpu cores to be used.
Examples
--------
>>> from awswrangler._utils import ensure_cpu_count
>>> ensure_cpu_count(use_threads=True)
4
>>> ensure_cpu_count(use_threads=False)
1
"""
cpus: int = 1
if use_threads is True:
cpu_cnt: Optional[int] = os.cpu_count()
if cpu_cnt is not None:
cpus = cpu_cnt if cpu_cnt > cpus else cpus
return cpus
def chunkify(lst: List[Any], num_chunks: int = 1, max_length: Optional[int] = None) -> List[List[Any]]:
"""Split a list in a List of List (chunks) with even sizes.
Parameters
----------
lst: List
        List of anything to be split.
num_chunks: int, optional
Maximum number of chunks.
max_length: int, optional
Max length of each chunk. Has priority over num_chunks.
Returns
-------
List[List[Any]]
List of List (chunks) with even sizes.
Examples
--------
>>> from awswrangler._utils import chunkify
>>> chunkify(list(range(13)), num_chunks=3)
[[0, 1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
>>> chunkify(list(range(13)), max_length=4)
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
"""
n: int = num_chunks if max_length is None else int(math.ceil((float(len(lst)) / float(max_length))))
np_chunks = np.array_split(lst, n)
return [arr.tolist() for arr in np_chunks if len(arr) > 0]
def get_fs(
session: Optional[boto3.Session] = None, s3_additional_kwargs: Optional[Dict[str, str]] = None
) -> s3fs.S3FileSystem:
"""Build a S3FileSystem from a given boto3 session."""
fs: s3fs.S3FileSystem = s3fs.S3FileSystem(
anon=False,
use_ssl=True,
default_cache_type="none",
default_fill_cache=False,
        default_block_size=134_217_728,  # 128 MB (128 * 2**20)
config_kwargs={"retries": {"max_attempts": 15}},
session=ensure_session(session=session)._session, # pylint: disable=protected-access
s3_additional_kwargs=s3_additional_kwargs,
use_listings_cache=False,
skip_instance_cache=True,
)
fs.invalidate_cache()
fs.clear_instance_cache()
return fs
def empty_generator() -> Generator:
"""Empty Generator."""
yield from ()
def ensure_postgresql_casts():
"""Ensure that psycopg2 will handle some data types right."""
psycopg2.extensions.register_adapter(bytes, psycopg2.Binary)
typecast_bytea = lambda data, cur: None if data is None else bytes(psycopg2.BINARY(data, cur)) # noqa
BYTEA = psycopg2.extensions.new_type(psycopg2.BINARY.values, "BYTEA", typecast_bytea)
psycopg2.extensions.register_type(BYTEA)
def get_directory(path: str) -> str:
"""Extract directory path."""
return path.rsplit(sep="/", maxsplit=1)[0] + "/"
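# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A small example combining the helpers above; bucket and key names are invented and
# nothing here talks to AWS unless the commented S3 lines are enabled.
#
#   bucket, key = parse_path("s3://my-bucket/folder/file.parquet")  # ("my-bucket", "folder/file.parquet")
#   prefix = get_directory("s3://my-bucket/folder/file.parquet")    # "s3://my-bucket/folder/"
#   batches = chunkify(list(range(10)), max_length=4)               # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
#   # fs = get_fs()                                   # S3FileSystem with retry config
#   # with fs.open(f"{bucket}/{key}", "rb") as f:     # standard s3fs/fsspec open()
#   #     data = f.read()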
| 29.17284 | 110 | 0.636479 |
793f16d137e3d92249c3ab3f63fd587fc84dc916 | 10,479 | py | Python | StandardDataSets/1_5/collada/library_formulas/formula/multi_formula/multi_formula.py | KhronosGroup/COLLADA-CTS | 61f2a560cbb2a06ee62da8025241f6b08d06bfd9 | ["MIT"] | 20 | 2015-03-19T08:02:57.000Z | 2020-10-16T15:16:11.000Z | StandardDataSets/1_5/collada/library_formulas/formula/multi_formula/multi_formula.py | Acidburn0zzz/COLLADA-CTS | 39a36188cf8710bbc003df43ed70b965eb4386bd | ["MIT"] | 4 | 2017-04-19T18:42:05.000Z | 2017-06-17T03:03:28.000Z | StandardDataSets/1_5/collada/library_formulas/formula/multi_formula/multi_formula.py | Acidburn0zzz/COLLADA-CTS | 39a36188cf8710bbc003df43ed70b965eb4386bd | ["MIT"] | 10 | 2015-03-26T02:52:24.000Z | 2022-02-24T08:43:48.000Z |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for information
# on the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLstRoot = ['library_formulas', 'formula']
rootAttrName = 'id'
tagLstNewparam = ['library_formulas', 'formula', 'newparam']
newparamAtttName = 'sid'
tagLstTarget = ['library_formulas', 'formula', 'target', 'param']
tagLstTechnique = ['library_formulas', 'formula', 'technique_common']
tagLstNewParamDataType = [['newparam', 'float'], ['newparam', 'int']]
# formula 1
rootAttrVal1 = 'pythagorean'
newparamAttrVal1 = ['hypotenuse', 'side1', 'side2']
tagLstCsymbol1 = ['library_formulas', 'formula', 'technique_common', 'math', 'apply', 'apply', 'apply', 'csymbol']
newparamCount1 = [1, 2]
# formula 2
rootAttrVal2 = 'pitch'
newparamAttrVal2 = ['target1', 'value', 'pitch']
tagLstCsymbol2 = [['library_formulas', 'formula', 'technique_common', 'math', 'apply', 'apply', 'csymbol'],
['library_formulas', 'formula', 'technique_common', 'math', 'apply', 'csymbol']]
newparamCount2 = [3, 0]
class SimpleJudgingObject:
def __init__(self, _tagLstRoot, _rootAttrName, _tagLstNewparam, _newparamAtttName, _tagLstTarget, _tagLstTechnique, _tagLstNewParamDataType,
_rootAttrVal1, _newparamAttrVal1, _tagLstCsymbol1, _newparamCount1,
_rootAttrVal2, _newparamAttrVal2, _tagLstCsymbol2, _newparamCount2):
self.tagListRoot = _tagLstRoot
self.rootAttrName = _rootAttrName
self.tagListNewparam = _tagLstNewparam
self.newparamAtttName = _newparamAtttName
self.tagListTarget = _tagLstTarget
self.tagListTechnique = _tagLstTechnique
self.tagListNewparamDataType = _tagLstNewParamDataType
self.rootAttrVal1 = _rootAttrVal1
self.newparamAttrVal1 = _newparamAttrVal1
self.tagListCsymbol1 = _tagLstCsymbol1
self.newparamCount1 = _newparamCount1
self.rootAttrVal2 = _rootAttrVal2
self.newparamAttrVal2 = _newparamAttrVal2
self.tagListCsymbol2 = _tagLstCsymbol2
self.newparamCount2 = _newparamCount2
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
# Add the name space to the tags
def AddNSToTagList(self, context):
nameSpace = self.__assistant.GetNameSpace(context, self.tagListTechnique)
if (nameSpace != None):
for i in range(3, len(self.tagListCsymbol1)):
self.tagListCsymbol1[i] = nameSpace + ":" + self.tagListCsymbol1[i]
for i in range(3, len(self.tagListCsymbol2[0])):
self.tagListCsymbol2[0][i] = nameSpace + ":" + self.tagListCsymbol2[0][i]
for i in range(3, len(self.tagListCsymbol2[1])):
self.tagListCsymbol2[1][i] = nameSpace + ":" + self.tagListCsymbol2[1][i]
def CheckFormula1(self, context):
# check that the newparam attributes are preserved
if ( (not self.__assistant.AttributeCheck(context, self.tagListNewparam, self.newparamAtttName, self.newparamAttrVal1[0])) or
(not self.__assistant.AttributeCheck(context, self.tagListNewparam, self.newparamAtttName, self.newparamAttrVal1[1])) or
(not self.__assistant.AttributeCheck(context, self.tagListNewparam, self.newparamAtttName, self.newparamAttrVal1[2])) ):
return False
# check that the target data is preserved
if ( not self.__assistant.ElementDataCheck(context, self.tagListTarget, self.newparamAttrVal1[0], "string") ):
return False
# check that the newparam data types are preserved
if ( (self.__assistant.GetElementCount(self.tagListRoot, self.rootAttrName, self.rootAttrVal1, self.tagListNewparamDataType[0]) != self.newparamCount1[0]) ):
context.Log("FAILED: newparam <float> type not preserved")
return False
else:
context.Log("PASSED: newparam <float> type is preserved")
if ( (self.__assistant.GetElementCount(self.tagListRoot, self.rootAttrName, self.rootAttrVal1, self.tagListNewparamDataType[1]) != self.newparamCount1[1]) ):
context.Log("FAILED: newparam <int> type not preserved")
return False
else:
context.Log("PASSED: newparam <int> type is preserved")
# check that the csymbol data is preserved
if ( (not self.__assistant.ElementDataCheck(context, self.tagListCsymbol1, self.newparamAttrVal1[1], "string")) or
(not self.__assistant.ElementDataCheck(context, self.tagListCsymbol1, self.newparamAttrVal1[2], "string")) ):
return False
return True
def CheckFormula2(self, context):
# check that the newparam attributes are preserved
if ( (not self.__assistant.AttributeCheck(context, self.tagListNewparam, self.newparamAtttName, self.newparamAttrVal2[0])) or
(not self.__assistant.AttributeCheck(context, self.tagListNewparam, self.newparamAtttName, self.newparamAttrVal2[1])) or
(not self.__assistant.AttributeCheck(context, self.tagListNewparam, self.newparamAtttName, self.newparamAttrVal2[2])) ):
return False
# check that the target data is preserved
if ( not self.__assistant.ElementDataCheck(context, self.tagListTarget, self.newparamAttrVal2[0], "string") ):
return False
# check that the newparam data types are preserved
if ( (self.__assistant.GetElementCount(self.tagListRoot, self.rootAttrName, self.rootAttrVal2, self.tagListNewparamDataType[0]) != self.newparamCount2[0]) ):
context.Log("FAILED: newparam <float> type not preserved")
return False
else:
context.Log("PASSED: newparam <float> type is preserved")
if ( (self.__assistant.GetElementCount(self.tagListRoot, self.rootAttrName, self.rootAttrVal2, self.tagListNewparamDataType[1]) != self.newparamCount2[1]) ):
context.Log("FAILED: newparam <int> type not preserved")
return False
else:
context.Log("PASSED: newparam <int> type is preserved")
# check that the csymbol data is preserved
if ( (not self.__assistant.ElementDataCheck(context, self.tagListCsymbol2[0], self.newparamAttrVal2[1], "string")) or
(not self.__assistant.ElementDataCheck(context, self.tagListCsymbol2[1], self.newparamAttrVal2[2], "string")) ):
return False
return True
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
    # To pass intermediate you need to pass basic; this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
    # To pass advanced you need to pass intermediate; this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
# if superior fails, no point in further checking
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
self.AddNSToTagList(context)
if (self.CheckFormula1(context) == False):
self.status_exemplary = False
return self.status_exemplary
self.status_exemplary = self.CheckFormula2(context)
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLstRoot, rootAttrName, tagLstNewparam, newparamAtttName, tagLstTarget, tagLstTechnique, tagLstNewParamDataType,
rootAttrVal1, newparamAttrVal1, tagLstCsymbol1, newparamCount1,
rootAttrVal2, newparamAttrVal2, tagLstCsymbol2, newparamCount2);
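# --- Illustrative note (added for clarity; not part of the original file) ---
# The CTS framework (not shown here) imports this module, looks up `judgingObject`,
# and calls the three judge methods in badge order with a judgement context, roughly:
#
#   if judgingObject.JudgeBaseline(context):
#       if judgingObject.JudgeSuperior(context):
#           judgingObject.JudgeExemplary(context)
#
# which matches the way JudgeExemplary short-circuits on self.status_superior above.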
| 54.578125 | 467 | 0.672583 |
793f1705a54f2706f329ff204674ee2afef842ae | 9,690 | py | Python | fhir/resources/STU3/endpoint.py | chgl/fhir.resources | 35b22314642640c0b25960ab5b2855e7c51749ef | ["BSD-3-Clause"] | null | null | null | fhir/resources/STU3/endpoint.py | chgl/fhir.resources | 35b22314642640c0b25960ab5b2855e7c51749ef | ["BSD-3-Clause"] | null | null | null | fhir/resources/STU3/endpoint.py | chgl/fhir.resources | 35b22314642640c0b25960ab5b2855e7c51749ef | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Endpoint
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import domainresource, fhirtypes
class Endpoint(domainresource.DomainResource):
    """Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; it is instead used to enable the Extensibility
    feature for FHIR Primitive Data Types.
The technical details of an endpoint that can be used for electronic
services.
The technical details of an endpoint that can be used for electronic
services, such as for web services providing XDS.b or a REST endpoint for
another FHIR server. This may include any security context information.
"""
resource_type = Field("Endpoint", const=True)
address: fhirtypes.Uri = Field(
None,
alias="address",
title="The technical base address for connecting to this endpoint",
description="The uri that describes the actual end-point to connect to.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
address__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_address", title="Extension field for ``address``."
)
connectionType: fhirtypes.CodingType = Field(
...,
alias="connectionType",
title="Protocol/Profile/Standard to be used with this endpoint connection",
description=(
"A coded value that represents the technical details of the usage of "
"this endpoint, such as what WSDLs should be used in what way. (e.g. "
"XDS.b/DICOM/cds-hook)."
),
# if property is element of this resource.
element_property=True,
)
contact: typing.List[fhirtypes.ContactPointType] = Field(
None,
alias="contact",
title="Contact details for source (e.g. troubleshooting)",
description=(
"Contact details for a human to contact about the subscription. The "
"primary use of this for system administrator troubleshooting."
),
# if property is element of this resource.
element_property=True,
)
header: typing.List[fhirtypes.String] = Field(
None,
alias="header",
title="Usage depends on the channel type",
description="Additional headers / information to send as part of the notification.",
# if property is element of this resource.
element_property=True,
)
header__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_header", title="Extension field for ``header``.")
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Identifies this endpoint across multiple systems",
description=(
"Identifier for the organization that is used to identify the endpoint "
"across multiple disparate systems."
),
# if property is element of this resource.
element_property=True,
)
managingOrganization: fhirtypes.ReferenceType = Field(
None,
alias="managingOrganization",
title=(
"Organization that manages this endpoint (may not be the organization "
"that exposes the endpoint)"
),
description=(
"The organization that manages this endpoint (even if technically "
"another organisation is hosting this in the cloud, it is the "
"organisation associated with the data)."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization"],
)
name: fhirtypes.String = Field(
None,
alias="name",
title="A name that this endpoint can be identified by",
description="A friendly name that this endpoint can be referred to with.",
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
payloadMimeType: typing.List[fhirtypes.Code] = Field(
None,
alias="payloadMimeType",
title=(
"Mimetype to send. If not specified, the content could be anything "
"(including no payload, if the connectionType defined this)"
),
description=(
"The mime type to send the payload in - e.g. application/fhir+xml, "
"application/fhir+json. If the mime type is not specified, then the "
"sender could send any content (including no content depending on the "
"connectionType)."
),
# if property is element of this resource.
element_property=True,
)
payloadMimeType__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(
None, alias="_payloadMimeType", title="Extension field for ``payloadMimeType``."
)
payloadType: typing.List[fhirtypes.CodeableConceptType] = Field(
...,
alias="payloadType",
title=(
"The type of content that may be used at this endpoint (e.g. XDS "
"Discharge summaries)"
),
description=(
"The payload type describes the acceptable content that can be "
"communicated on the endpoint."
),
# if property is element of this resource.
element_property=True,
)
period: fhirtypes.PeriodType = Field(
None,
alias="period",
title="Interval the endpoint is expected to be operational",
description="The interval during which the endpoint is expected to be operational.",
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="active | suspended | error | off | entered-in-error | test",
description="active | suspended | error | off | test.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["active", "suspended", "error", "off", "entered-in-error", "test"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_1018(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("address", "address__ext"), ("status", "status__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
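# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of validating an Endpoint via the standard pydantic v1
# parsing used by fhir.resources; the codes and URL are invented placeholders.
#
#   endpoint = Endpoint.parse_obj({
#       "resourceType": "Endpoint",
#       "status": "active",
#       "connectionType": {"code": "hl7-fhir-rest"},
#       "payloadType": [{"text": "any"}],
#       "address": "https://fhir.example.org/baseR3",
#   })
#   assert endpoint.status == "active"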
| 39.713115 | 93 | 0.616512 |
793f17b586efd56a280155231d7a9545ba3fc0bc | 13,393 | py | Python | xcube/core/store/storepool.py | dcs4cop/xcube | c615df539d286d3adbd588886659602487bf5efa | ["MIT"] | 97 | 2018-06-26T13:02:55.000Z | 2022-03-26T21:03:13.000Z | xcube/core/store/storepool.py | dcs4cop/xcube | c615df539d286d3adbd588886659602487bf5efa | ["MIT"] | 524 | 2018-11-09T12:00:08.000Z | 2022-03-31T17:00:13.000Z | xcube/core/store/storepool.py | dcs4cop/xcube | c615df539d286d3adbd588886659602487bf5efa | ["MIT"] | 15 | 2019-07-09T08:46:03.000Z | 2022-02-07T18:47:34.000Z |
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import os.path
from typing import Any, Dict, Optional, List, Union
import yaml
from xcube.util.assertions import assert_given
from xcube.util.assertions import assert_instance
from xcube.util.jsonschema import JsonIntegerSchema
from xcube.util.jsonschema import JsonNumberSchema
from xcube.util.jsonschema import JsonObjectSchema
from xcube.util.jsonschema import JsonStringSchema
from .assertions import assert_valid_config
from .error import DataStoreError
from .store import DataStore
from .store import new_data_store
def get_data_store_instance(store_id: str,
store_params: Dict[str, Any] = None,
store_pool: 'DataStorePool' = None) \
-> 'DataStoreInstance':
"""
Get a data store instance for identifier *store_id*.
If *store_id* is prefixed by a "@", it is an "instance identifier".
In this case the store instance is retrieved from
the expected *store_pool* argument. Otherwise a new store instance
is created using optional *store_params*.
:param store_id: Store identifier, may be prefixed by
a "@" to indicate a store instance identifier.
:param store_params: Store parameters, only valid if *store_id*
is not an instance identifier.
:param store_pool: A pool of configured store instances used
if *store_id* is an instance identifier.
:return: a DataStoreInstance object
:raise: DataStoreError if a configured store does not exist
"""
if store_id.startswith('@'):
store_instance_id = store_id[1:]
if store_pool is None:
raise ValueError(f'store_pool must be given,'
f' with store_id ("{store_id}")'
f' referring to a configured store')
if store_params:
raise ValueError(f'store_params cannot be given,'
f' with store_id ("{store_id}")'
f' referring to a configured store')
return store_pool.get_store_instance(store_instance_id)
return DataStoreInstance(DataStoreConfig(store_id, store_params))
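# Illustrative sketch (added for clarity; not part of the original module): the "@"
# prefix selects a pre-configured instance from the pool, otherwise a fresh store is
# created. The "file" store id and its "root" parameter are assumptions about
# available xcube stores; only the dispatch logic comes from the function above.
#
#   inst = get_data_store_instance("file", store_params={"root": "/tmp/cubes"})
#   inst = get_data_store_instance("@my-store", store_pool=pool)   # pool: DataStorePool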
DATA_STORE_CONFIG_SCHEMA = JsonObjectSchema(
properties=dict(
store_id=JsonStringSchema(min_length=1),
store_params=JsonObjectSchema(
additional_properties=True
),
title=JsonStringSchema(min_length=1),
description=JsonStringSchema(min_length=1),
cost_params=JsonObjectSchema(
properties=dict(
input_pixels_per_punit=JsonIntegerSchema(minimum=1),
output_pixels_per_punit=JsonIntegerSchema(minimum=1),
input_punits_weight=JsonNumberSchema(
exclusive_minimum=0.0, default=1.0
),
output_punits_weight=JsonNumberSchema(
exclusive_minimum=0.0, default=1.0
),
),
additional_properties=False,
required=['input_pixels_per_punit', 'output_pixels_per_punit'],
)
),
required=['store_id'],
)
DATA_STORE_POOL_SCHEMA = JsonObjectSchema(
additional_properties=DATA_STORE_CONFIG_SCHEMA,
)
class DataStoreConfig:
"""
The configuration of a data store.
The class is used by :class:DataStorePool to instantiate
stores in a deferred manner.
:param store_id: the data store identifier
:param store_params: optional store parameters
:param title: a human-readable title for the store instance
:param description: a human-readable description of the store instance
:param user_data: optional user-data
"""
def __init__(self,
store_id: str,
store_params: Dict[str, Any] = None,
title: str = None,
description: str = None,
user_data: Any = None):
assert_given(store_id, name='store_id')
if store_params is not None:
assert_instance(store_params, dict, name='store_params')
self._store_id = store_id
self._store_params = store_params
self._title = title
self._description = description
self._user_data = user_data
@property
def store_id(self) -> Optional[str]:
return self._store_id
@property
def store_params(self) -> Optional[Dict[str, Any]]:
return self._store_params
@property
def title(self) -> Optional[str]:
return self._title
@property
def description(self) -> Optional[str]:
return self._description
@property
def user_data(self) -> Optional[Any]:
return self._user_data
@classmethod
def from_dict(cls, data_store_config: Dict[str, Any]) \
-> 'DataStoreConfig':
assert_valid_config(data_store_config,
name='data_store_config',
schema=DATA_STORE_CONFIG_SCHEMA)
return DataStoreConfig(
data_store_config['store_id'],
store_params=data_store_config.get('store_params'),
title=data_store_config.get('title'),
description=data_store_config.get('description')
)
def to_dict(self) -> Dict[str, Any]:
data_store_config = dict(store_id=self._store_id)
if self._store_params:
data_store_config.update(store_params=self._store_params)
if self._title:
data_store_config.update(name=self._title)
if self._description:
data_store_config.update(description=self._description)
return data_store_config
class DataStoreInstance:
"""
Internal class used by DataStorePool to maintain
store configurations + instances.
"""
def __init__(self, store_config: DataStoreConfig):
assert_given(store_config, name='store_config')
assert_instance(store_config, DataStoreConfig, name='store_config')
self._store_config = store_config
self._store: Optional[DataStore] = None
@property
def store_config(self) -> DataStoreConfig:
return self._store_config
@property
def store(self) -> DataStore:
if self._store is None:
self._store = new_data_store(
self._store_config.store_id,
**(self._store_config.store_params or {})
)
return self._store
def close(self):
store = self._store
if store is not None \
and hasattr(store, 'close') \
and callable(store.close):
store.close()
DataStoreConfigDict = Dict[str, DataStoreConfig]
DataStoreInstanceDict = Dict[str, DataStoreInstance]
DataStorePoolLike = Union[str, Dict[str, Any], 'DataStorePool']
class DataStorePool:
"""
A pool of configured data store instances.
Actual data store instantiation only takes place lazily.
    A pool may be created using its :meth:from_dict() (or :meth:from_file()) method,
which receives a (JSON) dictionary that maps store instance names to
store configurations:
{
"<store_instance_id>": {
"store_id": "<store_id>",
"store_params": {
"<param_name>": <param_value>,
...
},
"title": "<optional_human_readable_title>",
"description": "<optional_human_readable_description>",
},
...
}
:param store_configs: A dictionary that maps store instance
identifiers to to store configurations.
"""
def __init__(self, store_configs: DataStoreConfigDict = None):
if store_configs is not None:
assert_instance(store_configs, dict, name='stores_configs')
else:
store_configs = {}
self._instances: DataStoreInstanceDict = {
k: DataStoreInstance(v) for k, v in
store_configs.items()
}
@property
def is_empty(self) -> bool:
return len(self._instances) == 0
@property
def store_instance_ids(self) -> List[str]:
return sorted([k for k, v in self._instances.items()])
@property
def store_configs(self) -> List[DataStoreConfig]:
return [v.store_config for k, v in self._instances.items()]
def has_store_config(self, store_instance_id: str) -> bool:
assert_instance(store_instance_id, str, 'store_instance_id')
return store_instance_id in self._instances
def add_store_config(self,
store_instance_id: str,
store_config: DataStoreConfig):
assert_instance(store_instance_id, str, 'store_instance_id')
assert_instance(store_config, DataStoreConfig, 'store_config')
if store_instance_id in self._instances:
self._instances[store_instance_id].close()
self._instances[store_instance_id] = DataStoreInstance(store_config)
def remove_store_config(self, store_instance_id: str):
self._assert_valid_instance_id(store_instance_id)
self._instances[store_instance_id].close()
del self._instances[store_instance_id]
def remove_all_store_configs(self):
self._instances.clear()
def get_store_config(self, store_instance_id: str) -> DataStoreConfig:
self._assert_valid_instance_id(store_instance_id)
return self._instances[store_instance_id].store_config
def get_store(self, store_instance_id: str) -> DataStore:
self._assert_valid_instance_id(store_instance_id)
return self._instances[store_instance_id].store
def get_store_instance(self, store_instance_id: str) -> DataStoreInstance:
self._assert_valid_instance_id(store_instance_id)
return self._instances[store_instance_id]
def close_all_stores(self):
for instance in self._instances.values():
instance.close()
@classmethod
def normalize(cls, data_store_pool: DataStorePoolLike) \
-> 'DataStorePool':
"""
Normalize given *data_store_pool* to an instance of
:class:DataStorePool.
If *data_store_pool* is already a DataStorePool it is returned as is.
If it is a ``str``, it is interpreted as a YAML or JSON file path
and the request is read from file using ``DataStorePool.from_file()``.
If it is a ``dict``, it is interpreted as a JSON object and the
request is parsed using ``DataStorePool.from_dict()``.
        :param data_store_pool: The data store pool instance,
            or data stores configuration file path, or data store pool
            JSON object.
        :raise TypeError: if *data_store_pool* is not
            a ``DataStorePool``, ``str``, or ``dict``.
"""
if isinstance(data_store_pool, DataStorePool):
return data_store_pool
if isinstance(data_store_pool, str):
return DataStorePool.from_file(data_store_pool)
if isinstance(data_store_pool, dict):
return DataStorePool.from_dict(data_store_pool)
raise TypeError('data_store_pool must be a str, dict, '
'or a DataStorePool instance')
@classmethod
def from_file(cls, path: str) -> 'DataStorePool':
_, ext = os.path.splitext(path)
with open(path) as fp:
if ext == '.json':
store_configs = json.load(fp)
else:
store_configs = yaml.safe_load(fp)
return cls.from_dict(store_configs or {})
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> 'DataStorePool':
DATA_STORE_POOL_SCHEMA.validate_instance(d)
return cls({k: DataStoreConfig.from_dict(v) for k, v in d.items()})
def to_dict(self) -> Dict[str, Any]:
return {
instance_id: instance.store_config.to_dict()
for instance_id, instance in self._instances.items()
}
def _assert_valid_instance_id(self, store_instance_id: str):
assert_instance(store_instance_id, str, name='store_instance_id')
if store_instance_id not in self._instances:
raise DataStoreError(f'Configured data store instance'
f' "{store_instance_id}" not found.')
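# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Built only from the methods defined above; the store id and parameters are invented
# placeholders that must match whatever stores are actually installed.
#
#   pool = DataStorePool.from_dict({
#       "local": {
#           "store_id": "file",
#           "store_params": {"root": "/data/cubes"},
#           "title": "Local cubes",
#       }
#   })
#   pool.has_store_config("local")    # True
#   store = pool.get_store("local")   # actual store object created lazily here
#   pool.close_all_stores()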
| 37.620787 | 81 | 0.656164 |
793f181ddde1a6f6da053fd3c531f8ac30573c89 | 18,553 | py | Python | masters/master.chromium.lkgr/master_lkgr_cfg.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | ["BSD-3-Clause"] | null | null | null | masters/master.chromium.lkgr/master_lkgr_cfg.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | ["BSD-3-Clause"] | null | null | null | masters/master.chromium.lkgr/master_lkgr_cfg.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | ["BSD-3-Clause"] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master import gitiles_poller
from master import master_config
from master.factory import annotator_factory
from master.factory import chromium_factory
import master_site_config
ActiveMaster = master_site_config.ChromiumLKGR
defaults = {}
helper = master_config.Helper(defaults)
B = helper.Builder
F = helper.Factory
S = helper.Scheduler
def win(): return chromium_factory.ChromiumFactory('src/build', 'win32')
def win_out(): return chromium_factory.ChromiumFactory('src/out', 'win32')
def linux(): return chromium_factory.ChromiumFactory('src/build', 'linux2')
def mac(): return chromium_factory.ChromiumFactory('src/build', 'darwin')
def linux_android(): return chromium_factory.ChromiumFactory(
'src/out', 'linux2', nohooks_on_update=True, target_os='android')
m_annotator = annotator_factory.AnnotatorFactory()
defaults['category'] = '1lkgr'
# Global scheduler
S(name='chromium_lkgr', branch='lkgr')
################################################################################
## Windows
################################################################################
B('Win', 'win_full', 'compile|windows', 'chromium_lkgr')
F('win_full', win().ChromiumFactory(
clobber=True,
project='all.sln',
factory_properties={'archive_build': ActiveMaster.is_production_host,
'gs_bucket': 'gs://chromium-browser-continuous',
'gs_acl': 'public-read',
'gclient_env': {
'GYP_LINK_CONCURRENCY_MAX': '4',
},
}))
B('Win x64', 'win_x64_full', 'windows', 'chromium_lkgr')
F('win_x64_full', win_out().ChromiumFactory(
clobber=True,
compile_timeout=9600, # Release build is LOOONG
target='Release_x64',
options=['--build-tool=ninja', '--', 'all'],
factory_properties={
'archive_build': ActiveMaster.is_production_host,
'gclient_env': {
'GYP_DEFINES': 'component=static_library target_arch=x64',
'GYP_LINK_CONCURRENCY_MAX': '4',
},
'gs_bucket': 'gs://chromium-browser-continuous',
'gs_acl': 'public-read',
}))
# ASan/Win supports neither the component build nor NaCL at the moment.
asan_win_gyp = ('asan=1 component=static_library enable_ipc_fuzzer=1 '
'v8_enable_verify_heap=1')
# Clang is not stable enough on Windows to use a gatekeeper yet.
B('Win ASan Release', 'win_asan_rel', scheduler='chromium_lkgr')
F('win_asan_rel', win_out().ChromiumASANFactory(
compile_timeout=8*3600, # We currently use a VM, which is extremely slow.
clobber=True,
options=['--build-tool=ninja', '--', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_win_gyp}}))
# ASan/Win coverage bot.
B('Win ASan Release Coverage', 'win_asan_rel_cov', scheduler='chromium_lkgr')
F('win_asan_rel_cov', win_out().ChromiumASANFactory(
compile_timeout=8*3600, # We currently use a VM, which is extremely slow.
clobber=True,
options=['--build-tool=ninja', '--', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan-coverage',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_win_gyp + ' sanitizer_coverage=3'}}))
# ASan/Win supports neither the component build nor NaCL at the moment.
media_gyp = (' proprietary_codecs=1 ffmpeg_branding=Chrome')
asan_win_media_gyp = asan_win_gyp + media_gyp
# Clang is not stable enough on Windows to use a gatekeeper yet.
B('Win ASan Release Media', 'win_asan_rel_media',
scheduler='chromium_lkgr')
F('win_asan_rel_media', win_out().ChromiumASANFactory(
compile_timeout=8*3600, # We currently use a VM, which is extremely slow.
clobber=True,
options=['--build-tool=ninja', '--', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'gs_bucket': 'gs://chrome-test-builds/media',
'gclient_env': {'GYP_DEFINES': asan_win_media_gyp}}))
# Win SyzyASan bot.
B('Win SyzyASAN LKGR', 'win_syzyasan_lkgr', 'compile', 'chromium_lkgr')
F('win_syzyasan_lkgr', m_annotator.BaseFactory(recipe='chromium', timeout=7200))
################################################################################
## Mac
################################################################################
asan_mac_gyp = 'asan=1 v8_enable_verify_heap=1 '
B('Mac', 'mac_full', 'compile|testers', 'chromium_lkgr')
F('mac_full', mac().ChromiumFactory(
clobber=True,
factory_properties={'archive_build': ActiveMaster.is_production_host,
'gs_bucket': 'gs://chromium-browser-continuous',
'gs_acl': 'public-read',}))
B('Mac ASAN Release', 'mac_asan_rel', 'compile', 'chromium_lkgr')
F('mac_asan_rel', linux().ChromiumASANFactory(
clobber=True,
options=['--compiler=goma-clang', '--', '-target', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_mac_gyp}}))
B('Mac ASAN Release Media', 'mac_asan_rel_media', 'compile', 'chromium_lkgr')
F('mac_asan_rel_media', linux().ChromiumASANFactory(
clobber=True,
options=['--compiler=goma-clang', '--', '-target', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'gs_bucket': 'gs://chrome-test-builds/media',
'gclient_env': {'GYP_DEFINES': asan_mac_gyp + media_gyp}}))
B('Mac ASAN Debug', 'mac_asan_dbg', 'compile', 'chromium_lkgr')
F('mac_asan_dbg', linux().ChromiumASANFactory(
clobber=True,
target='Debug',
options=['--compiler=goma-clang', '--', '-target', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_mac_gyp +
' component=static_library '}}))
################################################################################
## Linux
################################################################################
B('Linux', 'linux_full', 'compile|testers', 'chromium_lkgr')
F('linux_full', linux().ChromiumFactory(
clobber=True,
factory_properties={'archive_build': ActiveMaster.is_production_host,
'gs_bucket': 'gs://chromium-browser-continuous',
'gs_acl': 'public-read',}))
B('Linux x64', 'linux64_full', 'compile|testers', 'chromium_lkgr')
F('linux64_full', linux().ChromiumFactory(
clobber=True,
factory_properties={
'archive_build': ActiveMaster.is_production_host,
'gs_bucket': 'gs://chromium-browser-continuous',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES':'target_arch=x64'}}))
asan_rel_gyp = ('asan=1 lsan=1 sanitizer_coverage=3 '
'v8_enable_verify_heap=1 enable_ipc_fuzzer=1 ')
B('ASAN Release', 'linux_asan_rel', 'compile', 'chromium_lkgr')
F('linux_asan_rel', linux().ChromiumASANFactory(
compile_timeout=2400, # We started seeing 29 minute links, bug 360158
clobber=True,
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_rel_gyp}}))
linux_media_gyp = (' proprietary_codecs=1 ffmpeg_branding=ChromeOS')
B('ASAN Release Media', 'linux_asan_rel_media',
'compile', 'chromium_lkgr')
F('linux_asan_rel_media', linux().ChromiumASANFactory(
compile_timeout=2400, # We started seeing 29 minute links, bug 360158
clobber=True,
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'gs_bucket': 'gs://chrome-test-builds/media',
'gclient_env': {'GYP_DEFINES': asan_rel_gyp +
linux_media_gyp}}))
asan_rel_sym_gyp = ('asan=1 lsan=1 sanitizer_coverage=3 '
'v8_enable_verify_heap=1 enable_ipc_fuzzer=1 '
'release_extra_cflags="-O1 -fno-inline-functions '
'-fno-inline" ')
B('ASAN Release (symbolized)', 'linux_asan_rel_sym', 'compile', 'chromium_lkgr')
F('linux_asan_rel_sym', linux().ChromiumASANFactory(
clobber=True,
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan-symbolized',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_rel_sym_gyp}}))
asan_debug_gyp = ('asan=1 lsan=1 sanitizer_coverage=3 enable_ipc_fuzzer=1 ')
B('ASAN Debug', 'linux_asan_dbg', 'compile', 'chromium_lkgr')
F('linux_asan_dbg', linux().ChromiumASANFactory(
clobber=True,
target='Debug',
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_debug_gyp}}))
asan_chromiumos_rel_gyp = ('%s chromeos=1' % asan_rel_gyp)
B('ChromiumOS ASAN Release', 'linux_chromiumos_asan_rel', 'compile',
'chromium_lkgr')
F('linux_chromiumos_asan_rel', linux().ChromiumASANFactory(
compile_timeout=2400, # We started seeing 29 minute links, bug 360158
clobber=True,
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'asan',
'cf_archive_subdir_suffix': 'chromeos',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_chromiumos_rel_gyp}}))
asan_ia32_v8_arm = ('asan=1 sanitizer_coverage=3 disable_nacl=1 '
'v8_target_arch=arm host_arch=x86_64 target_arch=ia32 '
'v8_enable_verify_heap=1 enable_ipc_fuzzer=1 ')
asan_ia32_v8_arm_rel_sym = ('%s release_extra_cflags="-O1 '
'-fno-inline-functions -fno-inline"' %
asan_ia32_v8_arm)
asan_ia32_v8_arm_rel = asan_ia32_v8_arm
# The build process is described at
# https://sites.google.com/a/chromium.org/dev/developers/testing/addresssanitizer#TOC-Building-with-v8_target_arch-arm
B('ASan Debug (32-bit x86 with V8-ARM)',
'linux_asan_dbg_ia32_v8_arm',
'compile', 'chromium_lkgr')
F('linux_asan_dbg_ia32_v8_arm', linux().ChromiumASANFactory(
clobber=True,
target='Debug',
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_subdir_suffix': 'v8-arm',
'cf_archive_name': 'asan-v8-arm',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_ia32_v8_arm}}))
B('ASan Release (32-bit x86 with V8-ARM)',
'linux_asan_rel_ia32_v8_arm',
'compile', 'chromium_lkgr')
F('linux_asan_rel_ia32_v8_arm', linux().ChromiumASANFactory(
clobber=True,
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_subdir_suffix': 'v8-arm',
'cf_archive_name': 'asan-v8-arm',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_ia32_v8_arm_rel}}))
B('ASan Release Media (32-bit x86 with V8-ARM)',
'linux_asan_rel_media_ia32_v8_arm',
'compile', 'chromium_lkgr')
F('linux_asan_rel_media_ia32_v8_arm', linux().ChromiumASANFactory(
clobber=True,
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_subdir_suffix': 'v8-arm',
'cf_archive_name': 'asan-v8-arm',
'gs_bucket': 'gs://chrome-test-builds/media',
'gclient_env': {'GYP_DEFINES': asan_ia32_v8_arm_rel + linux_media_gyp}}))
B('ASan Release (32-bit x86 with V8-ARM, symbolized)',
'linux_asan_rel_sym_ia32_v8_arm',
'compile', 'chromium_lkgr')
F('linux_asan_rel_sym_ia32_v8_arm', linux().ChromiumASANFactory(
clobber=True,
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_subdir_suffix': 'v8-arm',
'cf_archive_name': 'asan-symbolized-v8-arm',
'gs_bucket': 'gs://chromium-browser-asan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': asan_ia32_v8_arm_rel_sym}}))
# The build process for TSan is described at
# http://dev.chromium.org/developers/testing/threadsanitizer-tsan-v2
tsan_gyp = ('tsan=1 disable_nacl=1 '
'debug_extra_cflags="-gline-tables-only" ')
B('TSAN Release', 'linux_tsan_rel', 'compile', 'chromium_lkgr')
F('linux_tsan_rel', linux().ChromiumFactory(
clobber=True,
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'tsan',
'gs_bucket': 'gs://chromium-browser-tsan',
'gs_acl': 'public-read',
'tsan': True,
'gclient_env': {'GYP_DEFINES': tsan_gyp}}))
B('TSAN Debug', 'linux_tsan_dbg', 'compile', 'chromium_lkgr')
F('linux_tsan_dbg', linux().ChromiumFactory(
clobber=True,
target='Debug',
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'tsan',
'gs_bucket': 'gs://chromium-browser-tsan',
'gs_acl': 'public-read',
'tsan': True,
'gclient_env': {'GYP_DEFINES': tsan_gyp}}))
# The build process for MSan is described at
# http://dev.chromium.org/developers/testing/memorysanitizer
msan_gyp = ('msan=1 sanitizer_coverage=3 '
'use_prebuilt_instrumented_libraries=1 ')
B('MSAN Release (no origins)', 'linux_msan_rel_no_origins', 'compile',
'chromium_lkgr')
F('linux_msan_rel_no_origins', linux().ChromiumFactory(
clobber=True,
target='Release',
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'msan-no-origins',
'gs_bucket': 'gs://chromium-browser-msan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': msan_gyp + 'msan_track_origins=0 '}}))
B('MSAN Release (chained origins)', 'linux_msan_rel_chained_origins', 'compile',
'chromium_lkgr')
F('linux_msan_rel_chained_origins', linux().ChromiumFactory(
clobber=True,
target='Release',
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'msan-chained-origins',
'gs_bucket': 'gs://chromium-browser-msan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': msan_gyp + 'msan_track_origins=2 '}}))
# This is a bot that uploads LKGR telemetry harnesses to Google Storage.
B('Telemetry Harness Upload', 'telemetry_harness_upload', None, 'chromium_lkgr')
F('telemetry_harness_upload',
m_annotator.BaseFactory('perf/telemetry_harness_upload'))
# The build process for UBSan vptr is described at
# http://dev.chromium.org/developers/testing/undefinedbehaviorsanitizer
ubsan_gyp = ('ubsan=1 ')
B('UBSan Release', 'linux_ubsan_rel', 'compile', 'chromium_lkgr')
F('linux_ubsan_rel', linux().ChromiumFactory(
clobber=True,
compile_timeout=5400, # UBSan builds very slowly with edge level coverage
target='Release',
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_name': 'ubsan',
'gs_bucket': 'gs://chromium-browser-ubsan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': ubsan_gyp}}))
ubsan_vptr_gyp = ('ubsan_vptr=1 sanitizer_coverage=3 ')
B('UBSan vptr Release', 'linux_ubsan_vptr_rel', 'compile', 'chromium_lkgr')
F('linux_ubsan_vptr_rel', linux().ChromiumFactory(
clobber=True,
target='Release',
options=['--compiler=goma-clang', 'chromium_builder_asan'],
factory_properties={
'cf_archive_build': ActiveMaster.is_production_host,
'cf_archive_subdir_suffix': 'vptr',
'cf_archive_name': 'ubsan-vptr',
'gs_bucket': 'gs://chromium-browser-ubsan',
'gs_acl': 'public-read',
'gclient_env': {'GYP_DEFINES': ubsan_vptr_gyp}}))
################################################################################
## Android
################################################################################
B('Android', 'android', None, 'chromium_lkgr')
F('android', linux_android().ChromiumAnnotationFactory(
clobber=True,
target='Release',
factory_properties={
'android_bot_id': 'lkgr-clobber-rel',
'archive_build': True,
'gs_acl': 'public-read',
'gs_bucket': 'gs://chromium-browser-continuous',
'perf_id': 'android-release',
'show_perf_results': True,
},
annotation_script='src/build/android/buildbot/bb_run_bot.py',
))
def Update(_config, active_master, c):
lkgr_poller = gitiles_poller.GitilesPoller(
'https://chromium.googlesource.com/chromium/src',
branches=['lkgr'])
c['change_source'].append(lkgr_poller)
return helper.Update(c)
| 41.04646 | 118 | 0.65957 |
793f18ceaeedbcf29e81fc01885ef9e83ce200fd | 394 | py | Python | jobs/49-kafka-producer.py | Parthi10/Pyspark | a7943010f2af768d33051e09979c892c02bf0610 | [
"MIT"
] | 7 | 2019-03-14T04:50:47.000Z | 2020-02-04T14:04:33.000Z | jobs/49-kafka-producer.py | Ranjan90/learn-pyspark | 91152497eb733c3cf53ffa910dc9fc3948b01ad1 | [
"MIT"
] | null | null | null | jobs/49-kafka-producer.py | Ranjan90/learn-pyspark | 91152497eb733c3cf53ffa910dc9fc3948b01ad1 | [
"MIT"
] | 4 | 2019-12-04T18:24:56.000Z | 2022-02-14T15:56:19.000Z | # kafka lessons, using kafka-python module
# pip3 install kafka-python
from kafka import KafkaProducer, KafkaConsumer, TopicPartition
from time import sleep
from json import dumps
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
                         value_serializer=lambda x: dumps(x).encode('utf-8'))
for e in range(1000):
    data = {'number': e}
    producer.send('numtest', value=data)
    sleep(5)
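# A matching consumer is not shown in this lesson (KafkaConsumer and
# TopicPartition are presumably imported for later lessons). A minimal sketch,
# assuming the same local broker and 'numtest' topic, would be:
#   from json import loads
#   consumer = KafkaConsumer('numtest', bootstrap_servers=['localhost:9092'],
#                            value_deserializer=lambda x: loads(x.decode('utf-8')))
#   for message in consumer:
#       print(message.value)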
| 30.307692 | 111 | 0.758883 |
793f1999021f052b64f7bf18f16dbb33f5708b5a | 27,448 | py | Python | ai/models/pytorch/boundingBoxes/retinanet.py | junxnone/aerial_wildlife_detection | 0eebed2aaf926ceb212b6a2b7a75bb0a82b28a88 | [
"MIT"
] | 1 | 2021-04-26T22:50:52.000Z | 2021-04-26T22:50:52.000Z | ai/models/pytorch/boundingBoxes/retinanet.py | junxnone/aerial_wildlife_detection | 0eebed2aaf926ceb212b6a2b7a75bb0a82b28a88 | [
"MIT"
] | null | null | null | ai/models/pytorch/boundingBoxes/retinanet.py | junxnone/aerial_wildlife_detection | 0eebed2aaf926ceb212b6a2b7a75bb0a82b28a88 | [
"MIT"
] | 2 | 2021-04-15T17:26:40.000Z | 2021-04-15T17:26:53.000Z | '''
RetinaNet trainer for PyTorch.
2019-20 Benjamin Kellenberger
'''
import io
import json
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from ..genericPyTorchModel import GenericPyTorchModel
from .. import parse_transforms
from ..functional._retinanet import DEFAULT_OPTIONS, collation, encoder, loss
from ..functional._retinanet.model import RetinaNet as Model
from ..functional.datasets.bboxDataset import BoundingBoxesDataset
from util.helpers import get_class_executable
from util import optionsHelper
'''
Map between new (GUI-enhanced) and old options JSON format fields.
In the new format, all options are rooted under "options".
'''
OPTIONS_MAPPING = {
'general.device.value': 'general.device',
'general.seed.value': 'general.seed',
'model.backbone.value': 'model.kwargs.backbone',
'model.pretrained.value': 'model.kwargs.pretrained',
'model.out_planes.value': 'model.kwargs.out_planes',
'model.convertToInstanceNorm.value': 'model.kwargs.convertToInstanceNorm',
'train.dataLoader.shuffle.value': 'train.dataLoader.kwargs.shuffle',
'train.dataLoader.batch_size.value': 'train.dataLoader.kwargs.batch_size',
'train.criterion.gamma.value': 'train.criterion.kwargs.gamma',
'train.criterion.alpha.value': 'train.criterion.kwargs.alpha',
'train.criterion.background_weight.value': 'train.criterion.kwargs.background_weight',
'train.ignore_unsure': 'train.ignore_unsure',
'inference.dataLoader.batch_size.value': 'inference.dataLoader.kwargs.batch_size'
# optimizer and transforms are treated separately
}
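# For example, the entry 'general.device.value': 'general.device' means that the
# old value at options['general']['device'] is copied into
# newOptions['options']['general']['device']['value'] by _convertOldOptions below.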
class RetinaNet(GenericPyTorchModel):
model_class = Model
def __init__(self, project, config, dbConnector, fileServer, options):
super(RetinaNet, self).__init__(project, config, dbConnector, fileServer, options)
self.model_class = Model
''' Model options parsing and verification functionalities '''
@staticmethod
def getDefaultOptions():
jsonFile = 'config/ai/model/pytorch/boundingBoxes/retinanet.json'
try:
# try to load defaults from JSON file first
options = json.load(open(jsonFile, 'r'))
except Exception as e:
# error; fall back to built-in defaults
print(f'Error reading default RetinaNet options file "{jsonFile}" (message: "{str(e)}"), falling back to built-in options.')
options = DEFAULT_OPTIONS
# expand options
options = optionsHelper.substitute_definitions(options)
return options
@staticmethod
def _convertOldOptions(options, defaults):
'''
Receives options in the previous JSON encoding
and converts them to the new GUI-enhanced scheme.
Returns the new, converted options accordingly.
'''
newOptions = defaults.copy()
warnings = []
# update defaults key by key
for key in OPTIONS_MAPPING.keys():
newTokens = ['options']
newTokens.extend(key.split('.'))
oldTokens = OPTIONS_MAPPING[key].split('.')
oldValue = optionsHelper.get_hierarchical_value(options, oldTokens, None)
if oldValue is None:
warnings.append(f'Value for options "{key}" could not be found in given options (expected location: "{OPTIONS_MAPPING[key]}").')
else:
optionsHelper.set_hierarchical_value(newOptions, newTokens, oldValue)
# take special care of the optimizer: try all possible values (only the ones present will be retained)
currentOptimType = options['train']['optim']['class']
optionsHelper.set_hierarchical_value(newOptions, ('train','optim','value'), currentOptimType)
optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'lr','value'), ('train', 'optim', 'kwargs', 'lr'))
optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'weight_decay','value'), ('train', 'optim', 'kwargs', 'weight_decay'))
optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'momentum','value'), ('train', 'optim', 'kwargs', 'momentum'))
optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'alpha','value'), ('train', 'optim', 'kwargs', 'alpha'))
optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'centered','value'), ('train', 'optim', 'kwargs', 'centered'))
optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'dampening','value'), ('train', 'optim', 'kwargs', 'dampening'))
optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'nesterov','value'), ('train', 'optim', 'kwargs', 'nesterov'))
# also take special care of the transforms
def _update_transforms(currentTransforms):
newTransforms = []
            for tr in currentTransforms:
# get from template definition and then replace values
trClass = tr['class']
if trClass not in newOptions['defs']['transform']:
warnings.append(f'Transform "{trClass}" is not defined in the new scheme and will be substituted appropriately.')
continue
newTr = newOptions['defs']['transform'][trClass]
for kw in tr['kwargs'].keys():
if kw == 'size':
newTr['width']['value'] = tr['kwargs']['size'][0]
newTr['height']['value'] = tr['kwargs']['size'][1]
elif kw in ('brightness', 'contrast', 'saturation', 'hue'):
newTr[kw]['minV']['value'] = 0
newTr[kw]['maxV']['value'] = tr['kwargs'][kw]
warnings.append(f'{kw} values of transforms have been set as maximums (min: 0).')
                    elif kw in ('mean', 'std'):
                        newTr[kw]['r'] = tr['kwargs'][kw][0]
                        newTr[kw]['g'] = tr['kwargs'][kw][1]
                        newTr[kw]['b'] = tr['kwargs'][kw][2]
elif kw in newTr:
newTr[kw]['value'] = tr['kwargs'][kw]
newTransforms.append(newTr)
return newTransforms
currentTr_train = options['train']['transform']['kwargs']['transforms']
newTr_train = _update_transforms(currentTr_train)
newOptions['options']['train']['transform']['value'] = newTr_train
currentTr_inference = options['inference']['transform']['kwargs']['transforms']
newTr_inference = _update_transforms(currentTr_inference)
newOptions['options']['inference']['transform']['value'] = newTr_inference
print('Old RetinaNet options successfully converted to new format.')
return newOptions, warnings
@staticmethod
def _verify_transforms(transforms, allowGeometric=True):
warnings, errors = [], []
transforms_PIL_new, transforms_tensor_new = [], []
currentInputType = None # to keep track of transform order
for tr in transforms:
if isinstance(tr, str):
# only an ID provided; encapsulate
warnings.append(f'Using default arguments for transform "{tr}"')
tr = {
'id': tr
}
trID = tr['id']
trName = (tr['name'] if 'name' in tr else trID)
if trID == 'ai.models.pytorch.boundingBoxes.DefaultTransform':
if 'transform' in tr:
newTr, newWarn, newErr = RetinaNet._verify_transforms(
[tr['transform']], allowGeometric)
transforms_PIL_new.extend(newTr) #TODO: Compose could contain mixed transforms
warnings.extend(newWarn)
errors.extend(newErr)
else:
warnings.append(f'Default transform "{trName}" contains no sub-transform and will be skipped.')
elif trID == 'ai.models.pytorch.boundingBoxes.Compose':
if 'transforms' in tr:
newTr, newWarn, newErr = RetinaNet._verify_transforms(
tr['transforms'], allowGeometric)
transforms_PIL_new.extend(newTr) #TODO: Compose could contain mixed transforms
warnings.extend(newWarn)
errors.extend(newErr)
else:
warnings.append(f'Compose transform "{trName}" contains no sub-transforms and will be skipped.')
if trID in (
'torchvision.transforms.Normalize',
'torchvision.transforms.RandomErasing'
):
# transforms on torch.tensor; these come at the end
transforms_tensor_new.append({
'id': 'ai.models.pytorch.boundingBoxes.DefaultTransform',
'transform': tr
})
if currentInputType is not None and currentInputType != 'tensor':
warnings.append(f'Transform "{trName}" operates on Torch.tensor, but current input is PIL.Image. Transforms might be reordered.')
currentInputType = 'tensor'
elif trID in (
'ai.models.pytorch.boundingBoxes.RandomHorizontalFlip',
'ai.models.pytorch.boundingBoxes.RandomFlip'
):
# geometric transforms on PIL.Image
if not allowGeometric:
warnings.append(f'Transform "{trName}" modifies the image geometrically, which is not allowed here. The transform is being skipped.')
continue
transforms_PIL_new.append(tr)
if currentInputType is not None and currentInputType != 'image':
warnings.append(f'Transform "{trName}" operates on PIL images, but current input is Torch.tensor. Transforms might be reordered.')
currentInputType = 'image'
elif trID in (
'ai.models.pytorch.boundingBoxes.Resize',
'torchvision.transforms.ColorJitter',
'torchvision.transforms.Grayscale',
'torchvision.transforms.RandomGrayscale'
):
# non-geometric (+ always allowed resize) transforms on PIL.Image
transforms_PIL_new.append({
'id': 'ai.models.pytorch.boundingBoxes.DefaultTransform',
'transform': tr
})
if currentInputType is not None and currentInputType != 'image':
warnings.append(f'Transform "{trName}" operates on PIL images, but current input is Torch.tensor. Transforms might be reordered.')
currentInputType = None # reset
elif trID in (
'ai.models.pytorch.boundingBoxes.RandomClip',
'ai.models.pytorch.boundingBoxes.RandomSizedClip'
):
# transforms that work on both PIL.Image and torch.tensor
if currentInputType == 'tensor':
transforms_tensor_new.append(tr)
else:
transforms_PIL_new.append(tr)
else:
# unsupported transform
warnings.append(f'Transform "{trName}" is not a recognized option and will be skipped.')
# assemble transforms
transforms_out = transforms_PIL_new
# insert a ToTensor operation at the right location
transforms_out.append({
'id': 'ai.models.pytorch.boundingBoxes.DefaultTransform',
'transform': 'torchvision.transforms.ToTensor'
})
transforms_out.extend(transforms_tensor_new)
return transforms_out, warnings, errors
@staticmethod
def verifyOptions(options):
# get default options to compare to
defaultOptions = RetinaNet.getDefaultOptions()
# updated options with modifications made
if options is None:
updatedOptions = defaultOptions.copy()
else:
if not isinstance(options, dict):
try:
options = json.loads(options)
except Exception as e:
return {
'valid': False,
'warnings': [],
'errors': [
f'Options are not in valid JSON format (message: "{str(e)}").'
]
}
updatedOptions = options.copy()
result = {
'valid': True,
'warnings': [],
'errors': []
}
        if 'defs' not in updatedOptions:
# old version (without GUI formatting): convert first
updatedOptions, warnings = RetinaNet._convertOldOptions(updatedOptions, defaultOptions)
result['warnings'].append('Options have been converted to new format.')
result['warnings'].extend(warnings)
# flatten and fill globals
updatedOptions = optionsHelper.substitute_definitions(updatedOptions)
# do the verification
missingClassOptions = optionsHelper.get_hierarchical_value(updatedOptions, ['options', 'general', 'labelClasses'])
if not isinstance(missingClassOptions, dict):
updatedOptions['options']['general']['labelClasses'] = \
optionsHelper.get_hierarchical_value(defaultOptions, ['options', 'general', 'labelClasses'])
#TODO: verify rest
# verify transforms
transforms_train = updatedOptions['options']['train']['transform']['value']
transforms_train, w, e = RetinaNet._verify_transforms(transforms_train, True)
result['warnings'].extend(w)
result['errors'].extend(e)
if transforms_train is None:
result['valid'] = False
else:
updatedOptions['options']['train']['transform']['value'] = transforms_train
transforms_inf = updatedOptions['options']['inference']['transform']['value']
transforms_inf, w, e = RetinaNet._verify_transforms(transforms_inf, False)
result['warnings'].extend(w)
result['errors'].extend(e)
if transforms_inf is None:
result['valid'] = False
else:
updatedOptions['options']['inference']['transform']['value'] = transforms_inf
if result['valid']:
result['options'] = updatedOptions
return result
@staticmethod
def _init_transform_instances(transform, imageSize):
'''
Receives a list of transform definition dicts (or names)
that are to be applied in order (either during training or
for inference), and creates class instances for all of them.
Also prepends a "Resize" operation (with the given image size)
as well as a "DefaultTransform" with a "ToTensor" operation,
to convert the image to a torch.Tensor instance.
Returns a "Compose" transform with all the specified transforms
in order.
'''
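        # Illustrative call (structure only; exact kwargs depend on the options
        # definitions): _init_transform_instances(
        #     [{'id': 'ai.models.pytorch.boundingBoxes.RandomHorizontalFlip'}],
        #     (600, 600))
        # yields one Compose instance wrapping
        # [Resize, RandomHorizontalFlip, DefaultTransform(ToTensor)].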
transforms = [{
'id': 'ai.models.pytorch.boundingBoxes.Resize',
'size': imageSize
}]
transforms.extend(transform)
# check if "ToTensor" is needed
hasToTensor = False
for tr in transform:
if tr['id'].endswith('DefaultTransform'):
if (isinstance(tr['transform'], str) and tr['transform'].endswith('ToTensor')) or \
(isinstance(tr['transform'], dict) and tr['transform']['id'].endswith('ToTensor')):
hasToTensor = True
break
if not hasToTensor:
transforms.append({
'id': 'ai.models.pytorch.boundingBoxes.DefaultTransform',
'transform': {
'id': 'torchvision.transforms.ToTensor'
}
})
transformsList = [{
'id': 'ai.models.pytorch.boundingBoxes.Compose',
'transforms': transforms
}]
transform_instances = GenericPyTorchModel.parseTransforms(transformsList)[0]
return transform_instances
''' Model training and inference functionalities '''
def train(self, stateDict, data, updateStateFun):
'''
Initializes a model based on the given stateDict and a data loader from the
provided data and trains the model, taking into account the parameters speci-
fied in the 'options' given to the class.
Returns a serializable state dict of the resulting model.
'''
# initialize model
model, labelclassMap = self.initializeModel(stateDict, data,
optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'labelClasses', 'add_missing', 'value']),
optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'labelClasses', 'remove_obsolete', 'value']))
# setup transform, data loader, dataset, optimizer, criterion
inputSize = (int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'imageSize', 'width', 'value'])),
int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'imageSize', 'height', 'value'])))
transform = RetinaNet._init_transform_instances(
optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'transform', 'value']),
inputSize
)
dataset = BoundingBoxesDataset(data=data,
fileServer=self.fileServer,
labelclassMap=labelclassMap,
targetFormat='xyxy',
transform=transform,
ignoreUnsure=optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'encoding', 'ignore_unsure', 'value'], fallback=False))
dataEncoder = encoder.DataEncoder(
minIoU_pos=optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'encoding', 'minIoU_pos', 'value'], fallback=0.5),
maxIoU_neg=optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'encoding', 'maxIoU_neg', 'value'], fallback=0.4)
)
collator = collation.Collator(self.project, self.dbConnector, (inputSize[1], inputSize[0],), dataEncoder)
dataLoader = DataLoader(
dataset=dataset,
collate_fn=collator.collate_fn,
shuffle=optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'dataLoader', 'shuffle', 'value'], fallback=True)
)
# optimizer
optimArgs = optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'optim', 'value'], None)
optimArgs_out = {}
optimClass = get_class_executable(optimArgs['id'])
for key in optimArgs.keys():
if key not in optionsHelper.RESERVED_KEYWORDS:
optimArgs_out[key] = optionsHelper.get_hierarchical_value(optimArgs[key], ['value'])
optimizer = optimClass(params=model.parameters(), **optimArgs_out)
# loss criterion
critArgs = optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'criterion'], None)
critArgs_out = {}
for key in critArgs.keys():
if key not in optionsHelper.RESERVED_KEYWORDS:
critArgs_out[key] = optionsHelper.get_hierarchical_value(critArgs[key], ['value'])
criterion = loss.FocalLoss(**critArgs_out)
# train model
device = self.get_device()
seed = int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'seed', 'value'], fallback=0))
torch.manual_seed(seed)
if 'cuda' in device:
torch.cuda.manual_seed(seed)
model.to(device)
imgCount = 0
for (img, bboxes_target, labels_target, fVec, _) in tqdm(dataLoader):
img, bboxes_target, labels_target = img.to(device), \
bboxes_target.to(device), \
labels_target.to(device)
optimizer.zero_grad()
bboxes_pred, labels_pred = model(img)
loss_value = criterion(bboxes_pred, bboxes_target, labels_pred, labels_target)
loss_value.backward()
optimizer.step()
# check for Inf and NaN values and raise exception if needed
if any([
torch.any(torch.isinf(bboxes_pred)).item(),
torch.any(torch.isinf(labels_pred)).item(),
torch.any(torch.isnan(bboxes_pred)).item(),
torch.any(torch.isnan(labels_pred)).item()
]):
raise Exception('Model produced Inf and/or NaN values; training was aborted. Try reducing the learning rate.')
# update worker state
imgCount += img.size(0)
updateStateFun(state='PROGRESS', message='training', done=imgCount, total=len(dataLoader.dataset))
# all done; return state dict as bytes
return self.exportModelState(model)
def inference(self, stateDict, data, updateStateFun):
# initialize model
if stateDict is None:
raise Exception('No trained model state found, but required for inference.')
# read state dict from bytes
model, labelclassMap = self.initializeModel(stateDict, data)
# initialize data loader, dataset, transforms
inputSize = (int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'imageSize', 'width', 'value'])),
int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'imageSize', 'height', 'value'])))
transform = RetinaNet._init_transform_instances(
optionsHelper.get_hierarchical_value(self.options, ['options', 'inference', 'transform', 'value']),
inputSize
)
dataset = BoundingBoxesDataset(data=data,
fileServer=self.fileServer,
labelclassMap=labelclassMap,
transform=transform)
dataEncoder = encoder.DataEncoder(minIoU_pos=0.5, maxIoU_neg=0.4) # IoUs don't matter for inference
collator = collation.Collator(self.project, self.dbConnector, (inputSize[1], inputSize[0],), dataEncoder)
dataLoader = DataLoader(
dataset=dataset,
collate_fn=collator.collate_fn,
shuffle=False
)
# perform inference
response = {}
device = self.get_device()
model.to(device)
imgCount = 0
for (img, _, _, fVec, imgID) in tqdm(dataLoader):
# TODO: implement feature vectors
# if img is not None:
# dataItem = img.to(device)
# isFeatureVector = False
# else:
# dataItem = fVec.to(device)
# isFeatureVector = True
dataItem = img.to(device)
with torch.no_grad():
bboxes_pred_batch, labels_pred_batch = model(dataItem, False) #TODO: isFeatureVector
bboxes_pred_batch, labels_pred_batch, confs_pred_batch = dataEncoder.decode(bboxes_pred_batch.squeeze(0).cpu(),
labels_pred_batch.squeeze(0).cpu(),
inputSize,
cls_thresh=optionsHelper.get_hierarchical_value(self.options, ['options', 'inference', 'encoding', 'cls_thresh', 'value'], fallback=0.1),
nms_thresh=optionsHelper.get_hierarchical_value(self.options, ['options', 'inference', 'encoding', 'nms_thresh', 'value'], fallback=0.1),
numPred_max=int(optionsHelper.get_hierarchical_value(self.options, ['options', 'inference', 'encoding', 'numPred_max', 'value'], fallback=128)),
return_conf=True)
for i in range(len(imgID)):
bboxes_pred = bboxes_pred_batch[i]
labels_pred = labels_pred_batch[i]
confs_pred = confs_pred_batch[i]
if bboxes_pred.dim() == 2:
bboxes_pred = bboxes_pred.unsqueeze(0)
labels_pred = labels_pred.unsqueeze(0)
confs_pred = confs_pred.unsqueeze(0)
# convert bounding boxes to YOLO format
predictions = []
bboxes_pred_img = bboxes_pred[0,...]
labels_pred_img = labels_pred[0,...]
confs_pred_img = confs_pred[0,...]
if len(bboxes_pred_img):
bboxes_pred_img[:,2] -= bboxes_pred_img[:,0]
bboxes_pred_img[:,3] -= bboxes_pred_img[:,1]
bboxes_pred_img[:,0] += bboxes_pred_img[:,2]/2
bboxes_pred_img[:,1] += bboxes_pred_img[:,3]/2
bboxes_pred_img[:,0] /= inputSize[0]
bboxes_pred_img[:,1] /= inputSize[1]
bboxes_pred_img[:,2] /= inputSize[0]
bboxes_pred_img[:,3] /= inputSize[1]
# limit to image bounds
bboxes_pred_img = torch.clamp(bboxes_pred_img, 0, 1)
# append to dict
for b in range(bboxes_pred_img.size(0)):
bbox = bboxes_pred_img[b,:]
label = labels_pred_img[b]
logits = confs_pred_img[b,:]
predictions.append({
'x': bbox[0].item(),
'y': bbox[1].item(),
'width': bbox[2].item(),
'height': bbox[3].item(),
'label': dataset.labelclassMap_inv[label.item()],
'logits': logits.numpy().tolist(), #TODO: for AL criterion?
'confidence': torch.max(logits).item()
})
response[imgID[i]] = {
'predictions': predictions,
#TODO: exception if fVec is not torch tensor: 'fVec': io.BytesIO(fVec.numpy().astype(np.float32)).getvalue()
}
# update worker state
imgCount += len(imgID)
updateStateFun(state='PROGRESS', message='predicting', done=imgCount, total=len(dataLoader.dataset))
model.cpu()
if 'cuda' in device:
torch.cuda.empty_cache()
return response | 48.323944 | 183 | 0.580771 |
793f1a535385e33397327f9f0c80090845ea1172 | 5,365 | py | Python | dna_features_viewer/GraphicRecord/BokehPlottableMixin.py | ATayls/DnaFeaturesViewer | 6ace5cdff96bf995aa26167868b0dbb47f5f5952 | [
"MIT"
] | 1 | 2020-12-12T16:55:58.000Z | 2020-12-12T16:55:58.000Z | dna_features_viewer/GraphicRecord/BokehPlottableMixin.py | ATayls/DnaFeaturesViewer | 6ace5cdff96bf995aa26167868b0dbb47f5f5952 | [
"MIT"
] | null | null | null | dna_features_viewer/GraphicRecord/BokehPlottableMixin.py | ATayls/DnaFeaturesViewer | 6ace5cdff96bf995aa26167868b0dbb47f5f5952 | [
"MIT"
] | null | null | null | try:
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import Range1d, HoverTool
BOKEH_AVAILABLE = True
except ImportError:
BOKEH_AVAILABLE = False
try:
import pandas as pd
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
import matplotlib.pyplot as plt
class BokehPlottableMixin:
def bokeh_feature_patch(
self,
start,
end,
strand,
figure_width=5,
width=0.4,
level=0,
arrow_width_inches=0.05,
**kwargs
):
"""Return a dict with points coordinates of a Bokeh Feature arrow.
Parameters
----------
start, end, strand
"""
hw = width / 2.0
x1, x2 = (start, end) if (strand >= 0) else (end, start)
bp_per_width = figure_width / self.sequence_length
delta = arrow_width_inches / bp_per_width
if strand >= 0:
head_base = max(x1, x2 - delta)
else:
head_base = min(x1, x2 + delta)
result = dict(
xs=[x1, x1, head_base, x2, head_base, x1],
ys=[e + level for e in [-hw, hw, hw, 0, -hw, -hw]],
)
result.update(kwargs)
return result
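    # Worked example (values assumed): for a record with sequence_length=1000,
    # record.bokeh_feature_patch(0, 100, 1) with the defaults above gives
    # delta = 0.05 / (5 / 1000) = 10 and head_base = 90, so it returns
    # {'xs': [0, 0, 90, 100, 90, 0], 'ys': [-0.2, 0.2, 0.2, 0, -0.2, -0.2]}.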
def plot_with_bokeh(self, figure_width=5, figure_height="auto", tools="auto"):
"""Plot the graphic record using Bokeh.
Examples
--------
>>>
"""
if not BOKEH_AVAILABLE:
raise ImportError("``plot_with_bokeh`` requires Bokeh installed.")
if not PANDAS_AVAILABLE:
raise ImportError("``plot_with_bokeh`` requires Pandas installed.")
# Set up default tools
if tools == "auto":
tools = [HoverTool(tooltips="@hover_html"), "xpan,xwheel_zoom,reset,tap"]
# FIRST PLOT WITH MATPLOTLIB AND GATHER INFOS ON THE PLOT
ax, (features_levels, plot_data) = self.plot(figure_width=figure_width)
width, height = [int(100 * e) for e in ax.figure.get_size_inches()]
plt.close(ax.figure)
if figure_height == "auto":
height = int(0.5 * height)
else:
height = 100 * figure_height
height = max(height, 185) # Minimal height to see all icons
max_y = max(
[data["annotation_y"] for f, data in plot_data.items()]
+ list(features_levels.values())
)
# BUILD THE PLOT ()
plot = figure(
plot_width=width,
plot_height=height,
tools=tools,
x_range=Range1d(0, self.sequence_length),
y_range=Range1d(-1, max_y + 1),
)
plot.patches(
xs="xs",
ys="ys",
color="color",
line_color="#000000",
source=ColumnDataSource(
pd.DataFrame.from_records(
[
self.bokeh_feature_patch(
feature.start,
feature.end,
feature.strand,
figure_width=figure_width,
level=level,
color=feature.color,
label=feature.label,
hover_html=(
feature.html
if feature.html is not None
else feature.label
),
)
for feature, level in features_levels.items()
]
)
),
)
if plot_data != {}:
plot.text(
x="x",
y="y",
text="text",
text_align="center",
text_font_size="12px",
text_font="arial",
text_font_style="normal",
source=ColumnDataSource(
pd.DataFrame.from_records(
[
dict(
x=feature.x_center,
y=pdata["annotation_y"],
text=feature.label,
color=feature.color,
)
for feature, pdata in plot_data.items()
]
)
),
)
plot.segment(
x0="x0",
x1="x1",
y0="y0",
y1="y1",
line_width=0.5,
color="#000000",
source=ColumnDataSource(
pd.DataFrame.from_records(
[
dict(
x0=feature.x_center,
x1=feature.x_center,
y0=pdata["annotation_y"],
y1=pdata["feature_y"],
)
for feature, pdata in plot_data.items()
]
)
),
)
plot.yaxis.visible = False
plot.outline_line_color = None
plot.grid.grid_line_color = None
plot.toolbar.logo = None
return plot
| 30.657143 | 85 | 0.440261 |
793f1aeac96ebabef7debb23b3e338780abe24d6 | 4,456 | py | Python | source/conf.py | zozer/Denizen-Beginners-Guide | a98bcdcaf19c3aa1e4e07b18e703f679d5d04001 | [
"MIT"
] | 1 | 2022-01-25T17:07:56.000Z | 2022-01-25T17:07:56.000Z | source/conf.py | Hydroxycobalamin/Denizen-Beginners-Guide | a98bcdcaf19c3aa1e4e07b18e703f679d5d04001 | [
"MIT"
] | null | null | null | source/conf.py | Hydroxycobalamin/Denizen-Beginners-Guide | a98bcdcaf19c3aa1e4e07b18e703f679d5d04001 | [
"MIT"
] | null | null | null | # for custom dScript lexer
from pygments.lexer import RegexLexer, include
from pygments import token
from sphinx.highlighting import lexers
# for markdown support
import recommonmark
from recommonmark.transform import AutoStructify
# Project Info
project = "Denizen Beginner's Guide"
copyright = '2019-2022 The DenizenScript Team'
author = 'The DenizenScript Team'
version = '0.4'
release = '0.4'
# General Config
extensions = ['recommonmark', 'sphinx_rtd_dark_mode']
templates_path = ['_templates']
source_suffix = '.md'
master_doc = 'index'
language = 'en'
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# Changes to HTML format
html_title = "Denizen Beginner's Guide"
html_extra_path = ['_static_extra']
# Disabled stuff
smartquotes = False
templates_path = ['_templates']
reusable_tokens = {
'spaces_patch': [
(r'\s', token.Text ) # spaces
],
'inside_brackets': [
(r'\[', token.Name.Variable, '#push'), # [
(r'\]', token.Name.Variable, '#pop'), # ]
include('tag_finder'),
(r'$', token.Text, '#pop'),
(r'.', token.Name.Variable) # anything else
],
'inside_tag': [
(r'\[(?=([^\s]+)\])', token.Name.Variable, 'inside_brackets' ), # [brackets]
(r'\.', token.Operator ), # .
(r'>', token.Name.Tag, '#pop'), # >
(r'$', token.Text, '#pop'),
(r'.', token.Name.Tag) # anything else
],
'tag_finder': [
(r'<(?=([^\s]+)>)', token.Name.Tag, 'inside_tag' ), # <tag>
(r'%.*%', token.Generic.Error ) # %old_def%
],
'double_quoted': [
include('tag_finder'),
(r'"', token.Literal.String, '#pop'), # ]
(r'.', token.Literal.String ) # anything else
],
'single_quoted': [
include('tag_finder'),
(r'\'', token.Literal.String.Backtick, '#pop'), # ]
(r'.', token.Literal.String.Backtick ) # anything else
],
'code_line': [
(r'"(?=([^"]+)")', token.Literal.String, 'double_quoted' ), # "text"
(r'\'(?=([^\']+)\')', token.Literal.String.Backtick, 'single_quoted' ), # 'text'
(r'$', token.Text, '#pop'),
include('tag_finder'),
(r'.', token.Text ) # anything else
],
'root': [
(r'^\s*#\s*[\|+=].*$', token.Comment.Hashbang ), # # +--- header comment
(r'^\s*#\s*-.*$', token.Comment.Single ), # # - code comment
(r'^\s*#.*$', token.Comment ), # # regular comment
(r'^[^-#\n]*:', token.Name.Class ), # yaml key:
(r'^\s*-\s[^\s]+$', token.Keyword ), # - somecommand
(r'^\s*-\s[^\s]+\s', token.Keyword, 'code_line' ), # - somecommand someargs
include('spaces_patch'),
(r'.', token.Text ) # anything else
]
}
class dScriptLexerRed(RegexLexer):
name = 'dscript_red'
tokens = reusable_tokens
class dScriptLexerGreen(RegexLexer):
name = 'dscript_green'
tokens = reusable_tokens
class dScriptLexerYellow(RegexLexer):
name = 'dscript_yellow'
tokens = reusable_tokens
class dScriptLexerBlue(RegexLexer):
name = 'dscript_blue'
tokens = reusable_tokens
lexers['dscript_red'] = dScriptLexerRed(startinline=True)
lexers['dscript_green'] = dScriptLexerGreen(startinline=True)
lexers['dscript_yellow'] = dScriptLexerYellow(startinline=True)
lexers['dscript_blue'] = dScriptLexerBlue(startinline=True)
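# Registering the lexers under these names makes them available to Sphinx /
# Pygments highlighting, presumably so that guide pages can tag code blocks as
# dscript_red / dscript_green / dscript_yellow / dscript_blue.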
# For markdown
def setup(app):
app.add_css_file('css/stylesheet.css')
app.add_config_value('recommonmark_config', {
'auto_toc_tree_section': 'Contents',
'enable_eval_rst': True,
}, True)
app.add_transform(AutoStructify)
| 39.087719 | 104 | 0.494614 |
793f1ba0d7a2bbcded8640df566a00d926bcf53b | 1,140 | py | Python | kaaf/generate-example.py | itdagene-ntnu/kvittering | 86b308f77957214d726efdcae6f6f63491fb73ed | [
"MIT"
] | null | null | null | kaaf/generate-example.py | itdagene-ntnu/kvittering | 86b308f77957214d726efdcae6f6f63491fb73ed | [
"MIT"
] | null | null | null | kaaf/generate-example.py | itdagene-ntnu/kvittering | 86b308f77957214d726efdcae6f6f63491fb73ed | [
"MIT"
] | null | null | null | import argparse
import base64
from handler import create_pdf, modify_data
default_data = {
"date": "11-03-2020",
"amount": "153 kr",
"name": "Lar",
"accountNumber": "010101010101",
"group": "Bankett",
"occasion": "Teste litt",
"comment": "pls",
}
def main(data, out):
data = modify_data(data)
pdf = create_pdf(data)
with open(out, "wb") as f:
f.write(pdf.encode("latin-1"))
print("Done!")
def encode_image(img):
with open(img, "rb") as f:
b64 = base64.b64encode(f.read()).decode("ascii")
return f'data:image/{img.split(".")[-1]};base64,{b64}'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("signature", help="Path to signature")
parser.add_argument("out", help="Path to the generated pdf")
parser.add_argument(
"images", nargs=argparse.REMAINDER, default=[], help="Paths to images"
)
args = parser.parse_args()
data = {
**default_data,
"signature": encode_image(args.signature),
"images": [encode_image(img) for img in args.images],
}
main(data, args.out)
| 22.8 | 78 | 0.614035 |
793f1bbb6691bde58493df5c316ec4a1cea26b84 | 3,395 | py | Python | tests/frontend_api/test_dropout.py | MUTTERSCHIFF/ngraph-neon | 762e8ea639cdc671311ee4929bd1ee8cdf83e8bb | [
"Apache-2.0"
] | 13 | 2018-03-17T00:27:18.000Z | 2020-06-18T01:36:34.000Z | tests/frontend_api/test_dropout.py | MUTTERSCHIFF/ngraph-neon | 762e8ea639cdc671311ee4929bd1ee8cdf83e8bb | [
"Apache-2.0"
] | 20 | 2018-03-17T14:49:04.000Z | 2018-04-19T17:47:38.000Z | tests/frontend_api/test_dropout.py | NervanaSystems/ngraph-neon | 8988ab90ee81c8b219ea5c374702e56d7f383302 | [
"Apache-2.0"
] | 5 | 2018-03-23T22:47:17.000Z | 2020-10-21T16:15:02.000Z | # ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
'''
Test of the dropout layer
'''
import pytest
import numpy as np
from neon.testing import ExecutorFactory
import neon as ng
from neon.frontend.layer import Layer, Dropout
pytestmark = pytest.mark.transformer_dependent
atol, rtol = 0, 1e-6
@pytest.mark.parametrize("nin,batch_size", [(32, 2)])
@pytest.mark.parametrize("keep", [1.0, 0.75, 0.5])
def test_dropout_train(nin, batch_size, keep):
# set inputs
N = ng.make_axis(batch_size, name='N')
F = ng.make_axis(nin, name='F')
inp = ng.placeholder([F, N])
layer = Dropout(keep=keep)
fprop = layer(inp)
# create data
x = np.random.uniform(size=(nin, batch_size))
# evaluate
with ExecutorFactory() as ex:
comp = ex.executor([fprop, layer.mask], inp)
out, mask = comp(x)
numpy_out = x * mask[:, None]
ng.testing.assert_allclose(out, numpy_out, atol=atol, rtol=rtol)
if keep < 1.0:
out1, mask1 = out.copy(), mask.copy()
out2, mask2 = comp(x)
assert (out1 != out2).any()
assert (mask1 != mask2).any()
@pytest.mark.parametrize("nin,batch_size", [(32, 2)])
def test_dropout_inference(nin, batch_size):
# set inputs
N = ng.make_axis(batch_size, name='N')
F = ng.make_axis(nin, name='F')
inp = ng.placeholder([F, N])
layer = Dropout(keep=0.5)
with Layer.inference_mode_on():
fprop = layer(inp)
# create data
x = np.random.uniform(size=(nin, batch_size))
# evaluate
with ExecutorFactory() as ex:
comp = ex.executor(fprop, inp)
out = comp(x)
numpy_out = x * 0.5
ng.testing.assert_allclose(out, numpy_out, atol=atol, rtol=rtol)
out1 = out.copy()
out2 = comp(x)
ng.testing.assert_allclose(out1, out2, atol=atol, rtol=rtol)
@pytest.mark.parametrize("nin,batch_size", [(32, 2)])
@pytest.mark.parametrize("keep", [1.0, 0.5])
def test_dropout_bprop_single_comp(nin, batch_size, keep):
# set inputs
N = ng.make_axis(batch_size, name='N')
F = ng.make_axis(nin, name='F')
mul_factor = ng.placeholder(())
inp = ng.placeholder([F, N])
layer = Dropout(keep=keep)
fprop = layer(inp * mul_factor)
out_graph = ng.sum(fprop, out_axes=())
bprop = ng.deriv(out_graph, mul_factor)
# create data
x = np.random.uniform(size=(nin, batch_size))
# evaluate
with ExecutorFactory() as ex:
comp = ex.executor([fprop, bprop, layer.mask], inp, mul_factor)
fout, bout, mask = comp(x, 2)
# Calculate derivative by hand and compare
ng.testing.assert_allclose(bout, (x * mask[:, None]).sum(), rtol=1e-6)
| 31.146789 | 80 | 0.617968 |
793f1d4ad9be684cc95190eb52f100b48762750c | 180 | py | Python | Strings/1632.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | Strings/1632.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | Strings/1632.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | three = ['A', 'E', 'I', 'O', 'S', 'a', 'e', 'i', 'o', 's']
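# Characters in 'three' multiply the running count by 3, every other character
# by 2; the product is printed once per test case below.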
for k in range(int(input())):
res = 1
for w in input():
if w in three:
res *= 3
else:
res *= 2
print(res)
| 15 | 58 | 0.438889 |
793f1e8f6c80710317409b148a236e3460db2f6d | 7,482 | py | Python | mppsolar/protocols/jkabstractprotocol.py | trixing/mpp-solar | 52ccb79776dae2430437fb96c7870cdb31ab0aa5 | [
"MIT"
] | null | null | null | mppsolar/protocols/jkabstractprotocol.py | trixing/mpp-solar | 52ccb79776dae2430437fb96c7870cdb31ab0aa5 | [
"MIT"
] | null | null | null | mppsolar/protocols/jkabstractprotocol.py | trixing/mpp-solar | 52ccb79776dae2430437fb96c7870cdb31ab0aa5 | [
"MIT"
] | null | null | null | import logging
import struct
from .abstractprotocol import AbstractProtocol
from .protocol_helpers import crc8
log = logging.getLogger("jkAbstractProtocol")
SOR = bytes.fromhex("55aaeb90")
COMMANDS = {
"getInfo": {
"name": "getInfo",
"command_code": "97",
"record_type": "3",
"description": "BLE Device Information inquiry",
"help": " -- queries the ble device information",
"type": "QUERY",
"response_type": "POSITIONAL",
"response": [
["Hex2Str", 4, "Header", ""],
["Hex2Str", 1, "Record Type", ""],
["Hex2Int", 1, "Record Counter", ""],
["Hex2Ascii", 10, "Device Model", ""],
["Hex2Ascii", 10, "Hardware Version", ""],
["Hex2Ascii", 10, "Software Version", ""],
["discard", 10, "", ""],
["Hex2Ascii", 16, "Device Name", ""],
["Hex2Ascii", 10, "Device Passcode", ""],
["Hex2Ascii", 14, "Manufacturing Date", ""],
["Hex2Ascii", 14, "Serial Number", ""],
["Hex2Ascii", 16, "User Data", ""],
["Hex2Ascii", 16, "Settings Passcode?", ""],
["discard", 672, "unknown", ""],
],
"test_responses": [
bytes.fromhex(
"55aaeb9003f14a4b2d42324132345300000000000000332e300000000000332e322e330000000876450004000000506f7765722057616c6c203100000000313233340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c2"
),
bytes.fromhex(
"55aaeb9003b54a4b2d42443641323053313050000000342e300000000000342e312e37000000541d1600040000004e6f7468696e67204a4b31000000000031323334000000000000000000000000323030373038000032303036323834303735000000000000496e707574205573657264617461000031323334353600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c4"
),
],
},
}
class jkAbstractProtocol(AbstractProtocol):
"""
JKAbstractProtocol - Abstract Handler for JKBMS communication
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__()
self._protocol_id = b"JK"
self.COMMANDS = COMMANDS
self.STATUS_COMMANDS = [
"",
]
self.SETTINGS_COMMANDS = [
"getInfo",
]
self.DEFAULT_COMMAND = "getInfo"
def get_full_command(self, command) -> bytes:
"""
        Override the default get_full_command as it's different for JK
"""
# getInfo = b'\xaa\x55\x90\xeb\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11'
log.info(f"Using protocol {self._protocol_id} with {len(self.COMMANDS)} commands")
        # These need to be set to allow other functions to work
self._command = command
self._command_defn = self.get_command_defn(command)
# log.debug(f"self._command = {self._command}, self._command_defn = {self._command_defn}")
log.debug(f"self._command = {self._command}")
# End of required variables setting
if self._command_defn is None:
# Maybe return a default here?
log.debug("No command_defn found")
return None
if "command_code" in self._command_defn:
# full command is 20 bytes long
cmd = bytearray(20)
# starts with \xaa\x55\x90\xeb
cmd[0:4] = bytes.fromhex("aa5590eb")
log.debug(f"cmd with SOR: {cmd}")
# then has command code
cmd[4] = int(self._command_defn["command_code"], 16)
if self._command_defn["type"] == "SETTER":
cmd[5] = 0x04
value = struct.pack("<h", int(float(self._command_value) * 1000))
cmd[6] = value[0]
cmd[7] = value[1]
log.debug(f"cmd with command code: {cmd}")
cmd[-1] = crc8(cmd)
log.debug(f"cmd with crc: {cmd}")
return cmd
return None
def get_command_defn(self, command):
log.debug(f"get_command_defn for: {command}")
if command is None:
log.debug("command is None")
return None
return super().get_command_defn(command)
def get_responses(self, response):
"""
        Override the default get_responses as it's different for JK
"""
responses = []
if self._command_defn is not None and self._command_defn["response_type"] == "POSITIONAL":
# Have a POSITIONAL type response, so need to break it up...
# example defn :
# "response": [
# ["discard", 1, "start flag", ""],
# ["discard", 1, "module address", ""],
# ["discard", 1, "command id", ""],
# ["discard", 1, "data length", ""],
# ]
# example response data b"\xa5\x01\x90\x08\x02\x10\x00\x00uo\x03\xbc\xf3",
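            # With that 4 x 1-byte example defn, the example response above would
            # be split into [b"\xa5", b"\x01", b"\x90", b"\x08"], with the
            # remaining bytes b"\x02\x10\x00\x00uo\x03\xbc\xf3" appended as a
            # final item by the length check after the loop.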
for defn in self._command_defn["response"]:
log.debug(f"Got defn: {defn}")
if defn[0].startswith("lookup"):
responses.append("lookup")
continue
size = defn[1]
item = response[:size]
responses.append(item)
response = response[size:]
if len(response) > 0:
responses.append(response)
return responses
else:
return bytearray(response)
def is_record_start(self, record):
if record.startswith(SOR):
log.debug("SOR found in record")
return True
return False
def is_record_correct_type(self, record, type):
if len(record) < len(SOR):
return False
if record[len(SOR)] == int(type):
log.debug(f"Record is type {type}")
return True
return False
def is_record_complete(self, record):
""""""
# check record starts with 'SOR'
if not self.is_record_start(record):
log.debug("No SOR found in record looking for completeness")
return False
# check that length one of the valid lengths (300, 320)
if len(record) != 300 and len(record) <= 320:
log.debug("Record length is invalid %d" % len(record))
return False
for n in [300, 320]:
if len(record) < n:
continue
# check the crc/checksum is correct for the record data
crc = ord(record[n-1:n])
#crc = record[n-1] # ord(record[n-1:n])
calcCrc = crc8(record[:n-1])
# print (crc, calcCrc)
if crc == calcCrc:
log.debug("Record CRC is valid: %d" % n)
return True
log.debug("No valid CRC found")
return False
| 42.754286 | 618 | 0.59984 |
793f1f17cdb1e1992aa6cbbe20efaf0c63bc962d | 7,115 | py | Python | nova/virt/libvirt/designer.py | gyliu513/nova | 14e974a5f77c72a9bb44c6801746abb2eda8e91d | [
"Apache-2.0"
] | 1 | 2019-11-07T03:11:37.000Z | 2019-11-07T03:11:37.000Z | nova/virt/libvirt/designer.py | gyliu513/nova | 14e974a5f77c72a9bb44c6801746abb2eda8e91d | [
"Apache-2.0"
] | null | null | null | nova/virt/libvirt/designer.py | gyliu513/nova | 14e974a5f77c72a9bb44c6801746abb2eda8e91d | [
"Apache-2.0"
] | 1 | 2021-01-11T04:07:52.000Z | 2021-01-11T04:07:52.000Z | # Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy based configuration of libvirt objects
This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
from nova.pci import utils as pci_utils
from nova.virt.libvirt import config
MIN_LIBVIRT_ETHERNET_SCRIPT_PATH_NONE = (1, 3, 3)
def set_vif_guest_frontend_config(conf, mac, model, driver, queues,
rx_queue_size):
"""Populate a LibvirtConfigGuestInterface instance
with guest frontend details.
NOTE: @model, @driver, @queues and @rx_queue_size can be None.
"""
conf.mac_addr = mac
if model is not None:
conf.model = model
if driver is not None:
conf.driver_name = driver
if queues is not None:
conf.vhost_queues = queues
if rx_queue_size:
conf.vhost_rx_queue_size = rx_queue_size
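# Illustrative use (a sketch, not taken from Nova's callers): the caller builds
# the config object first and passes it in, e.g.
#   conf = config.LibvirtConfigGuestInterface()
#   set_vif_guest_frontend_config(conf, mac='52:54:00:12:34:56', model='virtio',
#                                 driver=None, queues=None, rx_queue_size=None)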
def set_vif_host_backend_ethernet_config(conf, tapname, host):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an externally configured
host device.
NB use of this configuration is discouraged by
libvirt project and will mark domains as 'tainted'.
"""
conf.net_type = "ethernet"
conf.target_dev = tapname
# NOTE(mriedem): Before libvirt 1.3.3, passing script=None results
# in errors because /etc/qemu-ifup gets run which is blocked by
# AppArmor. Passing script='' between libvirt 1.3.3 and 3.1 will also
# result in errors. So we have to check the libvirt version and set
# the script value accordingly. Libvirt 3.1 allows and properly handles
# both None and '' as no-ops.
if host.has_min_version(MIN_LIBVIRT_ETHERNET_SCRIPT_PATH_NONE):
conf.script = None
else:
conf.script = ''
def set_vif_host_backend_802qbg_config(conf, devname, managerid,
typeid, typeidversion,
instanceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbg device.
"""
conf.net_type = "direct"
conf.source_dev = devname
conf.source_mode = "vepa"
conf.vporttype = "802.1Qbg"
conf.add_vport_param("managerid", managerid)
conf.add_vport_param("typeid", typeid)
conf.add_vport_param("typeidversion", typeidversion)
conf.add_vport_param("instanceid", instanceid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_802qbh_config(conf, net_type, devname, profileid,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbh device.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vporttype = "802.1Qbh"
conf.add_vport_param("profileid", profileid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hw_veb(conf, net_type, devname, vlan,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an device that supports hardware
virtual ethernet bridge.
"""
conf.net_type = net_type
conf.vlan = vlan
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else: # net_type == network_model.VNIC_TYPE_DIRECT
conf.source_dev = devname
conf.model = None
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hostdev_pci_config(conf, pci_slot):
"""Populate a LibvirtConfigGuestHostdev instance with pci address data."""
conf.domain, conf.bus, conf.slot, conf.function = (
pci_utils.get_pci_address_fields(pci_slot))
def set_vif_host_backend_direct_config(conf, devname, mode="passthrough"):
"""Populate a LibvirtConfigGuestInterface instance
with direct Interface.
"""
conf.net_type = "direct"
conf.source_mode = mode
conf.source_dev = devname
conf.model = "virtio"
def set_vif_host_backend_vhostuser_config(conf, mode, path, rx_queue_size,
tx_queue_size):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for vhostuser socket.
NOTE: @rx_queue_size and @tx_queue_size can be None
"""
conf.net_type = "vhostuser"
conf.vhostuser_type = "unix"
conf.vhostuser_mode = mode
conf.vhostuser_path = path
if rx_queue_size:
conf.vhost_rx_queue_size = rx_queue_size
if tx_queue_size:
conf.vhost_tx_queue_size = tx_queue_size
def set_vif_mtu_config(conf, mtu):
"""Populate a LibvirtConfigGuestInterface instance
with network mtu.
"""
conf.mtu = mtu
def set_vif_bandwidth_config(conf, inst_type):
"""Config vif inbound/outbound bandwidth limit. parameters are
set in instance_type_extra_specs table, key is in the format
quota:vif_inbound_average.
"""
bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
'vif_outbound_burst']
for key, value in inst_type.get('extra_specs', {}).items():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in bandwidth_items:
setattr(conf, scope[1], value)
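    # Illustrative example (values hypothetical): with extra_specs such as
    # {'quota:vif_inbound_average': '1024'}, the loop above results in
    # conf.vif_inbound_average = '1024'.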
def set_numa_memnode(conf, guest_node_id, host_cell_id):
"""Prepares numa memory node config for the guest.
"""
conf.cellid = guest_node_id
conf.nodeset = [host_cell_id]
conf.mode = "strict"
def set_vcpu_realtime_scheduler(conf, vcpus_rt, priority):
"""Prepares realtime config for the guest."""
conf.vcpus = vcpus_rt
conf.scheduler = "fifo"
conf.priority = priority
def set_driver_iommu_for_sev(conf):
virtio_attrs = {
config.LibvirtConfigGuestDisk: 'target_bus',
config.LibvirtConfigGuestInterface: 'model',
config.LibvirtConfigGuestRng: 'device_model',
config.LibvirtConfigMemoryBalloon: 'model',
}
for dev in conf.devices:
virtio_attr = virtio_attrs.get(dev.__class__)
if virtio_attr and getattr(dev, virtio_attr) == 'virtio':
dev.driver_iommu = True
| 33.247664 | 78 | 0.683767 |
793f1f92a023de607058fa0c23401ce8f56710a5 | 1,186 | py | Python | src/dialogs/statisticDialog.py | Alopex4/spruce | 2bf0bae18fe9b0d13691f2ee926071635cbe7c6f | [
"MIT"
] | 1 | 2019-07-04T10:32:07.000Z | 2019-07-04T10:32:07.000Z | src/dialogs/statisticDialog.py | Alopex4/spruce | 2bf0bae18fe9b0d13691f2ee926071635cbe7c6f | [
"MIT"
] | null | null | null | src/dialogs/statisticDialog.py | Alopex4/spruce | 2bf0bae18fe9b0d13691f2ee926071635cbe7c6f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import \
NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
class StatisticDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
# super(statisDialog, self).__init__(parent)
super().__init__(parent)
# a figure instance to plot on
self.figure = plt.figure()
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
# this is the Navigation widget
# it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
# set the layout
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
main = StatisticDialog()
main.show()
sys.exit(app.exec_())
| 29.65 | 80 | 0.687184 |
793f1fe8577bea5ecfff49dc479bb6daedf262b0 | 46,285 | py | Python | wrappers/trace.py | Joshua-Ashton/apitrace | 2353c8654941f6c599183287a7235a91ee83c038 | [
"MIT"
] | 47 | 2020-11-08T15:20:49.000Z | 2021-08-16T01:41:49.000Z | wrappers/trace.py | Joshua-Ashton/apitrace | 2353c8654941f6c599183287a7235a91ee83c038 | [
"MIT"
] | null | null | null | wrappers/trace.py | Joshua-Ashton/apitrace | 2353c8654941f6c599183287a7235a91ee83c038 | [
"MIT"
] | 1 | 2020-11-18T06:37:31.000Z | 2020-11-18T06:37:31.000Z | ##########################################################################
#
# Copyright 2008-2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""Common trace code generation."""
# Adjust path
import os.path
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import itertools
import specs.stdapi as stdapi
def getWrapperInterfaceName(interface):
return "Wrap" + interface.expr
def getPolymorphStructOffset(enumType, type):
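    # Emits a C expression for the payload offset within one element of a
    # packed polymorphic stream: the enum tag comes first, padded up to the
    # payload type's alignment.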
return 'align(sizeof(%s), alignof(%s))' % (enumType, type)
debug = False
class ComplexValueSerializer(stdapi.OnceVisitor):
    '''Type visitor which generates serialization functions for
complex types.
Simple types are serialized inline.
'''
def __init__(self, serializer):
stdapi.OnceVisitor.__init__(self)
self.serializer = serializer
def visitVoid(self, literal):
pass
def visitLiteral(self, literal):
pass
def visitString(self, string):
pass
def visitConst(self, const):
self.visit(const.type)
def visitStruct(self, struct):
# Write array with structure's member names
numMembers = len(struct.members)
if numMembers:
# Ensure member array has nonzero length to avoid MSVC error C2466
memberNames = '_struct%s_members' % (struct.tag,)
print('static const char * %s[%u] = {' % (memberNames, numMembers))
for type, name, in struct.members:
if name is None:
print(' "",')
else:
print(' "%s",' % (name,))
print('};')
else:
sys.stderr.write('warning: %s has no members\n' % struct.name)
memberNames = 'nullptr'
# Write structure's signature
print('static const trace::StructSig _struct%s_sig = {' % (struct.tag,))
if struct.name is None:
structName = '""'
else:
structName = '"%s"' % struct.name
print(' %u, %s, %u, %s' % (struct.id, structName, numMembers, memberNames))
print('};')
print()
def visitArray(self, array):
self.visit(array.type)
def visitAttribArray(self, array):
pass
def visitBlob(self, array):
pass
def visitEnum(self, enum):
print('static const trace::EnumValue _enum%s_values[] = {' % (enum.tag))
for value in enum.values:
print(' {"%s", %s},' % (value, value))
print('};')
print()
print('static const trace::EnumSig _enum%s_sig = {' % (enum.tag))
print(' %u, %u, _enum%s_values' % (enum.id, len(enum.values), enum.tag))
print('};')
print()
def visitBitmask(self, bitmask):
print('static const trace::BitmaskFlag _bitmask%s_flags[] = {' % (bitmask.tag))
for value in bitmask.values:
print(' {"%s", %s},' % (value, value))
print('};')
print()
print('static const trace::BitmaskSig _bitmask%s_sig = {' % (bitmask.tag))
print(' %u, %u, _bitmask%s_flags' % (bitmask.id, len(bitmask.values), bitmask.tag))
print('};')
print()
def visitPointer(self, pointer):
self.visit(pointer.type)
def visitIntPointer(self, pointer):
pass
def visitObjPointer(self, pointer):
self.visit(pointer.type)
def visitLinearPointer(self, pointer):
self.visit(pointer.type)
def visitHandle(self, handle):
self.visit(handle.type)
def visitReference(self, reference):
self.visit(reference.type)
def visitAlias(self, alias):
self.visit(alias.type)
def visitOpaque(self, opaque):
pass
def visitInterface(self, interface):
pass
def visitPolymorphic(self, polymorphic):
if polymorphic.stream:
self.visitEnum(polymorphic.streamEnum)
if not polymorphic.contextLess:
return
print('static void _write__%s(int selector, %s const & value) {' % (polymorphic.tag, polymorphic.expr))
print(' switch (selector) {')
for cases, type in polymorphic.iterSwitch():
for case in cases:
print(' %s:' % case)
self.serializer.visit(type, '(%s)(value)' % (type,))
print(' break;')
print(' }')
print('}')
print()
class ValueSerializer(stdapi.Visitor, stdapi.ExpanderMixin):
'''Visitor which generates code to serialize any type.
Simple types are serialized inline here, whereas the serialization of
complex types is dispatched to the serialization functions generated by
ComplexValueSerializer visitor above.
'''
def visitLiteral(self, literal, instance):
print(' trace::localWriter.write%s(%s);' % (literal.kind, instance))
def visitString(self, string, instance):
if not string.wide:
cast = 'const char *'
suffix = 'String'
else:
cast = 'const wchar_t *'
suffix = 'WString'
if cast != string.expr:
# reinterpret_cast is necessary for GLubyte * <=> char *
instance = 'reinterpret_cast<%s>(%s)' % (cast, instance)
if string.length is not None:
length = ', %s' % self.expand(string.length)
else:
length = ''
print(' trace::localWriter.write%s(%s%s);' % (suffix, instance, length))
def visitConst(self, const, instance):
self.visit(const.type, instance)
def visitStruct(self, struct, instance):
print(' trace::localWriter.beginStruct(&_struct%s_sig);' % (struct.tag,))
for member in struct.members:
self.visitMember(member, instance)
print(' trace::localWriter.endStruct();')
def visitArray(self, array, instance):
length = '_c' + array.type.tag
index = '_i' + array.type.tag
array_length = self.expand(array.length)
print(' if (%s) {' % instance)
print(' size_t %s = %s > 0 ? %s : 0;' % (length, array_length, array_length))
print(' trace::localWriter.beginArray(%s);' % length)
print(' for (size_t %s = 0; %s < %s; ++%s) {' % (index, index, length, index))
print(' trace::localWriter.beginElement();')
self.visitElement(index, array.type, '(%s)[%s]' % (instance, index))
print(' trace::localWriter.endElement();')
print(' }')
print(' trace::localWriter.endArray();')
print(' } else {')
print(' trace::localWriter.writeNull();')
print(' }')
def visitAttribArray(self, array, instance):
# For each element, decide if it is a key or a value (which depends on the previous key).
# If it is a value, store it as the right type - usually int, some bitfield, or some enum.
# It is currently assumed that an unknown key means that it is followed by an int value.
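        # Example layout (names illustrative): a flat list such as
        # {KEY_A, value, KEY_B, value, ..., terminator}, where most keys are
        # followed by a value and the list ends at array.terminator.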
# determine the array length which must be passed to writeArray() up front
count = '_c' + array.baseType.tag
print(' {')
print(' int %s;' % count)
print(' for (%(c)s = 0; %(array)s && %(array)s[%(c)s] != %(terminator)s; %(c)s += 2) {' \
% {'c': count, 'array': instance, 'terminator': array.terminator})
if array.hasKeysWithoutValues:
print(' switch (int(%(array)s[%(c)s])) {' % {'array': instance, 'c': count})
for key, valueType in array.valueTypes:
if valueType is None:
print(' case %s:' % key)
print(' %s--;' % count) # the next value is a key again and checked if it's the terminator
print(' break;')
print(' }')
print(' }')
print(' %(c)s += %(array)s ? 1 : 0;' % {'c': count, 'array': instance})
print(' trace::localWriter.beginArray(%s);' % count)
# for each key / key-value pair write the key and the value, if the key requires one
index = '_i' + array.baseType.tag
print(' for (int %(i)s = 0; %(i)s < %(count)s; %(i)s++) {' % {'i': index, 'count': count})
print(' trace::localWriter.beginElement();')
self.visit(array.baseType, "%(array)s[%(i)s]" % {'array': instance, 'i': index})
print(' trace::localWriter.endElement();')
print(' if (%(i)s + 1 >= %(count)s) {' % {'i': index, 'count': count})
print(' break;')
print(' }')
print(' switch (int(%(array)s[%(i)s++])) {' % {'array': instance, 'i': index})
# write generic value the usual way
for key, valueType in array.valueTypes:
if valueType is not None:
print(' case %s:' % key)
print(' trace::localWriter.beginElement();')
self.visitElement(index, valueType, '(%(array)s)[%(i)s]' % {'array': instance, 'i': index})
print(' trace::localWriter.endElement();')
print(' break;')
# known key with no value, just decrease the index so we treat the next value as a key
if array.hasKeysWithoutValues:
for key, valueType in array.valueTypes:
if valueType is None:
print(' case %s:' % key)
print(' %s--;' % index)
print(' break;')
# unknown key, write an int value
print(' default:')
print(' trace::localWriter.beginElement();')
print(' os::log("apitrace: warning: %s: unknown key 0x%04X, interpreting value as int\\n", ' + \
'__FUNCTION__, int(%(array)s[%(i)s - 1]));' % {'array': instance, 'i': index})
print(' trace::localWriter.writeSInt(%(array)s[%(i)s]);' % {'array': instance, 'i': index})
print(' trace::localWriter.endElement();')
print(' break;')
print(' }')
print(' }')
print(' trace::localWriter.endArray();')
print(' }')
def visitBlob(self, blob, instance):
print(' trace::localWriter.writeBlob(%s, %s);' % (instance, self.expand(blob.size)))
def visitEnum(self, enum, instance):
print(' trace::localWriter.writeEnum(&_enum%s_sig, %s);' % (enum.tag, instance))
def visitBitmask(self, bitmask, instance):
print(' trace::localWriter.writeBitmask(&_bitmask%s_sig, %s);' % (bitmask.tag, instance))
def visitPointer(self, pointer, instance):
print(' if (%s) {' % instance)
print(' trace::localWriter.beginArray(1);')
print(' trace::localWriter.beginElement();')
self.visit(pointer.type, "*" + instance)
print(' trace::localWriter.endElement();')
print(' trace::localWriter.endArray();')
print(' } else {')
print(' trace::localWriter.writeNull();')
print(' }')
def visitIntPointer(self, pointer, instance):
print(' trace::localWriter.writePointer((uintptr_t)%s);' % instance)
def visitObjPointer(self, pointer, instance):
print(' trace::localWriter.writePointer((uintptr_t)%s);' % instance)
def visitLinearPointer(self, pointer, instance):
print(' trace::localWriter.writePointer((uintptr_t)%s);' % instance)
def visitReference(self, reference, instance):
self.visit(reference.type, instance)
def visitHandle(self, handle, instance):
self.visit(handle.type, instance)
def visitAlias(self, alias, instance):
self.visit(alias.type, instance)
def visitOpaque(self, opaque, instance):
print(' trace::localWriter.writePointer((uintptr_t)%s);' % instance)
def visitInterface(self, interface, instance):
assert False
def visitPolymorphic(self, polymorphic, instance):
if polymorphic.contextLess:
print(' _write__%s(%s, %s);' % (polymorphic.tag, polymorphic.switchExpr, instance))
else:
switchExpr = self.expand(polymorphic.switchExpr)
if polymorphic.stream:
switchValue = '_switch_type'
print(r' size_t _blob_array_count = 0;')
print(r' {')
print(r' char* _blob_ptr = reinterpret_cast<char*>(%s);' % instance)
print(r' char* _blob_end = _blob_ptr + %s;' % polymorphic.streamSize)
print(r' while(_blob_ptr < _blob_end) {')
print(r' auto _switch_type = %s;' % switchExpr)
print(r' switch (%s) {' % switchValue)
for cases, type in polymorphic.iterSwitch():
for case in cases:
print(' %s:' % case)
print(r' _blob_ptr += align(%s + sizeof(%s), sizeof(void*));' % (getPolymorphStructOffset(polymorphic.streamEnum, type.type), type.type))
print(r' _blob_array_count++;')
print(r' break;')
if polymorphic.defaultType is None:
print(r' default:')
print(r' _blob_ptr += sizeof(%s) + sizeof(void*);' % polymorphic.streamEnum)
print(r' _blob_array_count++;')
print(r' break;')
print(r' }')
print(r' }')
print(r' }')
print(r' char* _blob_ptr = reinterpret_cast<char*>(%s);' % instance)
print(r' char* _blob_tmp = reinterpret_cast<char*>(%s);' % instance)
print(r' char* _blob_end = _blob_ptr + %s;' % polymorphic.streamSize)
# * 2 to handle the enum also.
print(r' trace::localWriter.beginArray(_blob_array_count * 2);')
print(r' while(_blob_ptr < _blob_end) {')
print(r' trace::localWriter.beginElement();')
print(r' auto _switch_type = %s;' % switchExpr)
print(r' trace::localWriter.writeEnum(&_enum%s_sig, _switch_type);' % polymorphic.streamEnum.tag)
print(r' trace::localWriter.endElement();')
print(r' trace::localWriter.beginElement();')
switchExpr = '_switch_type'
print(' switch (%s) {' % switchExpr)
for cases, type in polymorphic.iterSwitch():
for case in cases:
print(' %s:' % case)
caseInstance = instance
if type.expr is not None:
caseInstance = 'static_cast<%s>(%s)' % (type, caseInstance)
if polymorphic.stream:
print(' _blob_tmp = _blob_ptr + %s;' % (getPolymorphStructOffset(polymorphic.streamEnum, type.type)))
caseInstance = 'reinterpret_cast<%s>(_blob_tmp)' % (type)
self.visit(type, caseInstance)
if polymorphic.stream:
print(r' _blob_ptr += align(%s + sizeof(%s), sizeof(void*));' % (getPolymorphStructOffset(polymorphic.streamEnum, type.type), type.type))
print(' break;')
if polymorphic.defaultType is None:
print(r' default:')
print(r' os::log("apitrace: warning: %%s: unexpected polymorphic case %%i\n", __FUNCTION__, (int)%s);' % (switchExpr,))
if polymorphic.stream:
print(r' _blob_ptr += sizeof(%s) + sizeof(void*);' % polymorphic.streamEnum)
print(r' trace::localWriter.writeNull();')
print(r' break;')
print(' }')
if polymorphic.stream:
print(' trace::localWriter.endElement();')
print(' }')
print(' trace::localWriter.endArray();')
class WrapDecider(stdapi.Traverser):
    '''Type visitor which will decide whether this type will need wrapping or not.
    For complex types (arrays, structures), we need to know this beforehand.
'''
def __init__(self):
self.needsWrapping = False
def visitLinearPointer(self, void):
pass
def visitObjPointer(self, interface):
self.needsWrapping = True
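# Convenience helper: returns True when a type (transitively) contains an
# object pointer and therefore needs wrapping/unwrapping.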
def typeNeedsWrapping(type):
visitor = WrapDecider()
visitor.visit(type)
return visitor.needsWrapping
class ValueWrapper(stdapi.Traverser, stdapi.ExpanderMixin):
'''Type visitor which will generate the code to wrap an instance.
    Wrapping is necessary mostly for interfaces; however, interface pointers can
appear anywhere inside complex types.
'''
def visitStruct(self, struct, instance):
for member in struct.members:
self.visitMember(member, instance)
def visitArray(self, array, instance):
array_length = self.expand(array.length)
print(" if (%s) {" % instance)
print(" for (size_t _i = 0, _s = %s; _i < _s; ++_i) {" % array_length)
self.visitElement('_i', array.type, instance + "[_i]")
print(" }")
print(" }")
def visitPointer(self, pointer, instance):
print(" if (%s) {" % instance)
self.visit(pointer.type, "*" + instance)
print(" }")
def visitObjPointer(self, pointer, instance):
elem_type = pointer.type.mutable()
if isinstance(elem_type, stdapi.Interface):
self.visitInterfacePointer(elem_type, instance)
elif isinstance(elem_type, stdapi.Alias) and isinstance(elem_type.type, stdapi.Interface):
self.visitInterfacePointer(elem_type.type, instance)
else:
# All interfaces should at least implement IUnknown
print(" WrapIUnknown::_wrap(__FUNCTION__, (IUnknown **) &%s);" % (instance,))
def visitInterface(self, interface, instance):
raise NotImplementedError
def visitInterfacePointer(self, interface, instance):
print(" Wrap%s::_wrap(__FUNCTION__, &%s);" % (interface.name, instance))
def visitPolymorphic(self, polymorphic, instance):
# XXX: There might be polymorphic values that need wrapping in the future
if typeNeedsWrapping(polymorphic):
switchExpr = self.expand(polymorphic.switchExpr)
if polymorphic.stream:
print(r' void* _blob = alloca(%s);' % polymorphic.streamSize)
print(r' size_t _blob_size = %s;' % polymorphic.streamSize)
print(r' memcpy(_blob, %s, _blob_size);' % instance)
print(r' %s = _blob;' % instance)
print(r' char* _blob_ptr = reinterpret_cast<char*>(%s);' % instance)
print(r' char* _blob_tmp = reinterpret_cast<char*>(%s);' % instance)
print(r' char* _blob_end = _blob_ptr + %s;' % polymorphic.streamSize)
print(r' while(_blob_ptr < _blob_end) {')
print(r' auto _switch_type = %s;' % switchExpr)
switchExpr = '_switch_type'
print(' switch (%s) {' % switchExpr)
for cases, type in polymorphic.iterSwitch():
for case in cases:
print(' %s:' % case)
caseInstance = instance
if type.expr is not None:
caseInstance = 'static_cast<%s>(%s)' % (type, caseInstance)
if polymorphic.stream:
print(' _blob_tmp = _blob_ptr + %s;' % getPolymorphStructOffset(polymorphic.streamEnum, type))
caseInstance = 'reinterpret_cast<%s>(_blob_tmp)' % (type)
self.visit(type, caseInstance)
if polymorphic.stream:
print(r' _blob_ptr += align(%s + sizeof(%s), sizeof(void*));' % (getPolymorphStructOffset(polymorphic.streamEnum, type.type), type.type))
print(' break;')
if polymorphic.defaultType is None:
print(r' default:')
print(r' break;')
print(' }')
if polymorphic.stream:
print(' }')
class ValueUnwrapper(ValueWrapper):
'''Reverse of ValueWrapper.'''
allocated = False
def visitStruct(self, struct, instance):
if not self.allocated:
            # Argument is constant. We need to create a non-const copy.
print(' {')
print(" %s * _t = static_cast<%s *>(alloca(sizeof *_t));" % (struct, struct))
print(' *_t = %s;' % (instance,))
assert instance.startswith('*')
print(' %s = _t;' % (instance[1:],))
instance = '*_t'
self.allocated = True
try:
return ValueWrapper.visitStruct(self, struct, instance)
finally:
print(' }')
else:
return ValueWrapper.visitStruct(self, struct, instance)
def visitArray(self, array, instance):
if self.allocated or isinstance(instance, stdapi.Interface):
return ValueWrapper.visitArray(self, array, instance)
array_length = self.expand(array.length)
elem_type = array.type.mutable()
print(" if (%s && %s) {" % (instance, array_length))
print(" %s * _t = static_cast<%s *>(alloca(%s * sizeof *_t));" % (elem_type, elem_type, array_length))
print(" for (size_t _i = 0, _s = %s; _i < _s; ++_i) {" % array_length)
print(" _t[_i] = %s[_i];" % instance)
self.allocated = True
self.visit(array.type, "_t[_i]")
print(" }")
print(" %s = _t;" % instance)
print(" }")
def visitInterfacePointer(self, interface, instance):
if not '(' in instance and not '<' in instance and not '[' in instance:
print(r' Wrap%s* %s_original = reinterpret_cast<Wrap%s *>(%s);' % (interface.name, instance, interface.name, instance))
print(r' Wrap%s::_unwrap(__FUNCTION__, &%s);' % (interface.name, instance))
def _getInterfaceHierarchy(allIfaces, baseIface, result):
for iface in allIfaces:
if iface.base is baseIface:
_getInterfaceHierarchy(allIfaces, iface, result)
result.append(iface)
def getInterfaceHierarchy(allIfaces, baseIface):
result = []
_getInterfaceHierarchy(allIfaces, baseIface, result)
return result
class Tracer:
'''Base class to orchestrate the code generation of API tracing.'''
    # 0-3 are reserved for memcpy, malloc, free, and realloc
# 4-7 are reserved for WaitForMultipleObjects
__id = 8
def __init__(self):
self.api = None
def serializerFactory(self):
'''Create a serializer.
        Can be overridden by derived classes to inject their own serializer.
'''
return ValueSerializer()
def traceApi(self, api):
self.api = api
self.header(api)
# Includes
for module in api.modules:
for header in module.headers:
print(header)
print()
# Generate the serializer functions
types = api.getAllTypes()
visitor = ComplexValueSerializer(self.serializerFactory())
for tp in types:
visitor.visit(tp)
print()
        # Interface wrappers
self.traceInterfaces(api)
# Function wrappers
self.interface = None
self.base = None
for function in api.getAllFunctions():
self.traceFunctionDecl(function)
for function in api.getAllFunctions():
try:
self.traceFunctionImpl(function)
except:
sys.stderr.write("error: %s: exception\n" % function.name)
raise
print()
self.footer(api)
def header(self, api):
print('#ifdef _WIN32')
print('# include <malloc.h> // alloca')
print('# ifndef alloca')
print('# define alloca _alloca')
print('# endif')
print('#else')
print('# include <alloca.h> // alloca')
print('#endif')
print()
print()
print(r'/*')
print(r' * g_WrappedObjects is already protected by trace::LocalWriter::mutex')
print(r' * This lock is hold during the beginEnter/endEnter and beginLeave/endLeave sections')
print(r' */')
print('static std::map<void *, void *> g_WrappedObjects;')
def footer(self, api):
pass
def traceFunctionDecl(self, function):
# Per-function declarations
if not function.internal:
if function.args:
print('static const char * _%s_args[%u] = {%s};' % (function.name, len(function.args), ', '.join(['"%s"' % arg.name for arg in function.args])))
else:
print('static const char ** _%s_args = NULL;' % (function.name,))
print('static const trace::FunctionSig _%s_sig = {%u, "%s", %u, _%s_args};' % (function.name, self.getFunctionSigId(), function.sigName(), len(function.args), function.name))
print()
def getFunctionSigId(self):
id = Tracer.__id
Tracer.__id += 1
return id
def isFunctionPublic(self, function):
return True
def traceFunctionImpl(self, function):
if self.isFunctionPublic(function):
print('extern "C" PUBLIC')
else:
print('extern "C" PRIVATE')
print(function.prototype() + ' {')
if function.type is not stdapi.Void:
print(' %s _result;' % function.type)
for arg in function.args:
if not arg.output:
self.unwrapArg(function, arg)
self.traceFunctionImplBody(function)
# XXX: wrapping should go here, but before we can do that we'll need to protect g_WrappedObjects with its own mutex
if function.type is not stdapi.Void:
print(' return _result;')
print('}')
print()
def traceFunctionImplBody(self, function):
if not function.internal:
print(' unsigned _call = trace::localWriter.beginEnter(&_%s_sig);' % (function.name,))
for arg in function.args:
if not arg.output:
self.serializeArg(function, arg)
print(' trace::localWriter.endEnter();')
self.invokeFunction(function)
if not function.internal:
print(' trace::localWriter.beginLeave(_call);')
print(' if (%s) {' % self.wasFunctionSuccessful(function))
for arg in function.args:
if arg.output:
self.serializeArg(function, arg)
self.wrapArg(function, arg)
print(' }')
if function.type is not stdapi.Void:
self.serializeRet(function, "_result")
if function.type is not stdapi.Void:
self.wrapRet(function, "_result")
print(' trace::localWriter.endLeave();')
def invokeFunction(self, function):
self.doInvokeFunction(function)
def doInvokeFunction(self, function, prefix='_', suffix=''):
# Same as invokeFunction() but called both when trace is enabled or disabled.
if function.type is stdapi.Void:
result = ''
else:
result = '_result = '
dispatch = prefix + function.name + suffix
print(' %s%s(%s);' % (result, dispatch, ', '.join([str(arg.name) for arg in function.args])))
def wasFunctionSuccessful(self, function):
if function.type is stdapi.Void:
return 'true'
if str(function.type) == 'HRESULT':
return 'SUCCEEDED(_result)'
return 'true'
def serializeArg(self, function, arg):
print(' trace::localWriter.beginArg(%u);' % (arg.index,))
self.serializeArgValue(function, arg)
print(' trace::localWriter.endArg();')
def serializeArgValue(self, function, arg):
self.serializeValue(arg.type, arg.name)
def wrapArg(self, function, arg):
assert not isinstance(arg.type, stdapi.ObjPointer)
from specs.winapi import REFIID
riid = None
for other_arg in function.args:
if not other_arg.output and other_arg.type is REFIID:
riid = other_arg
if riid is not None \
and riid.name != 'EmulatedInterface' \
and isinstance(arg.type, stdapi.Pointer) \
and isinstance(arg.type.type, stdapi.ObjPointer):
self.wrapIid(function, riid, arg)
return
self.wrapValue(arg.type, arg.name)
def unwrapArg(self, function, arg):
self.unwrapValue(arg.type, arg.name)
def serializeRet(self, function, instance):
print(' trace::localWriter.beginReturn();')
self.serializeValue(function.type, instance)
print(' trace::localWriter.endReturn();')
def serializeValue(self, type, instance):
serializer = self.serializerFactory()
serializer.visit(type, instance)
def wrapRet(self, function, instance):
self.wrapValue(function.type, instance)
def wrapValue(self, type, instance):
if typeNeedsWrapping(type):
visitor = ValueWrapper()
visitor.visit(type, instance)
def unwrapValue(self, type, instance):
if typeNeedsWrapping(type):
visitor = ValueUnwrapper()
visitor.visit(type, instance)
def traceInterfaces(self, api):
interfaces = api.getAllInterfaces()
if not interfaces:
return
print(r'#include "guids.hpp"')
print()
# Helper functions to wrap/unwrap interface pointers
print(r'static inline bool')
print(r'hasChildInterface(REFIID riid, IUnknown *pUnknown) {')
print(r' IUnknown *pObj = NULL;')
print(r' HRESULT hr = pUnknown->QueryInterface(riid, (VOID **)&pObj);')
print(r' if (FAILED(hr)) {')
print(r' return false;')
print(r' }')
print(r' assert(pObj);')
print(r' pObj->Release();')
print(r' return pUnknown == pObj;')
print(r'}')
print()
print(r'static inline const void *')
print(r'getVtbl(const void *pvObj) {')
print(r' return pvObj ? *(const void **)pvObj : NULL;')
print(r'}')
print()
print(r'static void')
print(r'warnVtbl(const void *pVtbl) {')
print(r' HMODULE hModule = 0;')
print(r' BOOL bRet = GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |')
print(r' GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,')
print(r' (LPCTSTR)pVtbl,')
print(r' &hModule);')
print(r' if (bRet) {')
print(r' char szModule[MAX_PATH];')
print(r' DWORD dwRet = GetModuleFileNameA(hModule, szModule, sizeof szModule);')
print(r' assert(dwRet);')
print(r' if (dwRet) {')
print(r' DWORD dwOffset = (UINT_PTR)pVtbl - (UINT_PTR)hModule;')
print(r' os::log("apitrace: warning: pVtbl = %p (%s!+0x%0lx)\n", pVtbl, szModule, dwOffset);')
print(r' } else {')
print(r' os::log("apitrace: warning: pVtbl = %p\n", pVtbl);')
print(r' }')
print(r' }')
print(r'}')
print()
for iface in interfaces:
self.declareWrapperInterface(iface)
self.implementIidWrapper(api)
for iface in interfaces:
self.implementWrapperInterface(iface)
print()
def declareWrapperInterface(self, interface):
wrapperInterfaceName = getWrapperInterfaceName(interface)
print("class %s : public %s " % (wrapperInterfaceName, interface.name))
print("{")
print("private:")
print(" %s(%s * pInstance);" % (wrapperInterfaceName, interface.name))
print(" ~%s(); // Not implemented" % wrapperInterfaceName)
print("public:")
print(" static %s* _create(const char *entryName, %s * pInstance);" % (wrapperInterfaceName, interface.name))
print(" static void _wrap(const char *entryName, %s ** ppInstance);" % (interface.name,))
print(" static void _unwrap(const char *entryName, const %s * const * pInstance);" % (interface.name,))
print()
methods = list(interface.iterMethods())
for method in methods:
print(" " + method.prototype() + " override;")
print()
for type, name, value in self.enumWrapperInterfaceVariables(interface):
print(' %s %s;' % (type, name))
print()
print(r'private:')
print(r' void _dummy(unsigned i) const {')
print(r' os::log("error: %%s: unexpected virtual method %%i of instance pvObj=%%p pWrapper=%%p pVtbl=%%p\n", "%s", i, m_pInstance, this, m_pVtbl);' % interface.name)
print(r' warnVtbl(m_pVtbl);')
print(r' warnVtbl(getVtbl(m_pInstance));')
print(r' trace::localWriter.flush();')
print(r' os::abort();')
print(r' }')
print()
for i in range(len(methods), 64):
print(r' virtual void _dummy%i(void) const { _dummy(%i); }' % (i, i))
print()
print("};")
print()
def enumWrapperInterfaceVariables(self, interface):
return [
("DWORD", "m_dwMagic", "0xd8365d6c"),
("%s *" % interface.name, "m_pInstance", "pInstance"),
("const void *", "m_pVtbl", "getVtbl(pInstance)"),
("UINT", "m_NumMethods", len(list(interface.iterBaseMethods()))),
]
def implementWrapperInterface(self, iface):
self.interface = iface
wrapperInterfaceName = getWrapperInterfaceName(iface)
# Private constructor
print('%s::%s(%s * pInstance) {' % (wrapperInterfaceName, wrapperInterfaceName, iface.name))
for type, name, value in self.enumWrapperInterfaceVariables(iface):
if value is not None:
print(' %s = %s;' % (name, value))
print('}')
print()
# Public constructor
print('%s *%s::_create(const char *entryName, %s * pInstance) {' % (wrapperInterfaceName, wrapperInterfaceName, iface.name))
print(r' Wrap%s *pWrapper = new Wrap%s(pInstance);' % (iface.name, iface.name))
if debug:
print(r' os::log("%%s: created %s pvObj=%%p pWrapper=%%p pVtbl=%%p\n", entryName, pInstance, pWrapper, pWrapper->m_pVtbl);' % iface.name)
print(r' g_WrappedObjects[pInstance] = pWrapper;')
print(r' return pWrapper;')
print('}')
print()
baseMethods = list(iface.iterBaseMethods())
for base, method in baseMethods:
self.base = base
self.implementWrapperInterfaceMethod(iface, base, method)
print()
# Wrap pointer
ifaces = self.api.getAllInterfaces()
print(r'void')
print(r'%s::_wrap(const char *entryName, %s **ppObj) {' % (wrapperInterfaceName, iface.name))
print(r' if (!ppObj) {')
print(r' return;')
print(r' }')
print(r' %s *pObj = *ppObj;' % (iface.name,))
print(r' if (!pObj) {')
print(r' return;')
print(r' }')
print(r' assert(hasChildInterface(IID_%s, pObj));' % iface.name)
print(r' std::map<void *, void *>::const_iterator it = g_WrappedObjects.find(pObj);')
print(r' if (it != g_WrappedObjects.end()) {')
print(r' Wrap%s *pWrapper = (Wrap%s *)it->second;' % (iface.name, iface.name))
print(r' assert(pWrapper);')
print(r' assert(pWrapper->m_dwMagic == 0xd8365d6c);')
print(r' assert(pWrapper->m_pInstance == pObj);')
print(r' if (pWrapper->m_pVtbl == getVtbl(pObj) &&')
print(r' pWrapper->m_NumMethods >= %s) {' % len(baseMethods))
if debug:
print(r' os::log("%s: fetched pvObj=%p pWrapper=%p pVtbl=%p\n", entryName, pObj, pWrapper, pWrapper->m_pVtbl);')
print(r' assert(hasChildInterface(IID_%s, pWrapper->m_pInstance));' % iface.name)
print(r' *ppObj = pWrapper;')
print(r' return;')
print(r' } else {')
if debug:
print(r' os::log("%s::Release: deleted pvObj=%%p pWrapper=%%p pVtbl=%%p\n", pWrapper->m_pInstance, pWrapper, pWrapper->m_pVtbl);' % iface.name)
print(r' g_WrappedObjects.erase(pObj);')
print(r' }')
print(r' }')
for childIface in getInterfaceHierarchy(ifaces, iface):
print(r' if (hasChildInterface(IID_%s, pObj)) {' % (childIface.name,))
print(r' *ppObj = Wrap%s::_create(entryName, static_cast<%s *>(pObj));' % (childIface.name, childIface.name))
print(r' return;')
print(r' }')
print(r' *ppObj = Wrap%s::_create(entryName, pObj);' % iface.name)
print(r'}')
print()
# Unwrap pointer
print(r'void')
print(r'%s::_unwrap(const char *entryName, const %s * const *ppObjConst) {' % (wrapperInterfaceName, iface.name))
print(r' %s **ppObj = const_cast<%s**>(ppObjConst);' % (iface.name, iface.name))
print(r' if (!ppObj || !*ppObj) {')
print(r' return;')
print(r' }')
print(r' const %s *pWrapper = static_cast<const %s*>(*ppObj);' % (wrapperInterfaceName, getWrapperInterfaceName(iface)))
print(r' if (pWrapper && pWrapper->m_dwMagic == 0xd8365d6c) {')
print(r' *ppObj = pWrapper->m_pInstance;')
print(r' } else {')
print(r' os::log("apitrace: warning: %%s: unexpected %%s pointer %%p\n", entryName, "%s", *ppObj);' % iface.name)
print(r' trace::localWriter.flush();')
print(r' }')
print(r'}')
print()
def implementWrapperInterfaceMethod(self, interface, base, method):
wrapperInterfaceName = getWrapperInterfaceName(interface)
print(method.prototype(wrapperInterfaceName + '::' + method.name) + ' {')
if False:
print(r' os::log("%%s(%%p -> %%p)\n", "%s", this, m_pInstance);' % (wrapperInterfaceName + '::' + method.name))
if method.type is not stdapi.Void:
print(' %s _result;' % method.type)
print(' %s *_this = static_cast<%s *>(m_pInstance);' % (base, base))
for arg in method.args:
if not arg.output:
self.unwrapArg(method, arg)
self.implementWrapperInterfaceMethodBody(interface, base, method)
# XXX: wrapping should go here, but before we can do that we'll need to protect g_WrappedObjects with its own mutex
if method.type is not stdapi.Void:
print(' return _result;')
print('}')
print()
def implementWrapperInterfaceMethodBody(self, interface, base, method):
self.implementWrapperInterfaceMethodBodyEx(interface, base, method, '_result')
def implementWrapperInterfaceMethodBodyEx(self, interface, base, method, result_override):
assert not method.internal
sigName = interface.name + '::' + method.sigName()
if method.overloaded:
# Once the method signature name goes into a trace, we'll need to
# support it indefinetely, so log them so one can make sure nothing
# weird gets baked in
sys.stderr.write('note: overloaded method %s\n' % (sigName,))
numArgs = len(method.args) + 1
print(' static const char * _args[%u] = {%s};' % (numArgs, ', '.join(['"this"'] + ['"%s"' % arg.name for arg in method.args])))
print(' static const trace::FunctionSig _sig = {%u, "%s", %u, _args};' % (self.getFunctionSigId(), sigName, numArgs))
print(' unsigned _call = trace::localWriter.beginEnter(&_sig);')
print(' trace::localWriter.beginArg(0);')
print(' trace::localWriter.writePointer((uintptr_t)m_pInstance);')
print(' trace::localWriter.endArg();')
for arg in method.args:
if not arg.output:
self.serializeArg(method, arg)
print(' trace::localWriter.endEnter();')
if result_override == '_result':
self.invokeMethod(interface, base, method)
print(' trace::localWriter.beginLeave(_call);')
print(' if (%s) {' % self.wasFunctionSuccessful(method))
for arg in method.args:
if arg.output:
self.serializeArg(method, arg)
self.wrapArg(method, arg)
print(' }')
if method.type is not stdapi.Void:
self.serializeRet(method, result_override)
if method.type is not stdapi.Void:
self.wrapRet(method, result_override)
if method.name == 'Release':
assert method.type is not stdapi.Void
print(r' if (!_result) {')
print(r' // NOTE: Must not delete the wrapper here. See')
print(r' // https://github.com/apitrace/apitrace/issues/462')
print(r' }')
print(' trace::localWriter.endLeave();')
def implementIidWrapper(self, api):
ifaces = api.getAllInterfaces()
print(r'static void')
print(r'warnIID(const char *entryName, REFIID riid, void *pvObj, const char *reason) {')
print(r' os::log("apitrace: warning: %s: %s IID %s\n",')
print(r' entryName, reason,')
print(r' getGuidName(riid));')
print(r' const void * pVtbl = getVtbl(pvObj);')
print(r' warnVtbl(pVtbl);')
print(r'}')
print()
print(r'static void')
print(r'wrapIID(const char *entryName, REFIID riid, void * * ppvObj) {')
print(r' if (!ppvObj || !*ppvObj) {')
print(r' return;')
print(r' }')
for iface in ifaces:
print(r' if (riid == IID_%s) {' % (iface.name,))
print(r' Wrap%s::_wrap(entryName, (%s **) ppvObj);' % (iface.name, iface.name))
print(r' return;')
print(r' }')
print(r' warnIID(entryName, riid, *ppvObj, "unsupported");')
print(r'}')
print()
def wrapIid(self, function, riid, out):
# Cast output arg to `void **` if necessary
out_name = out.name
obj_type = out.type.type.type
if not obj_type is stdapi.Void:
assert isinstance(obj_type, stdapi.Interface)
out_name = 'reinterpret_cast<void * *>(%s)' % out_name
print(r' if (%s && *%s) {' % (out.name, out.name))
functionName = function.name
else_ = ''
if self.interface is not None:
functionName = self.interface.name + '::' + functionName
print(r' if (*%s == m_pInstance &&' % (out_name,))
print(r' (%s)) {' % ' || '.join('%s == IID_%s' % (riid.name, iface.name) for iface in self.interface.iterBases()))
print(r' *%s = this;' % (out_name,))
print(r' }')
else_ = 'else '
print(r' %s{' % else_)
print(r' wrapIID("%s", %s, %s);' % (functionName, riid.name, out_name))
print(r' }')
print(r' }')
def invokeMethod(self, interface, base, method):
if method.type is stdapi.Void:
result = ''
else:
result = '_result = '
print(' %s_this->%s(%s);' % (result, method.name, ', '.join([str(arg.name) for arg in method.args])))
def emit_memcpy(self, ptr, size):
print(' trace::fakeMemcpy(%s, %s);' % (ptr, size))
def fake_call(self, function, args):
print(' {')
print(' unsigned _fake_call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,))
for arg, instance in zip(function.args, args):
assert not arg.output
print(' trace::localWriter.beginArg(%u);' % (arg.index,))
self.serializeValue(arg.type, instance)
print(' trace::localWriter.endArg();')
print(' trace::localWriter.endEnter();')
print(' trace::localWriter.beginLeave(_fake_call);')
print(' trace::localWriter.endLeave();')
print(' }')
| 41.886878 | 186 | 0.554024 |
793f20d12b8a9c08ade34dc804b61036c60f49ba | 180 | py | Python | Lectures_Codes/examples-05/lecture05/own_exception.py | MichalKyjovsky/NPRG065_Programing_in_Python | 14436fbf8f0e547ab084083135a84c8ae49e083c | [
"MIT"
] | null | null | null | Lectures_Codes/examples-05/lecture05/own_exception.py | MichalKyjovsky/NPRG065_Programing_in_Python | 14436fbf8f0e547ab084083135a84c8ae49e083c | [
"MIT"
] | null | null | null | Lectures_Codes/examples-05/lecture05/own_exception.py | MichalKyjovsky/NPRG065_Programing_in_Python | 14436fbf8f0e547ab084083135a84c8ae49e083c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
class MyException(Exception):
pass
def foo():
raise MyException
try:
foo()
except MyException:
print('MyException occurred')
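    # A bare `raise` re-raises the MyException currently being handled,
    # preserving the original traceback.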
raise
| 10.588235 | 33 | 0.661111 |
793f20e0419588065fc50db89767482d8e304723 | 395 | py | Python | scripting/asgi.py | Kgermando/es-script | f1b10ecf2c805e8875a025e7033c724e236f6cd1 | [
"Apache-2.0"
] | null | null | null | scripting/asgi.py | Kgermando/es-script | f1b10ecf2c805e8875a025e7033c724e236f6cd1 | [
"Apache-2.0"
] | null | null | null | scripting/asgi.py | Kgermando/es-script | f1b10ecf2c805e8875a025e7033c724e236f6cd1 | [
"Apache-2.0"
] | null | null | null | """
ASGI config for scripting project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scripting.settings')
application = get_asgi_application()
| 23.235294 | 78 | 0.787342 |
793f20e70eecdb8eac8a88f8342dcadd5572ed6e | 4,023 | py | Python | trainTripleClassification.py | lualiu/GanforKGE | 29f68704cda5e58501c1ded635cd3ecae936d4bb | [
"Apache-2.0"
] | null | null | null | trainTripleClassification.py | lualiu/GanforKGE | 29f68704cda5e58501c1ded635cd3ecae936d4bb | [
"Apache-2.0"
] | null | null | null | trainTripleClassification.py | lualiu/GanforKGE | 29f68704cda5e58501c1ded635cd3ecae936d4bb | [
"Apache-2.0"
] | null | null | null | import argparse
import os
from preprocess.TripleClassificationData import TripleClassificationData
from train.TrainTripleClassification import TrainTripleClassifcation
from utils.readmodel import *
os.environ["CUDA_VISIBLE_DEVICES"] = '3'
FLAGS = None
def main(FLAGS):
data = TripleClassificationData(
os.path.join(FLAGS.datapath,FLAGS.dataset),
FLAGS.trainfilename,
FLAGS.validfilename,
FLAGS.testfilename,
FLAGS.withreverse
)
embedding, generator, discriminator = read_gan_model(FLAGS, data.entity_numbers,data.relation_numbers)
if FLAGS.cuda:
embedding.cuda()
generator.cuda()
discriminator.cuda()
trainGan = TrainTripleClassifcation()
trainGan.set_data(data)
trainGan.set_model(embedding,generator,discriminator)
trainGan.train(
FLAGS.usepretrained,
FLAGS.pretrainedpath,
FLAGS.learningrate,
FLAGS.weightdecay,
FLAGS.margin,
FLAGS.epochs,
FLAGS.batchsize,
FLAGS.evaluationtimes,
FLAGS.savetimes,
FLAGS.savepath,
FLAGS.logpath,
FLAGS.dtuneembedding,
FLAGS.gtuneembedding,
FLAGS.dmargintype,
FLAGS.gusenegative,
FLAGS.meanorsum,
print_file = FLAGS.logpath+'.txt'
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=True, type=bool)
# parameters for model name
parser.add_argument("--embeddingname", default='TransE',type=str)
parser.add_argument("--gname", default='ConvTransE', type=str)
parser.add_argument("--dname", default='Translation', type=str)
# parameters for dataset
parser.add_argument("--datapath",default='data',type=str)
parser.add_argument("--dataset",default="Wordnet11",type=str)
parser.add_argument("--trainfilename", default="train.txt", type=str)
parser.add_argument("--validfilename", default="dev.txt", type=str)
parser.add_argument("--testfilename", default="test.txt", type=str)
parser.add_argument("--withreverse", default=False, type=bool)
# parameters for super parameters
parser.add_argument("--embeddingdim", default=100, type=int)
parser.add_argument("--usepretrained", default=False, type=bool)
parser.add_argument("--pretrainedpath", default='saved_model/TransE/baseline/WN18RR/embedding-model-2000.pkl', type=str)
parser.add_argument("--learningrate", default=0.001, type=float)
parser.add_argument("--epochs", default=1000, type=int)
parser.add_argument("--batchsize", default=1000, type=int)
parser.add_argument("--margin", default=2.0, type=float)
parser.add_argument("--weightdecay", default=1e-6, type=float)
# parameters for save and log times and path
parser.add_argument("--evaluationtimes", default=100, type=int)
parser.add_argument("--savetimes", default=500, type=int)
parser.add_argument("--logtimes", default=1, type=int)
parser.add_argument("--savepath", default='saved_model/FC_TransE/WN11', type=str)
parser.add_argument("--logpath", default='log/FC_TransE/WN11', type=str)
# parameters for fully connected layer
parser.add_argument("--hiddenlayers",default=[200,100],type=list)
# parameters for convolutional layer
parser.add_argument("--numfilter", default=32, type=int)
parser.add_argument("--inputdropout", default=0.2, type=float)
parser.add_argument("--featuredropout", default=0.3, type=float)
parser.add_argument("--kernelsize", default=3, type=int)
# parameters for different selection strategies for GN and DN
parser.add_argument("--dtuneembedding", default=True, type=bool)
parser.add_argument("--gtuneembedding", default=False, type=bool)
parser.add_argument("--dmargintype", default=True, type=bool)
parser.add_argument("--gusenegative", default=False, type=bool)
parser.add_argument("--meanorsum", default='mean', type=str)
FLAGS, unparsed = parser.parse_known_args()
main(FLAGS)
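# Illustrative invocation (all values shown are simply the defaults above):
#   python trainTripleClassification.py --dataset Wordnet11 --batchsize 1000 \
#       --savepath saved_model/FC_TransE/WN11 --logpath log/FC_TransE/WN11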
| 38.314286 | 124 | 0.706189 |
793f21c96ec032a51fe3251e0e20e4ab3d18ae85 | 391 | py | Python | ToDoApp/ToDoApp/asgi.py | AldarKose-dev/ToDoApp | 38a74cf89245c829f7bfdb9beedc7a1c5effeb16 | [
"MIT"
] | null | null | null | ToDoApp/ToDoApp/asgi.py | AldarKose-dev/ToDoApp | 38a74cf89245c829f7bfdb9beedc7a1c5effeb16 | [
"MIT"
] | null | null | null | ToDoApp/ToDoApp/asgi.py | AldarKose-dev/ToDoApp | 38a74cf89245c829f7bfdb9beedc7a1c5effeb16 | [
"MIT"
] | null | null | null | """
ASGI config for ToDoApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ToDoApp.settings')
application = get_asgi_application()
| 23 | 78 | 0.785166 |
793f2291de3c441ac9051fa9398c71f78ea830b0 | 7,269 | py | Python | scheduled_bots/geneprotein/__init__.py | gtsueng/scheduled-bots | e041905e4c5bf4e25d348474575cce91f78a0733 | [
"MIT"
] | 6 | 2017-05-04T01:04:26.000Z | 2022-03-04T12:22:17.000Z | scheduled_bots/geneprotein/__init__.py | gtsueng/scheduled-bots | e041905e4c5bf4e25d348474575cce91f78a0733 | [
"MIT"
] | 55 | 2017-03-14T21:16:44.000Z | 2022-03-02T12:39:14.000Z | scheduled_bots/geneprotein/__init__.py | gtsueng/scheduled-bots | e041905e4c5bf4e25d348474575cce91f78a0733 | [
"MIT"
] | 13 | 2017-02-10T21:40:06.000Z | 2022-01-18T01:27:52.000Z | #########
# Helper functions
#########
type_of_gene_map = {'ncRNA': 'Q427087',
'snRNA': 'Q284578',
'snoRNA': 'Q284416',
'rRNA': 'Q215980',
'tRNA': 'Q201448',
'pseudo': 'Q277338',
'protein-coding': 'Q7187', # replaced 'Q20747295'(protein coding gene) with gene
'other': 'Q7187',
'unknown': 'Q7187',
'miscRNA': 'Q11053',
'scRNA': 'Q25323710',
}
descriptions_by_type = {
'ncRNA': 'non-coding RNA in the species {}',
'snRNA': 'small nuclear RNA in the species {}',
'snoRNA': 'small nucleolar RNA in the species {}',
'rRNA': 'ribosomal RNA in the species {}',
'tRNA': 'transfer RNA in the species {}',
'pseudo': 'pseudogene in the species {}',
'protein-coding': 'protein-coding gene in the species {}',
'other': 'gene in the species {}',
'unknown': 'genetic element in the species {}',
'miscRNA': 'RNA in the species {}',
'scRNA': 'small conditional RNA in the species {}'
}
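# e.g. descriptions_by_type['protein-coding'].format('Homo sapiens')
# -> 'protein-coding gene in the species Homo sapiens'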
not_worth_adding = {
"none",
"None",
"gene",
"Gene",
"unknown",
"Unknown",
"null"
}
human_chromosome_map = {
'1': 'Q430258',
'10': 'Q840737',
'11': 'Q847096',
'12': 'Q847102',
'13': 'Q840734',
'14': 'Q138955',
'15': 'Q765245',
'16': 'Q742870',
'17': 'Q220677',
'18': 'Q780468',
'19': 'Q510786',
'2': 'Q638893',
'20': 'Q666752',
'21': 'Q753218',
'22': 'Q753805',
'3': 'Q668633',
'4': 'Q836605',
'5': 'Q840741',
'6': 'Q540857',
'7': 'Q657319',
'8': 'Q572848',
'9': 'Q840604',
'MT': 'Q27973632',
'X': 'Q29867336',
'Y': 'Q29867344'}
#########
# Mappings for GO
#########
go_props = {'MF': 'P680',
'Function': 'P680',
'F': 'P680',
'CC': 'P681',
'C': 'P681',
'Component': 'P681',
'BP': 'P682',
'P': 'P682',
'Process': 'P682'}
go_evidence_codes = {
'EXP': 'Q23173789',
'IDA': 'Q23174122',
'IPI': 'Q23174389',
'IMP': 'Q23174671',
'IGI': 'Q23174952',
'IEP': 'Q23175251',
'ISS': 'Q23175558',
'ISO': 'Q23190637',
'ISA': 'Q23190738',
'ISM': 'Q23190825',
'IGC': 'Q23190826',
'IBA': 'Q23190827',
'IBD': 'Q23190833',
'IKR': 'Q23190842',
'IRD': 'Q23190850',
'RCA': 'Q23190852',
'TAS': 'Q23190853',
'NAS': 'Q23190854',
'IC': 'Q23190856',
'ND': 'Q23190857',
'IEA': 'Q23190881',
'IMR': 'Q23190842',
'HDA': 'Q60521899',
'HEP': 'Q60521293'
}
###############
# For references
###############
# wd item representing a source database
sources_wdids = {'UniProt': 'Q905695',
'Uniprot': 'Q905695',
'UniProtKB': 'Q905695',
'ncbi_gene': 'Q20641742', # these two are the same? --v
'Entrez': 'Q20641742',
'ncbi_taxonomy': 'Q13711410',
'swiss_prot': 'Q2629752',
'trembl': 'Q22935315',
'Ensembl': 'Q1344256',
'refseq': 'Q7307074'
}
PROPS = {'found in taxon': 'P703',
'subclass of': 'P279',
'strand orientation': 'P2548',
'Entrez Gene ID': 'P351',
'NCBI Locus tag': 'P2393',
'Ensembl Gene ID': 'P594',
'Ensembl Transcript ID': 'P704',
'genomic assembly': 'P659',
'genomic start': 'P644',
'genomic end': 'P645',
'chromosome': 'P1057',
'Saccharomyces Genome Database ID': 'P3406',
'Mouse Genome Informatics ID': 'P671',
'HGNC ID': 'P354',
'HGNC Gene Symbol': 'P353',
'RefSeq RNA ID': 'P639',
'encoded by': 'P702',
'RefSeq Protein ID': 'P637',
'UniProt ID': 'P352',
'Ensembl Protein ID': 'P705',
'OMIM ID': 'P492',
'NCBI Taxonomy ID': 'P685'
}
# http://www.geneontology.org/doc/GO.xrf_abbs
curators_wdids = {'AgBase': 'Q4690901',
'Alzheimers_University_of_Toronto': 'Q28122976',
'BHF-UCL': 'Q4970039',
'CACAO': 'Q27929332',
'CAFA': 'Q29976522',
'CollecTF': 'Q17083998',
'DFLAT': 'Q28122980',
'dictyBase': 'Q5273990',
'EnsemblPlants': 'Q27927711',
'Ensembl': 'Q1344256',
'FlyBase': 'Q3074571',
'GDB': 'Q5513070',
'GOC': 'Q23809253',
'GO_Central': 'Q27927716',
'HGNC': 'Q1646383',
'HPA': 'Q5937310',
'IntAct': 'Q27901835',
'InterPro': 'Q3047275',
'LIFEdb': 'Q28122992',
'MGI': 'Q1951035',
'NTNU_SB': 'Q28122995',
'ParkinsonsUK-UCL': 'Q27929334',
'PINC': 'Q28122996',
'Reactome': 'Q2134522',
'SGD': 'Q3460832',
'SYSCILIA_CCNET': 'Q28122997',
'UniProt': 'Q905695',
'WormBase': 'Q3570042',
'PseudoCAP': 'Q60526133',
'ARUK-UCL': 'Q60526173',
'GOC-OWL': 'Q60527201',
'SynGO': 'Q60527231',
'SynGO-UCL': 'Q60527231',
'YuBioLab':'Q60527361',
'JCVI': 'Q1439786',
'Mengo': 'Q60527548',
'PAMGO_GAT': 'Q60527768',
'RGD': 'Q7295410',
'Roslin_Institute': 'Q1633976',
'TIGR': 'Q1439786'}
# These are for reference external IDs to use for GO annotations curators
curator_ref = {'SGD': 'Saccharomyces Genome Database ID',
'MGI': 'Mouse Genome Informatics ID',
'UniProt': 'UniProt ID', }
#########
# Organism Info
#########
organisms_info = {
559292: {
"type": "fungal",
"name": "Saccharomyces cerevisiae S288c",
"wdid": "Q27510868",
'taxid': 559292
},
9606: {
"name": "Homo sapiens",
"type": "mammalian",
"wdid": "Q15978631",
'taxid': 9606
},
10090: {
"name": "Mus musculus",
"type": "mammalian",
"wdid": "Q83310",
'taxid': 10090
},
10116: {
"name": "Rattus norvegicus",
"type": "mammalian",
"wdid": "Q184224",
'taxid': 10116
},
9545: {
"name": "Macaca nemestrina",
"type": "mammalian",
"wdid": "Q618026",
'taxid': 9545
},
3702: {
"name": "Arabidopsis thaliana",
"type": "plant",
"wdid": "Q158695",
'taxid': 3702
},
7227: {
"name": "Drosophila melanogaster",
"type": None,
"wdid": "Q130888",
'taxid': 7227
},
6239: {
"name": "Caenorhabditis elegans",
"type": None,
"wdid": "Q91703",
'taxid': 6239
},
7955: {
"name": "Danio rerio", # zebrafish
"type": None,
"wdid": "Q169444",
'taxid': 7955
},
}
| 28.505882 | 101 | 0.448342 |
793f2295752c3d0f0e9672042e24426226712328 | 232 | py | Python | python/exercise/gcd.py | kindrabbit/programming | 2c9b7e24e33ecc174c2efb51727b3886ebc00acf | [
"Apache-2.0"
] | 1 | 2021-01-24T02:07:34.000Z | 2021-01-24T02:07:34.000Z | python/exercise/gcd.py | kindrabbit/programming | 2c9b7e24e33ecc174c2efb51727b3886ebc00acf | [
"Apache-2.0"
] | null | null | null | python/exercise/gcd.py | kindrabbit/programming | 2c9b7e24e33ecc174c2efb51727b3886ebc00acf | [
"Apache-2.0"
] | null | null | null | def gcd(x,y):
if x > y:
divisor = y
else:
divisor = x
while divisor > 0:
if x % divisor == 0 and y % divisor == 0:
break
divisor -= 1
return divisor
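# Brute-force approach: counts down from min(x, y) until a common divisor is
# found. (For large inputs, Euclid's algorithm or math.gcd would be faster.)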
print(gcd(15600,2400)) | 19.333333 | 49 | 0.478448 |
793f245481588dd838d9f16d2a0e180aa6749262 | 2,894 | py | Python | src/model/speech2gesture.py | chahuja/mix-stage | 6f47626ce46bd9b28c45d1255b328b17b3650c4f | [
"MIT"
] | 16 | 2020-07-14T10:13:59.000Z | 2022-03-23T09:28:42.000Z | src/model/speech2gesture.py | chahuja/mix-stage | 6f47626ce46bd9b28c45d1255b328b17b3650c4f | [
"MIT"
] | 6 | 2020-11-16T09:21:02.000Z | 2021-10-04T17:02:33.000Z | src/model/speech2gesture.py | chahuja/mix-stage | 6f47626ce46bd9b28c45d1255b328b17b3650c4f | [
"MIT"
] | 4 | 2020-08-23T20:05:13.000Z | 2021-08-09T16:30:12.000Z | import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import pdb
from .layers import *
import torch
import torch.nn as nn
class Speech2Gesture_G(nn.Module):
'''
Baseline: http://people.eecs.berkeley.edu/~shiry/projects/speech2gesture/
input_shape: (N, time, frequency)
output_shape: (N, time, pose_feats)
'''
def __init__(self, time_steps=64, in_channels=256, out_feats=104, p=0):
super(Speech2Gesture_G, self).__init__()
self.audio_encoder = AudioEncoder(output_feats = time_steps, p=p)
self.unet = UNet1D(input_channels = in_channels, output_channels = in_channels, p=p)
self.decoder = nn.Sequential(*nn.ModuleList([ConvNormRelu(in_channels, in_channels,
type='1d', leaky=True, downsample=False,
p=p)
for i in range(4)]))
self.logits = nn.Conv1d(in_channels, out_feats, kernel_size=1, stride=1)
def forward(self, x, y, time_steps=None, **kwargs):
if x.dim() == 3:
x = x.unsqueeze(dim=1)
x = self.audio_encoder(x, time_steps)
x = self.unet(x)
x = self.decoder(x)
x = self.logits(x)
internal_losses = []
return x.transpose(-1, -2), internal_losses
class Speech2Gesture_D(nn.Module):
'''
Baseline: http://people.eecs.berkeley.edu/~shiry/projects/speech2gesture/
input_shape: (N, time, pose_feats)
output_shape: (N, *, 1) ## discriminator scores
'''
def __init__(self, in_channels=104, out_channels=64, n_downsampling=2, p=0, groups=1, **kwargs):
super(Speech2Gesture_D, self).__init__()
self.conv1 = nn.Sequential(torch.nn.Conv1d(in_channels*groups, out_channels*groups, 4, 2, padding=1, groups=groups),
torch.nn.LeakyReLU(negative_slope=0.2))
self.conv2 = nn.ModuleList([])
for n in range(1, n_downsampling):
ch_mul = min(2**n, 8)
self.conv2.append(ConvNormRelu(out_channels, out_channels*ch_mul,
type='1d', downsample=True, leaky=True, p=p, groups=groups))
self.conv2 = nn.Sequential(*self.conv2)
ch_mul_new = min(2**n_downsampling, 8)
self.conv3 = ConvNormRelu(out_channels*ch_mul, out_channels*ch_mul_new,
type='1d', leaky=True, kernel_size=4, stride=1, p=p, groups=groups)
out_shape = 1 if 'out_shape' not in kwargs else kwargs['out_shape']
self.logits = nn.Conv1d(out_channels*ch_mul_new*groups, out_shape*groups, kernel_size=4, stride=1, groups=groups)
def forward(self, x):
x = x.transpose(-1, -2)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.logits(x)
internal_losses = []
return x.transpose(-1, -2).squeeze(dim=-1), internal_losses
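# A minimal usage sketch for the two modules above. Tensor shapes follow the
# class docstrings; the exact interfaces of AudioEncoder, UNet1D and
# ConvNormRelu (imported from .layers) are assumed here, so treat this as an
# illustration rather than a tested entry point.
if __name__ == '__main__':
  audio = torch.rand(2, 64, 128)  # (N, time, frequency); the frequency dim is a placeholder
  poses = torch.rand(2, 64, 104)  # (N, time, pose_feats)
  gen = Speech2Gesture_G(time_steps=64, in_channels=256, out_feats=104)
  fake_poses, g_losses = gen(audio, poses)
  disc = Speech2Gesture_D(in_channels=104)
  scores, d_losses = disc(fake_poses)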
| 38.078947 | 120 | 0.637526 |
793f24cff86942dc36718fb5ed92cbac0dcaf179 | 626 | py | Python | migrations/versions/2020_07_25_6f1c5993e9f0_remove_sticker_id.py | annihilatorrrr/sticker-finder | 873468f8de26cc32d1de9b688140569b8086ab5b | [
"MIT"
] | 82 | 2018-11-13T05:39:44.000Z | 2022-01-18T17:08:44.000Z | migrations/versions/2020_07_25_6f1c5993e9f0_remove_sticker_id.py | annihilatorrrr/sticker-finder | 873468f8de26cc32d1de9b688140569b8086ab5b | [
"MIT"
] | 25 | 2018-12-02T18:45:52.000Z | 2022-03-21T22:54:19.000Z | migrations/versions/2020_07_25_6f1c5993e9f0_remove_sticker_id.py | annihilatorrrr/sticker-finder | 873468f8de26cc32d1de9b688140569b8086ab5b | [
"MIT"
] | 23 | 2019-01-22T20:04:50.000Z | 2022-02-01T14:57:28.000Z | """Remove sticker.id
Revision ID: 6f1c5993e9f0
Revises: ad326c5b7733
Create Date: 2020-07-25 16:37:23.543659
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "6f1c5993e9f0"
down_revision = "ad326c5b7733"
branch_labels = None
depends_on = None
def upgrade():
op.drop_column("sticker", "id")
def downgrade():
op.add_column(
"sticker",
sa.Column(
"id",
sa.INTEGER(),
server_default=sa.text("nextval('sticker_id_seq'::regclass)"),
autoincrement=True,
nullable=False,
),
)
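# The revision above is applied or reverted through the Alembic CLI (assuming
# the project's usual alembic.ini configuration):
#     alembic upgrade head    # runs upgrade(), dropping sticker.id
#     alembic downgrade -1    # runs downgrade(), restoring the column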
| 18.411765 | 74 | 0.629393 |
793f253100b05f175a0c76f64ae9385c8b7135b7 | 12,500 | py | Python | neural_sp/models/lm/transformer_xl.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 577 | 2018-09-17T14:39:34.000Z | 2022-03-29T10:48:09.000Z | neural_sp/models/lm/transformer_xl.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 221 | 2019-04-21T01:44:09.000Z | 2022-02-10T02:08:47.000Z | neural_sp/models/lm/transformer_xl.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 139 | 2019-01-09T02:18:00.000Z | 2022-03-29T07:40:08.000Z | # Copyright 2020 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""TransformerXL language model."""
import copy
import logging
import math
import os
import random
import shutil
import torch
import torch.nn as nn
from neural_sp.models.lm.lm_base import LMBase
from neural_sp.models.modules.initialization import init_like_transformer_xl
from neural_sp.models.modules.positional_embedding import XLPositionalEmbedding
from neural_sp.models.modules.transformer import TransformerDecoderBlock
from neural_sp.models.torch_utils import tensor2np
from neural_sp.utils import mkdir_join
import matplotlib
matplotlib.use('Agg')
random.seed(1)
logger = logging.getLogger(__name__)
class TransformerXL(LMBase):
"""TransformerXL language model."""
def __init__(self, args, save_path=None):
super(LMBase, self).__init__()
logger.info(self.__class__.__name__)
self.lm_type = args.lm_type
self.save_path = save_path
self.d_model = args.transformer_d_model
self.n_layers = args.n_layers
self.n_heads = args.transformer_n_heads
self.lsm_prob = args.lsm_prob
if args.mem_len > 0:
self.mem_len = args.mem_len
else:
self.mem_len = args.bptt
if args.recog_mem_len > 0:
self.mem_len = args.recog_mem_len
self.vocab = args.vocab
self.eos = 2
self.pad = 3
# NOTE: reserved in advance
# for cache
self.cache_theta = 0.2 # smoothing parameter
self.cache_lambda = 0.2 # cache weight
self.cache_ids = []
self.cache_keys = []
self.cache_attn = []
self.embed_cache = None
# positional embedding
self.pos_emb = XLPositionalEmbedding(self.d_model, args.dropout_in)
self.u_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_model // self.n_heads))
self.v_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_model // self.n_heads))
# NOTE: u_bias and v_bias are global parameters
self.embed = nn.Embedding(self.vocab, self.d_model, padding_idx=self.pad)
self.scale = math.sqrt(self.d_model) # for token embedding
self.dropout_emb = nn.Dropout(p=args.dropout_in) # for token embedding
self.layers = nn.ModuleList([copy.deepcopy(TransformerDecoderBlock(
self.d_model, args.transformer_d_ff, 'scaled_dot',
self.n_heads, args.dropout_hidden, args.dropout_att, args.dropout_layer,
args.transformer_layer_norm_eps, args.transformer_ffn_activation, args.transformer_param_init,
src_tgt_attention=False, memory_transformer=True)) for lth in range(self.n_layers)])
self.norm_out = nn.LayerNorm(self.d_model, eps=args.transformer_layer_norm_eps)
self.adaptive_softmax = None
self.output = None
if args.adaptive_softmax:
self.adaptive_softmax = nn.AdaptiveLogSoftmaxWithLoss(
self.d_model, self.vocab,
cutoffs=[round(self.vocab / 15), 3 * round(self.vocab / 15)],
# cutoffs=[self.vocab // 25, 3 * self.vocab // 5],
div_value=4.0)
else:
self.output = nn.Linear(self.d_model, self.vocab)
if args.tie_embedding:
self.output.weight = self.embed.weight
self.reset_parameters()
@property
def output_dim(self):
return self.d_model
@staticmethod
def add_args(parser, args):
"""Add arguments."""
group = parser.add_argument_group("Transformer-XL LM")
group.add_argument('--transformer_d_model', type=int, default=256,
help='number of units in the MHA layer')
group.add_argument('--transformer_d_ff', type=int, default=2048,
help='number of units in the FFN layer')
# group.add_argument('--transformer_ffn_bottleneck_dim', type=int, default=0,
# help='bottleneck dimension in the FFN layer')
group.add_argument('--transformer_n_heads', type=int, default=4,
help='number of heads in the MHA layer')
group.add_argument('--transformer_layer_norm_eps', type=float, default=1e-12,
help='epsilon value for layer normalization')
group.add_argument('--transformer_ffn_activation', type=str, default='relu',
choices=['relu', 'gelu', 'gelu_accurate', 'glu', 'swish'],
help='nonlinear activation for the FFN layer')
group.add_argument('--transformer_param_init', type=str, default='xavier_uniform',
choices=['xavier_uniform', 'pytorch'],
help='parameter initialization')
group.add_argument('--dropout_att', type=float, default=0.1,
help='dropout probability for the attention weights')
group.add_argument('--dropout_layer', type=float, default=0.0,
help='LayerDrop probability for Transformer layers')
# XL specific
group.add_argument('--mem_len', type=int, default=0,
help='number of tokens for memory in TransformerXL during training')
return parser
@staticmethod
def define_name(dir_name, args):
dir_name = args.lm_type
dir_name += str(args.transformer_d_model) + 'dmodel'
dir_name += str(args.transformer_d_ff) + 'dff'
dir_name += str(args.n_layers) + 'L'
dir_name += str(args.transformer_n_heads) + 'H'
if args.tie_embedding:
dir_name += '_tie'
if args.adaptive_softmax:
dir_name += '_adaptiveSM'
if args.mem_len > 0:
dir_name += '_mem' + str(args.mem_len)
return dir_name
def reset_parameters(self):
"""Initialize parameters with normal distribution."""
logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)
for n, p in self.named_parameters():
init_like_transformer_xl(n, p, std=0.02)
def init_memory(self):
"""Initialize memory."""
return [torch.empty(0, dtype=torch.float).to(self.device)
for _ in range(self.n_layers)]
def update_memory(self, memory_prev, hidden_states):
"""Update memory.
Args:
memory_prev (List): length `n_layers` (inter-utterance),
each of which contains a FloatTensor of size `[B, mlen, d_model]`
hidden_states (List): length `n_layers` (intra-utterance),
each of which contains a FloatTensor of size `[B, L, d_model]`
Returns:
new_mems (List): length `n_layers`,
each of which contains a FloatTensor of size `[B, mlen, d_model]`
"""
if memory_prev is None:
memory_prev = self.init_memory() # 0-th to L-1-th layer
assert len(hidden_states) == len(memory_prev), (len(hidden_states), len(memory_prev))
mlen = memory_prev[0].size(1) if memory_prev[0].dim() > 1 else 0
qlen = hidden_states[0].size(1)
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
with torch.no_grad():
new_mems = []
end_idx = mlen + qlen
start_idx = max(0, end_idx - self.mem_len)
for m, h in zip(memory_prev, hidden_states):
cat = torch.cat([m, h], dim=1) # `[B, mlen + qlen, d_model]`
new_mems.append(cat[:, start_idx:end_idx].detach()) # `[B, self.mem_len, d_model]`
return new_mems
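        # Worked example of the window above (illustrative numbers only): with
        # mlen=64 cached steps, qlen=32 new steps and self.mem_len=64,
        # end_idx = 96 and start_idx = 32, so the refreshed memory keeps the
        # most recent 64 of the 96 available hidden states.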
def embed_token_id(self, indices):
"""Embed token IDs.
Args:
indices (LongTensor): `[B]`
Returns:
ys_emb (FloatTensor): `[B, vocab, emb_dim]`
"""
if self.embed_cache is None or self.training:
ys_emb = self.dropout_emb(self.embed(indices) * self.scale)
else:
ys_emb = self.embed_cache[indices]
return ys_emb
def decode(self, ys, state=None, mems=None, cache=None, incremental=False):
"""Decode function.
Args:
ys (LongTensor): `[B, L]`
            state (List): dummy interface for RNNLM
mems (List): length `n_layers` (inter-utterance),
each of which contains a FloatTensor of size `[B, mlen, d_model]`
cache (List): length `n_layers` (intra-utterance),
each of which contains a FloatTensor of size `[B, L-1, d_model]`
incremental (bool): ASR decoding mode
Returns:
logits (FloatTensor): `[B, L, vocab]`
out (FloatTensor): `[B, L, d_model]`
new_cache (List): length `n_layers`,
each of which contains a FloatTensor of size `[B, L, d_model]`
"""
# for ASR decoding
if cache is None:
cache = [None] * self.n_layers # 1-th to L-th layer
if mems is None:
mems = self.init_memory()
mlen = 0
else:
mlen = mems[0].size(1)
bs, ylen = ys.size()[:2]
if incremental and cache[0] is not None:
ylen = cache[0].size(1) + 1
# Create the self-attention mask
causal_mask = ys.new_ones(ylen, ylen + mlen).byte()
causal_mask = torch.tril(causal_mask, diagonal=mlen).unsqueeze(0)
causal_mask = causal_mask.repeat([bs, 1, 1]) # `[B, L, L+mlen]`
out = self.embed_token_id(ys)
ys, rel_pos_embs = self.pos_emb(ys, n_cache=mlen)
new_mems = [None] * self.n_layers
new_cache = [None] * self.n_layers
hidden_states = [out]
for lth, (mem, layer) in enumerate(zip(mems, self.layers)):
if incremental and mlen > 0 and mem.size(0) != bs:
mem = mem.repeat([bs, 1, 1])
out = layer(out, causal_mask, cache=cache[lth],
pos_embs=rel_pos_embs, memory=mem, u_bias=self.u_bias, v_bias=self.v_bias)
if incremental:
new_cache[lth] = out
elif lth < self.n_layers - 1:
hidden_states.append(out)
# NOTE: outputs from the last layer is not used for memory
if not self.training and layer.yy_aws is not None:
setattr(self, 'yy_aws_layer%d' % lth, tensor2np(layer.yy_aws))
out = self.norm_out(out)
if self.adaptive_softmax is None:
logits = self.output(out)
else:
logits = out
if incremental:
# NOTE: do not update memory here during ASR decoding
return logits, out, new_cache
else:
# Update memory
new_mems = self.update_memory(mems, hidden_states)
return logits, out, new_mems
def plot_attention(self, n_cols=4):
"""Plot attention for each head in all layers."""
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
save_path = mkdir_join(self.save_path, 'att_weights')
# Clean directory
if save_path is not None and os.path.isdir(save_path):
shutil.rmtree(save_path)
os.mkdir(save_path)
for lth in range(self.n_layers):
if not hasattr(self, 'yy_aws_layer%d' % lth):
continue
yy_aws = getattr(self, 'yy_aws_layer%d' % lth)
plt.clf()
fig, axes = plt.subplots(self.n_heads // n_cols, n_cols, figsize=(20, 8))
for h in range(self.n_heads):
if self.n_heads > n_cols:
ax = axes[h // n_cols, h % n_cols]
else:
ax = axes[h]
ax.imshow(yy_aws[-1, h, :, :], aspect="auto")
ax.grid(False)
ax.set_xlabel("Input (head%d)" % h)
ax.set_ylabel("Output (head%d)" % h)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
fig.tight_layout()
fig.savefig(os.path.join(save_path, 'layer%d.png' % (lth)))
plt.close()
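# A minimal construction/decoding sketch for the class above. The attribute
# names on `args` mirror __init__/add_args, but the concrete values and the
# SimpleNamespace itself are illustrative assumptions; in neural_sp the args
# normally come from the training configuration.
if __name__ == '__main__':
    from types import SimpleNamespace
    args = SimpleNamespace(
        lm_type='transformer_xl', transformer_d_model=256, transformer_d_ff=2048,
        n_layers=2, transformer_n_heads=4, lsm_prob=0.1, mem_len=64, bptt=64,
        recog_mem_len=0, vocab=1000, dropout_in=0.1, dropout_hidden=0.1,
        dropout_att=0.1, dropout_layer=0.0, transformer_layer_norm_eps=1e-12,
        transformer_ffn_activation='relu', transformer_param_init='xavier_uniform',
        adaptive_softmax=False, tie_embedding=False)
    lm = TransformerXL(args)
    ys = torch.randint(4, args.vocab, (2, 16))     # `[B, L]` token ids
    logits, out, mems = lm.decode(ys)              # first chunk, fresh memory
    logits, out, mems = lm.decode(ys, mems=mems)   # next chunk reuses the memory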
| 40.453074 | 106 | 0.59584 |
793f26338078051928fc23f06a3e2202d7db0e95 | 3,346 | py | Python | test/infrastructure/dns_server/servers_test.py | sasjafor/scion | cdcbd186d1350ad78f365556b771ada522bd33ed | [
"Apache-2.0"
] | 1 | 2018-03-18T14:46:34.000Z | 2018-03-18T14:46:34.000Z | test/infrastructure/dns_server/servers_test.py | sasjafor/scion | cdcbd186d1350ad78f365556b771ada522bd33ed | [
"Apache-2.0"
] | null | null | null | test/infrastructure/dns_server/servers_test.py | sasjafor/scion | cdcbd186d1350ad78f365556b771ada522bd33ed | [
"Apache-2.0"
] | 1 | 2018-03-14T14:59:57.000Z | 2018-03-14T14:59:57.000Z | # Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`servers_test` --- infrastructure.dns_server.servers unit tests
====================================================================
"""
# Stdlib
from unittest.mock import patch
# External packages
import nose
import nose.tools as ntools
# SCION
from infrastructure.dns_server.servers import (
SCIONDnsTcpServer,
SCIONDnsUdpServer,
)
from test.testcommon import create_mock
class TestSCIONDnsProtocolServerServeForever(object):
"""
Unit tests for:
infrastructure.dns_server.servers.SCIONDnsTcpServer.serve_forever
infrastructure.dns_server.servers.SCIONDnsUdpServer.serve_forever
"""
@patch('infrastructure.dns_server.servers.threading.current_thread',
autospec=True)
def _check(self, inst, srv_forever, curr_thread):
# Setup
curr_thread.return_value = create_mock(["name"])
# Call
inst.serve_forever()
# Tests
ntools.assert_is_instance(curr_thread.return_value.name, str)
srv_forever.assert_called_once_with(inst)
@patch('infrastructure.dns_server.servers.TCPServer.serve_forever',
autospec=True)
@patch('infrastructure.dns_server.servers.SCIONDnsTcpServer.__init__',
autospec=True, return_value=None)
def test_tcp(self, _, srv_forever):
self._check(SCIONDnsTcpServer("srvaddr", "reqhndlcls"), srv_forever)
@patch('infrastructure.dns_server.servers.UDPServer.serve_forever',
autospec=True)
@patch('infrastructure.dns_server.servers.SCIONDnsUdpServer.__init__',
autospec=True, return_value=None)
def test_udp(self, _, srv_forever):
self._check(SCIONDnsUdpServer("srvaddr", "reqhndlcls"), srv_forever)
class TestSCIONDnsProtocolServerHandleError(object):
"""
Unit tests for:
infrastructure.dns_server.servers.SCIONDnsTcpServer.handle_error
infrastructure.dns_server.servers.SCIONDnsUdpServer.handle_error
"""
@patch('infrastructure.dns_server.servers.kill_self', autospec=True)
@patch('infrastructure.dns_server.servers.log_exception', autospec=True)
def _check(self, inst, log_excp, kill_self):
# Call
inst.handle_error()
# Tests
ntools.ok_(log_excp.called)
kill_self.assert_called_once_with()
@patch('infrastructure.dns_server.servers.SCIONDnsTcpServer.__init__',
autospec=True, return_value=None)
def test_tcp(self, _):
self._check(SCIONDnsTcpServer("srvaddr", "reqhndlcls"))
@patch('infrastructure.dns_server.servers.SCIONDnsUdpServer.__init__',
autospec=True, return_value=None)
def test_udp(self, _):
self._check(SCIONDnsUdpServer("srvaddr", "reqhndlcls"))
if __name__ == "__main__":
nose.run(defaultTest=__name__)
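# These tests are normally collected by nose from the repository root, e.g.:
#     nosetests test/infrastructure/dns_server/servers_test.py
# Running the module directly (as above) invokes nose.run itself.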
| 35.978495 | 76 | 0.711297 |
793f2684c93a63e4fb9e1cee2a0469da29b5c593 | 5,223 | py | Python | w2v_cnn+lstm.py | mosalen/Keras-Spam-Detection-with-CNN-W2V | 0a2660c50de1d6e37175b30b3e45689bce8aa58c | [
"MIT"
] | null | null | null | w2v_cnn+lstm.py | mosalen/Keras-Spam-Detection-with-CNN-W2V | 0a2660c50de1d6e37175b30b3e45689bce8aa58c | [
"MIT"
] | null | null | null | w2v_cnn+lstm.py | mosalen/Keras-Spam-Detection-with-CNN-W2V | 0a2660c50de1d6e37175b30b3e45689bce8aa58c | [
"MIT"
] | null | null | null | #coding:utf-8
import sys
import keras
VECTOR_DIR = 'D:/TBdata/baike26g_news13g_novel229g_128.bin'
#'D:/first/text_classification/wiki.zh.vector.bin'
MAX_SEQUENCE_LENGTH = 200
EMBEDDING_DIM = 128
VALIDATION_SPLIT = 0.2
TEST_SPLIT = 0.2
print ('(1) load texts...')
train_texts = open('D:/TBdata/我的语料库/random/01/traintext-ran3.txt', encoding='utf-8').read().split('\n')
train_labels = open('D:/TBdata/我的语料库/random/01/trainlabel-ran3.txt', encoding='utf-8' ).read().split('\n')
test_texts = open('D:/TBdata/我的语料库/random/01/testtext-ran3.txt', encoding='utf-8').read().split('\n')
test_labels = open('D:/TBdata/我的语料库/random/01/testlabel-ran3.txt', encoding='utf-8').read().split('\n')
all_texts = train_texts + test_texts
all_labels = train_labels + test_labels
print ('(2) doc to var...')
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
tokenizer = Tokenizer()
tokenizer.fit_on_texts(all_texts)
sequences = tokenizer.texts_to_sequences(all_texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(all_labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
print ('(3) split data set...')
# split the data into training set, validation set, and test set
p1 = int(len(data)*(1-VALIDATION_SPLIT-TEST_SPLIT))
p2 = int(len(data)*(1-TEST_SPLIT))
x_train = data[:p1]
y_train = labels[:p1]
x_val = data[p1:p2]
y_val = labels[p1:p2]
x_test = data[p2:]
y_test = labels[p2:]
print ('train docs: '+str(len(x_train)))
print ('val docs: '+str(len(x_val)))
print ('test docs: '+str(len(x_test)))
print ('(4) load word2vec as embedding...')
import gensim
from keras.utils import plot_model
w2v_model = gensim.models.KeyedVectors.load_word2vec_format(VECTOR_DIR, binary=True)
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
not_in_model = 0
in_model = 0
for word, i in word_index.items():
if word in w2v_model:
in_model += 1
embedding_matrix[i] = np.asarray(w2v_model[word], dtype='float32')
else:
not_in_model += 1
print (str(not_in_model)+' words not in w2v model')
from keras.layers import Embedding
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print ('(5) training model...')
from keras.layers import Dense, Input, Flatten, Dropout, LSTM
from keras.layers import Conv1D, MaxPooling1D, Embedding, GlobalMaxPooling1D
from keras.models import Sequential
model = Sequential()
model.add(embedding_layer)
model.add(Dropout(0.2))
model.add(Conv1D(256, 3, padding='valid', activation='relu', strides=1))
model.add(MaxPooling1D(3))
#model.add(Flatten())
model.add(LSTM(512, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
model.add(Flatten())
model.add(Dense(EMBEDDING_DIM, activation='relu'))
model.add(Dense(labels.shape[1], activation='sigmoid'))
model.summary()
plot_model(model, to_file='D:/TBdata/验证结果/model.png',show_shapes=True)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['acc'])
print (model.metrics_names)
model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=10, batch_size=64)
model.save('word_vector_cnn.h5')
print ('(6) testing model...')
print (model.evaluate(x_test, y_test))
'''
predict_text = open('D:/TBdata/我的语料库/random/predict-text.txt', encoding='utf-8').read().split('\n')
predict_label = open('D:/TBdata/我的语料库/random/predict-label.txt', encoding='utf-8').read().split('\n')
prediction = model.predict(x_pre)
print("模型预测结果", prediction)
import csv
print("Saving evaluation")
prediction_human_readable = np.column_stack((np.array(x_pre), prediction))
with open("D:/TBdata/验证结果/Prediction.csv", 'w') as f:
csv.writer(f).writerows(prediction_human_readable)
'''
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve,auc
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
y_score = model.predict(x_test)
y_pred_labels = np.argmax(y_score, axis=1)
y_test_labels = np.argmax(y_test, axis=1)
lw = 2
n_classes = 2
print("computing f1 score...")
cm1 = confusion_matrix(y_test_labels, y_pred_labels)
print(cm1)
TPR = np.diag(cm1)
FPR = []
for i in range(n_classes):
    FPR.append(sum(cm1[:, i]) - cm1[i, i])  # false positives for class i: column total minus the diagonal
FNR = []
for i in range(n_classes):
    FNR.append(sum(cm1[i, :]) - cm1[i, i])  # false negatives for class i: row total minus the diagonal
TNR = []
for i in range(n_classes):
temp = np.delete(cm1, i, 0) # delete ith row
temp = np.delete(temp, i, 1) # delete ith column
TNR.append(sum(sum(temp)))
l = len(y_test)
for i in range(n_classes):
print(TPR[i] + FPR[i] + FNR[i] + TNR[i] == l)
precision = TPR / (TPR + FPR)
print(precision)
recall = TPR / (TPR + FNR)
print(recall)
f1_score = 2.0 * precision * recall / (precision + recall)
print(f1_score)
print("classification_report(left: labels):")
print(classification_report(y_test_labels, y_pred_labels))
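# Follow-up sketch: reloading the saved model and scoring a new message with
# the tokenizer fitted above. The example text is a made-up placeholder and is
# assumed to be pre-segmented the same way as the training corpus.
from keras.models import load_model
reloaded = load_model('word_vector_cnn.h5')
new_texts = ["这 是 一条 测试 评论"]
new_seq = pad_sequences(tokenizer.texts_to_sequences(new_texts), maxlen=MAX_SEQUENCE_LENGTH)
print("reloaded model prediction:", reloaded.predict(new_seq))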
| 31.089286 | 106 | 0.712809 |
793f2706c65bbcbfde7e2cff5dd24f6cc2654e34 | 3,162 | py | Python | life/button.py | DavidY-Li/cell-life | 8fd38331f4f917cad787766676311615def8c8c7 | [
"MIT"
] | 2 | 2021-03-05T01:26:57.000Z | 2021-03-05T02:37:06.000Z | life/button.py | DavidY-Li/cell-life | 8fd38331f4f917cad787766676311615def8c8c7 | [
"MIT"
] | null | null | null | life/button.py | DavidY-Li/cell-life | 8fd38331f4f917cad787766676311615def8c8c7 | [
"MIT"
] | null | null | null | import pygame
# Button class for easier generation of buttons
class Button:
def __init__(
self,
primary,
hover_color,
x,
y,
width,
height,
text,
font=18,
padding=13,
secondary_text="",
on_click=None,
font_color=[255, 255, 255],
):
self.primary = primary
self.hover_color = hover_color
self.color = self.primary
self.text = text
self.on_click = on_click
self.x = x
self.y = y
self.width = width
self.height = height
self.secondary_text = secondary_text
self.font = font
self.font_color = font_color
self.rect = pygame.Rect(self.x, self.y, self.width, self.height)
self.padding = padding
def set_width(self, width: int):
self.width = width
self.rect = pygame.Rect(self.x, self.y, self.width, self.height)
def draw(self, screen, outline=None):
# Call this method to draw the button on the screen
if outline:
pygame.draw.rect(
screen,
outline,
(self.x - 2, self.y - 2, self.width + 4, self.height + 4),
0,
)
# Drawing button rect
pygame.draw.rect(screen, self.color, self.rect)
# Placing primary text on button
if self.text != "":
font = pygame.font.Font("pixel.ttf", self.font)
text = font.render(self.text, 1, self.font_color)
screen.blit(
text,
(
self.x + (self.width / 2 - text.get_width() / 2),
self.y
+ (self.height / 2 - text.get_height() / 2)
- self.padding
+ 4,
),
)
# Placing secondary text on button with offset
if self.secondary_text != "":
font = pygame.font.Font("pixel.ttf", self.font)
text = font.render(self.secondary_text, 1, self.font_color)
screen.blit(
text,
(
self.x + (self.width / 2 - text.get_width() / 2),
self.y
+ (self.height / 2 - text.get_height() / 2)
+ self.font
+ 5
- self.padding
+ 4,
),
)
# Handling button click
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
# Gets mouse position
mouse_pos = event.pos
# Checks to see if the mouse pressed the button
if self.rect.collidepoint(mouse_pos):
if self.on_click != None:
self.on_click()
elif event.type == pygame.MOUSEMOTION:
# Gets mouse position
mouse_pos = event.pos
# Checks to see if the mouse pressed the button
if self.rect.collidepoint(mouse_pos):
self.color = self.hover_color
else:
self.color = self.primary
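# A minimal event-loop sketch using the class above. It assumes "pixel.ttf"
# (loaded in draw()) is available next to the script; the window size, colours
# and the on_click callback are illustrative placeholders.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((400, 300))
    button = Button((70, 70, 200), (100, 100, 240), 150, 120, 100, 50,
                    "Play", on_click=lambda: print("clicked"))
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            button.handle_event(event)
        screen.fill((30, 30, 30))
        button.draw(screen, outline=(255, 255, 255))
        pygame.display.flip()
    pygame.quit()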
| 30.114286 | 74 | 0.485136 |
793f274382c8aa2f9665f22640fe90d51f2872ba | 10,414 | py | Python | make_postcards.py | BoiseStatePlanetary/eleanor | 823cfb8e9972b0600cfe20e6a66b5956a9a177d5 | [
"MIT"
] | null | null | null | make_postcards.py | BoiseStatePlanetary/eleanor | 823cfb8e9972b0600cfe20e6a66b5956a9a177d5 | [
"MIT"
] | null | null | null | make_postcards.py | BoiseStatePlanetary/eleanor | 823cfb8e9972b0600cfe20e6a66b5956a9a177d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["make_postcards"]
import os
import glob
import tqdm
import fitsio
import numpy as np
from time import strftime
from astropy.wcs import WCS
from astropy.table import Table
from astropy.stats import SigmaClip
from photutils import MMMBackground
from eleanor.ffi import ffi, set_quality_flags
#from eleanor.version import __version__
def bkg(flux, sigma=2.5):
# Returns background for a single cadence. Default sigma=2.5
sigma_clip = SigmaClip(sigma=sigma)
bkg = MMMBackground(sigma_clip=sigma_clip)
return bkg.calc_background(flux)
def make_postcards(fns, outdir, sc_fn, width=104, height=148, wstep=None, hstep=None):
# Make sure that the output directory exists
os.makedirs(outdir, exist_ok=True)
# We'll assume that the filenames can be sorted like this (it is true for
# the ETE-6 test data
fns = list(sorted(fns))
total_ffis = len(fns)
# Save the middle header as the primary header
middle_fn = fns[total_ffis//2]
data, primary_header = fitsio.read(middle_fn, 1, header=True)
# Add the eleanor info to the header
primary_header.add_record("COMMENT ***********************")
primary_header.add_record("COMMENT * eleanor INFO *")
primary_header.add_record("COMMENT ***********************")
primary_header.add_record(
dict(name='AUTHOR', value='Adina D. Feinstein'))
primary_header.add_record(
dict(name='VERSION', value='0.0.4'))
primary_header.add_record(
dict(name='GITHUB',
value='https://github.com/afeinstein20/eleanor'))
primary_header.add_record(
dict(name='CREATED', value=strftime('%Y-%m-%d'),
comment='eleanor file creation date (YYY-MM-DD)'))
# Build the WCS for this middle exposure
primary_wcs = WCS(primary_header)
# Is this a raw frame? If so, there won't be any error information
is_raw = primary_header["IMAGTYPE"].strip() == "uncal"
# Set the output filename format
sector = os.path.split(middle_fn)[-1].split("-")[1] # Scrapes sector from the filename
info = (sector, primary_header["CAMERA"],
primary_header["CCD"], primary_header["IMAGTYPE"].strip())
info_str = '{0}-{1}-{2}-{3}'.format(info[0], info[1], info[2], info[3])
outfn_fmt = "hlsp_eleanor_tess_ffi_postcard-{0}-{{0:04d}}-{{1:04d}}.fits".format(info_str)
outfn_fmt = os.path.join(outdir, outfn_fmt).format
# Build the pointing model
f = ffi(sector=int(info[0][1:]), camera=info[1], chip=info[2])
f.local_paths = fns
f.sort_by_date()
pm = f.pointing_model_per_cadence()
# We want to shift the WCS for each postcard so let's store the default
# reference pixel
crpix_h = float(primary_header["CRPIX1"])
crpix_w = float(primary_header["CRPIX2"])
# Work out the dimensions of the problem
dtype = data.dtype
shape = data.shape
total_width, total_height = shape
width = int(width)
height = int(height)
if wstep is None:
wstep = width - 50
if hstep is None:
hstep = height - 50
wstep = int(wstep)
hstep = int(hstep)
# Make a grid of postcard origin coordinates
ws = np.arange(0, 2049, wstep)#total_width - width + wstep + 1, wstep)
hs = np.arange(44, 2093, hstep)#total_height - height + hstep + 1, hstep)
# Compute the total numbers for progress bars
num_times = len(fns)
total_num_postcards = len(ws) * len(hs)
# Allocate the memory for the stacked FFIs
all_ffis = np.empty((total_width, total_height, len(fns)), dtype=dtype,
order="F")
if not is_raw:
all_errs = np.empty((total_width, total_height, len(fns)), dtype=dtype,
order="F")
# We'll have the same primary HDU for each postcard - this will store the
# time dependent header info
primary_cols = ["TSTART", "TSTOP", "BARYCORR", "DATE-OBS", "DATE-END", "BKG", "QUALITY"]
primary_dtype = [np.float32, np.float32, np.float32, "O", "O", np.float32, np.int64]
primary_data = np.empty(len(fns), list(zip(primary_cols, primary_dtype)))
# Make sure that the sector, camera, chip, and dimensions are the
# same for all the files
for i, name in tqdm.tqdm(enumerate(fns), total=num_times):
data, hdr = fitsio.read(name, 1, header=True)
# FIXME: when `sector` is added to the header, we should check
# it too! -- still not added (dfm)
new_shape = (hdr["NAXIS2"], hdr["NAXIS1"])
new_info = (sector, hdr["CAMERA"], hdr["CCD"], hdr["IMAGTYPE"].strip())
if shape != new_shape or new_info != info:
raise ValueError("the header info for '{0}' does not match"
.format(name))
info = new_info
# Save the info for the primary HDU
for k, dtype in zip(primary_cols[0:len(primary_cols)-2], primary_dtype[0:len(primary_dtype)-2]):
if dtype == "O":
primary_data[k][i] = hdr[k].encode("ascii")
else:
primary_data[k][i] = hdr[k]
# Save the data
all_ffis[:, :, i] = data
if not is_raw:
all_errs[:, :, i] = fitsio.read(name, 2)
wmax, hmax = 2048, 2092
quality = np.empty(len(fns))
# Loop over postcards
with tqdm.tqdm(total=total_num_postcards) as bar:
for i, h in enumerate(hs):
for j, w in enumerate(ws):
dw = width#min(width, total_width - w)
dh = height#min(height, total_height - h)
hdr = fitsio.FITSHDR(primary_header)
if np.shape(all_ffis[w:w+dw, h:h+dh, :]) != (width,height,total_ffis):
if w+dw > wmax:
w = wmax-dw
if h+dh > hmax:
h = hmax-dh
# Shift the reference pixel for the WCS to
# account for the postcard location
hdr.add_record(
dict(name="CRPIX1", value=crpix_h - h,
comment="X reference pixel"))
hdr.add_record(
dict(name="CRPIX2", value=crpix_w - w,
comment="Y reference pixel"))
# Save the postcard coordinates in the header
hdr.add_record(
dict(name="POSTPIX1", value=h,
comment="origin of postcard axis 1"))
hdr.add_record(
dict(name="POSTPIX2", value=w,
comment="origin of postcard axis 2"))
xcen = h + 0.5*dh
ycen = w + 0.5*dw
outfn = outfn_fmt(int(xcen), int(ycen))
rd = primary_wcs.all_pix2world(xcen, ycen, 1)
hdr.add_record(
dict(name="CEN_X", value=xcen,
comment=("central pixel of postcard in FFI")))
hdr.add_record(
dict(name="CEN_Y", value=ycen,
comment=("central pixel of postcard in FFI")))
hdr.add_record(
dict(name="CEN_RA", value=float(rd[0]),
comment="RA of central pixel"))
hdr.add_record(
dict(name="CEN_DEC", value=float(rd[1]),
comment="Dec of central pixel"))
hdr.add_record(
dict(name="POST_H", value=float(height),
comment="Height of postcard in pixels"))
hdr.add_record(
dict(name="POST_W", value=float(width),
comment="Width of postcard in pixels"))
hdr.add_record(
dict(name="SECTOR", value=sector[1::],
comment="TESS sector"))
pixel_data = all_ffis[w:w+dw, h:h+dh, :] + 0.0
# Adds in quality column for each cadence in primary_data
for k in range(len(fns)):
b = bkg(pixel_data[:, :, k])
primary_data[k][len(primary_cols)-2] = b
pixel_data[:, :, k] -= b
if i==0 and j==0 and k==0:
print("Getting quality flags")
quality_array = set_quality_flags( primary_data['TSTART']-primary_data['BARYCORR'],
primary_data['TSTOP']-primary_data['BARYCORR'],
sc_fn, sector[1::], new_info[1], new_info[2],
pm=pm)
primary_data[k][len(primary_cols)-1] = quality_array[k]
# Saves the primary hdu
fitsio.write(outfn, primary_data, header=hdr, clobber=True)
# Save the image data
fitsio.write(outfn, pixel_data)
if not is_raw:
fitsio.write(outfn, all_errs[w:w+dw, h:h+dh, :])
bar.update()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Make postcards from a list of FFIs")
parser.add_argument('file_pattern',
help='the pattern for the input FFI filenames')
parser.add_argument('output_dir',
help='the output directory')
parser.add_argument('sc_fn',
help='the short cadence filename for this sector, camera, chip')
parser.add_argument('--width', type=int, default=104,
help='the width of the postcards')
parser.add_argument('--height', type=int, default=148,
help='the height of the postcards')
parser.add_argument('--wstep', type=int, default=None,
help='the step size in the width direction')
parser.add_argument('--hstep', type=int, default=None,
help='the step size in the height direction')
args = parser.parse_args()
fns = sorted(glob.glob(args.file_pattern))
outdir = args.output_dir
sc_fn = args.sc_fn
make_postcards(fns, outdir, sc_fn,
width=args.width, height=args.height,
wstep=args.wstep, hstep=args.hstep)
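# Example invocation (paths and the FFI pattern are placeholders only):
#     python make_postcards.py "ffis/tess*-s0001-4-4-*_ffic.fits" postcards/ \
#         short_cadence_s0001_4-4.fits --width 104 --height 148
# or, equivalently, from Python:
#     from make_postcards import make_postcards
#     make_postcards(sorted(glob.glob("ffis/tess*-s0001-4-4-*_ffic.fits")),
#                    "postcards/", "short_cadence_s0001_4-4.fits")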
| 39.44697 | 107 | 0.561456 |
793f2756b9c7d0b9d69bd4021c55367ec6d1eb26 | 12,514 | py | Python | lumin/nn/metrics/reg_eval.py | matthewfeickert/lumin | 00c5f9f3d8a630d780ce09017ff13eb27485534c | [
"Apache-2.0"
] | null | null | null | lumin/nn/metrics/reg_eval.py | matthewfeickert/lumin | 00c5f9f3d8a630d780ce09017ff13eb27485534c | [
"Apache-2.0"
] | null | null | null | lumin/nn/metrics/reg_eval.py | matthewfeickert/lumin | 00c5f9f3d8a630d780ce09017ff13eb27485534c | [
"Apache-2.0"
] | null | null | null | import numpy as np
from typing import Optional, Callable
import pandas as pd
from statsmodels.stats.weightstats import DescrStatsW
from fastcore.all import store_attr
from ...utils.statistics import bootstrap_stats
from .eval_metric import EvalMetric, OldEvalMetric
from ..data.fold_yielder import FoldYielder
__all__ = ['RegPull', 'RegAsProxyPull']
class OldRegPull(OldEvalMetric):
r'''
Compute mean or standard deviation of delta or pull of some feature which is being directly regressed to.
Optionally, use bootstrap resampling on validation data.
Arguments:
return_mean: whether to return the mean or the standard deviation
        use_bootstrap: whether to bootstrap resample the validation fold when computing the statistic
use_weights: whether to actually use weights if wgt_name is set
use_pull: whether to return the pull (differences / targets) or delta (differences)
targ_name: name of group in fold file containing regression targets
wgt_name: name of group in fold file containing datapoint weights
Examples::
>>> mean_pull = RegPull(return_mean=True, use_bootstrap=True,
... use_pull=True)
>>>
>>> std_delta = RegPull(return_mean=False, use_bootstrap=True,
... use_pull=False)
>>>
>>> mean_pull = RegPull(return_mean=True, use_bootstrap=False,
... use_pull=True, wgt_name='weights')
    .. Attention:: This class is deprecated in favour of :class:`~lumin.nn.metrics.reg_eval.RegPull`.
It is a copy of the old `RegPull` class used in lumin<=0.7.0.
It will be removed in V0.8
'''
# XXX remove in V0.8
# TODO: Check how this handels multi-target regression, may need to adjust averaging axis & DescrStatsW may not handle multi-dimensional data well.
# TODO: Remove use_weights and rely on whether wgt_name is None
def __init__(self, return_mean:bool, use_bootstrap:bool=False, use_weights:bool=True, use_pull:bool=True, targ_name:str='targets',
wgt_name:Optional[str]=None):
super().__init__(targ_name=targ_name, wgt_name=wgt_name)
self.use_bootstrap,self.use_weights,self.return_mean,self.use_pull = use_bootstrap,use_weights,return_mean,use_pull
def _compute(self, df:pd.DataFrame) -> float:
df['diff'] = df['pred']-df['gen_target']
if self.use_pull: df['diff'] /= df['gen_target']
if self.use_weights and 'gen_weight' in df.columns:
weights = df['gen_weight'].values.astype('float64')/df['gen_weight'].values.astype('float64').sum()
else:
weights = None
if self.use_bootstrap:
bs_args = {'data': df['diff'], 'mean': self.return_mean, 'std': True, 'n':100}
if self.use_weights and 'gen_weight' in df.columns: bs_args['weights'] = weights
bs = bootstrap_stats(bs_args)
return np.mean(bs['_mean']) if self.return_mean else np.mean(bs['_std'])
else:
if self.return_mean:
return np.average(df['diff'], weights=weights)
else:
return DescrStatsW(df['diff'].values, ddof=1, weights=weights*len(weights) if weights is not None else None).std
def evaluate(self, fy:FoldYielder, idx:int, y_pred:np.ndarray) -> float:
r'''
        Compute statistic on fold using provided predictions.
Arguments:
fy: :class:`~lumin.nn.data.fold_yielder.FoldYielder` interfacing to data
idx: fold index corresponding to fold for which y_pred was computed
y_pred: predictions for fold
Returns:
            Statistic set in initialisation computed on the chosen fold
Examples::
>>> mean = mean_pull.evaluate(train_fy, val_id, val_preds)
'''
return self._compute(self.get_df(fy, idx, y_pred))
class RegPull(EvalMetric):
r'''
Compute mean or standard deviation of delta or pull of some feature which is being directly regressed to.
Optionally, use bootstrap resampling on validation data.
Arguments:
return_mean: whether to return the mean or the standard deviation
        use_bootstrap: whether to bootstrap resample the validation fold when computing the statistic
use_pull: whether to return the pull (differences / targets) or delta (differences)
name: optional name for metric, otherwise will be inferred from `use_pull`
        main_metric: whether this metric should be treated as the primary metric for SaveBest and EarlyStopping
Will automatically set the first EvalMetric to be main if multiple primary metrics are submitted
Examples::
>>> mean_pull = RegPull(return_mean=True, use_bootstrap=True,
... use_pull=True)
>>>
>>> std_delta = RegPull(return_mean=False, use_bootstrap=True,
... use_pull=False)
>>>
>>> mean_pull = RegPull(return_mean=True, use_bootstrap=False,
... use_pull=True, wgt_name='weights')
'''
# TODO: Check how this handels multi-target regression, may need to adjust averaging axis & DescrStatsW may not handle multi-dimensional data well.
def __init__(self, return_mean:bool, use_bootstrap:bool=False, use_pull:bool=True, name:Optional[str]=None, main_metric:bool=True):
if name is None:
name = 'pull' if use_pull else 'delta'
super().__init__(name=name, lower_metric_better=True, main_metric=main_metric)
store_attr(but=['name', 'main_metric'])
def _compute(self, preds:np.ndarray, targets:np.ndarray, weights:Optional[np.ndarray]=None) -> float:
delta = preds-targets
if self.use_pull: delta /= targets
if weights is not None:
weights = weights.astype('float64')
weights = weights/weights.sum()
if self.use_bootstrap:
bs = bootstrap_stats({'data':delta, 'mean':True, 'std':True, 'n':100, 'weights':weights})
return np.mean(bs['_mean']) if self.return_mean else np.mean(bs['_std'])
else:
if self.return_mean:
return np.average(delta, weights=weights)
else:
return DescrStatsW(delta, ddof=1, weights=weights*len(weights) if weights is not None else None).std
def evaluate(self) -> float:
r'''
Compute mean or width of regression error.
Returns:
Mean or width of regression error
'''
return self._compute(self.preds, self.targets, self.weights)
class OldRegAsProxyPull(OldRegPull):
r'''
Compute mean or standard deviation of delta or pull of some feature which is being indirectly regressed to via a proxy function.
Optionally, use bootstrap resampling on validation data.
Arguments:
proxy_func: function which acts on regression predictions and adds pred and gen_target columns to the Pandas DataFrame it is passed which contains
prediction columns pred_{i}
return_mean: whether to return the mean or the standard deviation
        use_bootstrap: whether to bootstrap resample the validation fold when computing the statistic
use_weights: whether to actually use weights if wgt_name is set
use_pull: whether to return the pull (differences / targets) or delta (differences)
targ_name: name of group in fold file containing regression targets
wgt_name: name of group in fold file containing datapoint weights
Examples::
>>> def reg_proxy_func(df):
>>> df['pred'] = calc_pair_mass(df, (1.77682, 1.77682),
... {targ[targ.find('_t')+3:]:
... f'pred_{i}' for i, targ
... in enumerate(targ_feats)})
>>> df['gen_target'] = 125
>>>
>>> std_delta = RegAsProxyPull(proxy_func=reg_proxy_func,
... return_mean=False, use_pull=False)
    .. Attention:: This class is deprecated in favour of :class:`~lumin.nn.metrics.reg_eval.RegAsProxyPull`.
It is a copy of the old `RegAsProxyPull` class used in lumin<=0.7.0.
It will be removed in V0.8
'''
# XXX remove in V0.8
def __init__(self, proxy_func:Callable[[pd.DataFrame],None], return_mean:bool, use_bootstrap:bool=False, use_weights:bool=True,
use_pull:bool=True, targ_name:str='targets', wgt_name:Optional[str]=None):
super().__init__(use_bootstrap=use_bootstrap, use_weights=use_weights, return_mean=return_mean, use_pull=use_pull, targ_name=targ_name,
wgt_name=wgt_name)
self.proxy_func = proxy_func
def evaluate(self, fy:FoldYielder, idx:int, y_pred:np.ndarray) -> float:
r'''
        Compute statistic on fold using provided predictions.
Arguments:
fy: :class:`~lumin.nn.data.fold_yielder.FoldYielder` interfacing to data
idx: fold index corresponding to fold for which y_pred was computed
y_pred: predictions for fold
Returns:
            Statistic set in initialisation computed on the chosen fold
Examples::
>>> mean = mean_pull.evaluate(train_fy, val_id, val_preds)
'''
df = self.get_df(fy, idx, y_pred)
self.proxy_func(df)
return self._compute(df)
class RegAsProxyPull(RegPull):
r'''
Compute mean or standard deviation of delta or pull of some feature which is being indirectly regressed to via a proxy function.
Optionally, use bootstrap resampling on validation data.
Arguments:
proxy_func: function which acts on regression predictions and adds pred and gen_target columns to the Pandas DataFrame it is passed which contains
prediction columns pred_{i}
return_mean: whether to return the mean or the standard deviation
        use_bootstrap: whether to bootstrap resample the validation fold when computing the statistic
use_weights: whether to actually use weights if wgt_name is set
use_pull: whether to return the pull (differences / targets) or delta (differences)
targ_name: optional name of group in fold file containing regression targets
name: optional name for metric, otherwise will be inferred from `use_pull`
        main_metric: whether this metric should be treated as the primary metric for SaveBest and EarlyStopping
Will automatically set the first EvalMetric to be main if multiple primary metrics are submitted
Examples::
>>> def reg_proxy_func(df):
>>> df['pred'] = calc_pair_mass(df, (1.77682, 1.77682),
... {targ[targ.find('_t')+3:]:
... f'pred_{i}' for i, targ
... in enumerate(targ_feats)})
>>> df['gen_target'] = 125
>>>
>>> std_delta = RegAsProxyPull(proxy_func=reg_proxy_func,
... return_mean=False, use_pull=False)
'''
def __init__(self, proxy_func:Callable[[pd.DataFrame],None], return_mean:bool, targ_name:Optional[str]=None, use_bootstrap:bool=False,
use_pull:bool=True, name:Optional[str]=None, main_metric:bool=True):
if name is None:
name = 'pull' if use_pull else 'delta'
        super().__init__(use_bootstrap=use_bootstrap, return_mean=return_mean, use_pull=use_pull, main_metric=main_metric)
        store_attr(but=['return_mean', 'use_bootstrap', 'use_pull', 'name', 'main_metric'])
def evaluate(self) -> float:
r'''
        Compute statistic on the stored fold data using the proxy function and the stored predictions.
        Returns:
            Statistic set in initialisation computed on the chosen fold
        Examples::
            >>> mean = mean_pull.evaluate()
'''
df = self.get_df()
self.proxy_func(df)
return self._compute(df['pred'].values, df['gen_target'].values, df['gen_weight'].values if 'gen_weight' in df.columns else None)
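# Small numeric illustration of the statistic computed above (standalone numpy,
# made-up numbers): the "pull" divides the residual by the target, the "delta"
# is the raw residual, and RegPull returns either their mean or their width.
if __name__ == '__main__':
    preds, targets = np.array([102., 98., 105.]), np.array([100., 100., 100.])
    pull = (preds - targets) / targets
    print('mean pull:', pull.mean())        # ~0.0167
    print('pull width:', pull.std(ddof=1))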
| 47.222642 | 154 | 0.644398 |
793f276e9e9b3130b7c530884a8d4c02b75c072d | 14,772 | py | Python | src/google/appengine/api/mail_service_pb2.py | asriniva/appengine-python-standard | 8a5abedfe99b27a4dcb31fd47d3ba7b62fd0e47c | [
"Apache-2.0"
] | 28 | 2021-01-06T19:55:21.000Z | 2022-03-28T09:41:08.000Z | src/google/appengine/api/mail_service_pb2.py | asriniva/appengine-python-standard | 8a5abedfe99b27a4dcb31fd47d3ba7b62fd0e47c | [
"Apache-2.0"
] | 13 | 2021-06-17T09:38:17.000Z | 2022-03-11T01:12:33.000Z | src/google/appengine/api/mail_service_pb2.py | asriniva/appengine-python-standard | 8a5abedfe99b27a4dcb31fd47d3ba7b62fd0e47c | [
"Apache-2.0"
] | 28 | 2021-03-09T19:27:37.000Z | 2022-01-21T21:18:52.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/appengine/api/mail_service.proto',
package='google.appengine',
syntax='proto2',
serialized_options=b'\n\035com.google.appengine.api.mailB\rMailServicePb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\'google/appengine/api/mail_service.proto\x12\x10google.appengine\"\xb4\x01\n\x10MailServiceError\"\x9f\x01\n\tErrorCode\x12\x06\n\x02OK\x10\x00\x12\x12\n\x0eINTERNAL_ERROR\x10\x01\x12\x0f\n\x0b\x42\x41\x44_REQUEST\x10\x02\x12\x17\n\x13UNAUTHORIZED_SENDER\x10\x03\x12\x1b\n\x17INVALID_ATTACHMENT_TYPE\x10\x04\x12\x17\n\x13INVALID_HEADER_NAME\x10\x05\x12\x16\n\x12INVALID_CONTENT_ID\x10\x06\"i\n\x0eMailAttachment\x12\x10\n\x08\x46ileName\x18\x01 \x02(\t\x12\x0c\n\x04\x44\x61ta\x18\x02 \x02(\x0c\x12\x11\n\tContentID\x18\x03 \x01(\t\x12$\n\x18\x44\x45PRECATED_ContentID_set\x18\r \x01(\x08\x42\x02\x18\x01\")\n\nMailHeader\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x02(\t\"\x81\x02\n\x0bMailMessage\x12\x0e\n\x06Sender\x18\x01 \x02(\t\x12\x0f\n\x07ReplyTo\x18\x02 \x01(\t\x12\n\n\x02To\x18\x03 \x03(\t\x12\n\n\x02\x43\x63\x18\x04 \x03(\t\x12\x0b\n\x03\x42\x63\x63\x18\x05 \x03(\t\x12\x0f\n\x07Subject\x18\x06 \x02(\t\x12\x10\n\x08TextBody\x18\x07 \x01(\t\x12\x10\n\x08HtmlBody\x18\x08 \x01(\t\x12\x13\n\x0b\x41mpHtmlBody\x18\x0b \x01(\t\x12\x34\n\nAttachment\x18\t \x03(\x0b\x32 .google.appengine.MailAttachment\x12,\n\x06Header\x18\n \x03(\x0b\x32\x1c.google.appengine.MailHeaderB.\n\x1d\x63om.google.appengine.api.mailB\rMailServicePb'
)
_MAILSERVICEERROR_ERRORCODE = _descriptor.EnumDescriptor(
name='ErrorCode',
full_name='google.appengine.MailServiceError.ErrorCode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BAD_REQUEST', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNAUTHORIZED_SENDER', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_ATTACHMENT_TYPE', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_HEADER_NAME', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_CONTENT_ID', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=83,
serialized_end=242,
)
_sym_db.RegisterEnumDescriptor(_MAILSERVICEERROR_ERRORCODE)
_MAILSERVICEERROR = _descriptor.Descriptor(
name='MailServiceError',
full_name='google.appengine.MailServiceError',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MAILSERVICEERROR_ERRORCODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=242,
)
_MAILATTACHMENT = _descriptor.Descriptor(
name='MailAttachment',
full_name='google.appengine.MailAttachment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='FileName', full_name='google.appengine.MailAttachment.FileName', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Data', full_name='google.appengine.MailAttachment.Data', index=1,
number=2, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ContentID', full_name='google.appengine.MailAttachment.ContentID', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='DEPRECATED_ContentID_set', full_name='google.appengine.MailAttachment.DEPRECATED_ContentID_set', index=3,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=244,
serialized_end=349,
)
_MAILHEADER = _descriptor.Descriptor(
name='MailHeader',
full_name='google.appengine.MailHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.appengine.MailHeader.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='google.appengine.MailHeader.value', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=351,
serialized_end=392,
)
_MAILMESSAGE = _descriptor.Descriptor(
name='MailMessage',
full_name='google.appengine.MailMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='Sender', full_name='google.appengine.MailMessage.Sender', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ReplyTo', full_name='google.appengine.MailMessage.ReplyTo', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='To', full_name='google.appengine.MailMessage.To', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Cc', full_name='google.appengine.MailMessage.Cc', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Bcc', full_name='google.appengine.MailMessage.Bcc', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Subject', full_name='google.appengine.MailMessage.Subject', index=5,
number=6, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='TextBody', full_name='google.appengine.MailMessage.TextBody', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='HtmlBody', full_name='google.appengine.MailMessage.HtmlBody', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='AmpHtmlBody', full_name='google.appengine.MailMessage.AmpHtmlBody', index=8,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Attachment', full_name='google.appengine.MailMessage.Attachment', index=9,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Header', full_name='google.appengine.MailMessage.Header', index=10,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=395,
serialized_end=652,
)
_MAILSERVICEERROR_ERRORCODE.containing_type = _MAILSERVICEERROR
_MAILMESSAGE.fields_by_name['Attachment'].message_type = _MAILATTACHMENT
_MAILMESSAGE.fields_by_name['Header'].message_type = _MAILHEADER
DESCRIPTOR.message_types_by_name['MailServiceError'] = _MAILSERVICEERROR
DESCRIPTOR.message_types_by_name['MailAttachment'] = _MAILATTACHMENT
DESCRIPTOR.message_types_by_name['MailHeader'] = _MAILHEADER
DESCRIPTOR.message_types_by_name['MailMessage'] = _MAILMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MailServiceError = _reflection.GeneratedProtocolMessageType('MailServiceError', (_message.Message,), {
'DESCRIPTOR' : _MAILSERVICEERROR,
'__module__' : 'google.appengine.api.mail_service_pb2'
})
_sym_db.RegisterMessage(MailServiceError)
MailAttachment = _reflection.GeneratedProtocolMessageType('MailAttachment', (_message.Message,), {
'DESCRIPTOR' : _MAILATTACHMENT,
'__module__' : 'google.appengine.api.mail_service_pb2'
})
_sym_db.RegisterMessage(MailAttachment)
MailHeader = _reflection.GeneratedProtocolMessageType('MailHeader', (_message.Message,), {
'DESCRIPTOR' : _MAILHEADER,
'__module__' : 'google.appengine.api.mail_service_pb2'
})
_sym_db.RegisterMessage(MailHeader)
MailMessage = _reflection.GeneratedProtocolMessageType('MailMessage', (_message.Message,), {
'DESCRIPTOR' : _MAILMESSAGE,
'__module__' : 'google.appengine.api.mail_service_pb2'
})
_sym_db.RegisterMessage(MailMessage)
DESCRIPTOR._options = None
_MAILATTACHMENT.fields_by_name['DEPRECATED_ContentID_set']._options = None
| 41.728814 | 1,275 | 0.752031 |
793f2818c34a262f9f300b0a7458aac23aee1372 | 1,485 | py | Python | indico/modules/events/registration/models/legacy_mapping.py | bkmgit/indico | d77ee121e35880a416b9b05e6098ea912d870b5c | ["MIT"] | max_stars_count: 1
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db import db
from indico.util.string import format_repr
class LegacyRegistrationMapping(db.Model):
"""Legacy registration id/token mapping.
Legacy registrations had tokens which are not compatible with the
new UUID-based ones.
"""
__tablename__ = 'legacy_registration_map'
__table_args__ = {'schema': 'event_registration'}
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
primary_key=True,
autoincrement=False
)
legacy_registrant_id = db.Column(
db.Integer,
primary_key=True,
autoincrement=False
)
legacy_registrant_key = db.Column(
db.String,
nullable=False
)
registration_id = db.Column(
db.Integer,
db.ForeignKey('event_registration.registrations.id'),
index=True,
nullable=False
)
registration = db.relationship(
'Registration',
lazy=False,
backref=db.backref(
'legacy_mapping',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
def __repr__(self):
return format_repr(self, 'event_id', 'legacy_registrant_id', 'legacy_registrant_key', 'registration_id')
| 26.052632 | 112 | 0.642424 |
793f2b848c3758a8f7dae311e7d721594f8e8f09 | 3,424 | py | Python | setup.py | HEmile/problog | 576b6fd305f72b12125111c8d4d62cf8a7bbda0f | [
"Apache-2.0"
] | 189 | 2019-05-27T08:20:10.000Z | 2022-03-28T09:29:22.000Z | setup.py | HEmile/problog | 576b6fd305f72b12125111c8d4d62cf8a7bbda0f | [
"Apache-2.0"
] | 60 | 2019-06-11T15:07:48.000Z | 2022-03-25T02:31:23.000Z | setup.py | HEmile/problog | 576b6fd305f72b12125111c8d4d62cf8a7bbda0f | [
"Apache-2.0"
] | 33 | 2019-07-03T13:14:24.000Z | 2022-02-20T01:07:15.000Z | #! /usr/bin/env python
import sys
import os
version_file = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "problog/version.py"
)
version = {}
with open(version_file) as fp:
exec(fp.read(), version)
version = version["version"]
if __name__ == "__main__" and len(sys.argv) == 1:
from problog import setup as problog_setup
problog_setup.install()
elif __name__ == "__main__":
from setuptools import setup, find_packages
from setuptools.command.install import install
class ProbLogInstall(install):
def run(self):
install.run(self)
before_dir = os.getcwd()
sys.path.insert(0, self.install_lib)
from problog import setup as problog_setup
try:
problog_setup.install()
except Exception as err:
print("Optional ProbLog installation failed: %s" % err, file=sys.stderr)
os.chdir(before_dir)
package_data = {
"problog": [
"bin/darwin/cnf2dDNNF_wine",
"bin/darwin/dsharp",
"bin/darwin/maxsatz",
"bin/linux/dsharp",
"bin/linux/maxsatz",
"bin/source/maxsatz/maxsatz2009.c",
"bin/windows/dsharp.exe",
"bin/windows/maxsatz.exe",
"bin/windows/libgcc_s_dw2-1.dll",
"bin/windows/libstdc++-6.dll",
"web/*.py",
"web/editor_local.html" "web/editor_adv.html",
"web/js/problog_editor.js",
"library/*.pl",
"library/*.py",
"library/nlp4plp.d/*",
]
}
setup(
name="problog",
version=version,
description="ProbLog2: Probabilistic Logic Programming toolbox",
url="https://dtai.cs.kuleuven.be/problog",
author="ProbLog team",
author_email="[email protected]",
license="Apache Software License",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Prolog",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="prolog probabilistic logic",
packages=find_packages(),
extras_require={"sdd": ["pysdd>=0.2.6"]},
entry_points={"console_scripts": ["problog=problog.tasks:main"]},
package_data=package_data,
cmdclass={"install": ProbLogInstall},
)
def increment_release(v):
v = v.split(".")
if len(v) == 4:
v = v[:3] + [str(int(v[3]) + 1)]
else:
v = v[:4]
return ".".join(v)
def increment_dev(v):
v = v.split(".")
if len(v) == 4:
v = v[:3] + [str(int(v[3]) + 1), "dev1"]
else:
v = v[:4] + ["dev" + str(int(v[4][3:]) + 1)]
return ".".join(v)
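# Editorial sketch (not part of the original setup.py): how the two helpers above
# behave, assuming a four-part release string plus an optional ".devN" suffix.
def _example_version_bumps():  # pragma: no cover
    assert increment_release("2.1.0.41") == "2.1.0.42"
    assert increment_dev("2.1.0.41") == "2.1.0.42.dev1"
    assert increment_dev("2.1.0.42.dev1") == "2.1.0.42.dev2"
    assert increment_release("2.1.0.42.dev1") == "2.1.0.42"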
def increment_version_dev():
v = increment_dev(version)
os.path.dirname(__file__)
with open(version_file, "w") as f:
f.write("version = '%s'\n" % v)
def increment_version_release():
v = increment_release(version)
with open(version_file, "w") as f:
f.write("version = '%s'\n" % v)
| 29.517241 | 88 | 0.561332 |
793f2cc756a4b80f4e78a2eefce83882b069e043 | 2,338 | py | Python | var/spack/repos/builtin/packages/r-multtest/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | max_stars_count: 11
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMulttest(RPackage):
"""Resampling-based multiple hypothesis testing.
Non-parametric bootstrap and permutation resampling-based multiple
testing procedures (including empirical Bayes methods) for controlling
the family-wise error rate (FWER), generalized family-wise error rate
(gFWER), tail probability of the proportion of false positives (TPPFP),
and false discovery rate (FDR). Several choices of bootstrap-based null
distribution are implemented (centered, centered and scaled, quantile-
transformed). Single-step and step-wise methods are available. Tests
based on a variety of t- and F-statistics (including t-statistics based
on regression parameters from linear and survival models as well as
those based on correlation parameters) are included. When probing
hypotheses with t-statistics, users may also select a potentially faster
null distribution which is multivariate normal with mean zero and
variance covariance matrix derived from the vector influence function.
Results are reported in terms of adjusted p-values, confidence regions
and test statistic cutoffs. The procedures are directly applicable to
identifying differentially expressed genes in DNA microarray
experiments."""
bioc = "multtest"
version('2.50.0', commit='1de96649a942b115d3d554394514745e86eb3fd3')
version('2.46.0', commit='c4dd27b333c80313a88668b59d0299988c6478a2')
version('2.40.0', commit='5f00017c2d3a31e05e1cfe06d9f7afdee19f8473')
version('2.38.0', commit='4dfe71cecfb298a94521088fb7bd83c5498d2915')
version('2.36.0', commit='babb15e8d110eb72300ad59cf7e53386237a4198')
version('2.34.0', commit='6ef873e05e6c93ede54f3421424f56eda057cd54')
version('2.32.0', commit='c5e890dfbffcc3a3f107303a24b6085614312f4a')
    depends_on('r@2.10:', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
| 51.955556 | 79 | 0.738238 |
793f2f9e1987b7b3c549474b98984dc48944b865 | 6,978 | py | Python | findspark.py | minrk/find-spark | 527323546f5d8dd4a2365cac0af795825d222fa4 | ["BSD-3-Clause"] | max_stars_count: null
"""Find spark home, and initialize by adding pyspark to sys.path.
If SPARK_HOME is defined, it will be used to put pyspark on sys.path.
Otherwise, common locations for spark will be searched.
"""
from glob import glob
import os
import sys
__version__ = "2.0.1"
def find():
"""Find a local spark installation.
Will first check the SPARK_HOME env variable, and otherwise
search common installation locations, e.g. from homebrew
"""
spark_home = os.environ.get("SPARK_HOME", None)
if not spark_home:
if "pyspark" in sys.modules:
return os.path.dirname(sys.modules["pyspark"].__file__)
for path in [
"/usr/local/opt/apache-spark/libexec", # macOS Homebrew
"/usr/lib/spark/", # AWS Amazon EMR
"/usr/local/spark/", # common linux path for spark
"/opt/spark/", # other common linux path for spark
# Any other common places to look?
]:
if os.path.exists(path):
spark_home = path
break
if not spark_home:
# last resort: try importing pyspark (pip-installed, already on sys.path)
try:
import pyspark
except ImportError:
pass
else:
spark_home = os.path.dirname(pyspark.__file__)
if not spark_home:
raise ValueError(
"Couldn't find Spark, make sure SPARK_HOME env is set"
" or Spark is in an expected location (e.g. from homebrew installation)."
)
return spark_home
def _edit_rc(spark_home, sys_path=None):
"""Persists changes to environment by changing shell config.
Adds lines to .bashrc to set environment variables
including the adding of dependencies to the system path. Will only
edit this file if they already exist. Currently only works for bash.
Parameters
----------
spark_home : str
Path to Spark installation.
sys_path: list(str)
Paths (if any) to be added to $PYTHONPATH.
Should include python subdirectory of Spark installation, py4j
"""
bashrc_location = os.path.expanduser("~/.bashrc")
if os.path.isfile(bashrc_location):
with open(bashrc_location, "a") as bashrc:
bashrc.write("\n# Added by findspark\n")
bashrc.write("export SPARK_HOME={}\n".format(spark_home))
if sys_path:
bashrc.write(
"export PYTHONPATH={}\n".format(
os.pathsep.join(sys_path + ["$PYTHONPATH"])
)
)
bashrc.write("\n")
def _edit_ipython_profile(spark_home, sys_path=None):
"""Adds a startup file to the current IPython profile to import pyspark.
The startup file sets the required environment variables and imports pyspark.
Parameters
----------
spark_home : str
Path to Spark installation.
sys_path : list(str)
Paths to be added to sys.path.
Should include python subdirectory of Spark installation, py4j
"""
from IPython import get_ipython
ip = get_ipython()
if ip:
profile_dir = ip.profile_dir.location
else:
from IPython.utils.path import locate_profile
profile_dir = locate_profile()
startup_file_loc = os.path.join(profile_dir, "startup", "findspark.py")
with open(startup_file_loc, "w") as startup_file:
# Lines of code to be run when IPython starts
startup_file.write("import sys, os\n")
startup_file.write("os.environ['SPARK_HOME'] = {}\n".format(repr(spark_home)))
if sys_path:
startup_file.write("sys.path[:0] = {}\n".format(repr(sys_path)))
startup_file.write("import pyspark\n")
def init(spark_home=None, python_path=None, edit_rc=False, edit_profile=False):
"""Make pyspark importable.
Sets environment variables and adds dependencies to sys.path.
If no Spark location is provided, will try to find an installation.
Parameters
----------
spark_home : str, optional, default = None
Path to Spark installation, will try to find automatically
if not provided.
python_path : str, optional, default = None
Path to Python for Spark workers (PYSPARK_PYTHON),
will use the currently running Python if not provided.
edit_rc : bool, optional, default = False
Whether to attempt to persist changes by appending to shell
config.
edit_profile : bool, optional, default = False
Whether to create an IPython startup file to automatically
configure and import pyspark.
"""
if not spark_home:
spark_home = find()
if not python_path:
python_path = os.environ.get("PYSPARK_PYTHON", sys.executable)
# ensure SPARK_HOME is defined
os.environ["SPARK_HOME"] = spark_home
# ensure PYSPARK_PYTHON is defined
os.environ["PYSPARK_PYTHON"] = python_path
# add pyspark to sys.path
if "pyspark" not in sys.modules:
spark_python = os.path.join(spark_home, "python")
try:
py4j = glob(os.path.join(spark_python, "lib", "py4j-*.zip"))[0]
except IndexError:
raise Exception(
"Unable to find py4j in {}, your SPARK_HOME may not be configured correctly".format(
spark_python
)
)
sys.path[:0] = sys_path = [spark_python, py4j]
else:
# already imported, no need to patch sys.path
sys_path = None
if edit_rc:
_edit_rc(spark_home, sys_path)
if edit_profile:
_edit_ipython_profile(spark_home, sys_path)
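# Editorial usage sketch (not part of the original findspark module): the typical
# call sequence from user code. It assumes a local Spark installation that find()
# can locate; the application name is a placeholder.
def _example_init_usage():  # pragma: no cover
    init()                      # set SPARK_HOME / PYSPARK_PYTHON and patch sys.path
    import pyspark              # importable now even without a pip-installed pyspark
    sc = pyspark.SparkContext(appName="findspark-demo")
    print(sc.parallelize(range(100)).sum())
    sc.stop()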
def _add_to_submit_args(to_add):
"""Add string s to the PYSPARK_SUBMIT_ARGS env var"""
existing_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "")
if not existing_args:
# if empty, start with default pyspark-shell
# ref: pyspark.java_gateway.launch_gateway
existing_args = "pyspark-shell"
# add new args to front to avoid insert after executable
submit_args = "{} {}".format(to_add, existing_args)
os.environ["PYSPARK_SUBMIT_ARGS"] = submit_args
return submit_args
def add_packages(packages):
"""Add external packages to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
packages: list of package names in string format
"""
# if the parameter is a string, convert to a single element list
if isinstance(packages, str):
packages = [packages]
_add_to_submit_args("--packages " + ",".join(packages))
def add_jars(jars):
"""Add external jars to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
jars: list of path to jars in string format
"""
# if the parameter is a string, convert to a single element list
if isinstance(jars, str):
jars = [jars]
_add_to_submit_args("--jars " + ",".join(jars))
| 31.29148 | 100 | 0.637002 |
793f2fef9eea3ee2742a0bc7edf1951ba95d15fa | 2,465 | py | Python | data/p4VQE/R4/benchmark/startQiskit252.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | max_stars_count: null
# qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.cx(input_qubit[0],input_qubit[2]) # number=9
prog.x(input_qubit[2]) # number=10
prog.cx(input_qubit[0],input_qubit[2]) # number=11
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit252.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.388889 | 118 | 0.634483 |
793f31411e745ceabbdce63800bb38c149c8c434 | 2,568 | py | Python | simplemonitor/Alerters/ses.py | cgroschupp/simplemonitor | 0d4cb4823193bdd93c3f9176eea3bfab07007be1 | ["BSD-3-Clause"] | max_stars_count: 373
"""
SimpleMonitor alerts via Amazon Simple Email Service
"""
import os
from typing import Any, Dict, cast
import boto3
from botocore.exceptions import ClientError
from ..Monitors.monitor import Monitor
from .alerter import Alerter, AlertLength, AlertType, register
@register
class SESAlerter(Alerter):
"""Send email alerts using Amazon's SES service."""
alerter_type = "ses"
def __init__(self, config_options: dict) -> None:
super().__init__(config_options)
self.from_addr = cast(str, self.get_config_option("from", allow_empty=False))
self.to_addr = cast(str, self.get_config_option("to", allow_empty=False))
self.support_catchup = True
self.ses_client_params = {} # type: Dict[str, str]
aws_region = cast(str, self.get_config_option("aws_region"))
if aws_region:
os.environ["AWS_DEFAULT_REGION"] = aws_region
aws_access_key = cast(str, self.get_config_option("aws_access_key"))
aws_secret_key = cast(str, self.get_config_option("aws_secret_access_key"))
if aws_access_key and aws_secret_key:
self.ses_client_params["aws_access_key_id"] = aws_access_key
self.ses_client_params["aws_secret_access_key"] = aws_secret_key
def send_alert(self, name: str, monitor: Monitor) -> None:
"""Send the email."""
alert_type = self.should_alert(monitor)
mail = {} # type: Dict[str, Any]
mail["Source"] = self.from_addr
mail["Destination"] = {"ToAddresses": [self.to_addr]}
message = {} # type: Dict[str, Any]
if alert_type == AlertType.NONE:
return
message["Subject"] = {
"Data": self.build_message(AlertLength.NOTIFICATION, alert_type, monitor)
}
message["Body"] = {
"Text": {"Data": self.build_message(AlertLength.FULL, alert_type, monitor)}
}
mail["Message"] = message
if not self._dry_run:
try:
client = boto3.client("ses", **self.ses_client_params)
client.send_email(**mail)
except ClientError:
self.alerter_logger.exception("couldn't send mail")
else:
self.alerter_logger.info("dry_run: would send email:")
self.alerter_logger.info(" Subject: %s", message["Subject"]["Data"])
self.alerter_logger.info(" Body: %s", message["Body"]["Text"]["Data"])
def _describe_action(self) -> str:
return "emailing {target} via SES".format(target=self.to_addr)
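# Editorial sketch (not part of the original module): a minimal alerter section in
# simplemonitor's INI-style configuration. The key names mirror the
# get_config_option() calls above; the section name and values are placeholders.
_EXAMPLE_ALERTER_CONFIG = """
[ses_alerts]
type=ses
from=monitor@example.com
to=oncall@example.com
aws_region=eu-west-1
"""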
| 33.350649 | 87 | 0.633956 |
793f32812fdf13fae3277facb837cda35c49043d | 611 | py | Python | HardwareTests/AsynchronousTests/_x__asynchTests.py | JetStarBlues/Nand-2-Tetris | c27b5c2ac659f1edb63d36d89bf87e226bc5672c | ["MIT"] | max_stars_count: null
'''
In the spirit of
http://www.diveintopython3.net/unit-testing.html
http://www.diveintopython3.net/refactoring.html
The tutorials show how maintainable and refactorable code becomes when you use tests
'''
'''------------------------------ Imports ------------------------------'''
# Built ins
import unittest
'''-------------------------------- Main --------------------------------'''
# stackoverflow.com/a/13533236/2354735
pattern = '_1__elementary_arithmetic.py'
testsuite = unittest.TestLoader().discover( '.', pattern )
def runTests():
	unittest.TextTestRunner().run( testsuite )
| 26.565217 | 84 | 0.569558 |
793f329b122ec6bf32fbd5745d939b3b133ab25f | 10,179 | py | Python | contrib/spendfrom/spendfrom.py | puzcoin/scap-coin | 6a1c0dee69c59b0dc168f8419d7d97b33f75c867 | ["MIT"] | max_stars_count: null
#!/usr/bin/env python
#
# Use the raw transactions API to spend SCAPs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a safecapitald or safecapital-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the safecapital data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/SafeCapital/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "SafeCapital")
return os.path.expanduser("~/.safecapital")
def read_bitcoin_config(dbdir):
"""Read the safecapital.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "safecapital.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a safecapital JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 47005 if testnet else 47003
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the safecapitald we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(safecapitald):
info = safecapitald.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
safecapitald.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = safecapitald.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(safecapitald):
address_summary = dict()
address_to_account = dict()
for info in safecapitald.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = safecapitald.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = safecapitald.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-safecapital-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
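# Editorial sketch (not part of the original script): select_coins() walks the
# candidate outputs greedily until the target is covered and returns the surplus
# as change. The txids and amounts below are illustrative only.
def _example_select_coins():  # pragma: no cover
    candidates = [
        {"txid": "aa"*32, "vout": 0, "amount": Decimal("1.0")},
        {"txid": "bb"*32, "vout": 1, "amount": Decimal("0.6")},
    ]
    outputs, change = select_coins(Decimal("1.2"), candidates)
    assert len(outputs) == 2 and change == Decimal("0.4")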
def create_tx(safecapitald, fromaddresses, toaddress, amount, fee):
all_coins = list_available(safecapitald)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to safecapitald.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = safecapitald.createrawtransaction(inputs, outputs)
signed_rawtx = safecapitald.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(safecapitald, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = safecapitald.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(safecapitald, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = safecapitald.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(safecapitald, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get SCAPs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send SCAPs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of safecapital.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
safecapitald = connect_JSON(config)
if options.amount is None:
address_summary = list_available(safecapitald)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(safecapitald) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(safecapitald, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(safecapitald, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = safecapitald.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| 37.981343 | 111 | 0.637784 |
793f33d0502d3ba9b1c600eae12927fe9ce00e48 | 2,154 | py | Python | homeassistant/components/scheduler/time.py | mikiec84/home-assistant | d9e3c02df3a2690e74d1b606e8db0a4dd686e872 | ["MIT"] | max_stars_count: null
"""
homeassistant.components.scheduler.time
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An event in the scheduler component that will call the service
every specified day at the time specified.
A time event needs to have the type 'time', which service to call and at
which time.
{
"type": "time",
"service": "switch.turn_off",
"time": "22:00:00"
}
"""
from datetime import timedelta
import logging
import homeassistant.util.dt as dt_util
from homeassistant.components.scheduler import ServiceEventListener
_LOGGER = logging.getLogger(__name__)
def create_event_listener(schedule, event_listener_data):
""" Create a TimeEvent based on the description. """
service = event_listener_data['service']
(hour, minute, second) = [int(x) for x in
event_listener_data['time'].split(':')]
return TimeEventListener(schedule, service, hour, minute, second)
# pylint: disable=too-few-public-methods
class TimeEventListener(ServiceEventListener):
""" The time event that the scheduler uses. """
# pylint: disable=too-many-arguments
def __init__(self, schedule, service, hour, minute, second):
ServiceEventListener.__init__(self, schedule, service)
self.hour = hour
self.minute = minute
self.second = second
def schedule(self, hass):
""" Schedule this event so that it will be called. """
next_time = dt_util.now().replace(
hour=self.hour, minute=self.minute, second=self.second)
# Calculate the next time the event should be executed.
# That is the next day that the schedule is configured to run
while next_time < dt_util.now() or \
next_time.weekday() not in self.my_schedule.days:
next_time = next_time + timedelta(days=1)
# pylint: disable=unused-argument
def execute(now):
""" Call the execute method """
self.execute(hass)
hass.track_point_in_time(execute, next_time)
_LOGGER.info(
'TimeEventListener scheduled for %s, will call service %s.%s',
next_time, self.domain, self.service)
| 30.771429 | 74 | 0.65506 |
793f342c2a5a9ae58d181863697d533850a8e69d | 2,889 | py | Python | test/functional/test_framework/address.py | pbitmonkey/bitmonkey-debug | f48bab02e88b1fcf445c59380e6fda018d86f462 | ["MIT"] | max_stars_count: null
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The bitmonkey Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import hex_str_to_bytes
from . import segwit_addr
ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = b.hex()
str = chr(version).encode('latin-1').hex() + str
checksum = hash256(hex_str_to_bytes(str)).hex()
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
# TODO: def base58_decode
def keyhash_to_p2pkh(hash, main = False):
assert len(hash) == 20
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert len(hash) == 20
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return segwit_addr.encode("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert False
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert False
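# Editorial sketch (not part of the original framework): typical calls with an
# all-zero 20-byte hash as a stand-in; the resulting strings depend entirely on
# the input bytes, so none are asserted here.
def _example_address_helpers():  # pragma: no cover
    dummy_hash160 = bytes(20)
    print(keyhash_to_p2pkh(dummy_hash160))       # regtest/testnet P2PKH (version byte 111)
    print(scripthash_to_p2sh(dummy_hash160))     # regtest/testnet P2SH (version byte 196)
    print(program_to_witness(0, dummy_hash160))  # bech32 P2WPKH, "bcrt1..." prefix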
| 31.747253 | 94 | 0.69055 |
793f34a8c1176e95220dbfeacd683d9d6ce72032 | 4,857 | py | Python | lambda/us-east-1_Numbers_Trivia/ask_sdk_model/user.py | Techievena/Numbers_Trivia | e86daaf7e7bc2c80c703c8496daea6317e986204 | ["MIT"] | max_stars_count: 1
# coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.permissions import Permissions
class User(object):
"""
Represents the user registered to the device initiating the request.
:param user_id: A string that represents a unique identifier for the user who made the request. The length of this identifier can vary, but is never more than 255 characters. The userId is automatically generated when a user enables the skill in the Alexa app. Note: Disabling and re-enabling a skill generates a new identifier.
:type user_id: (optional) str
:param access_token: A token identifying the user in another system. This is only provided if the user has successfully linked their account. See Linking an Alexa User with a User in Your System for more details.
:type access_token: (optional) str
:param permissions:
:type permissions: (optional) ask_sdk_model.permissions.Permissions
"""
deserialized_types = {
'user_id': 'str',
'access_token': 'str',
'permissions': 'ask_sdk_model.permissions.Permissions'
}
attribute_map = {
'user_id': 'userId',
'access_token': 'accessToken',
'permissions': 'permissions'
}
def __init__(self, user_id=None, access_token=None, permissions=None):
# type: (Optional[str], Optional[str], Optional[Permissions]) -> None
"""Represents the user registered to the device initiating the request.
:param user_id: A string that represents a unique identifier for the user who made the request. The length of this identifier can vary, but is never more than 255 characters. The userId is automatically generated when a user enables the skill in the Alexa app. Note: Disabling and re-enabling a skill generates a new identifier.
:type user_id: (optional) str
:param access_token: A token identifying the user in another system. This is only provided if the user has successfully linked their account. See Linking an Alexa User with a User in Your System for more details.
:type access_token: (optional) str
:param permissions:
:type permissions: (optional) ask_sdk_model.permissions.Permissions
"""
self.__discriminator_value = None
self.user_id = user_id
self.access_token = access_token
self.permissions = permissions
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, User):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
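# Editorial sketch (not part of the generated SDK model): constructing a User and
# reading it back through to_dict(); the identifier shown is a placeholder.
def _example_user_to_dict():  # pragma: no cover
    user = User(user_id="amzn1.ask.account.EXAMPLE", access_token=None)
    assert user.to_dict()["user_id"] == "amzn1.ask.account.EXAMPLE"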
| 39.487805 | 336 | 0.640107 |
793f36a021d93be1cc38ee4a5c3bd4655d8331b2 | 1,069 | py | Python | tests/console/commands/self/utils.py | zEdS15B3GCwq/poetry | 2afe9840533aacfe561d3fdf65c6fb2e790d89b1 | ["MIT"] | max_stars_count: 7,258
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from poetry.factory import Factory
if TYPE_CHECKING:
from tomlkit.container import Table as TOMLTable
def get_self_command_dependencies(locked: bool = True) -> TOMLTable:
from poetry.console.commands.self.self_command import SelfCommand
from poetry.locations import CONFIG_DIR
system_pyproject_file = SelfCommand.get_default_system_pyproject_file()
assert system_pyproject_file.exists()
assert system_pyproject_file.parent == Path(CONFIG_DIR)
if locked:
assert system_pyproject_file.parent.joinpath("poetry.lock").exists()
poetry = Factory().create_poetry(system_pyproject_file.parent, disable_plugins=True)
content = poetry.file.read()["tool"]["poetry"]
assert "group" in content
assert SelfCommand.ADDITIONAL_PACKAGE_GROUP in content["group"]
assert "dependencies" in content["group"][SelfCommand.ADDITIONAL_PACKAGE_GROUP]
return content["group"][SelfCommand.ADDITIONAL_PACKAGE_GROUP]["dependencies"]
| 31.441176 | 88 | 0.782039 |
793f382f6306325ed08e967eb555c8cf08a6a80c | 1,690 | py | Python | PY2/colorprint/methods.py | apua/colorprint | 4c8b22c21447b587dc827be83feb79084504cdaa | ["WTFPL"] | max_stars_count: null
"""
Provide `print` and `pprint` methods.
"""
from __future__ import print_function
from .attributes import attr_names
def colorform(values):
attrs = ';'.join(map(str, values))
return '\033[{}m{{}}\033[m'.format(attrs)
class ColorPrint:
_print = print
def __init__(self, values=()):
self.values = values
def __call__(self, *args, **kwargs):
colored = colorform(self.values).format
outputs = map(colored, map(str, args))
self._print(*outputs, **kwargs)
def __getattr__(self, attr):
values = attr_names.get(attr)
if values is None:
raise AttributeError('Color "%s" is not defined'%attr)
return self.__class__(self.values+values)
class ColorPPrint(ColorPrint):
def __call__(self, object, stream=None, indent=1, width=80, depth=None):
"""copy from `pprint.pprint`"""
from pprint import PrettyPrinter
import re
printer = PrettyPrinter(stream=stream, indent=indent,
width=width, depth=depth)
stream = printer._stream
color_stream = type('',(),{})()
printer._stream = color_stream
colored = colorform(self.values).format
#color_stream.write = lambda s: stream.write(colored(s))
def write(s, patt=re.compile(r'^(,?)(\n\ *)(.*)$')):
m = patt.match(s)
if m is not None:
a, b, c = m.groups()
stream.write((a and colored(a)) + b + (c and colored(c)))
else:
stream.write(colored(s))
color_stream.write = write
printer.pprint(object)
print = ColorPrint()
pprint = ColorPPrint()
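# Editorial usage sketch (not part of the original module). It assumes the
# companion `attributes` module defines names such as 'red', 'bold' and 'green';
# attribute lookups chain, each adding another ANSI code before printing.
def _example_usage():  # pragma: no cover
    print.red.bold('error:', 'something went wrong')
    pprint.green({'status': 'ok', 'items': [1, 2, 3]})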
| 28.166667 | 76 | 0.584615 |
793f390d2399e48bd918f6bb5edba794588572ce | 4,439 | py | Python | source/streaming/geh_stream/batch_operations/monitor_batch.py | Energinet-DataHub/geh-timeseries | 24e92e7664e3d3774f941973ed7563837385748a | ["Apache-2.0"] | max_stars_count: 5
# Copyright 2020 Energinet DataHub A/S
#
# Licensed under the Apache License, Version 2.0 (the "License2");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql import DataFrame
from geh_stream.monitoring import Telemetry, MonitoredStopwatch
def get_rows_in_batch(batch_df: DataFrame, watch: MonitoredStopwatch):
"""Retrieves the amount of rows in the batch currently being processed.
Parameters:
    batch_df (DataFrame): The batch currently being processed, whose rows will be counted.
watch (MonitoredStopwatch): The stopwatch currently tracking the performance of the batch. Will be used to start a sub-stopwatch monitoring this method.
Returns:
count (int): The amount of rows in the current batch
"""
current_method_name = get_rows_in_batch.__name__
timer = watch.start_sub_timer(current_method_name)
count = batch_df.count()
timer.stop_timer()
return count
def __track(correlation_ids_with_count, telemetry_instrumentation_key, *, batch_row_count: int, batch_dependency_id, batch_duration_ms):
correlation_id = correlation_ids_with_count[0]
correlation_row_count = correlation_ids_with_count[1]
name = "TimeSeriesHandledInBatch"
data = None
dependency_type = "databricks"
properties = {"BatchDependencyId": batch_dependency_id}
measurements = None
if batch_row_count:
average_per_item = batch_duration_ms / batch_row_count
measurements = {"TotalCountInBatch": batch_row_count, "CorrelationItemsInBatch": correlation_row_count, "AverageMsPerItem": average_per_item}
telemetry_client = Telemetry.create_telemetry_client(telemetry_instrumentation_key)
telemetry_client.context.operation.id = correlation_id
telemetry_client.context.operation.parent_id = None
telemetry_client.track_dependency(name, data, type=dependency_type, duration=batch_duration_ms, properties=properties, measurements=measurements)
telemetry_client.flush()
def track_batch_back_to_original_correlation_requests(time_series_points_df: DataFrame, batch_info, telemetry_instrumentation_key):
"""Tracks the performance of the batch back to the original requests each atomic value came from.
The correlation ID of an atomic value is the way to track it back to the original request where it was added.
Atomic values from the same batch may be split into multiple batches when processing the stream, but we add information
about each batch to Application Insights to be able to see when the individual parts of the original request were processed.
Typically a batch will contain multiple atomic values from the same original request, so we will not have the same number
    of correlation IDs as rows in the dataframe. Usually a lot fewer.
    For each correlation ID, the number of items in the batch that belonged to the request, the total rows in the batch and
the average time spent per row is reported back to the original request.
Parameters:
    time_series_points_df (DataFrame): The batch currently being processed; its rows are grouped by correlation ID before tracking.
    batch_info (dict): Batch metadata passed to tracking: 'batch_row_count', 'batch_dependency_id' and 'batch_duration_ms'.
    telemetry_instrumentation_key (str): Application Insights instrumentation key used to report the dependency telemetry.
"""
from functools import partial
# Use 'partial' to solve the problem that UDFs don't take arguments
track_function = partial(__track, telemetry_instrumentation_key=telemetry_instrumentation_key, **batch_info)
track_function.__doc__ = """User defined function with single argument feeded by the spark engine.
It is necessary that the function doesn't hold object instance references
because the function will be serialized and sent to all worker nodes."""
time_series_points_df \
.groupBy("correlationId") \
.count() \
.foreach(track_function)
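# Editorial sketch (not part of the original module): how the function above is
# typically wired into a foreachBatch-style callback. All names and the
# instrumentation key are placeholders; the duration would normally come from a
# MonitoredStopwatch.
def _example_foreach_batch(batch_df, batch_id):  # pragma: no cover
    batch_info = {
        "batch_row_count": batch_df.count(),
        "batch_dependency_id": str(batch_id),
        "batch_duration_ms": 0,
    }
    track_batch_back_to_original_correlation_requests(
        batch_df, batch_info, "00000000-0000-0000-0000-000000000000"
    )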
| 54.134146 | 160 | 0.76301 |
793f3958c64d201d39067caade8e62218c818fd5 | 25,896 | py | Python | sklearn/manifold/_spectral_embedding.py | MaiRajborirug/scikit-learn | c18d015372f7041099d19c215cd4c36ffd6fe5c5 | ["BSD-3-Clause"] | max_stars_count: 199
"""Spectral Embedding."""
# Author: Gael Varoquaux <[email protected]>
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh
from scipy.sparse.csgraph import connected_components
from scipy.sparse.csgraph import laplacian as csgraph_laplacian
from ..base import BaseEstimator
from ..utils import (
check_array,
check_random_state,
check_symmetric,
)
from ..utils._arpack import _init_arpack_v0
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.fixes import lobpcg
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph, NearestNeighbors
from ..utils.deprecation import deprecated
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
An array of bool value indicating the indexes of the nodes
belonging to the largest connected components of the given query
node.
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=bool)
nodes_to_explore = np.zeros(n_node, dtype=bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
neighbors = graph[i].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
"""Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not.
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
        # dense graph, find the connected component starting from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
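# Editorial sketch (not part of scikit-learn): _graph_is_connected() on a tiny
# dense adjacency matrix with two components versus a fully connected one.
def _example_graph_connectivity():  # pragma: no cover
    two_components = np.array(
        [[0, 1, 0, 0],
         [1, 0, 0, 0],
         [0, 0, 0, 1],
         [0, 0, 1, 0]]
    )
    assert not _graph_is_connected(two_components)
    assert _graph_is_connected(np.ones((4, 4)))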
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition.
Parameters
----------
laplacian : {ndarray, sparse matrix}
The graph laplacian.
value : float
The value of the diagonal.
norm_laplacian : bool
Whether the value of the diagonal should be changed or not.
Returns
-------
laplacian : {array, sparse matrix}
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
# We need all entries in the diagonal to values
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[:: n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = laplacian.row == laplacian.col
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(
adjacency,
*,
n_components=8,
eigen_solver=None,
random_state=None,
eigen_tol=0.0,
norm_laplacian=True,
drop_first=True,
):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
norm_laplacian : bool, default=True
If True, then compute symmetric normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
https://doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError as e:
if eigen_solver == "amg":
raise ValueError(
"The eigen_solver was set to 'amg', but pyamg is not available."
) from e
if eigen_solver is None:
eigen_solver = "arpack"
elif eigen_solver not in ("arpack", "lobpcg", "amg"):
raise ValueError(
"Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver
)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn(
"Graph is not fully connected, spectral embedding may not work as expected."
)
laplacian, dd = csgraph_laplacian(
adjacency, normed=norm_laplacian, return_diag=True
)
if (
eigen_solver == "arpack"
or eigen_solver != "lobpcg"
and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)
):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
v0 = _init_arpack_v0(laplacian.shape[0], random_state)
_, diffusion_map = eigsh(
laplacian, k=n_components, sigma=1.0, which="LM", tol=eigen_tol, v0=v0
)
embedding = diffusion_map.T[n_components::-1]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
elif eigen_solver == "amg":
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
laplacian = check_array(
laplacian, dtype=[np.float64, np.float32], accept_sparse=True
)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# The Laplacian matrix is always singular, having at least one zero
# eigenvalue, corresponding to the trivial eigenvector, which is a
# constant. Using a singular matrix for preconditioning may result in
# random failures in LOBPCG and is not supported by the existing
# theory:
# see https://doi.org/10.1007/s10208-015-9297-1
        # Shift the Laplacian so its diagonal is not all ones. The shift
# does change the eigenpairs however, so we'll feed the shifted
# matrix to the solver and afterward set it back to the original.
diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
laplacian += diag_shift
ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse="csr"))
laplacian -= diag_shift
M = ml.aspreconditioner()
# Create initial approximation X to eigenvectors
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
X = X.astype(laplacian.dtype)
_, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.0e-5, largest=False)
embedding = diffusion_map.T
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
if eigen_solver == "lobpcg":
laplacian = check_array(
laplacian, dtype=[np.float64, np.float32], accept_sparse=True
)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
_, diffusion_map = eigh(laplacian, check_finite=False)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension and create initial
# approximation X to eigenvectors
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
X = X.astype(laplacian.dtype)
_, diffusion_map = lobpcg(
laplacian, X, tol=1e-5, largest=False, maxiter=2000
)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
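# Illustrative usage sketch for spectral_embedding (not part of the library
# code; the RBF kernel, sample size and random_state below are arbitrary
# choices for the example):
#
#     >>> import numpy as np
#     >>> from sklearn.metrics.pairwise import rbf_kernel
#     >>> X = np.random.RandomState(0).rand(50, 3)
#     >>> affinity = rbf_kernel(X)          # symmetric (50, 50) affinity
#     >>> emb = spectral_embedding(affinity, n_components=2, random_state=0)
#     >>> emb.shape
#     (50, 2)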
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
n_components : int, default=2
The dimension of the projected subspace.
affinity : {'nearest_neighbors', 'rbf', 'precomputed', \
'precomputed_nearest_neighbors'} or callable, \
default='nearest_neighbors'
How to construct the affinity matrix.
- 'nearest_neighbors' : construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf' : construct the affinity matrix by computing a radial basis
function (RBF) kernel.
- 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
- 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
        - callable : use the passed-in function as the affinity;
          the function takes a data matrix (n_samples, n_features)
          and returns an affinity matrix (n_samples, n_samples).
gamma : float, default=None
Kernel coefficient for rbf kernel. If None, gamma will be set to
1/n_features.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems.
If None, then ``'arpack'`` is used.
n_neighbors : int, default=None
Number of nearest neighbors for nearest_neighbors graph building.
If None, n_neighbors will be set to max(n_samples/10, 1).
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
embedding_ : ndarray of shape (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
        Affinity matrix constructed from samples or precomputed.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_neighbors_ : int
Number of nearest neighbors effectively used.
See Also
--------
Isomap : Non-linear dimensionality reduction through Isometric Mapping.
References
----------
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- On Spectral Clustering: Analysis and an algorithm, 2001
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import SpectralEmbedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = SpectralEmbedding(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
"""
def __init__(
self,
n_components=2,
*,
affinity="nearest_neighbors",
gamma=None,
random_state=None,
eigen_solver=None,
n_neighbors=None,
n_jobs=None,
):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
def _more_tags(self):
return {
"pairwise": self.affinity
in ["precomputed", "precomputed_nearest_neighbors"]
}
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `_pairwise` was deprecated in "
"version 0.24 and will be removed in 1.1 (renaming of 0.26)."
)
@property
def _pairwise(self):
return self.affinity in ["precomputed", "precomputed_nearest_neighbors"]
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : array-like of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Y: Ignored
Returns
-------
affinity_matrix of shape (n_samples, n_samples)
"""
if self.affinity == "precomputed":
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
return self.affinity_matrix_
if self.affinity == "nearest_neighbors":
if sparse.issparse(X):
warnings.warn(
"Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity"
)
self.affinity = "rbf"
else:
self.n_neighbors_ = (
self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1)
)
self.affinity_matrix_ = kneighbors_graph(
X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs
)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (
self.affinity_matrix_ + self.affinity_matrix_.T
)
return self.affinity_matrix_
if self.affinity == "rbf":
self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1]
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix}, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, accept_sparse="csr", ensure_min_samples=2)
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, str):
if self.affinity not in {
"nearest_neighbors",
"rbf",
"precomputed",
"precomputed_nearest_neighbors",
}:
raise ValueError(
"%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable."
% self.affinity
)
elif not callable(self.affinity):
raise ValueError(
"'affinity' is expected to be an affinity name or a callable. Got: %s"
% self.affinity
)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(
affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state,
)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix} of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Spectral embedding of the training matrix.
"""
self.fit(X)
return self.embedding_
| 38.026432 | 88 | 0.632646 |
793f395eee1807f59e4a98aa2758da1839fb2ff7 | 10,425 | py | Python | rpython/rlib/rtime.py | SeraphRoy/PyPy-Functional | e825dce7f7c484fa666566974a93ed5d59fb73be | [
"Apache-2.0",
"OpenSSL"
] | 4 | 2019-02-11T06:58:43.000Z | 2020-03-15T14:12:32.000Z | rpython/rlib/rtime.py | murtyjones/kiwi-pypy | 1419c7de61a11b2d29602b25506cb3b4f225996e | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | rpython/rlib/rtime.py | murtyjones/kiwi-pypy | 1419c7de61a11b2d29602b25506cb3b4f225996e | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | """
RPython implementations of time.time(), time.clock(), time.select().
"""
import sys
import math
import time as pytime
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.objectmodel import register_replacement_for
from rpython.rlib.rarithmetic import intmask, UINT_MAX
from rpython.rlib import rposix
_WIN32 = sys.platform.startswith('win')
if _WIN32:
TIME_H = 'time.h'
FTIME = '_ftime64'
STRUCT_TIMEB = 'struct __timeb64'
includes = ['winsock2.h', 'windows.h',
TIME_H, 'sys/types.h', 'sys/timeb.h']
need_rusage = False
else:
TIME_H = 'sys/time.h'
FTIME = 'ftime'
STRUCT_TIMEB = 'struct timeb'
includes = [TIME_H, 'time.h', 'errno.h', 'sys/select.h',
'sys/types.h', 'unistd.h',
'sys/time.h', 'sys/resource.h']
if not sys.platform.startswith("openbsd") and \
not sys.platform.startswith("freebsd"):
includes.append('sys/timeb.h')
need_rusage = True
eci = ExternalCompilationInfo(includes=includes)
class CConfig:
_compilation_info_ = eci
TIMEVAL = rffi_platform.Struct('struct timeval', [('tv_sec', rffi.INT),
('tv_usec', rffi.INT)])
HAVE_GETTIMEOFDAY = rffi_platform.Has('gettimeofday')
HAVE_FTIME = rffi_platform.Has(FTIME)
if need_rusage:
RUSAGE = rffi_platform.Struct('struct rusage', [('ru_utime', TIMEVAL),
('ru_stime', TIMEVAL)])
if sys.platform.startswith('freebsd') or sys.platform.startswith('netbsd'):
libraries = ['compat']
elif sys.platform == 'linux2':
libraries = ['rt']
else:
libraries = []
class CConfigForFTime:
_compilation_info_ = ExternalCompilationInfo(
includes=[TIME_H, 'sys/timeb.h'],
libraries=libraries
)
TIMEB = rffi_platform.Struct(STRUCT_TIMEB, [('time', rffi.INT),
('millitm', rffi.INT)])
class CConfigForClockGetTime:
_compilation_info_ = ExternalCompilationInfo(
includes=['time.h'],
libraries=libraries
)
_NO_MISSING_RT = rffi_platform.Has('printf("%d", clock_gettime(0, 0))')
TIMESPEC = rffi_platform.Struct('struct timespec', [('tv_sec', rffi.LONG),
('tv_nsec', rffi.LONG)])
constant_names = ['RUSAGE_SELF', 'EINTR',
'CLOCK_REALTIME',
'CLOCK_REALTIME_COARSE',
'CLOCK_MONOTONIC',
'CLOCK_MONOTONIC_COARSE',
'CLOCK_MONOTONIC_RAW',
'CLOCK_BOOTTIME',
'CLOCK_PROCESS_CPUTIME_ID',
'CLOCK_THREAD_CPUTIME_ID',
'CLOCK_HIGHRES',
'CLOCK_PROF',
]
for const in constant_names:
setattr(CConfig, const, rffi_platform.DefinedConstantInteger(const))
defs_names = ['GETTIMEOFDAY_NO_TZ']
for const in defs_names:
setattr(CConfig, const, rffi_platform.Defined(const))
def decode_timeval(t):
return (float(rffi.getintfield(t, 'c_tv_sec')) +
float(rffi.getintfield(t, 'c_tv_usec')) * 0.000001)
def external(name, args, result, compilation_info=eci, **kwds):
return rffi.llexternal(name, args, result,
compilation_info=compilation_info, **kwds)
def replace_time_function(name):
func = getattr(pytime, name, None)
if func is None:
return lambda f: f
return register_replacement_for(
func,
sandboxed_name='ll_time.ll_time_%s' % name)
config = rffi_platform.configure(CConfig)
globals().update(config)
# Note: time.time() is used by the framework GC during collect(),
# which means that we have to be very careful about not allocating
# GC memory here. This is the reason for the _nowrapper=True.
if HAVE_GETTIMEOFDAY:
if GETTIMEOFDAY_NO_TZ:
c_gettimeofday = external('gettimeofday',
[lltype.Ptr(TIMEVAL)], rffi.INT,
_nowrapper=True, releasegil=False)
else:
c_gettimeofday = external('gettimeofday',
[lltype.Ptr(TIMEVAL), rffi.VOIDP], rffi.INT,
_nowrapper=True, releasegil=False)
if HAVE_FTIME:
globals().update(rffi_platform.configure(CConfigForFTime))
c_ftime = external(FTIME, [lltype.Ptr(TIMEB)],
lltype.Void,
_nowrapper=True, releasegil=False)
c_time = external('time', [rffi.VOIDP], rffi.TIME_T,
_nowrapper=True, releasegil=False)
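# The time() replacement below prefers gettimeofday(2) (microsecond
# resolution); platforms without it fall back to ftime(3) (milliseconds),
# and a failing gettimeofday() call degrades to time(2) (whole seconds).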
@replace_time_function('time')
def time():
void = lltype.nullptr(rffi.VOIDP.TO)
result = -1.0
if HAVE_GETTIMEOFDAY:
with lltype.scoped_alloc(TIMEVAL) as t:
errcode = -1
if GETTIMEOFDAY_NO_TZ:
errcode = c_gettimeofday(t)
else:
errcode = c_gettimeofday(t, void)
if rffi.cast(rffi.LONG, errcode) == 0:
result = decode_timeval(t)
if result != -1:
return result
else: # assume using ftime(3)
with lltype.scoped_alloc(TIMEB) as t:
c_ftime(t)
result = (float(intmask(t.c_time)) +
float(intmask(t.c_millitm)) * 0.001)
return result
return float(c_time(void))
# _______________________________________________________________
# time.clock()
if _WIN32:
# hacking to avoid LARGE_INTEGER which is a union...
QueryPerformanceCounter = external(
'QueryPerformanceCounter', [rffi.CArrayPtr(lltype.SignedLongLong)],
lltype.Void, releasegil=False)
QueryPerformanceFrequency = external(
'QueryPerformanceFrequency', [rffi.CArrayPtr(lltype.SignedLongLong)],
rffi.INT, releasegil=False)
class State(object):
divisor = 0.0
counter_start = 0
state = State()
HAS_CLOCK_GETTIME = (CLOCK_MONOTONIC is not None)
if sys.platform == 'darwin':
HAS_CLOCK_GETTIME = False
# ^^^ https://bitbucket.org/pypy/pypy/issues/2432 and others
# (change it manually if you *know* you want to build and run on
# OS/X 10.12 or later)
if HAS_CLOCK_GETTIME:
# Linux and other POSIX systems with clock_gettime()
# TIMESPEC:
globals().update(rffi_platform.configure(CConfigForClockGetTime))
# do we need to add -lrt?
eciclock = CConfigForClockGetTime._compilation_info_
if not _NO_MISSING_RT:
eciclock = eciclock.merge(ExternalCompilationInfo(libraries=['rt']))
# the functions:
c_clock_getres = external("clock_getres",
[lltype.Signed, lltype.Ptr(TIMESPEC)],
rffi.INT, releasegil=False,
save_err=rffi.RFFI_SAVE_ERRNO,
compilation_info=eciclock)
c_clock_gettime = external('clock_gettime',
[lltype.Signed, lltype.Ptr(TIMESPEC)],
rffi.INT, releasegil=False,
save_err=rffi.RFFI_SAVE_ERRNO,
compilation_info=eciclock)
c_clock_settime = external('clock_settime',
[lltype.Signed, lltype.Ptr(TIMESPEC)],
rffi.INT, releasegil=False,
save_err=rffi.RFFI_SAVE_ERRNO,
compilation_info=eciclock)
    # Note: there are no higher-level functions here to access
# clock_gettime(). The issue is that we'd need a way that keeps
# nanosecond precision, depending on the usage, so we can't have a
# nice function that returns the time as a float.
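    # For reference, a caller that needs nanosecond precision could do
    # something along these lines (illustrative sketch only; `read_clock_ns`
    # is not part of this module):
    #
    #     def read_clock_ns(clk_id):
    #         with lltype.scoped_alloc(TIMESPEC) as a:
    #             if c_clock_gettime(clk_id, a) == 0:
    #                 return (rffi.getintfield(a, 'c_tv_sec') * 1000000000 +
    #                         rffi.getintfield(a, 'c_tv_nsec'))
    #             raise OSError(rposix.get_saved_errno(), "clock_gettime failed")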
ALL_DEFINED_CLOCKS = [const for const in constant_names
if const.startswith('CLOCK_')
and globals()[const] is not None]
if need_rusage:
RUSAGE = RUSAGE
RUSAGE_SELF = RUSAGE_SELF or 0
c_getrusage = external('getrusage',
[rffi.INT, lltype.Ptr(RUSAGE)],
rffi.INT,
releasegil=False)
def win_perf_counter():
with lltype.scoped_alloc(rffi.CArray(rffi.lltype.SignedLongLong), 1) as a:
if state.divisor == 0.0:
QueryPerformanceCounter(a)
state.counter_start = a[0]
QueryPerformanceFrequency(a)
state.divisor = float(a[0])
QueryPerformanceCounter(a)
diff = a[0] - state.counter_start
return float(diff) / state.divisor
@replace_time_function('clock')
def clock():
if _WIN32:
return win_perf_counter()
elif HAS_CLOCK_GETTIME and CLOCK_PROCESS_CPUTIME_ID is not None:
with lltype.scoped_alloc(TIMESPEC) as a:
if c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) == 0:
return (float(rffi.getintfield(a, 'c_tv_sec')) +
float(rffi.getintfield(a, 'c_tv_nsec')) * 0.000000001)
with lltype.scoped_alloc(RUSAGE) as a:
c_getrusage(RUSAGE_SELF, a)
result = (decode_timeval(a.c_ru_utime) +
decode_timeval(a.c_ru_stime))
return result
# _______________________________________________________________
# time.sleep()
if _WIN32:
Sleep = external('Sleep', [rffi.ULONG], lltype.Void)
else:
c_select = external('select', [rffi.INT, rffi.VOIDP,
rffi.VOIDP, rffi.VOIDP,
lltype.Ptr(TIMEVAL)], rffi.INT,
save_err=rffi.RFFI_SAVE_ERRNO)
@replace_time_function('sleep')
def sleep(secs):
if _WIN32:
millisecs = secs * 1000.0
while millisecs > UINT_MAX:
Sleep(UINT_MAX)
millisecs -= UINT_MAX
Sleep(rffi.cast(rffi.ULONG, int(millisecs)))
else:
void = lltype.nullptr(rffi.VOIDP.TO)
with lltype.scoped_alloc(TIMEVAL) as t:
frac = math.fmod(secs, 1.0)
rffi.setintfield(t, 'c_tv_sec', int(secs))
rffi.setintfield(t, 'c_tv_usec', int(frac*1000000.0))
if rffi.cast(rffi.LONG, c_select(0, void, void, void, t)) != 0:
errno = rposix.get_saved_errno()
if errno != EINTR:
raise OSError(errno, "Select failed")
| 37.5 | 80 | 0.601631 |
793f3ae350c49f6b306a0375b3a9fd12e1920a61 | 780 | py | Python | src/stories/_return.py | sashgorokhov-forks/stories | ae0596cd1c6eb2b159bc652706d28ed934af1507 | [
"BSD-2-Clause"
] | null | null | null | src/stories/_return.py | sashgorokhov-forks/stories | ae0596cd1c6eb2b159bc652706d28ed934af1507 | [
"BSD-2-Clause"
] | null | null | null | src/stories/_return.py | sashgorokhov-forks/stories | ae0596cd1c6eb2b159bc652706d28ed934af1507 | [
"BSD-2-Clause"
] | null | null | null | from ._repr import namespace_representation
class Result(object):
def __init__(self, value=None):
self.value = value
def __repr__(self):
return self.__class__.__name__ + "(" + repr(self.value) + ")"
class Success(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __repr__(self):
return (
self.__class__.__name__ + "(" + namespace_representation(self.kwargs) + ")"
)
class Failure(object):
def __init__(self, reason=None):
self.reason = reason
def __repr__(self):
reason = repr(self.reason) if self.reason else ""
return self.__class__.__name__ + "(" + reason + ")"
class Skip(object):
def __repr__(self):
return self.__class__.__name__ + "()"
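# Illustrative sketch of how these marker objects are typically returned from
# story steps (the step method signature and `ctx` come from the wider
# package and are assumed here, not defined in this module):
#
#     def check_balance(self, ctx):
#         if ctx.price > ctx.balance:
#             return Failure("not enough funds")
#         return Success(paid=ctx.price)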
| 22.941176 | 87 | 0.615385 |
793f3b91c4332ee2098cb644b7df4aab959f35f6 | 6,468 | py | Python | tests/ut/python/parallel/test_gather_v2.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | 1 | 2020-05-13T11:31:21.000Z | 2020-05-13T11:31:21.000Z | tests/ut/python/parallel/test_gather_v2.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_gather_v2.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore import context
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.common import dtype as mstype
from mindspore.common.api import _executor
from tests.ut.python.ops.test_math_ops import VirtualLoss
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x, y):
predict = self.network(x, y)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y):
return C.grad_all(self.network)(x, y)
class Net(nn.Cell):
def __init__(self, axis=0, strategy1=None, strategy2=None, shape=[64, 64]):
super().__init__()
self.gatherv2 = P.GatherV2().set_strategy(strategy1)
self.mul = P.Mul().set_strategy(strategy2)
self.index = Tensor(np.ones(shape), dtype=ms.int32)
self.axis = axis
def construct(self, x, y):
out = self.gatherv2(x, self.index, self.axis)
out = self.mul(out, y)
return out
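# Note on the strategy tuples used in the tests below (the sharding semantics
# are described here as an assumption of the semi-auto-parallel convention):
# each inner tuple states how one input of the primitive is sliced across
# devices, e.g. strategy1 = ((1, 8),) keeps axis 0 of the gathered table whole
# and splits axis 1 over 8 devices, while strategy2 = ((4, 2, 1), (4, 2, 1))
# splits both Mul inputs 4-ways on axis 0 and 2-ways on axis 1.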
def test_gatherv2_semi_auto0():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((1, 8), )
strategy2 = ((4, 2, 1), (4, 2, 1))
net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_semi_auto1():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((8, 1), )
strategy2 = ((4, 2, 1), (4, 2, 1))
net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_semi_auto2():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 4), )
strategy2 = ((4, 2, 1), (4, 2, 1))
net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_semi_auto3():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((1, 8), )
strategy2 = ((4, 2, 1), (4, 2, 1))
net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_semi_auto4():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((8, 1), )
strategy2 = ((4, 2, 1), (4, 2, 1))
net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_semi_auto5():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 4), )
strategy2 = ((4, 2, 1), (4, 2, 1))
net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_semi_auto6():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy2 = ((4, 2, 1), (4, 2, 1))
net = GradWrap(NetWithLoss(Net(0, None, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_semi_auto7():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy2 = ((4, 2, 1), (4, 2, 1))
net = GradWrap(NetWithLoss(Net(1, None, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_semi_auto8():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((8, ), )
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
net.set_auto_parallel()
x = Tensor(np.ones([64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_auto0():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
net = GradWrap(NetWithLoss(Net(0)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
_executor.compile(net, x, y)
def test_gatherv2_auto1():
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
net = GradWrap(NetWithLoss(Net(1)))
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
_executor.compile(net, x, y)
| 37.387283 | 102 | 0.672696 |
793f3e436ca884b945e0fc70a026fe990f696426 | 9,988 | py | Python | key_word_num/cifar100vgg.py | Carrie2001/Chinese-poems-generation-based-on-pictures | 63e180c207bde1559b290e79e70b0fa0ce8b0c33 | [
"MIT"
] | 1 | 2022-01-14T17:27:28.000Z | 2022-01-14T17:27:28.000Z | key_word_num/cifar100vgg.py | Carrie2001/Chinese-poems-generation-based-on-pictures | 63e180c207bde1559b290e79e70b0fa0ce8b0c33 | [
"MIT"
] | null | null | null | key_word_num/cifar100vgg.py | Carrie2001/Chinese-poems-generation-based-on-pictures | 63e180c207bde1559b290e79e70b0fa0ce8b0c33 | [
"MIT"
] | null | null | null |
from __future__ import print_function
import keras
from keras.datasets import cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
import numpy as np
from keras.layers.core import Lambda
from keras import backend as K
from keras import regularizers
from PIL import Image
import os
import re
import matplotlib.pyplot as plt # plt is used to display images
import matplotlib.image as mpimg # mpimg is used to read images
import sys
class cifar100vgg:
def __init__(self,train=True):
self.num_classes = 100
self.weight_decay = 0.0005
self.x_shape = [32,32,3]
self.model = self.build_model()
if train:
self.model = self.train(self.model)
else:
self.model.load_weights('cifar100vgg.h5')
def build_model(self):
# Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.
model = Sequential()
weight_decay = self.weight_decay
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=self.x_shape,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(self.num_classes))
model.add(Activation('softmax'))
return model
def normalize(self,X_train,X_test):
#this function normalize inputs for zero mean and unit variance
# it is used when training a model.
# Input: training set and test set
# Output: normalized training set and test set according to the trianing set statistics.
mean = np.mean(X_train,axis=(0,1,2,3))
std = np.std(X_train, axis=(0, 1, 2, 3))
print(mean)
print(std)
X_train = (X_train-mean)/(std+1e-7)
X_test = (X_test-mean)/(std+1e-7)
return X_train, X_test
def normalize_production(self,x):
#this function is used to normalize instances in production according to saved training set statistics
# Input: X - a training set
# Output X - a normalized training set according to normalization constants.
#these values produced during first training and are general for the standard cifar10 training set normalization
mean = 121.936
std = 68.389
return (x-mean)/(std+1e-7)
def predict(self,x,normalize=True,batch_size=50):
if normalize:
x = self.normalize_production(x)
return self.model.predict(x,batch_size)
def train(self,model):
#training parameters
batch_size = 128
maxepoches = 250
learning_rate = 0.1
lr_decay = 1e-6
lr_drop = 20
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = self.normalize(x_train, x_test)
y_train = keras.utils.to_categorical(y_train, self.num_classes)
y_test = keras.utils.to_categorical(y_test, self.num_classes)
def lr_scheduler(epoch):
return learning_rate * (0.5 ** (epoch // lr_drop))
reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)
#data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
        # Compute quantities required for normalization (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
#optimization details
sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
# training process in a for loop with learning rate drop every 25 epoches.
historytemp = model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=maxepoches,
validation_data=(x_test, y_test),callbacks=[reduce_lr],verbose=2)
model.save_weights('cifar100vgg.h5')
return model
def resize_image(image_path, w, h):
    # Write the resized copy next to the input image as '32_32.jpg'.
    image_name = re.sub(r'.*[/\\]', '', image_path)
    outfile = re.sub(image_name, '', image_path)+'32_32.jpg'
    img = Image.open(image_path)
    img.resize((w, h), Image.ANTIALIAS).save(outfile, quality=95)
    return outfile
def unpickle():
import pickle
with open("meta", 'rb') as fo:
dict = pickle.load(fo)
return dict
def get_labels():
return unpickle()['fine_label_names']
def pic_handler(path,show_pictures=False):
    path=path.replace('\\','/')
    outfile=resize_image(path,w=32,h=32)
    lena = mpimg.imread(outfile) # at this point lena is already a np.array and can be processed freely
    if lena.shape!=(32,32,3):
        print("There is a problem with the image size. Please try another image; if it keeps failing, contact the author.")
        sys.exit(0)
    if show_pictures:
        plt.imshow(lena) # display the image
        plt.axis('off') # hide the axes
        plt.show()
    os.remove(outfile)
    return lena
def pic_to_label(path,show_pictures=False):
labels=unpickle()
my_model=cifar100vgg(train=False)
pic=pic_handler(path,show_pictures).astype('float32')
pic=np.expand_dims(pic, axis = 0)
predicted_x = my_model.predict(x=pic)
return(labels['fine_label_names'][np.argmax(predicted_x,1)[0]])
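# Illustrative usage (assumes 'cifar100vgg.h5' and the CIFAR-100 'meta' file
# are present in the working directory; the image path is hypothetical):
#
#     label = pic_to_label("samples/mountain.jpg", show_pictures=False)
#     print(label)   # one of the 100 fine labels, e.g. 'mountain'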
def main():
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
y_train = keras.utils.to_categorical(y_train, 100)
y_test = keras.utils.to_categorical(y_test, 100)
model = cifar100vgg()
predicted_x = model.predict(x_test)
residuals = (np.argmax(predicted_x,1)!=np.argmax(y_test,1))
loss = sum(residuals)/len(residuals)
print("the validation 0/1 loss is: ",loss)
| 36.586081 | 120 | 0.651382 |
793f3e5587c2648c5cce05606c8889bfe6e2c709 | 1,835 | py | Python | argumentosSimple.py | sanxofon/basicnlp3 | 289415ec07fae69af04a8354bb9a9801cad564b8 | [
"MIT"
] | 1 | 2017-12-31T18:09:06.000Z | 2017-12-31T18:09:06.000Z | argumentosSimple.py | sanxofon/basicnlp3 | 289415ec07fae69af04a8354bb9a9801cad564b8 | [
"MIT"
] | null | null | null | argumentosSimple.py | sanxofon/basicnlp3 | 289415ec07fae69af04a8354bb9a9801cad564b8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
There are several ways to receive information from the user in the terminal.
To ask the user for information DURING the execution of a script
we can use the "raw_input" (python2) or "input" (python3) function and store the answer in a variable,
as can be seen in the script: "helloWorldUTF8.py"
Sometimes we want to receive information from the user from the moment we RUN the
script, that is, right from the start.
Execution examples:
	>> python argumentosSimple.py Santiago Chávez
	>> python argumentosSimple.py "Santiago Chávez"
	>> python argumentosSimple.py "Santiago Chávez" utf8 > test.txt
"""
# We import a library so we can use its functionality
# The "sys" library lets us access system information
import sys
# The "sys" library gives us access to the "arguments" that were passed when this script was run
nombreScript = sys.argv[0] # Index "0" always contains the name of the current script: "argumentosSimple.py"
argumentos = [] # We define the variable "argumentos" as an "empty list"
# We loop over the arguments from 1 to the total number of arguments
for i in range(1,len(sys.argv)):
	argumentos.append(sys.argv[i]) # Index "i" holds the current argument (if it exists)
# We look for the string "utf8" among the received arguments
# If it is there we create a "utf8" variable to remember it
utf8 = False
if "utf8" in argumentos:
	utf8 = True
	argumentos.remove("utf8") # Removes the "utf8" argument from the list
# Finally we print the arguments provided by the user
print(u"Argumentos recibidos:")
for i in range(len(argumentos)):
if utf8:
		# If "utf8" was received among the arguments we encode the output
print("\t",i+1,".",argumentos[i].encode('utf-8'))
else:
		# Otherwise we print it as-is
print("\t",i+1,".",argumentos[i]) | 42.674419 | 110 | 0.751499 |
793f3e6b9bbb0a8d3be5a1c24757484d5404acfc | 314 | py | Python | test/tests/globals.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:28:45.000Z | 2020-02-06T14:28:45.000Z | test/tests/globals.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/tests/globals.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:29:00.000Z | 2020-02-06T14:29:00.000Z | def f():
z = 1
print x # non-builtin, but defined
print True # builtin, redefined
print False # builtin, not redefined
print z # local
try:
print y # non-builtin, not defined
except NameError, e:
print e
x = 2
z = 2
True = "new_true"
f()
assert globals() is globals()
| 17.444444 | 42 | 0.595541 |
793f3e7d2f44e76c3ded0b4e42a09eda0736a12d | 3,183 | py | Python | samples/summarize-bot/siatl/logger/plotting.py | tsuwandy/botbuilder-community-python | e035a993cd3b0fd8c7b2ff1126c4e993d0c8efc3 | [
"MIT"
] | null | null | null | samples/summarize-bot/siatl/logger/plotting.py | tsuwandy/botbuilder-community-python | e035a993cd3b0fd8c7b2ff1126c4e993d0c8efc3 | [
"MIT"
] | null | null | null | samples/summarize-bot/siatl/logger/plotting.py | tsuwandy/botbuilder-community-python | e035a993cd3b0fd8c7b2ff1126c4e993d0c8efc3 | [
"MIT"
] | null | null | null | import numpy
from visdom import Visdom
# import matplotlib.pyplot as plt
# import seaborn as sns
class Visualizer:
def __init__(self, env="main",
server="http://localhost",
port=8097,
base_url="/",
http_proxy_host=None,
http_proxy_port=None):
self._viz = Visdom(env=env,
server=server,
port=port,
http_proxy_host=http_proxy_host,
http_proxy_port=http_proxy_port,
use_incoming_socket=False)
self._viz.close(env=env)
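    # Illustrative usage (assumes a visdom server is reachable on the default
    # host/port; the names and values below are made up):
    #
    #     viz = Visualizer(env="experiment-1")
    #     viz.plot_line(values=[0.42], steps=[100], name="train_loss")
    #     viz.plot_text("epoch 1 done", title="status")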
def plot_line(self, values, steps, name, legend=None):
if legend is None:
opts = dict(title=name)
else:
opts = dict(title=name, legend=legend)
self._viz.line(
X=numpy.column_stack(steps),
Y=numpy.column_stack(values),
win=name,
update='append',
opts=opts
)
def plot_text(self, text, title, pre=True):
_width = max([len(x) for x in text.split("\n")]) * 10
_heigth = len(text.split("\n")) * 20
_heigth = max(_heigth, 120)
if pre:
text = "<pre>{}</pre>".format(text)
self._viz.text(text, win=title, opts=dict(title=title,
width=min(_width, 400),
height=min(_heigth, 400)))
def plot_bar(self, data, labels, title):
self._viz.bar(win=title, X=data,
opts=dict(legend=labels, stacked=False, title=title))
def plot_scatter(self, data, labels, title):
X = numpy.concatenate(data, axis=0)
Y = numpy.concatenate([numpy.full(len(d), i)
for i, d in enumerate(data, 1)], axis=0)
self._viz.scatter(win=title, X=X, Y=Y,
opts=dict(legend=labels, title=title,
markersize=5,
webgl=True,
width=400,
height=400,
markeropacity=0.5))
def plot_heatmap(self, data, labels, title):
self._viz.heatmap(win=title,
X=data,
opts=dict(
title=title,
columnnames=labels[1],
rownames=labels[0],
width=700,
height=700,
layoutopts={'plotly': {
'xaxis': {
'side': 'top',
'tickangle': -60,
# 'autorange': "reversed"
},
'yaxis': {
'autorange': "reversed"
},
}
}
))
| 37.447059 | 76 | 0.395853 |
793f3ee0bc60e3479f3565b56c6638783a2f4424 | 6,910 | py | Python | tests/lib/metrics_test.py | rjlohan/cloudformation-cli-python-plugin | f195602b2402407ccc5b6210771d14db5d9e25ca | [
"Apache-2.0"
] | null | null | null | tests/lib/metrics_test.py | rjlohan/cloudformation-cli-python-plugin | f195602b2402407ccc5b6210771d14db5d9e25ca | [
"Apache-2.0"
] | null | null | null | tests/lib/metrics_test.py | rjlohan/cloudformation-cli-python-plugin | f195602b2402407ccc5b6210771d14db5d9e25ca | [
"Apache-2.0"
] | null | null | null | # pylint: disable=no-member
from datetime import datetime
from unittest.mock import MagicMock, call, patch
import boto3
from cloudformation_cli_python_lib.interface import Action, MetricTypes, StandardUnit
from cloudformation_cli_python_lib.metrics import (
MetricPublisher,
MetricsPublisherProxy,
format_dimensions,
)
from botocore.stub import Stubber
class MockSession:
def __init__(self, client):
self._client = client
def client(self, _name):
return self._client
def test_format_dimensions():
dimensions = {"MyDimensionKey": "val_1", "MyDimensionKey2": "val_2"}
result = format_dimensions(dimensions)
assert [
{"Name": "MyDimensionKey", "Value": "val_1"},
{"Name": "MyDimensionKey2", "Value": "val_2"},
] == result
@patch("cloudformation_cli_python_lib.metrics.LOG", auto_spec=True)
def test_put_metric_catches_error(mock_logger):
client = boto3.client("cloudwatch")
stubber = Stubber(client)
stubber.add_client_error("put_metric_data", "InternalServiceError")
stubber.activate()
publisher = MetricPublisher("123412341234", "Aa::Bb::Cc", MockSession(client))
dimensions = {
"DimensionKeyActionType": Action.CREATE.name,
"DimensionKeyResourceType": publisher.resource_type,
}
publisher.publish_metric(
MetricTypes.HandlerInvocationCount,
dimensions,
StandardUnit.Count,
1.0,
datetime.now(),
)
stubber.deactivate()
expected_calls = [
call.error(
"An error occurred while publishing metrics: %s",
"An error occurred (InternalServiceError) when calling the "
"PutMetricData operation: ",
)
]
assert expected_calls == mock_logger.mock_calls
def test_publish_exception_metric():
mock_client = patch("boto3.client")
mock_client.return_value = MagicMock()
fake_datetime = datetime(2019, 1, 1)
publisher = MetricPublisher("123412341234", "Aa::Bb::Cc", mock_client.return_value)
proxy = MetricsPublisherProxy()
proxy.add_metrics_publisher(publisher)
proxy.publish_exception_metric(fake_datetime, Action.CREATE, Exception("fake-err"))
expected_calls = [
call.client("cloudwatch"),
call.client().put_metric_data(
Namespace="AWS/CloudFormation/123412341234/Aa/Bb/Cc",
MetricData=[
{
"MetricName": MetricTypes.HandlerException.name,
"Dimensions": [
{"Name": "DimensionKeyActionType", "Value": "CREATE"},
{
"Name": "DimensionKeyExceptionType",
"Value": "<class 'Exception'>",
},
{"Name": "DimensionKeyResourceType", "Value": "Aa::Bb::Cc"},
],
"Unit": StandardUnit.Count.name,
"Timestamp": str(fake_datetime),
"Value": 1.0,
}
],
),
]
assert expected_calls == mock_client.return_value.mock_calls
def test_publish_invocation_metric():
mock_client = patch("boto3.client")
mock_client.return_value = MagicMock()
fake_datetime = datetime(2019, 1, 1)
publisher = MetricPublisher("123412341234", "Aa::Bb::Cc", mock_client.return_value)
proxy = MetricsPublisherProxy()
proxy.add_metrics_publisher(publisher)
proxy.publish_invocation_metric(fake_datetime, Action.CREATE)
expected_calls = [
call.client("cloudwatch"),
call.client().put_metric_data(
Namespace="AWS/CloudFormation/123412341234/Aa/Bb/Cc",
MetricData=[
{
"MetricName": MetricTypes.HandlerInvocationCount.name,
"Dimensions": [
{"Name": "DimensionKeyActionType", "Value": "CREATE"},
{"Name": "DimensionKeyResourceType", "Value": "Aa::Bb::Cc"},
],
"Unit": StandardUnit.Count.name,
"Timestamp": str(fake_datetime),
"Value": 1.0,
}
],
),
]
assert expected_calls == mock_client.return_value.mock_calls
def test_publish_duration_metric():
mock_client = patch("boto3.client")
mock_client.return_value = MagicMock()
fake_datetime = datetime(2019, 1, 1)
publisher = MetricPublisher("123412341234", "Aa::Bb::Cc", mock_client.return_value)
proxy = MetricsPublisherProxy()
proxy.add_metrics_publisher(publisher)
proxy.publish_duration_metric(fake_datetime, Action.CREATE, 100)
expected_calls = [
call.client("cloudwatch"),
call.client().put_metric_data(
Namespace="AWS/CloudFormation/123412341234/Aa/Bb/Cc",
MetricData=[
{
"MetricName": MetricTypes.HandlerInvocationDuration.name,
"Dimensions": [
{"Name": "DimensionKeyActionType", "Value": "CREATE"},
{"Name": "DimensionKeyResourceType", "Value": "Aa::Bb::Cc"},
],
"Unit": StandardUnit.Milliseconds.name,
"Timestamp": str(fake_datetime),
"Value": 100,
}
],
),
]
assert expected_calls == mock_client.return_value.mock_calls
def test_publish_log_delivery_exception_metric():
mock_client = patch("boto3.client")
mock_client.return_value = MagicMock()
fake_datetime = datetime(2019, 1, 1)
publisher = MetricPublisher("123412341234", "Aa::Bb::Cc", mock_client.return_value)
proxy = MetricsPublisherProxy()
proxy.add_metrics_publisher(publisher)
proxy.publish_log_delivery_exception_metric(fake_datetime, TypeError("test"))
expected_calls = [
call.client("cloudwatch"),
call.client().put_metric_data(
Namespace="AWS/CloudFormation/123412341234/Aa/Bb/Cc",
MetricData=[
{
"MetricName": MetricTypes.HandlerException.name,
"Dimensions": [
{
"Name": "DimensionKeyActionType",
"Value": "ProviderLogDelivery",
},
{
"Name": "DimensionKeyExceptionType",
"Value": "<class 'TypeError'>",
},
{"Name": "DimensionKeyResourceType", "Value": "Aa::Bb::Cc"},
],
"Unit": StandardUnit.Count.name,
"Timestamp": str(fake_datetime),
"Value": 1.0,
}
],
),
]
assert expected_calls == mock_client.return_value.mock_calls
| 35.255102 | 87 | 0.578437 |
793f3f226957800f284fd84218d3621c3074c95c | 473 | py | Python | testing/gomodules/setup.py | pytogo/setuptools-golang | 953b869874ad45e4929287119912a22885993fc4 | [
"MIT"
] | 64 | 2016-08-16T22:32:43.000Z | 2022-03-22T09:15:35.000Z | testing/gomodules/setup.py | pytogo/setuptools-golang | 953b869874ad45e4929287119912a22885993fc4 | [
"MIT"
] | 25 | 2017-01-30T04:26:00.000Z | 2022-03-08T15:22:58.000Z | testing/gomodules/setup.py | pytogo/setuptools-golang | 953b869874ad45e4929287119912a22885993fc4 | [
"MIT"
] | 18 | 2016-07-12T21:08:25.000Z | 2022-03-22T09:15:40.000Z | from setuptools import Extension
from setuptools import setup
setup(
name='gomod',
ext_modules=[Extension('gomodules', ['reversemsg.go'])],
build_golang={
'root': 'github.com/asottile/setuptools-golang/testing/gomodules',
},
# Would do this, but we're testing *our* implementation and this would
# install from pypi. We can rely on setuptools-golang being already
# installed under test.
# setup_requires=['setuptools-golang'],
)
| 29.5625 | 74 | 0.697674 |
793f41ce42c72b98acbc170a04df212c8b9a1f03 | 12,518 | py | Python | modules/dbnd/src/dbnd/_core/tracking/script_tracking_manager.py | databand-ai/dbnd | 0370409e38773be8812088622953a3aa306eb564 | [
"Apache-2.0"
] | 224 | 2020-01-02T10:46:37.000Z | 2022-03-02T13:54:08.000Z | modules/dbnd/src/dbnd/_core/tracking/script_tracking_manager.py | databand-ai/dbnd | 0370409e38773be8812088622953a3aa306eb564 | [
"Apache-2.0"
] | 16 | 2020-03-11T09:37:58.000Z | 2022-01-26T10:22:08.000Z | modules/dbnd/src/dbnd/_core/tracking/script_tracking_manager.py | databand-ai/dbnd | 0370409e38773be8812088622953a3aa306eb564 | [
"Apache-2.0"
] | 24 | 2020-03-24T13:53:50.000Z | 2022-03-22T11:55:18.000Z | import atexit
import logging
import os
import sys
import typing
from subprocess import list2cmdline
from typing import Optional
from dbnd._core.configuration import get_dbnd_project_config
from dbnd._core.configuration.config_value import ConfigValuePriority
from dbnd._core.configuration.dbnd_config import config
from dbnd._core.configuration.environ_config import try_get_script_name
from dbnd._core.constants import RunState, TaskRunState, UpdateSource
from dbnd._core.context.databand_context import new_dbnd_context
from dbnd._core.current import is_verbose, try_get_databand_run
from dbnd._core.parameter.parameter_value import Parameters
from dbnd._core.run.databand_run import new_databand_run
from dbnd._core.settings import TrackingConfig
from dbnd._core.task.tracking_task import TrackingTask
from dbnd._core.task_build.task_definition import TaskDefinition
from dbnd._core.task_build.task_passport import TaskPassport
from dbnd._core.task_build.task_source_code import TaskSourceCode
from dbnd._core.task_run.task_run import TaskRun
from dbnd._core.task_run.task_run_error import TaskRunError
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
build_run_time_airflow_task,
override_airflow_log_system_for_tracking,
)
from dbnd._core.tracking.managers.callable_tracking import _handle_tracking_error
from dbnd._core.utils import seven
from dbnd._core.utils.airflow_utils import get_project_name_from_airflow_tags
from dbnd._core.utils.timezone import utcnow
from dbnd._vendor import pendulum
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from dbnd._core.context.databand_context import DatabandContext
from dbnd._core.run.databand_run import DatabandRun
T = typing.TypeVar("T")
def set_tracking_config_overide(airflow_context=None, use_dbnd_log=None):
    # Create a proper DatabandContext so we can create other objects.
    # There should be no orchestration tasks.
    # However, let's disable any orchestration side effects.
config_for_tracking = {
"run": {
"skip_completed": False,
"skip_completed_on_run": False,
"validate_task_inputs": False,
"validate_task_outputs": False,
}, # we don't want to "check" as script is task_version="now"
"task": {"task_in_memory_outputs": True}, # do not save any outputs
"core": {"tracker_raise_on_error": False}, # do not fail on tracker errors
}
if airflow_context:
import pytz
task_target_date = pendulum.parse(
airflow_context.execution_date, tz=pytz.UTC
).date()
use_dbnd_log = override_airflow_log_system_for_tracking()
config_for_tracking["task"]["task_target_date"] = task_target_date
if use_dbnd_log is not None:
config_for_tracking["log"] = {"disabled": not use_dbnd_log}
return config.set_values(
config_values=config_for_tracking,
priority=ConfigValuePriority.OVERRIDE,
source="dbnd_tracking_config",
)
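# Rough lifecycle sketch (illustrative, based only on the code in this
# module): a tracking entry point creates a single _DbndScriptTrackingManager,
# calls start() once when the script or Airflow task begins, and relies on the
# registered atexit / patched os._exit hooks (or an explicit stop()) to close
# the run; start() returns early if a run is already active.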
class _DbndScriptTrackingManager(object):
def __init__(self):
self._context_managers = []
self._atexit_registered = False
self._active = False
self._run = None
self._task_run = None
def _enter_cm(self, cm):
# type: (typing.ContextManager[T]) -> T
# else contextManagers are getting closed sometimes :(
val = cm.__enter__()
self._context_managers.append(cm)
return val
def _close_all_context_managers(self):
while self._context_managers:
cm = self._context_managers.pop()
cm.__exit__(None, None, None)
def update_run_from_airflow_context(self, airflow_context):
if not airflow_context or not airflow_context.context:
return
dag = airflow_context.context.get("dag", None)
if not dag:
return
dag_tags = getattr(dag, "tags", [])
project_name = get_project_name_from_airflow_tags(dag_tags)
airflow_user = airflow_context.context["dag"].owner
if project_name:
self._run.project_name = project_name
if airflow_user:
self._run.context.task_run_env.user = airflow_user
def start(self, root_task_name=None, airflow_context=None):
if self._run or self._active or try_get_databand_run():
return
        # we should probably use only the airflow context passed in via parameter.
        # also, there are mocks that cover only get_dbnd_project_config().airflow_context
airflow_context = airflow_context or get_dbnd_project_config().airflow_context()
if airflow_context:
_set_dbnd_config_from_airflow_connections()
set_tracking_config_overide(use_dbnd_log=True, airflow_context=airflow_context)
dc = self._enter_cm(
new_dbnd_context(name="inplace_tracking")
) # type: DatabandContext
if not root_task_name:
# extract the name of the script we are running
root_task_name = sys.argv[0].split(os.path.sep)[-1]
if airflow_context:
root_task, job_name, source, run_uid = build_run_time_airflow_task(
airflow_context, root_task_name
)
try_number = airflow_context.try_number
else:
root_task = _build_inline_root_task(root_task_name)
job_name = root_task.task_name
source = UpdateSource.generic_tracking
run_uid = None
try_number = 1
tracking_source = (
None # TODO_CORE build tracking_source -> typeof TrackingSourceSchema
)
self._run = run = self._enter_cm(
new_databand_run(
context=dc,
job_name=job_name,
run_uid=run_uid,
source=source,
af_context=airflow_context,
tracking_source=tracking_source,
)
) # type: DatabandRun
self._run.root_task = root_task
self.update_run_from_airflow_context(airflow_context)
if not self._atexit_registered:
_set_process_exit_handler(self.stop)
self._atexit_registered = True
sys.excepthook = self.stop_on_exception
self._active = True
# now we send data to DB
root_task_run = run._build_and_add_task_run(
root_task, task_af_id=root_task.task_name, try_number=try_number
)
root_task_run.is_root = True
run.tracker.init_run()
run.root_task_run.set_task_run_state(TaskRunState.RUNNING)
should_capture_log = TrackingConfig.current().capture_tracking_log
self._enter_cm(
run.root_task_run.runner.task_run_execution_context(
capture_log=should_capture_log
)
)
self._task_run = run.root_task_run
return self._task_run
def stop(self):
if not self._active:
return
self._active = False
try:
databand_run = self._run
root_tr = self._task_run
root_tr.finished_time = utcnow()
if root_tr.task_run_state not in TaskRunState.finished_states():
for tr in databand_run.task_runs:
if tr.task_run_state == TaskRunState.FAILED:
root_tr.set_task_run_state(TaskRunState.UPSTREAM_FAILED)
break
else:
root_tr.set_task_run_state(TaskRunState.SUCCESS)
if root_tr.task_run_state == TaskRunState.SUCCESS:
databand_run.set_run_state(RunState.SUCCESS)
else:
databand_run.set_run_state(RunState.FAILED)
self._close_all_context_managers()
except Exception:
_handle_tracking_error("dbnd-tracking-shutdown")
def stop_on_exception(self, type, value, traceback):
if self._active:
try:
error = TaskRunError.build_from_ex(
ex=value, task_run=self._task_run, exc_info=(type, value, traceback)
)
self._task_run.set_task_run_state(TaskRunState.FAILED, error=error)
            except Exception:
_handle_tracking_error("dbnd-set-script-error")
self.stop()
sys.__excepthook__(type, value, traceback)
def _set_process_exit_handler(handler):
atexit.register(handler)
# https://docs.python.org/3/library/atexit.html
# The functions registered via this module are not called when the program
# is killed by a signal not handled by Python, when a Python fatal internal
# error is detected, or when os._exit() is called.
# ^^^^^^^^^^^^^^^^^^^^^^^^^
# and os._exit is the one used by airflow (and maybe other libraries)
# so we'd like to monkey-patch os._exit to stop dbnd inplace run manager
original_os_exit = os._exit
def _dbnd_os_exit(*args, **kwargs):
try:
handler()
finally:
original_os_exit(*args, **kwargs)
os._exit = _dbnd_os_exit
def _build_inline_root_task(root_task_name):
# create "root task" with default name as current process executable file name
task_definition = TaskDefinition(
task_passport=TaskPassport.from_module(
TrackingTask.__module__
), # we need to fix that
source_code=TaskSourceCode.from_callstack(),
)
root_task = TrackingTask(
task_name=root_task_name,
task_definition=task_definition,
task_params=Parameters(source="inline_root_task", param_values=[]),
)
root_task.ctrl.task_repr.task_command_line = list2cmdline(sys.argv)
root_task.ctrl.task_repr.task_functional_call = "bash_cmd(args=%s)" % repr(sys.argv)
return root_task
def try_get_inplace_tracking_task_run():
    # type: () -> Optional[TaskRun]
if get_dbnd_project_config().is_tracking_mode():
return dbnd_tracking_start()
# there can be only one tracking manager
_dbnd_script_manager = None # type: Optional[_DbndScriptTrackingManager]
def dbnd_tracking_start(name=None, airflow_context=None):
"""
    Starts the handler that tracks the currently running script.
    Will not start a new one if a script manager already exists.
    @param name: can be used to name the run
    @param airflow_context: injects the airflow context into the run, meaning we are tracking an airflow execution
"""
dbnd_project_config = get_dbnd_project_config()
if dbnd_project_config.disabled:
# we are not tracking if dbnd is disabled
return None
if name is None:
name = try_get_script_name()
global _dbnd_script_manager
if not _dbnd_script_manager:
# setting the context to tracking to prevent conflicts from dbnd orchestration
dbnd_project_config._dbnd_tracking = True
dsm = _DbndScriptTrackingManager()
try:
dsm.start(name, airflow_context)
if dsm._active:
_dbnd_script_manager = dsm
except Exception:
_handle_tracking_error("dbnd-tracking-start")
# disabling the project so we don't start any new handler in this execution
dbnd_project_config.disabled = True
return None
if _dbnd_script_manager and _dbnd_script_manager._active:
        # this is the root task run of the tracking; it represents the script context.
return _dbnd_script_manager._task_run
def dbnd_tracking_stop():
"""
Stops and clears the script tracking if exists
"""
global _dbnd_script_manager
if _dbnd_script_manager:
_dbnd_script_manager.stop()
_dbnd_script_manager = None
@seven.contextlib.contextmanager
def dbnd_tracking(name=None, conf=None):
# type: (...) -> TaskRun
try:
with config(config_values=conf, source="tracking context"):
tr = dbnd_tracking_start(name=name)
yield tr
finally:
dbnd_tracking_stop()
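# A typical call site, sketched with illustrative names only (assumes dbnd is
# configured to reach a tracker):
#
#     with dbnd_tracking(name="prepare_dataset") as task_run:
#         ...  # user code; metrics and logs are reported under this root task run
#
# dbnd_tracking_start() / dbnd_tracking_stop() provide the same behaviour when a
# context manager does not fit the calling code.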
def _set_dbnd_config_from_airflow_connections():
""" Set Databand config from Extra section in Airflow dbnd_config connection. """
try:
from dbnd_airflow.tracking.dbnd_airflow_conf import (
set_dbnd_config_from_airflow_connections,
)
set_dbnd_config_from_airflow_connections()
except ImportError:
logger.info(
"dbnd_airflow is not installed. Config will not load from Airflow Connections"
)
| 34.772222 | 118 | 0.679661 |
793f420c1d852fcb66de2fbd169c54bf7364ba2f | 22,183 | py | Python | source/gui/ui_main_window.py | playboy8/starquant | 21ff009167251209492e61ce1cb523e141946127 | [
"Apache-2.0"
] | 322 | 2019-04-03T15:31:46.000Z | 2022-03-21T13:32:06.000Z | source/gui/ui_main_window.py | synasy/starquant | c00cad64d1de2da05081b3dc320ef264c6295e08 | [
"Apache-2.0"
] | 11 | 2019-04-03T15:32:09.000Z | 2021-12-19T13:14:58.000Z | source/gui/ui_main_window.py | synasy/starquant | c00cad64d1de2da05081b3dc320ef264c6295e08 | [
"Apache-2.0"
] | 110 | 2019-04-03T15:54:58.000Z | 2022-03-25T09:26:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from queue import Queue
from PyQt5 import QtCore, QtWidgets, QtGui
from datetime import datetime
import requests
import itchat
from source.common.constant import EventType
import pyqtgraph.console
# from source.trade.order_manager import OrderManager
from source.trade.risk_manager import PassThroughRiskManager
from source.engine.iengine import EventEngine
from source.common.client_mq import ClientMq
from .ui_common_widget import (
RecorderManager,
ContractManager,
StatusThread,
CsvLoaderWidget,
DataDownloaderWidget,
AboutWidget,
WebWindow,
GlobalDialog,
TextEditDialog
)
from .ui_monitors import (
MarketMonitor,
OrderMonitor,
TradeMonitor,
PositionMonitor,
AccountMonitor,
LogMonitor
)
from .ui_strategy_window import CtaManager
from .ui_manual_window import ManualWindow
from .ui_bt_setting import BacktesterManager
from .ui_dataview import MarketDataView
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, config_server, config_client, lang_dict):
super().__init__()
# member variables
self._current_time = None
self._config_server = config_server
self._config_client = config_client
self._symbols = config_server['tickers']
self._lang_dict = lang_dict
self._font = lang_dict['font']
self._widget_dict = {}
self.central_widget = None
# self.central_widget = QtWidgets.QStackedWidget()
self.market_window = None
self.message_window = None
self.order_window = None
self.fill_window = None
self.position_window = None
self.closeposition_window = None
self.account_window = None
self.strategy_window = None
self.manualorderid = 0
        # 0. order_manager; some of the UI windows use order_manager
# self._order_manager = OrderManager()
# 1. event engine
# outgoing queue from client side
self._outgoing_queue = Queue()
self._events_engine = EventEngine() # update ui
        # TODO: add task scheduler; produce result_packet
self._flowrate_timer = QtCore.QTimer()
# 5. risk manager and compliance manager
self.risk_manager = PassThroughRiskManager()
# 7 portfolio manager and position manager
self.contract_manager = ContractManager()
self.recorder_manager = RecorderManager(
contracts=self.contract_manager.contracts)
self.recorder_manager.signal_recorder_out.connect(
self._outgoing_general_request_handler)
self.data_downloader = DataDownloaderWidget()
self.pgconsole = pyqtgraph.console.ConsoleWidget()
self.pgconsole.setStyleSheet("background-color: #CCE8CF; color:black")
# 8. client mq
self._client_mq = ClientMq(
self._config_server, self._events_engine, self._outgoing_queue)
# 1. set up gui windows
self.setGeometry(50, 50, 850, 650)
self.setWindowTitle('StarQuant')
self.setWindowIcon(QtGui.QIcon("source/gui/image/star.png"))
self.init_menu()
self.init_status_bar()
self.init_central_area()
# 9. wire up event handlers
self._events_engine.register(EventType.TICK, self._tick_event_handler)
self._events_engine.register(
EventType.ORDERSTATUS, self._order_status_event_handler)
self._events_engine.register(EventType.FILL, self._fill_event_handler)
self._events_engine.register(
EventType.POSITION, self._position_event_handler)
self._events_engine.register(
EventType.ACCOUNT, self._account_event_handler)
self._events_engine.register(
EventType.CONTRACT, self._contract_event_handler)
self._events_engine.register(
EventType.HISTORICAL, self._historical_event_handler)
self._events_engine.register(EventType.INFO, self._info_event_handler)
self._events_engine.register(
EventType.STRATEGY_CONTROL, self._strategy_control_event_handler)
self._events_engine.register(
EventType.ENGINE_CONTROL, self._engine_control_event_handler)
self._events_engine.register(
EventType.RECORDER_CONTROL, self._recorder_control_event_handler)
self._events_engine.register(
EventType.ORDER, self._outgoing_order_request_handler)
self._events_engine.register(
EventType.QRY, self._outgoing_qry_request_handler)
self._events_engine.register(
EventType.SUBSCRIBE, self._outgoing_general_request_handler)
self._events_engine.register(
EventType.GENERAL_REQ, self._outgoing_general_request_handler)
# timer event to reset riskmanager flow rate count
self._flowrate_timer.timeout.connect(self.risk_manager.reset)
# 10. start
self._events_engine.start()
self._client_mq.start()
self._flowrate_timer.start(5000)
#################################################################################################
# -------------------------------- Event Handler --------------------------------------------#
#################################################################################################
def _tick_event_handler(self, tick_event):
self.dataviewindow.tick_signal.emit(tick_event)
self._current_time = tick_event.data.timestamp
# self._order_manager.on_tick(tick_event) # check standing stop orders
def _order_status_event_handler(self, order_status_event): # including cancel
pass
def _fill_event_handler(self, fill_event):
try:
trade = fill_event.data
msg = f"{trade.full_symbol}: ({trade.direction.value},{trade.offset.value}),({trade.price},{trade.volume})"
itchat.send_msg(msg, 'filehelper')
        except Exception:
            pass  # the WeChat notification is best-effort; ignore failures
def _position_event_handler(self, position_event):
pass
def _account_event_handler(self, account_event):
pass
def _contract_event_handler(self, contract_event):
contract = contract_event.data
self.contract_manager.on_contract(contract)
def _historical_event_handler(self, historical_event):
pass
# print(historical_event)
def _strategy_control_event_handler(self, sc_event):
self.ctastrategywindow.signal_strategy_in.emit(sc_event)
def _engine_control_event_handler(self, ec_event):
self.manual_widget.updateapistatusdict(ec_event)
def _recorder_control_event_handler(self, rc_event):
self.recorder_manager.signal_recorder_update.emit(rc_event)
def _info_event_handler(self, info_event):
pass
# self.log_window.msg_signal.emit(info_event)
# ----------------------------------------outgoing event ------------------------------------
def _outgoing_order_request_handler(self, o):
"""
        Process the outgoing order request `o`: check it against the risk manager and compliance manager before sending.
"""
self.risk_manager.order_in_compliance(
o) # order pointer; modify order directly
if (self.risk_manager.passorder()):
# self._order_manager.on_order(o)
# self.order_window.
msg = o.serialize()
print('client send msg: ' + msg, datetime.now())
# print('client send msg: ' + msg)
# text = o.destination + o.source + str(o.clientID)
# requests.get('https://sc.ftqq.com/SCU49995T54cd0bf4d42dd8448359347830d62bd85cc3f69d085ee.send?text=%s &desp=%s'%(text,msg))
self._outgoing_queue.put(msg)
def _outgoing_qry_request_handler(self, qry):
if (self.risk_manager.passquery()):
msg = qry.serialize()
print('client send msg: ' + msg)
self._outgoing_queue.put(msg)
def _outgoing_general_request_handler(self, gr):
msg = gr.serialize()
print('client send msg: ' + msg)
self._outgoing_queue.put(msg)
#################################################################################################
# ------------------------------ Event Handler Ends --------------------------------------------#
#################################################################################################
#################################################################################################
# -------------------------------- User Interface --------------------------------------------#
#################################################################################################
def set_font(self, font):
self._font = font
def displaytrade(self):
self.central_widget.setCurrentIndex(0)
def displaybacktest(self):
self.central_widget.setCurrentIndex(1)
def init_menu(self):
menubar = self.menuBar()
# sys menu --
sysMenu = menubar.addMenu('File')
editsettingAction = QtWidgets.QAction('Setting', self)
editsettingAction.setStatusTip('edit python setting')
editsettingAction.triggered.connect(self.edit_client_setting)
sysMenu.addAction(editsettingAction)
editfileAction = QtWidgets.QAction('view/edit', self)
editfileAction.setStatusTip('edit server config...')
editfileAction.triggered.connect(self.file_edit)
sysMenu.addAction(editfileAction)
# --exit
sysMenu.addSeparator()
sys_exitAction = QtWidgets.QAction('Exit', self)
sys_exitAction.setShortcut('Ctrl+Q')
sys_exitAction.setStatusTip('Exit GUI')
sys_exitAction.triggered.connect(self.close)
sysMenu.addAction(sys_exitAction)
# mode menu
modeMenu = menubar.addMenu('Mode')
mode_tradeAction = QtWidgets.QAction('Trade', self)
mode_tradeAction.triggered.connect(self.displaytrade)
modeMenu.addAction(mode_tradeAction)
mode_backtestAction = QtWidgets.QAction('Backtest', self)
mode_backtestAction.triggered.connect(self.displaybacktest)
modeMenu.addAction(mode_backtestAction)
# tool menu
toolMenu = menubar.addMenu('Tools')
tool_recorder = QtWidgets.QAction('Data Recorder', self)
tool_recorder.triggered.connect(self.recorder_manager.show)
toolMenu.addAction(tool_recorder)
tool_csvloader = QtWidgets.QAction('Data Loader', self)
tool_csvloader.triggered.connect(self.opencsvloader)
toolMenu.addAction(tool_csvloader)
tool_datadownloader = QtWidgets.QAction('Data Downloader', self)
tool_datadownloader.triggered.connect(self.data_downloader.show)
toolMenu.addAction(tool_datadownloader)
tool_pgconsole = QtWidgets.QAction('Python Console', self)
tool_pgconsole.triggered.connect(self.pgconsole.show)
toolMenu.addAction(tool_pgconsole)
# view menu
viewMenu = menubar.addMenu('View')
viewManual = QtWidgets.QAction(
'Manual Control Center', self, checkable=True)
viewManual.setChecked(True)
viewManual.triggered.connect(self.toggleviewmanual)
viewMenu.addAction(viewManual)
viewMarketMonitor = QtWidgets.QAction(
'Market Monitor', self, checkable=True)
viewMarketMonitor.setChecked(True)
viewMarketMonitor.triggered.connect(self.toggleviewMarketMonitor)
viewMenu.addAction(viewMarketMonitor)
viewTradeMonitor = QtWidgets.QAction(
'Trade Monitor', self, checkable=True)
viewTradeMonitor.setChecked(True)
viewTradeMonitor.triggered.connect(self.toggleviewTradeMonitor)
viewMenu.addAction(viewTradeMonitor)
viewMarketChart = QtWidgets.QAction(
'Market Chart', self, checkable=True)
viewMarketChart.setChecked(True)
viewMarketChart.triggered.connect(self.toggleviewMarketChart)
viewMenu.addAction(viewMarketChart)
viewCtaManager = QtWidgets.QAction(
'Strategy Manager', self, checkable=True)
viewCtaManager.setChecked(True)
viewCtaManager.triggered.connect(self.toggleviewCtaManager)
viewMenu.addAction(viewCtaManager)
viewBtSetting = QtWidgets.QAction(
'Backtest Setting', self, checkable=True)
viewBtSetting.setChecked(True)
viewBtSetting.triggered.connect(self.toggleviewBtSetting)
viewMenu.addAction(viewBtSetting)
viewBtTopM = QtWidgets.QAction(
'Backtest Details', self, checkable=True)
viewBtTopM.setChecked(True)
viewBtTopM.triggered.connect(self.toggleviewBtTopM)
viewMenu.addAction(viewBtTopM)
viewBtBottomM = QtWidgets.QAction(
'Backtest QuotesChart', self, checkable=True)
viewBtBottomM.setChecked(True)
viewBtBottomM.triggered.connect(self.toggleviewBtBottomM)
viewMenu.addAction(viewBtBottomM)
# help menu
helpMenu = menubar.addMenu('Help')
help_contractaction = QtWidgets.QAction('Query Contracts', self)
help_contractaction.triggered.connect(self.contract_manager.show)
helpMenu.addAction(help_contractaction)
help_webaction = QtWidgets.QAction('Web/Jupyter Notebook', self)
help_webaction.triggered.connect(self.openweb)
helpMenu.addAction(help_webaction)
help_action = QtWidgets.QAction('About', self)
help_action.triggered.connect(self.openabout)
helpMenu.addAction(help_action)
def toggleviewmanual(self, state):
if state:
self.dockmanual.setVisible(True)
else:
self.dockmanual.hide()
def toggleviewMarketMonitor(self, state):
if state:
self.market_window.setVisible(True)
else:
self.market_window.hide()
def toggleviewTradeMonitor(self, state):
if state:
self.bottomleft.setVisible(True)
else:
self.bottomleft.hide()
def toggleviewMarketChart(self, state):
if state:
self.dataviewindow.setVisible(True)
else:
self.dataviewindow.hide()
def toggleviewCtaManager(self, state):
if state:
self.bottomright.setVisible(True)
else:
self.bottomright.hide()
def toggleviewBtSetting(self, state):
if state:
self.backtestwidget.bt_setting.setVisible(True)
else:
self.backtestwidget.bt_setting.hide()
def toggleviewBtTopM(self, state):
if state:
self.backtestwidget.bt_topmiddle.setVisible(True)
else:
self.backtestwidget.bt_topmiddle.hide()
def toggleviewBtBottomM(self, state):
if state:
self.backtestwidget.bt_bottommiddle.setVisible(True)
else:
self.backtestwidget.bt_bottommiddle.hide()
def file_edit(self):
filename, _ = QtWidgets.QFileDialog.getOpenFileName(
self, 'open file', 'etc/')
print(filename)
if not filename:
return
a = TextEditDialog(filename)
a.exec()
def opencsvloader(self):
try:
self._widget_dict['csvloader'].show()
except KeyError:
self._widget_dict['csvloader'] = CsvLoaderWidget()
self._widget_dict['csvloader'].show()
def edit_client_setting(self):
"""
"""
dialog = GlobalDialog()
dialog.exec_()
def openabout(self):
try:
self._widget_dict['about'].show()
except KeyError:
self._widget_dict['about'] = AboutWidget(self)
self._widget_dict['about'].show()
def openweb(self):
try:
self._widget_dict['web'].show()
except KeyError:
self._widget_dict['web'] = WebWindow()
self._widget_dict['web'].show()
def closeEvent(self, a0: QtGui.QCloseEvent):
print('closing main window')
self._events_engine.stop()
self._client_mq.stop()
def init_status_bar(self):
self.statusthread = StatusThread()
self.statusthread.status_update.connect(self.update_status_bar)
self.statusthread.start()
def update_status_bar(self, message):
self.statusBar().showMessage(message)
def init_central_area(self):
self.central_widget = QtWidgets.QStackedWidget()
# -------Trade Widgets----------
tradewidget = QtWidgets.QWidget()
hbox = QtWidgets.QHBoxLayout()
# -------------------------------- Top Left ------------------------------------------#
# topleft = MarketWindow(self._symbols, self._lang_dict)
topleft = MarketMonitor(self._events_engine)
self.market_window = topleft
# -------------------------------- bottom Left ------------------------------------------#
bottomleft = QtWidgets.QTabWidget()
bottomleft.setFont(self._font)
tab1 = QtWidgets.QWidget()
tab2 = QtWidgets.QWidget()
tab3 = QtWidgets.QWidget()
tab4 = QtWidgets.QWidget()
# tab5 = QtWidgets.QWidget()
tab6 = QtWidgets.QWidget()
bottomleft.addTab(tab1, self._lang_dict['Log'])
bottomleft.addTab(tab2, self._lang_dict['Order'])
bottomleft.addTab(tab3, self._lang_dict['Fill'])
bottomleft.addTab(tab4, self._lang_dict['Position'])
# bottomleft.addTab(tab5, self._lang_dict['ClosePosition'])
bottomleft.addTab(tab6, self._lang_dict['Account'])
# self.log_window = LogWindow(self._lang_dict)
self.log_window = LogMonitor(self._events_engine)
tab1_layout = QtWidgets.QVBoxLayout()
tab1_layout.addWidget(self.log_window)
tab1.setLayout(tab1_layout)
self.order_window = OrderMonitor(self._events_engine)
tab2_layout = QtWidgets.QVBoxLayout()
tab2_layout.addWidget(self.order_window)
tab2.setLayout(tab2_layout)
self.fill_window = TradeMonitor(self._events_engine)
tab3_layout = QtWidgets.QVBoxLayout()
tab3_layout.addWidget(self.fill_window)
tab3.setLayout(tab3_layout)
self.position_window = PositionMonitor(self._events_engine)
tab4_layout = QtWidgets.QVBoxLayout()
tab4_layout.addWidget(self.position_window)
tab4.setLayout(tab4_layout)
# self.closeposition_window = ClosePositionWindow(self._lang_dict)
# tab5_layout = QtWidgets.QVBoxLayout()
# tab5_layout.addWidget(self.closeposition_window)
# tab5.setLayout(tab5_layout)
self.account_window = AccountMonitor(self._events_engine)
tab6_layout = QtWidgets.QVBoxLayout()
tab6_layout.addWidget(self.account_window)
tab6.setLayout(tab6_layout)
self.bottomleft = bottomleft
# -------------------------------- bottom right ------------------------------------------#
bottomright = QtWidgets.QFrame()
bottomright.setFrameShape(QtWidgets.QFrame.StyledPanel)
bottomright.setFont(self._font)
strategy_manager_layout = QtWidgets.QFormLayout()
self.ctastrategywindow = CtaManager()
self.ctastrategywindow.signal_strategy_out.connect(
self._outgoing_general_request_handler)
strategy_manager_layout.addRow(QtWidgets.QLabel('Strategy Manager'))
strategy_manager_layout.addWidget(self.ctastrategywindow)
bottomright.setLayout(strategy_manager_layout)
self.bottomright = bottomright
# --------------------------------------------------------------------------------------#
self.dataviewindow = MarketDataView()
self.market_window.symbol_signal.connect(
self.dataviewindow.symbol_signal.emit)
splitter1 = QtWidgets.QSplitter(QtCore.Qt.Vertical)
splitter1.addWidget(topleft)
splitter1.addWidget(bottomleft)
splitter1.setSizes([500, 500])
splitter2 = QtWidgets.QSplitter(QtCore.Qt.Vertical)
splitter2.addWidget(self.dataviewindow)
splitter2.addWidget(bottomright)
splitter2.setSizes([500, 500])
splitter3 = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
splitter3.addWidget(splitter1)
splitter3.addWidget(splitter2)
splitter3.setSizes([600, 600])
hbox.addWidget(splitter3)
tradewidget.setLayout(hbox)
# ---------Backtest ----------------------------------------
self.backtestwidget = BacktesterManager(self._events_engine)
# --------------------mainwindow----------------------
manualwidget = ManualWindow(self._config_server['gateway'])
manualwidget.order_signal.connect(self._outgoing_order_request_handler)
manualwidget.qry_signal.connect(self._outgoing_qry_request_handler)
manualwidget.manual_req.connect(self._outgoing_queue.put)
manualwidget.subscribe_signal.connect(
self._outgoing_general_request_handler)
manualwidget.cancelall_signal.connect(
self._outgoing_general_request_handler)
self.manual_widget = manualwidget
dockmanual = QtWidgets.QDockWidget('Manual Control Center', self)
dockmanual.setFeatures(
QtWidgets.QDockWidget.DockWidgetFloatable | QtWidgets.QDockWidget.DockWidgetMovable)
# dockmanual.setFloating(True)
dockmanual.setAllowedAreas(
QtCore.Qt.RightDockWidgetArea | QtCore.Qt.LeftDockWidgetArea)
dockmanual.setWidget(manualwidget)
self.dockmanual = dockmanual
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, dockmanual)
self.central_widget.addWidget(tradewidget)
self.central_widget.addWidget(self.backtestwidget)
self.central_widget.setCurrentIndex(0)
self.setCentralWidget(self.central_widget)
#################################################################################################
# ------------------------------ User Interface End --------------------------------------------#
#################################################################################################
| 40.186594 | 137 | 0.629807 |
793f422e84f7b52e1886f562ffa9bef115e9eca2 | 13,086 | py | Python | UnsupervisedMT/NMT/src/utils.py | zsl-nlp/DeepOffense-Unsupervised | 3c6f18eefd1d7302ba2c77eeb1a3ab47d7991fc4 | [
"RSA-MD"
] | null | null | null | UnsupervisedMT/NMT/src/utils.py | zsl-nlp/DeepOffense-Unsupervised | 3c6f18eefd1d7302ba2c77eeb1a3ab47d7991fc4 | [
"RSA-MD"
] | null | null | null | UnsupervisedMT/NMT/src/utils.py | zsl-nlp/DeepOffense-Unsupervised | 3c6f18eefd1d7302ba2c77eeb1a3ab47d7991fc4 | [
"RSA-MD"
] | null | null | null | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import re
import sys
import pickle
import random
import inspect
import argparse
import subprocess
from logging import getLogger
import numpy as np
import torch
from torch import optim
from .logger import create_logger
from .data.dictionary import EOS_WORD, UNK_WORD
from .adam_inverse_sqrt_with_warmup import AdamInverseSqrtWithWarmup
logger = getLogger()
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
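# A minimal usage sketch (the helper below is hypothetical and not used anywhere
# else in this module): with argparse, bool_flag lets boolean options accept
# on/off, true/false and 0/1 on the command line.
def _example_bool_flag_usage():
    parser = argparse.ArgumentParser()
    parser.add_argument("--share_encdec_emb", type=bool_flag, default=False)
    args = parser.parse_args(["--share_encdec_emb", "true"])
    assert args.share_encdec_emb is True
    return args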
def initialize_exp(params, logger_filename='train.log'):
"""
Initialize the experience:
- dump parameters
- create a logger
- set the random seed
"""
# dump parameters
get_dump_path(params)
pickle.dump(params, open(os.path.join(params.dump_path, 'params.pkl'), 'wb'))
# get running command
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith('--'):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
command.append("'%s'" % x)
command = ' '.join(command)
params.command = command + ' --exp_id "%s"' % params.exp_id
# random seed
if params.seed >= 0:
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.cuda.manual_seed(params.seed)
# environment variables
if 'pivo_directions' in params and len(params.pivo_directions) > 0:
os.environ["OMP_NUM_THREADS"] = "2"
os.environ["MKL_NUM_THREADS"] = "2"
# create a logger
logger = create_logger(os.path.join(params.dump_path, logger_filename))
logger.info('============ Initialized logger ============')
logger.info('\n'.join('%s: %s' % (k, str(v))
for k, v in sorted(dict(vars(params)).items())))
logger.info('The experiment will be stored in %s\n' % params.dump_path)
logger.info('Running command: %s\n' % params.command)
return logger
def get_dump_path(params):
"""
Create a directory to store the experiment.
"""
assert len(params.exp_name) > 0
dump_path = './' if params.dump_path == '' else params.dump_path
subprocess.Popen("mkdir -p %s" % dump_path, shell=True).wait()
assert os.path.isdir(dump_path)
# create the sweep path if it does not exist
sweep_path = os.path.join(dump_path, params.exp_name)
if not os.path.exists(sweep_path):
subprocess.Popen("mkdir %s" % sweep_path, shell=True).wait()
# create an ID for the job if it is not given in the parameters.
    # if we run on a cluster, the job ID comes from Chronos (or SLURM).
# otherwise, it is randomly generated
if params.exp_id == '':
exp_id = os.environ.get('CHRONOS_JOB_ID')
if exp_id is None:
exp_id = os.environ.get('SLURM_JOB_ID')
if exp_id is None:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
while True:
exp_id = ''.join(random.choice(chars) for _ in range(10))
if not os.path.isdir(os.path.join(sweep_path, exp_id)):
break
else:
assert exp_id.isdigit()
params.exp_id = exp_id
else:
assert os.path.isdir(os.path.join(sweep_path, params.exp_id)) # reload an experiment
# create the dump folder / update parameters
params.dump_path = os.path.join(sweep_path, params.exp_id)
if not os.path.isdir(params.dump_path):
subprocess.Popen("mkdir %s" % params.dump_path, shell=True).wait()
def get_optimizer(parameters, s):
"""
Parse optimizer parameters.
Input should be of the form:
- "sgd,lr=0.01"
- "adagrad,lr=0.1,lr_decay=0.05"
"""
if "," in s:
method = s[:s.find(',')]
optim_params = {}
for x in s[s.find(',') + 1:].split(','):
split = x.split('=')
assert len(split) == 2
assert re.match("^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
optim_params[split[0]] = float(split[1])
else:
method = s
optim_params = {}
if method == 'adadelta':
optim_fn = optim.Adadelta
elif method == 'adagrad':
optim_fn = optim.Adagrad
elif method == 'adam':
optim_fn = optim.Adam
optim_params['betas'] = (optim_params.get('beta1', 0.5), optim_params.get('beta2', 0.999))
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
elif method == 'adamax':
optim_fn = optim.Adamax
elif method == 'asgd':
optim_fn = optim.ASGD
elif method == 'rmsprop':
optim_fn = optim.RMSprop
elif method == 'rprop':
optim_fn = optim.Rprop
elif method == 'sgd':
optim_fn = optim.SGD
assert 'lr' in optim_params
elif method == 'adam_inverse_sqrt':
optim_fn = AdamInverseSqrtWithWarmup
optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.98))
optim_params['warmup_updates'] = optim_params.get('warmup_updates', 4000)
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
else:
raise Exception('Unknown optimization method: "%s"' % method)
# check that we give good parameters to the optimizer
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]  # also handles keyword-only arguments
assert expected_args[:2] == ['self', 'params']
if not all(k in expected_args[2:] for k in optim_params.keys()):
raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
str(expected_args[2:]), str(optim_params.keys())))
return optim_fn(parameters, **optim_params)
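# A minimal usage sketch (the helper below is hypothetical and not used anywhere
# else in this module): optimizer strings follow the "method,key=value,..." format
# parsed above.
def _example_get_optimizer_usage():
    layer = torch.nn.Linear(16, 16)  # throwaway module, for illustration only
    optimizer = get_optimizer(layer.parameters(), "adam,lr=0.0003,beta1=0.9,beta2=0.98")
    assert isinstance(optimizer, optim.Adam)
    return optimizer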
def reload_parameters(old_params, new_params, attributes):
"""
Reload the parameters of a previous model.
"""
for k, v in old_params.__dict__.items():
if k in attributes and k not in new_params:
setattr(new_params, k, v)
def reload_model(model, to_reload, attributes):
"""
Reload a previously trained model.
"""
# check parameters sizes
model_params = set(model.state_dict().keys())
to_reload_params = set(to_reload.state_dict().keys())
assert model_params == to_reload_params, (model_params - to_reload_params,
to_reload_params - model_params)
# check attributes
warnings = []
errors = []
for k in attributes:
assert type(k) is tuple or type(k) is str
k, strict = k if type(k) is tuple else (k, True)
if getattr(model, k, None) is None:
errors.append('- Attribute "%s" not found in the current model' % k)
if getattr(to_reload, k, None) is None:
errors.append('- Attribute "%s" not found in the model to reload' % k)
if getattr(model, k, None) != getattr(to_reload, k, None):
message = ('- Attribute "%s" differs between the current model (%s) '
'and the one to reload (%s)'
% (k, str(getattr(model, k)), str(getattr(to_reload, k))))
(errors if strict else warnings).append(message)
if len(warnings) > 0:
logger.warning('Different parameters:\n%s' % '\n'.join(warnings))
if len(errors) > 0:
logger.error('Incompatible parameters:\n%s' % '\n'.join(errors))
exit()
# copy saved parameters
for k in model.state_dict().keys():
if model.state_dict()[k].size() != to_reload.state_dict()[k].size():
raise Exception("Expected tensor {} of size {}, but got {}".format(
k, model.state_dict()[k].size(),
to_reload.state_dict()[k].size()
))
model.state_dict()[k].copy_(to_reload.state_dict()[k])
def clip_parameters(model, clip):
"""
Clip model weights.
"""
if clip > 0:
for x in model.parameters():
x.data.clamp_(-clip, clip)
def get_grad_norm(model):
"""
Return the norm of the parameters gradients.
"""
norm = 0
for param in model.parameters():
norm += param.grad.data.norm(2) ** 2
return np.sqrt(norm)
def parse_lambda_config(params, name):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 iterations, then will linearly increase to 1 until iteration 2000
"""
x = getattr(params, name)
split = x.split(',')
if len(split) == 1:
setattr(params, name, float(x))
setattr(params, name + '_config', None)
else:
split = [s.split(':') for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1))
setattr(params, name, float(split[0][1]))
setattr(params, name + '_config', [(int(k), float(v)) for k, v in split])
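# A minimal usage sketch (hypothetical helper, illustrative values): after parsing,
# the attribute holds the initial value and "<name>_config" holds the schedule as
# (iteration, value) pairs.
def _example_parse_lambda_config_usage():
    params = argparse.Namespace(lambda_xe_mono="0:1,1000:0")
    parse_lambda_config(params, "lambda_xe_mono")
    assert params.lambda_xe_mono == 1.0
    assert params.lambda_xe_mono_config == [(0, 1.0), (1000, 0.0)]
    return params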
def update_lambda_value(config, n_iter):
"""
Update a lambda value according to its schedule configuration.
"""
ranges = [i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]]
if len(ranges) == 0:
assert n_iter >= config[-1][0]
return config[-1][1]
assert len(ranges) == 1
i = ranges[0]
x_a, y_a = config[i]
x_b, y_b = config[i + 1]
return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
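# A minimal usage sketch (hypothetical helper, illustrative values): with the
# schedule "0:1,1000:0", the coefficient decays linearly and is 0.5 halfway
# through; past the last milestone it stays at the final value.
def _example_update_lambda_value_usage():
    config = [(0, 1.0), (1000, 0.0)]
    assert update_lambda_value(config, 500) == 0.5
    assert update_lambda_value(config, 2000) == 0.0
    return config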
def update_lambdas(params, n_total_iter):
"""
Update all lambda coefficients.
"""
if params.lambda_xe_mono_config is not None:
params.lambda_xe_mono = update_lambda_value(params.lambda_xe_mono_config, n_total_iter)
if params.lambda_xe_para_config is not None:
params.lambda_xe_para = update_lambda_value(params.lambda_xe_para_config, n_total_iter)
if params.lambda_xe_back_config is not None:
params.lambda_xe_back = update_lambda_value(params.lambda_xe_back_config, n_total_iter)
if params.lambda_xe_otfd_config is not None:
params.lambda_xe_otfd = update_lambda_value(params.lambda_xe_otfd_config, n_total_iter)
if params.lambda_xe_otfa_config is not None:
params.lambda_xe_otfa = update_lambda_value(params.lambda_xe_otfa_config, n_total_iter)
if params.lambda_dis_config is not None:
params.lambda_dis = update_lambda_value(params.lambda_dis_config, n_total_iter)
if params.lambda_lm_config is not None:
params.lambda_lm = update_lambda_value(params.lambda_lm_config, n_total_iter)
def get_mask(lengths, all_words, expand=None, ignore_first=False, batch_first=False, cuda=True):
"""
Create a mask of shape (slen, bs) or (bs, slen).
"""
bs, slen = lengths.size(0), lengths.max()
mask = torch.ByteTensor(slen, bs).zero_()
for i in range(bs):
if all_words:
mask[:lengths[i], i] = 1
else:
mask[lengths[i] - 1, i] = 1
if expand is not None:
assert type(expand) is int
mask = mask.unsqueeze(2).expand(slen, bs, expand)
if ignore_first:
mask[0].fill_(0)
if batch_first:
mask = mask.transpose(0, 1)
if cuda:
mask = mask.cuda()
return mask
def reverse_sentences(batch, lengths):
"""
Reverse sentences inside a batch.
"""
bs = lengths.size(0)
assert batch.size(1) == bs
new_batch = batch.clone()
inv_idx = torch.arange(lengths.max() - 1, -1, -1)
for i in range(bs):
new_batch[:lengths[i], i].copy_(new_batch[:, i][inv_idx[-lengths[i]:]])
return new_batch
def restore_segmentation(path):
"""
Take a file segmented with BPE and restore it to its original segmentation.
"""
assert os.path.isfile(path)
restore_cmd = "sed -i -r 's/(@@ )|(@@ ?$)//g' %s"
subprocess.Popen(restore_cmd % path, shell=True).wait()
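# For reference, a minimal before/after sketch of the sed substitution above
# (the BPE joiner "@@ " is removed to restore whole words):
#   before: "the fu@@ nny tran@@ slation"
#   after : "the funny translation"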
def create_word_masks(params, data):
"""
Create masks for allowed / forbidden output words.
"""
if not hasattr(params, 'vocab') or len(params.vocab) == 0:
return
params.vocab_mask_pos = []
params.vocab_mask_neg = []
for lang, n_words in zip(params.langs, params.n_words):
dico = data['dico'][lang]
vocab = data['vocab'][lang]
words = [EOS_WORD, UNK_WORD] + list(vocab)
mask_pos = set([dico.index(w) for w in words])
mask_neg = [i for i in range(n_words) if i not in mask_pos]
params.vocab_mask_pos.append(torch.LongTensor(sorted(mask_pos)))
params.vocab_mask_neg.append(torch.LongTensor(sorted(mask_neg)))
| 35.177419 | 141 | 0.620816 |
793f429f6e9af7f4c46a188d1c7ee66cde711bb9 | 82,317 | py | Python | src/transformers/modeling_utils.py | cbrochtrup/transformers | c89bdfbe720bc8f41c7dc6db5473a2cb0955f224 | [
"Apache-2.0"
] | 1 | 2020-11-30T09:01:57.000Z | 2020-11-30T09:01:57.000Z | src/transformers/modeling_utils.py | cbrochtrup/transformers | c89bdfbe720bc8f41c7dc6db5473a2cb0955f224 | [
"Apache-2.0"
] | null | null | null | src/transformers/modeling_utils.py | cbrochtrup/transformers | c89bdfbe720bc8f41c7dc6db5473a2cb0955f224 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
cached_path,
hf_bucket_url,
is_remote_url,
is_torch_tpu_available,
replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging
logger = logging.get_logger(__name__)
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive."""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
def find_pruneable_heads_and_indices(
heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: torch.LongTensor = torch.arange(len(mask))[mask].long()
return heads, index
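# A minimal usage sketch (hypothetical helper, illustrative values): pruning head 1
# of a 4-head layer with head_size 2 keeps the 6 index positions that belong to
# heads 0, 2 and 3.
def _example_find_pruneable_heads_and_indices():
    heads, index = find_pruneable_heads_and_indices(
        heads=[1], n_heads=4, head_size=2, already_pruned_heads=set()
    )
    assert heads == {1}
    assert index.tolist() == [0, 1, 4, 5, 6, 7]
    return heads, index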
class ModuleUtilsMixin:
"""
A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
"""
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
        except ImportError:
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
        except ImportError:
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
"""
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
zero with :obj:`model.reset_memory_hooks_state()`.
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
def reset_memory_hooks_state(self):
"""
Reset the :obj:`mem_rss_diff` attribute of each module (see
:func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
"""
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self) -> device:
"""
:obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def dtype(self) -> dtype:
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
if self.dtype == torch.float16:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
elif self.dtype == torch.float32:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
raise ValueError(
"{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format(
self.dtype
)
)
return encoder_extended_attention_mask
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
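        # Shape sketch with illustrative values, for the encoder (non-causal) case:
        # a padding mask of shape (batch_size, seq_length) such as [[1, 1, 0]]
        # becomes a broadcastable mask of shape (batch_size, 1, 1, seq_length)
        # holding [[[[0.0, 0.0, -10000.0]]]], i.e. masked positions receive a large
        # negative bias before the softmax.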
def get_head_mask(
self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
) -> Tensor:
"""
Prepare the head mask if needed.
Args:
head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (:obj:`int`):
The number of hidden layers in the model.
            is_attention_chunked (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the attentions scores are computed by chunks or not.
Returns:
:obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or
list with :obj:`[None]` for each layer.
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
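        # Shape sketch with illustrative values: for a 4-head model, a 1-D head_mask
        # such as [1., 1., 0., 1.] is broadcast to shape
        # [num_hidden_layers, 1, num_heads, 1, 1], which zeroes out head 2 in every
        # layer's attention.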
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility
return head_mask
def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
"""
Get number of (optionally, trainable or non-embeddings) parameters in the module.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of non-embeddings parameters
Returns:
:obj:`int`: The number of parameters.
"""
def parameter_filter(x):
return (x.requires_grad or not only_trainable) and not (
isinstance(x, torch.nn.Embedding) and exclude_embeddings
)
params = filter(parameter_filter, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
"""
Helper function to estimate the total number of tokens from the model inputs.
Args:
            input_dict (:obj:`dict`): The model inputs.
Returns:
:obj:`int`: The total number of tokens.
"""
token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key]
if token_inputs:
return sum([token_input.numel() for token_input in token_inputs])
else:
warnings.warn(
"Could not estimate the number of tokens of the input, floating-point operations will not be computed"
)
return 0
def floating_point_ops(
self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
) -> int:
"""
Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper
<https://arxiv.org/pdf/2001.08361.pdf>`__ section 2.1. Should be overridden for transformers with parameter
re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.
Args:
            input_dict (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The model inputs, used to estimate the total number of tokens.
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to count embedding and softmax operations.
Returns:
:obj:`int`: The number of floating-point operations.
"""
return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)
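        # Worked example with illustrative numbers: for a hypothetical model with
        # ~110M non-embedding parameters and a batch of 8 sequences of 512 tokens,
        # this returns roughly 6 * (8 * 512) * 110e6 ≈ 2.7e12 floating-point
        # operations for the combined forward and backward passes.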
class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin):
r"""
Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch
model, taking as arguments:
- **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
TensorFlow checkpoint.
- **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to
the model.
- **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
        - **authorized_missing_keys** (:obj:`Optional[List[str]]`) -- A list of re patterns of tensor names to ignore
          when loading the model (and to avoid unnecessary warnings).
        - **keys_to_never_save** (:obj:`Optional[List[str]]`) -- A list of tensor names to ignore when saving the
          model (useful for keys that aren't trained, but which are deterministic).
"""
config_class = None
base_model_prefix = ""
authorized_missing_keys = None
authorized_unexpected_keys = None
keys_to_never_save = None
@property
def dummy_inputs(self) -> Dict[str, torch.Tensor]:
"""
:obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config and origin of the pretrained weights if given in model
self.config = config
self.name_or_path = config.name_or_path
@property
def base_model(self) -> nn.Module:
"""
:obj:`torch.nn.Module`: The main body of the model.
"""
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self) -> nn.Module:
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value: nn.Module):
"""
Set model's input embeddings.
Args:
value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self) -> nn.Module:
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
        If the :obj:`torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we
        clone the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None and self.config.tie_word_embeddings:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
if hasattr(self, self.base_model_prefix):
self = getattr(self, self.base_model_prefix)
self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)
@staticmethod
def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
uninitialized_encoder_weights: List[str] = []
if decoder.__class__ != encoder.__class__:
logger.info(
f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
)
def tie_encoder_to_decoder_recursively(
decoder_pointer: nn.Module,
encoder_pointer: nn.Module,
module_name: str,
uninitialized_encoder_weights: List[str],
depth=0,
):
assert isinstance(decoder_pointer, nn.Module) and isinstance(
encoder_pointer, nn.Module
), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
if hasattr(decoder_pointer, "weight"):
assert hasattr(encoder_pointer, "weight")
encoder_pointer.weight = decoder_pointer.weight
if hasattr(decoder_pointer, "bias"):
assert hasattr(encoder_pointer, "bias")
encoder_pointer.bias = decoder_pointer.bias
return
encoder_modules = encoder_pointer._modules
decoder_modules = decoder_pointer._modules
if len(decoder_modules) > 0:
assert (
len(encoder_modules) > 0
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
encoder_layer_pos = 0
for name, module in decoder_modules.items():
if name.isdigit():
encoder_name = str(int(name) + encoder_layer_pos)
decoder_name = name
if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
encoder_modules
) != len(decoder_modules):
                            # this can happen if the name corresponds to a position in a module list (nn.ModuleList) of layers
# in this case the decoder has added a cross-attention that the encoder does not have
# thus skip this step and subtract one layer pos from encoder
encoder_layer_pos -= 1
continue
elif name not in encoder_modules:
continue
elif depth > 500:
raise ValueError(
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
)
else:
decoder_name = encoder_name = name
tie_encoder_to_decoder_recursively(
decoder_modules[decoder_name],
encoder_modules[encoder_name],
module_name + "/" + name,
uninitialized_encoder_weights,
depth=depth + 1,
)
all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights)
# tie weights recursively
tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
if len(uninitialized_encoder_weights) > 0:
logger.warning(
f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
)
    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """Tie or clone module weights depending on whether we are using TorchScript or not."""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(
0,
output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
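    # Illustrative sketch (the tokenizer/model names and the added token are assumptions,
    # not part of this module): after extending a tokenizer's vocabulary, the embedding
    # matrix is resized to match, which also re-ties the output embeddings via tie_weights().
    #   tokenizer.add_tokens(["<new_token>"])
    #   model.resize_token_embeddings(len(tokenizer))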
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(
self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
) -> torch.nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Embedding` module of the model without doing anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
"""
Initializes and prunes weights if needed.
"""
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
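    # Hedged example (layer/head indices are assumptions): prune heads 0 and 2 of layer 1
    # and head 1 of layer 2; the union is stored in config.pruned_heads, so the same pruning
    # is re-applied whenever a model is instantiated again from this config.
    #   model.prune_heads({1: [0, 2], 2: [1]})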
def save_pretrained(self, save_directory):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
`:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
state_dict = model_to_save.state_dict()
# Handle the case where some state_dict keys shouldn't be saved
if self.keys_to_never_save is not None:
state_dict = {k: v for k, v in state_dict.items() if k not in self.keys_to_never_save}
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
if getattr(self.config, "xla_device", False) and is_torch_tpu_available():
import torch_xla.core.xla_model as xm
if xm.is_master_ordinal():
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# xm.save takes care of saving only from master
xm.save(state_dict, output_model_file)
else:
model_to_save.config.save_pretrained(save_directory)
torch.save(state_dict, output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
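    # Illustrative round-trip (the directory name and ModelClass are placeholders): weights
    # go to WEIGHTS_NAME and the configuration to its JSON file inside the directory, so the
    # model can be reloaded with `from_pretrained` below.
    #   model.save_pretrained("./my_model_dir/")
    #   model = ModelClass.from_pretrained("./my_model_dir/")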
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
train the model, you should first set it back in training mode with ``model.train()``.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str`, `optional`):
Can be either:
- A string with the `shortcut name` of a pretrained model to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g.,
``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `shortcut name` string of a
pretrained model).
- The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (i.e., do not try to download the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
mirror(:obj:`str`, `optional`, defaults to :obj:`None`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
Please refer to the mirror site for more information.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import BertConfig, BertModel
>>> # Download model and configuration from S3 and cache.
>>> model = BertModel.from_pretrained('bert-base-uncased')
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
>>> model = BertModel.from_pretrained('./test/saved_model/')
>>> # Update configuration during loading.
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
revision=revision,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint in priority if from_tf
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint in priority if from_tf
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
                    f"at '{resolved_archive_file}'. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict,
prefix,
local_metadata,
True,
missing_keys,
unexpected_keys,
error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if cls.authorized_missing_keys is not None:
for pat in cls.authorized_missing_keys:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls.authorized_unexpected_keys is not None:
for pat in cls.authorized_unexpected_keys:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.info(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
# make sure token embedding weights are still tied if needed
model.tie_weights()
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
if hasattr(config, "xla_device") and config.xla_device and is_torch_tpu_available():
import torch_xla.core.xla_model as xm
model = xm.send_cpu_data_to_device(model, xm.xla_device())
model.to(xm.xla_device())
return model
class Conv1D(nn.Module):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (:obj:`int`): The number of output features.
nx (:obj:`int`): The number of input features.
"""
def __init__(self, nf, nx):
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
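# Illustrative shape sketch (sizes are assumptions, not part of the original module): with
# nf=6 output features and nx=4 input features, an input of shape (batch, seq, 4) maps to
# (batch, seq, 6), i.e. Conv1D behaves like nn.Linear(4, 6) with the weight stored transposed.
#   layer = Conv1D(nf=6, nx=4)
#   y = layer(torch.randn(2, 3, 4))  # y.shape == (2, 3, 6)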
class PoolerStartLogits(nn.Module):
"""
Compute SQuAD start logits from sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(
self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
should be masked.
Returns:
:obj:`torch.FloatTensor`: The start logits for SQuAD.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
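# Shape sketch (tensor names and sizes are illustrative): for hidden_states of shape
# (batch_size, seq_len, hidden_size) the pooler returns start logits of shape
# (batch_size, seq_len); positions with p_mask == 1.0 are pushed to a large negative value
# so that a subsequent softmax effectively ignores them.
#   pooler = PoolerStartLogits(config)
#   start_logits = pooler(hidden_states, p_mask=p_mask)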
class PoolerEndLogits(nn.Module):
"""
Compute SQuAD end logits from sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
:obj:`layer_norm_eps` to use.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
The hidden states of the first tokens for the labeled span.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
The position of the first token for the labeled span.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
should be masked.
.. note::
            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns:
:obj:`torch.FloatTensor`: The end logits for SQuAD.
"""
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
"""
Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
"""
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
The hidden states of the first tokens for the labeled span.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
The position of the first token for the labeled span.
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
.. note::
            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns:
:obj:`torch.FloatTensor`: The SQuAD 2.0 answer class.
"""
# No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
hsz = hidden_states.shape[-1]
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
@dataclass
class SquadHeadOutput(ModelOutput):
"""
Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification
losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities
(beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
"""
loss: Optional[torch.FloatTensor] = None
start_top_log_probs: Optional[torch.FloatTensor] = None
start_top_index: Optional[torch.LongTensor] = None
end_top_log_probs: Optional[torch.FloatTensor] = None
end_top_index: Optional[torch.LongTensor] = None
cls_logits: Optional[torch.FloatTensor] = None
class SQuADHead(nn.Module):
r"""
A SQuAD head inspired by XLNet.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
:obj:`layer_norm_eps` to use.
"""
def __init__(self, config):
super().__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
@replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
def forward(
self,
hidden_states: torch.FloatTensor,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
is_impossible: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
return_dict: bool = False,
) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
Final hidden states of the model on the sequence tokens.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Positions of the first token for the labeled span.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Positions of the last token for the labeled span.
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Whether the question has a possible answer in the paragraph or not.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
should be masked.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
"""
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
if not return_dict:
return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
else:
return SquadHeadOutput(
start_top_log_probs=start_top_log_probs,
start_top_index=start_top_index,
end_top_log_probs=end_top_log_probs,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
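# Hedged usage sketch (all tensor names are assumptions): during training, passing both
# start_positions and end_positions yields a single scalar loss; at inference the head
# returns beam-search style top-k start/end log-probabilities plus the answerability
# logits, as documented in forward() above.
#   head = SQuADHead(config)  # config must define hidden_size, layer_norm_eps, start_n_top, end_n_top
#   out = head(hidden_states, start_positions=starts, end_positions=ends, return_dict=True)
#   loss = out.loss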
class SequenceSummary(nn.Module):
r"""
Compute a single vector summary of a sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:
- :obj:`"last"` -- Take the last token hidden state (like XLNet)
- :obj:`"first"` -- Take the first token hidden state (like Bert)
- :obj:`"mean"` -- Take the mean of all tokens hidden states
- :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- :obj:`"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
:obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
- **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
output, another string or :obj:`None` will add no activation.
- **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
activation.
- **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and
activation.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = get_activation(activation_string) if activation_string else Identity()
self.first_dropout = Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
) -> torch.FloatTensor:
"""
Compute a single vector summary of a sequence hidden states.
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`):
Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification
token.
Returns:
:obj:`torch.FloatTensor`: The summary of the sequence hidden states.
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(
hidden_states[..., :1, :],
hidden_states.shape[-2] - 1,
dtype=torch.long,
)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
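# Usage sketch (config attributes as documented above; tensor names are illustrative): with
# summary_type == "cls_index" the summary is taken at the supplied classification-token
# positions, then passed through the optional projection, activation and dropouts.
#   summary = SequenceSummary(config)
#   pooled = summary(hidden_states, cls_index=cls_index)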
def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
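# Minimal sketch (sizes are illustrative): keep only output units 0 and 2 of a Linear(4, 3),
# yielding a new Linear(4, 2) whose weight rows (and bias entries) are copied from the original.
#   layer = nn.Linear(4, 3)
#   pruned = prune_linear_layer(layer, torch.tensor([0, 2]), dim=0)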
def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
"""
    Prune a Conv1D layer to keep only entries in index. A Conv1D works like a Linear layer (see e.g. BERT) but the weights
are transposed.
Used to remove heads.
Args:
layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices.
Returns:
:class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(
layer: Union[torch.nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[torch.nn.Linear, Conv1D]:
"""
Prune a Conv1D or linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with
:obj:`requires_grad=True`.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__))
def apply_chunking_to_forward(
forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
"""
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.
If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
directly applying :obj:`forward_fn` to :obj:`input_tensors`.
Args:
forward_fn (:obj:`Callable[..., torch.Tensor]`):
The forward function of the model.
chunk_size (:obj:`int`):
The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
chunk_dim (:obj:`int`):
The dimension over which the :obj:`input_tensors` should be chunked.
input_tensors (:obj:`Tuple[torch.Tensor]`):
The input tensors of ``forward_fn`` which will be chunked
Returns:
        :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have returned if applied directly to :obj:`input_tensors`.
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
"""
assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
tensor_shape = input_tensors[0].shape[chunk_dim]
assert all(
input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors
    ), "All input tensors have to be of the same shape"
# inspect.signature exist since python 3.5 and is a python method -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
assert num_args_in_forward_chunk_fn == len(
input_tensors
), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
num_args_in_forward_chunk_fn, len(input_tensors)
)
if chunk_size > 0:
assert (
input_tensors[0].shape[chunk_dim] % chunk_size == 0
), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
input_tensors[0].shape[chunk_dim], chunk_size
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
# apply forward fn to every tuple
output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(*input_tensors)
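# Concrete sketch of the chunking arithmetic (numbers are illustrative): for an input of
# shape (batch, 6, hidden) with chunk_dim=1 and chunk_size=2, forward_fn is applied to
# 3 chunks of shape (batch, 2, hidden) and the outputs are concatenated back along dim 1.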
| 48.393298 | 197 | 0.628048 |
793f43e9bbe35f198038b988a0026eeaaf685976 | 1,367 | py | Python | asv_bench/benchmarks/tslibs/tz_convert.py | Pawel-Kranzberg/pandas | 6f90cb3d7bd5891d15a427252fba00027ca6084d | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 28,899 | 2016-10-13T03:32:12.000Z | 2022-03-31T21:39:05.000Z | asv_bench/benchmarks/tslibs/tz_convert.py | Pawel-Kranzberg/pandas | 6f90cb3d7bd5891d15a427252fba00027ca6084d | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 31,004 | 2016-10-12T23:22:27.000Z | 2022-03-31T23:17:38.000Z | asv_bench/benchmarks/tslibs/tz_convert.py | Pawel-Kranzberg/pandas | 6f90cb3d7bd5891d15a427252fba00027ca6084d | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 15,149 | 2016-10-13T03:21:31.000Z | 2022-03-31T18:46:47.000Z | import numpy as np
from pytz import UTC
from pandas._libs.tslibs.tzconversion import tz_localize_to_utc
from .tslib import (
_sizes,
_tzs,
tzlocal_obj,
)
try:
old_sig = False
from pandas._libs.tslibs.tzconversion import tz_convert_from_utc
except ImportError:
old_sig = True
from pandas._libs.tslibs.tzconversion import tz_convert as tz_convert_from_utc
class TimeTZConvert:
params = [
_sizes,
[x for x in _tzs if x is not None],
]
param_names = ["size", "tz"]
def setup(self, size, tz):
if size == 10 ** 6 and tz is tzlocal_obj:
# tzlocal is cumbersomely slow, so skip to keep runtime in check
raise NotImplementedError
arr = np.random.randint(0, 10, size=size, dtype="i8")
self.i8data = arr
def time_tz_convert_from_utc(self, size, tz):
# effectively:
# dti = DatetimeIndex(self.i8data, tz=tz)
# dti.tz_localize(None)
if old_sig:
tz_convert_from_utc(self.i8data, UTC, tz)
else:
tz_convert_from_utc(self.i8data, tz)
def time_tz_localize_to_utc(self, size, tz):
# effectively:
# dti = DatetimeIndex(self.i8data)
# dti.tz_localize(tz, ambiguous="NaT", nonexistent="NaT")
tz_localize_to_utc(self.i8data, tz, ambiguous="NaT", nonexistent="NaT")
| 27.897959 | 82 | 0.643745 |
793f446389f44cfedb0528d0e08a4d921537d099 | 3,567 | py | Python | bmtk/simulator/filternet/lgnmodel/fitfuns.py | aaberbach/bmtk | 42aa70ce2003227a32df6ce5a95420dbf4bdfbd4 | [
"BSD-3-Clause"
] | 216 | 2017-10-03T17:02:42.000Z | 2022-03-20T03:35:48.000Z | bmtk/simulator/filternet/lgnmodel/fitfuns.py | moekay/bmtk | 6efdf6387d2a6badf276b917ee15d238daeae883 | [
"BSD-3-Clause"
] | 70 | 2017-10-05T00:50:41.000Z | 2022-03-30T18:55:01.000Z | bmtk/simulator/filternet/lgnmodel/fitfuns.py | moekay/bmtk | 6efdf6387d2a6badf276b917ee15d238daeae883 | [
"BSD-3-Clause"
] | 97 | 2017-10-03T22:15:06.000Z | 2022-03-23T21:03:26.000Z | import os
from math import *
import numpy as np
import numpy.fft as npft
def makeFitStruct_GLM(dtsim, kbasprs, nkt, flag_exp):
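    # Builds a GLM fit structure: a dict holding the stimulus filter `k`, the DC term,
    # the simulation time step and a temporal basis `ktbas` of length `nkt`, using the
    # raised-cosine basis when flag_exp == 0 and the exponential basis otherwise.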
gg = {}
gg['k'] = []
gg['dc'] = 0
gg['kt'] = np.zeros((nkt,1))
gg['ktbas'] = []
gg['kbasprs'] = kbasprs
gg['dt'] = dtsim
nkt = nkt
if flag_exp==0:
ktbas = makeBasis_StimKernel(kbasprs,nkt)
else:
ktbas = makeBasis_StimKernel_exp(kbasprs,nkt)
gg['ktbas'] = ktbas
gg['k'] = gg['ktbas']*gg['kt']
return gg
def makeBasis_StimKernel(kbasprs, nkt):
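    # Temporal basis for the stimulus kernel: `neye` identity ("eye") columns followed by
    # `ncos` raised-cosine bumps whose centres are spaced in log time according to `kpeaks`;
    # the columns are normalized, and the two resulting filters are then shifted by the
    # per-filter `delays` and renormalized.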
neye = kbasprs['neye']
ncos = kbasprs['ncos']
kpeaks = kbasprs['kpeaks']
kdt = 1
b = kbasprs['b']
delays_raw = kbasprs['delays']
delays = delays_raw[0].astype(int)
ylim = np.array([100.,200.]) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!HARD-CODED FOR NOW
yrnge = nlin(ylim + b*np.ones(np.shape(kpeaks)))
db = (yrnge[-1]-yrnge[0])/(ncos-1)
ctrs = nlin(np.array(kpeaks)) # yrnge
mxt = invnl(yrnge[ncos-1]+2*db)-b
kt0 = np.arange(0, mxt, kdt) # -delay
nt = len(kt0)
e1 = np.tile(nlin(kt0 + b*np.ones(np.shape(kt0))), (ncos, 1))
e2 = np.transpose(e1)
e3 = np.tile(ctrs, (nt, 1))
kbasis0 = []
for kk in range(ncos):
kbasis0.append(ff(e2[:,kk],e3[:,kk],db))
#Concatenate identity vectors
nkt0 = np.size(kt0, 0)
a1 = np.concatenate((np.eye(neye), np.zeros((nkt0,neye))),axis=0)
a2 = np.concatenate((np.zeros((neye,ncos)),np.array(kbasis0).T),axis=0)
kbasis = np.concatenate((a1, a2),axis=1)
kbasis = np.flipud(kbasis)
nkt0 = np.size(kbasis,0)
if nkt0 < nkt:
kbasis = np.concatenate((np.zeros((nkt - nkt0, ncos + neye)), kbasis), axis=0)
elif nkt0 > nkt:
kbasis = kbasis[-1-nkt:-1, :]
kbasis = normalizecols(kbasis)
# Add delays for both functions. tack on delays (array of 0s) to the end of the function, then readjusts the second
# function so both are the same size.
kbasis2_0 = np.concatenate((kbasis[:, 0], np.zeros((delays[0], ))), axis=0)
kbasis2_1 = np.concatenate((kbasis[:, 1], np.zeros((delays[1], ))), axis=0)
len_diff = delays[1] - delays[0]
kbasis2_1 = kbasis2_1[len_diff:]
# combine and renormalize
kbasis2 = np.zeros((len(kbasis2_0), 2))
kbasis2[:, 0] = kbasis2_0
kbasis2[:, 1] = kbasis2_1
kbasis2 = normalizecols(kbasis2)
return kbasis2
def makeBasis_StimKernel_exp(kbasprs,nkt):
ks = kbasprs['ks']
b = kbasprs['b']
x0 = np.arange(0,nkt)
kbasis = np.zeros((nkt,len(ks)))
for ii in range(len(ks)):
kbasis[:,ii] = invnl(-ks[ii]*x0) # (1.0/ks[ii])*
kbasis = np.flipud(kbasis)
# kbasis = normalizecols(kbasis)
return kbasis
def nlin(x):
eps = 1e-20
# x.clip(0.)
return np.log(x+eps)
def invnl(x):
eps = 1e-20
return np.exp(x)-eps
def ff(x, c, dc):
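    # Raised-cosine bump: 0.5 * (cos(clip((x - c) * pi / (2 * dc), -pi, pi)) + 1),
    # i.e. a smooth bump centred at c that reaches zero once |x - c| exceeds 2 * dc.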
rowsize = np.size(x,0)
m = []
for i in range(rowsize):
xi = x[i]
ci = c[i]
val=(np.cos(np.max([-pi, np.min([pi, (xi-ci)*pi/dc/2])])) + 1)/2
m.append(val)
return np.array(m)
def normalizecols(A):
B = A/np.tile(np.sqrt(sum(A**2,0)),(np.size(A,0),1))
return B
def sameconv(A,B):
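    # FFT-based filtering: multiplies the length-(len(A)+len(B)-1) FFTs of A and the
    # time-reversed B, inverse-transforms, and keeps only the first len(A) samples.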
am = np.size(A)
bm = np.size(B)
nn = am+bm-1
q = npft.fft(A,nn)*npft.fft(np.flipud(B),nn)
p = q
G = npft.ifft(p)
G = G[range(am)]
return G
| 26.227941 | 120 | 0.537707 |
793f4538cfa2fd90aafcc0ec1cc494c786b444b8 | 399 | py | Python | exam_system/exam_system/wsgi.py | hiruthikj/exam-system | 952cb87bd43b31f6337aac1f1e57e05a68e7c531 | [
"Apache-2.0"
] | 3 | 2020-11-16T17:32:56.000Z | 2021-04-07T14:16:24.000Z | exam_system/exam_system/wsgi.py | hiruthikj/exam-system | 952cb87bd43b31f6337aac1f1e57e05a68e7c531 | [
"Apache-2.0"
] | null | null | null | exam_system/exam_system/wsgi.py | hiruthikj/exam-system | 952cb87bd43b31f6337aac1f1e57e05a68e7c531 | [
"Apache-2.0"
] | 1 | 2020-11-03T17:10:20.000Z | 2020-11-03T17:10:20.000Z | """
WSGI config for exam_system project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'exam_system.settings')
application = get_wsgi_application()
| 23.470588 | 78 | 0.789474 |
793f4901f11f8ce15bfae1fbab52409dfb3f3408 | 1,689 | py | Python | template_main.py | cristi161/eecvf | 519c488bd47f697ef51e88823f7a751a52677b88 | [
"MIT"
] | 1 | 2021-04-02T15:33:12.000Z | 2021-04-02T15:33:12.000Z | template_main.py | cristi161/eecvf | 519c488bd47f697ef51e88823f7a751a52677b88 | [
"MIT"
] | null | null | null | template_main.py | cristi161/eecvf | 519c488bd47f697ef51e88823f7a751a52677b88 | [
"MIT"
] | 1 | 2021-08-14T09:07:22.000Z | 2021-08-14T09:07:22.000Z | import Application
import Benchmarking
import MachineLearning
import config_main as CONFIG
import Utils
def main():
    """
    Smoke-test of the EECVF pipeline on TestData/smoke_test: grab the RAW images, apply
    greyscale, max-pixel, pyramid down/up, the DoB job, edge labelling and Hough lines,
    then save and show all output ports.
    """
Application.delete_folder_appl_out()
Application.set_input_image_folder('TestData/smoke_test')
Application.do_get_image_job('RAW')
Application.set_number_waves(2)
grey = Application.do_grayscale_transform_job(port_input_name='RAW')
Application.do_max_pixel_image_job(port_input_name=grey, level=CONFIG.PYRAMID_LEVEL.LEVEL_0)
Application.do_pyramid_level_down_job(port_input_name=grey, number_of_lvl=1)
Application.do_pyramid_level_down_job(port_input_name='RAW', number_of_lvl=1, is_rgb=True)
Application.do_pyramid_level_up_job(port_input_name=grey, port_input_lvl=CONFIG.PYRAMID_LEVEL.LEVEL_1,
number_of_lvl=1, is_rgb=False)
Application.do_pyramid_level_up_job(port_input_name='RAW', port_input_lvl=CONFIG.PYRAMID_LEVEL.LEVEL_1,
number_of_lvl=1, is_rgb=True)
xt = Application.do_dob_job(port_input_name=grey,
is_rgb=False)
print(xt)
Application.do_edge_label_job(port_input_name=xt, connectivity=4)
Application.do_edge_label_job(port_input_name=xt, connectivity=8)
Application.do_hough_lines_job(port_input_name=xt, vote_threshold=150)
Application.create_config_file()
Application.configure_save_pictures(ports_to_save='ALL')
Application.configure_show_pictures(ports_to_show='ALL', time_to_show=500, to_rotate=False)
Application.run_application()
Utils.close_files()
if __name__ == "__main__":
main()
| 36.717391 | 108 | 0.725281 |
793f492f83d33ed72b3febc88c8295eadd9fa588 | 626 | py | Python | pangolin/core/utils.py | skylifewww/pangolinreact | 8d8a45fd15c442618f2ed1ecab15e2e2ab4b7a3a | [
"MIT"
] | null | null | null | pangolin/core/utils.py | skylifewww/pangolinreact | 8d8a45fd15c442618f2ed1ecab15e2e2ab4b7a3a | [
"MIT"
] | null | null | null | pangolin/core/utils.py | skylifewww/pangolinreact | 8d8a45fd15c442618f2ed1ecab15e2e2ab4b7a3a | [
"MIT"
] | null | null | null | import re
from purl import URL
from django.utils.encoding import force_text
def intspace(value):
"""
45570 => 45 570
450840 => 450 840
1450000 => 1 450 000
"""
orig = force_text(value)
new = re.sub(r'^(-?\d+)(\d{3})', '\g<1> \g<2>', orig)
if orig == new:
return new
return intspace(new)
def set_param(request=None, url=None, **kwargs):
if not request and not url:
return '/'
url = URL(path=request.path, query=request.META['QUERY_STRING']) if request else URL(url)
for k, v in kwargs.items():
url = url.query_param(k, v)
return url.as_string()
| 22.357143 | 93 | 0.600639 |
793f4a7e12c070ae92b4f0aca450098f97affa3a | 824 | py | Python | main.py | vtalks/pipeline | b075836f16157c7096eebc9cfd9c43301e137a61 | [
"Apache-2.0"
] | 1 | 2018-07-07T11:56:44.000Z | 2018-07-07T11:56:44.000Z | main.py | vtalks/pipeline | b075836f16157c7096eebc9cfd9c43301e137a61 | [
"Apache-2.0"
] | null | null | null | main.py | vtalks/pipeline | b075836f16157c7096eebc9cfd9c43301e137a61 | [
"Apache-2.0"
] | null | null | null | import logging
import handlers
from scheduler import Scheduler
logger = logging.getLogger(__name__)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logger.info('Starting the pipeline-scheduler ...')
scheduler = Scheduler()
scheduler.event_loop.run_until_complete(scheduler.boostrap())
logger.info('Setup event subscriptions and message handlers ...')
scheduler.subscribe("pipeline.channel", handlers.channels.channel_message_handler)
scheduler.subscribe("pipeline.playlist", handlers.playlists.playlist_message_handler)
scheduler.subscribe("pipeline.talk", handlers.talks.talk_message_handler)
logger.info("Setup event dispatchers and message publishers ...")
try:
scheduler.event_loop.run_forever()
finally:
scheduler.event_loop.close() | 31.692308 | 89 | 0.756068 |
793f4a81ef79023606ac7ae384e0c32cd776b55f | 364 | py | Python | Ago-Dic-2021/viera-rodriguez-david/P2_Practica2/ejercicio3/insertar.py | AnhellO/DAS_Sistemas | 07b4eca78357d02d225d570033d05748d91383e3 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ago-Dic-2021/viera-rodriguez-david/P2_Practica2/ejercicio3/insertar.py | AnhellO/DAS_Sistemas | 07b4eca78357d02d225d570033d05748d91383e3 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ago-Dic-2021/viera-rodriguez-david/P2_Practica2/ejercicio3/insertar.py | AnhellO/DAS_Sistemas | 07b4eca78357d02d225d570033d05748d91383e3 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | import redis
import json
with open('mock_data.json') as json_file:
data=json.load(json_file)
redis_client = redis.Redis(host='redis_db', port=6379)
for i in data:
redis_client.set("id":i['id'],"first_name":i['first_name'],"last_name":i['last_name'],"email":i['email'],"gender":i['gender'],"ip_address":i['ip_address'],"school_number":i['school_number']) | 36.4 | 194 | 0.708791 |
793f4b64337c9c02f062b13d7aa505fae1af9b98 | 1,042 | py | Python | manage.py | pmrowla/gumiya | cdfac7d79d3ebac28df40ec6aa011a5400c52112 | [
"MIT"
] | null | null | null | manage.py | pmrowla/gumiya | cdfac7d79d3ebac28df40ec6aa011a5400c52112 | [
"MIT"
] | 13 | 2017-08-21T10:00:09.000Z | 2021-12-31T04:00:56.000Z | manage.py | pmrowla/gumiya | cdfac7d79d3ebac28df40ec6aa011a5400c52112 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# twitch_osu_bot directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, 'twitch_osu_bot'))
execute_from_command_line(sys.argv)
| 34.733333 | 77 | 0.661228 |
793f4b77dd50381abffc82d03fee8bc36e746dab | 5,824 | py | Python | imaginaire/evaluation/fid.py | hw07216/imaginaire | 87c774114622e39488a5ea8a7728b1a20896afb9 | [
"RSA-MD"
] | 3,308 | 2020-07-15T17:50:13.000Z | 2022-03-31T14:53:31.000Z | imaginaire/evaluation/fid.py | hw07216/imaginaire | 87c774114622e39488a5ea8a7728b1a20896afb9 | [
"RSA-MD"
] | 132 | 2020-09-20T17:36:28.000Z | 2022-03-28T12:40:03.000Z | src/imaginaire/evaluation/fid.py | livingbio/imaginaire-fsvid2vid | d82c87aced50afd44fd162491ba5b59056b74034 | [
"RSA-MD"
] | 370 | 2020-09-29T00:34:08.000Z | 2022-03-30T04:12:48.000Z | # Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import os
import numpy as np
import torch
from scipy import linalg
from imaginaire.evaluation.common import load_or_compute_activations
from imaginaire.utils.distributed import is_master
from imaginaire.utils.distributed import master_only_print as print
@torch.no_grad()
def compute_fid(fid_path, data_loader, net_G,
key_real='images', key_fake='fake_images',
sample_size=None, preprocess=None, return_act=False,
is_video=False, few_shot_video=False, **kwargs):
r"""Compute the fid score.
Args:
fid_path (str): Location for the numpy file to store or to load the
statistics.
data_loader (obj): PyTorch dataloader object.
        net_G (obj): For image generation models, net_G is the generator network.
For video generation models, net_G is the trainer.
key_real (str): Dictionary key value for the real data.
key_fake (str): Dictionary key value for the fake data.
sample_size (int or tuple): How many samples to be used.
preprocess (func): The preprocess function to be applied to the data.
return_act (bool): If ``True``, also returns feature activations of
real and fake data.
is_video (bool): Whether we are handling video sequences.
few_shot_video (bool): If ``True``, uses few-shot video synthesis.
Returns:
(float): FID value.
"""
print('Computing FID.')
act_path = os.path.join(os.path.dirname(fid_path),
'activations_real.npy')
# Get the fake mean and covariance.
fake_act = load_or_compute_activations(
None, data_loader, key_real, key_fake, net_G,
sample_size, preprocess, is_video=is_video,
few_shot_video=few_shot_video, **kwargs
)
# Get the ground truth mean and covariance.
real_act = load_or_compute_activations(
act_path, data_loader, key_real, key_fake, None,
sample_size, preprocess, is_video=is_video,
few_shot_video=few_shot_video, **kwargs
)
if is_master():
fid = _calculate_frechet_distance(
fake_act, real_act)["FID"]
if return_act:
return fid, real_act, fake_act
else:
return fid
elif return_act:
return None, None, None
else:
return None
@torch.no_grad()
def compute_fid_data(fid_path, data_loader_a, data_loader_b,
key_a='images', key_b='images', sample_size=None,
is_video=False, few_shot_video=False, **kwargs):
r"""Compute the fid score between two datasets.
Args:
fid_path (str): Location for the numpy file to store or to load the
statistics.
data_loader_a (obj): PyTorch dataloader object for dataset a.
data_loader_b (obj): PyTorch dataloader object for dataset b.
key_a (str): Dictionary key value for images in the dataset a.
key_b (str): Dictionary key value for images in the dataset b.
sample_size (int): How many samples to be used for computing the FID.
is_video (bool): Whether we are handling video sequences.
few_shot_video (bool): If ``True``, uses few-shot video synthesis.
Returns:
(float): FID value.
"""
print('Computing FID.')
path_a = os.path.join(os.path.dirname(fid_path),
'activations_a.npy')
min_data_size = min(len(data_loader_a.dataset),
len(data_loader_b.dataset))
if sample_size is None:
sample_size = min_data_size
else:
sample_size = min(sample_size, min_data_size)
act_a = load_or_compute_activations(
path_a, data_loader_a, key_a, key_b, None,
sample_size=sample_size, is_video=is_video,
few_shot_video=few_shot_video, **kwargs
)
act_b = load_or_compute_activations(
None, data_loader_b, key_a, key_b, None,
sample_size=sample_size, is_video=is_video,
few_shot_video=few_shot_video, **kwargs
)
if is_master():
return _calculate_frechet_distance(act_a, act_b)["FID"]
def _calculate_frechet_distance(act_1, act_2, eps=1e-6):
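    # For Gaussians N(mu1, sigma1) and N(mu2, sigma2) the squared Frechet
    # distance is ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2*sqrt(sigma1 @ sigma2));
    # the activation statistics computed below are plugged into that expression.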
mu1 = np.mean(act_1.cpu().numpy(), axis=0)
sigma1 = np.cov(act_1.cpu().numpy(), rowvar=False)
mu2 = np.mean(act_2.cpu().numpy(), axis=0)
sigma2 = np.cov(act_2.cpu().numpy(), rowvar=False)
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, 'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, 'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
print('Imaginary component {}'.format(m))
# raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return {"FID": (diff.dot(diff) + np.trace(sigma1) + np.trace(
sigma2) - 2 * tr_covmean)}
| 40.444444 | 98 | 0.656593 |
793f4bf548b2a00de86bbc6012dc0aab3561c179 | 4,420 | py | Python | mvpa2/clfs/mass.py | mortonne/PyMVPA | 98644c5cd9733edd39fac746ea7cf67398674645 | [
"MIT"
] | null | null | null | mvpa2/clfs/mass.py | mortonne/PyMVPA | 98644c5cd9733edd39fac746ea7cf67398674645 | [
"MIT"
] | null | null | null | mvpa2/clfs/mass.py | mortonne/PyMVPA | 98644c5cd9733edd39fac746ea7cf67398674645 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Generic wrappers for learners (classifiers) provided by R's MASS
Highly experimental and ad-hoc -- primary use was to verify LDA/QDA
results, thus not included in the mvpa2.suite ATM.
"""
__docformat__ = "restructuredtext"
import numpy as np
from mvpa2.base import externals
from mvpa2.base.learner import FailedToTrainError, FailedToPredictError
from mvpa2.base.state import ConditionalAttribute
from mvpa2.clfs.base import Classifier, accepts_dataset_as_samples
# do conditional to be able to build module reference
if externals.exists("mass", raise_=True):
import rpy2.robjects
import rpy2.robjects.numpy2ri
if hasattr(rpy2.robjects.numpy2ri, "activate"):
rpy2.robjects.numpy2ri.activate()
RRuntimeError = rpy2.robjects.rinterface.RRuntimeError
r = rpy2.robjects.r
r.library("MASS")
from mvpa2.support.rpy2_addons import Rrx2
class MASSLearnerAdapter(Classifier):
"""Generic adapter for instances of learners provided by R's MASS
Provides basic adaptation of interface for classifiers from MASS
library (e.g. QDA, LDA), by adapting interface.
Examples
--------
>>> if externals.exists('mass'):
... from mvpa2.testing.datasets import datasets
... mass_qda = MASSLearnerAdapter('qda', tags=['non-linear', 'multiclass'], enable_ca=['posterior'])
... mass_qda.train(datasets['uni2large'])
... mass_qda.predict(datasets['uni2large']) # doctest: +SKIP
"""
__tags__ = ["mass", "rpy2"]
posterior = ConditionalAttribute(
enabled=False, doc="Posterior probabilities if provided by classifier"
)
def __init__(self, learner, kwargs=None, kwargs_predict=None, tags=None, **kwargs_):
"""
Parameters
----------
learner : string
kwargs : dict, optional
kwargs_predict : dict, optional
tags : list of string
What additional tags to attach to this classifier. Tags are
used in the queries to classifier or regression warehouses.
"""
self._learner = learner
self._kwargs = kwargs or {}
self._kwargs_predict = kwargs_predict or {}
if tags:
# So we make a per-instance copy
self.__tags__ = self.__tags__ + tags
Classifier.__init__(self, **kwargs_)
def __repr__(self):
"""String representation of `SKLLearnerWrapper`"""
return Classifier.__repr__(
self, prefixes=[repr(self._learner), "kwargs=%r" % (self._kwargs,)]
)
def _train(self, dataset):
"""Train the skl learner using `dataset` (`Dataset`)."""
targets_sa = dataset.sa[self.get_space()]
targets = targets_sa.value
if not "regression" in self.__tags__:
targets = self._attrmap.to_numeric(targets)
try:
self._R_model = r[self._learner](dataset.samples, targets, **self._kwargs)
except RRuntimeError as e:
raise FailedToTrainError(
"Failed to train %s on %s. Got '%s' during call to fit()."
% (self, dataset, e)
)
@accepts_dataset_as_samples
def _predict(self, data):
"""Predict using the trained MASS learner"""
try:
output = r.predict(self._R_model, data, **self._kwargs_predict)
# TODO: access everything computed, and assign to
# ca's: res.names
classes = Rrx2(output, "class")
# TODO: move to helper function to be used generically
if classes.rclass[0] == "factor":
classes = [int(classes.levels[i - 1]) for i in classes]
if "posterior" in output.names:
self.ca.posterior = np.asarray(Rrx2(output, "posterior"))
res = np.asarray(classes)
except Exception as e:
raise FailedToPredictError(
"Failed to predict %s on data of shape %s. Got '%s' during"
" call to predict()." % (self, data.shape, e)
)
return res
| 35.934959 | 107 | 0.608597 |
793f4d32abb0d655ca3928b7dc629f44feb64313 | 597 | py | Python | AtC_Beg_Con_121-130/ABC125/C.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | AtC_Beg_Con_121-130/ABC125/C.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | AtC_Beg_Con_121-130/ABC125/C.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | n = int(input())
a = [int(m) for m in input().split()]
a.sort()
m0 = a[0]
m1 = a[1]
def make_divisors(n):
divisors = []
for i in range(1, int(n**0.5)+1):
if n % i == 0:
divisors.append(i)
if i != n // i:
divisors.append(n//i)
# divisors.sort()
return divisors
mm0 = make_divisors(m0)
mm1 = make_divisors(m1)
mm = mm0 + mm1
mm.sort()
si = 0
candi = 0
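# A divisor that fails to divide at most one element of the array cannot fail
# on both a[0] and a[1], so every valid candidate divides a[0] or a[1];
# the loop below accepts g when at most one element is not divisible by it.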
for g in mm:
si = 0
for i in range(n):
if a[i] % g != 0:
si += 1
if si == 2:
break
else:
candi = g
print(candi) | 17.558824 | 37 | 0.460637 |
793f4d8824080d792b37111e66c4bf62a0261fcd | 619 | py | Python | tflite_model_train.py | RadXGH/face-detect-cam | e3e6ad268b89e5e15ce190cff66e5322ad73bfe6 | [
"MIT"
] | null | null | null | tflite_model_train.py | RadXGH/face-detect-cam | e3e6ad268b89e5e15ce190cff66e5322ad73bfe6 | [
"MIT"
] | null | null | null | tflite_model_train.py | RadXGH/face-detect-cam | e3e6ad268b89e5e15ce190cff66e5322ad73bfe6 | [
"MIT"
] | null | null | null | from tflite_model_maker import image_classifier, config
from tflite_model_maker.image_classifier import DataLoader
# Load input data specific to an on-device ML app.
data = DataLoader.from_folder('./test-models/')
train_data, test_data = data.split(0.9)
# Customize the TensorFlow model.
model = image_classifier.create(train_data, batch_size = 1)
# Evaluate the model.
loss, accuracy = model.evaluate(test_data)
# Export to Tensorflow Lite model.
config = config.QuantizationConfig.for_dynamic()
model.export(export_dir='./tflite_models/', tflite_filename='model.tflite', quantization_config=config) | 38.6875 | 103 | 0.783522 |
793f4d98c1bd3b273f3082b72333acb792e87866 | 194 | py | Python | example/configure/app.py | dmsimard/dynaconf | ec394ab07e3b522879c8be678c65ebeb05fc2b59 | [
"MIT"
] | null | null | null | example/configure/app.py | dmsimard/dynaconf | ec394ab07e3b522879c8be678c65ebeb05fc2b59 | [
"MIT"
] | null | null | null | example/configure/app.py | dmsimard/dynaconf | ec394ab07e3b522879c8be678c65ebeb05fc2b59 | [
"MIT"
] | null | null | null | # coding: utf-8
from dynaconf import settings
settings.configure(settings_module='/tmp/configure_test/settings.py')
assert settings.MESSAGE == 'Hello from tmp'
print(settings.MESSAGE) # noqa
| 24.25 | 69 | 0.783505 |
793f4e76ff70fda32ac46dc07bc631fe107b9120 | 1,039 | py | Python | auto_cat_namer/catscanner.py | onhernandes/auto-cat-namer | 929e38296777a9453de7a13fbe70e9dbfa0f8ede | [
"MIT"
] | null | null | null | auto_cat_namer/catscanner.py | onhernandes/auto-cat-namer | 929e38296777a9453de7a13fbe70e9dbfa0f8ede | [
"MIT"
] | null | null | null | auto_cat_namer/catscanner.py | onhernandes/auto-cat-namer | 929e38296777a9453de7a13fbe70e9dbfa0f8ede | [
"MIT"
] | null | null | null | import cv2
from pathlib import Path
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
HAARS_FILENAME = "./haarcascade_frontalcatface_extended.xml"
HAARS_FILE = os.path.join(ROOT_DIR, HAARS_FILENAME)
def write_name_to_image(name, image_path, output_path):
image_path = Path(image_path).resolve()
suffix = image_path.suffix
image_path = str(image_path)
output_path = Path(output_path).resolve()
full_output_path = str(Path("%s/%s%s" % (output_path, name, suffix)))
image = cv2.imread(image_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
detector = cv2.CascadeClassifier(HAARS_FILE)
rects = detector.detectMultiScale(
gray, scaleFactor=1.3, minNeighbors=7, minSize=(75, 75)
)
if len(rects) == 0:
raise ValueError("Could not detect cat's face location")
x, y, w, h = rects[0]
cv2.putText(
image, name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 2,
)
cv2.imwrite(full_output_path, image)
| 32.46875 | 82 | 0.6718 |
793f4e781d383842e97fa3f900a2b40e93ab21c8 | 19,645 | py | Python | venv/lib/python3.6/site-packages/tensorflow_core/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | 2 | 2020-09-30T00:11:09.000Z | 2021-10-04T13:00:38.000Z | venv/lib/python3.6/site-packages/tensorflow_core/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/tensorflow_core/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | null | null | null | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: audio_microfrontend_op.cc
"""
import collections
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
@_dispatch.add_dispatch_list
@tf_export('audio_microfrontend')
def audio_microfrontend(audio, sample_rate=16000, window_size=25, window_step=10, num_channels=32, upper_band_limit=7500, lower_band_limit=125, smoothing_bits=10, even_smoothing=0.025, odd_smoothing=0.06, min_signal_remaining=0.05, enable_pcan=False, pcan_strength=0.95, pcan_offset=80, gain_bits=21, enable_log=True, scale_shift=6, left_context=0, right_context=0, frame_stride=1, zero_padding=False, out_scale=1, out_type=_dtypes.uint16, name=None):
r"""Audio Microfrontend Op.
This Op converts a sequence of audio data into one or more
feature vectors containing filterbanks of the input. The
conversion process uses a lightweight library to perform:
1. A slicing window function
2. Short-time FFTs
3. Filterbank calculations
4. Noise reduction
5. PCAN Auto Gain Control
6. Logarithmic scaling
Arguments
audio: 1D Tensor, int16 audio data in temporal ordering.
sample_rate: Integer, the sample rate of the audio in Hz.
window_size: Integer, length of desired time frames in ms.
window_step: Integer, length of step size for the next frame in ms.
num_channels: Integer, the number of filterbank channels to use.
upper_band_limit: Float, the highest frequency included in the filterbanks.
lower_band_limit: Float, the lowest frequency included in the filterbanks.
smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
even_smoothing: Float, smoothing coefficient for even-numbered channels.
odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
min_signal_remaining: Float, fraction of signal to preserve in smoothing.
enable_pcan: Bool, enable PCAN auto gain control.
pcan_strength: Float, gain normalization exponent.
pcan_offset: Float, positive value added in the normalization denominator.
gain_bits: Int, number of fractional bits in the gain.
enable_log: Bool, enable logarithmic scaling of filterbanks.
scale_shift: Integer, scale filterbanks by 2^(scale_shift).
left_context: Integer, number of preceding frames to attach to each frame.
    right_context: Integer, number of following frames to attach to each frame.
frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
out_scale: Integer, divide all filterbanks by this number.
out_type: DType, type of the output Tensor, defaults to UINT16.
Returns
filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
Args:
audio: A `Tensor` of type `int16`.
sample_rate: An optional `int`. Defaults to `16000`.
window_size: An optional `int`. Defaults to `25`.
window_step: An optional `int`. Defaults to `10`.
num_channels: An optional `int`. Defaults to `32`.
upper_band_limit: An optional `float`. Defaults to `7500`.
lower_band_limit: An optional `float`. Defaults to `125`.
smoothing_bits: An optional `int`. Defaults to `10`.
even_smoothing: An optional `float`. Defaults to `0.025`.
odd_smoothing: An optional `float`. Defaults to `0.06`.
min_signal_remaining: An optional `float`. Defaults to `0.05`.
enable_pcan: An optional `bool`. Defaults to `False`.
pcan_strength: An optional `float`. Defaults to `0.95`.
pcan_offset: An optional `float`. Defaults to `80`.
gain_bits: An optional `int`. Defaults to `21`.
enable_log: An optional `bool`. Defaults to `True`.
scale_shift: An optional `int`. Defaults to `6`.
left_context: An optional `int`. Defaults to `0`.
right_context: An optional `int`. Defaults to `0`.
frame_stride: An optional `int`. Defaults to `1`.
zero_padding: An optional `bool`. Defaults to `False`.
out_scale: An optional `int`. Defaults to `1`.
out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "AudioMicrofrontend", name,
tld.op_callbacks, audio, "sample_rate", sample_rate, "window_size",
window_size, "window_step", window_step, "num_channels", num_channels,
"upper_band_limit", upper_band_limit, "lower_band_limit",
lower_band_limit, "smoothing_bits", smoothing_bits, "even_smoothing",
even_smoothing, "odd_smoothing", odd_smoothing,
"min_signal_remaining", min_signal_remaining, "enable_pcan",
enable_pcan, "pcan_strength", pcan_strength, "pcan_offset",
pcan_offset, "gain_bits", gain_bits, "enable_log", enable_log,
"scale_shift", scale_shift, "left_context", left_context,
"right_context", right_context, "frame_stride", frame_stride,
"zero_padding", zero_padding, "out_scale", out_scale, "out_type",
out_type)
return _result
except _core._FallbackException:
try:
return audio_microfrontend_eager_fallback(
audio, sample_rate=sample_rate, window_size=window_size,
window_step=window_step, num_channels=num_channels,
upper_band_limit=upper_band_limit,
lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits,
even_smoothing=even_smoothing, odd_smoothing=odd_smoothing,
min_signal_remaining=min_signal_remaining,
enable_pcan=enable_pcan, pcan_strength=pcan_strength,
pcan_offset=pcan_offset, gain_bits=gain_bits,
enable_log=enable_log, scale_shift=scale_shift,
left_context=left_context, right_context=right_context,
frame_stride=frame_stride, zero_padding=zero_padding,
out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
audio_microfrontend, audio=audio, sample_rate=sample_rate,
window_size=window_size,
window_step=window_step,
num_channels=num_channels,
upper_band_limit=upper_band_limit,
lower_band_limit=lower_band_limit,
smoothing_bits=smoothing_bits,
even_smoothing=even_smoothing,
odd_smoothing=odd_smoothing,
min_signal_remaining=min_signal_remaining,
enable_pcan=enable_pcan,
pcan_strength=pcan_strength,
pcan_offset=pcan_offset,
gain_bits=gain_bits, enable_log=enable_log,
scale_shift=scale_shift,
left_context=left_context,
right_context=right_context,
frame_stride=frame_stride,
zero_padding=zero_padding,
out_scale=out_scale, out_type=out_type,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if sample_rate is None:
sample_rate = 16000
sample_rate = _execute.make_int(sample_rate, "sample_rate")
if window_size is None:
window_size = 25
window_size = _execute.make_int(window_size, "window_size")
if window_step is None:
window_step = 10
window_step = _execute.make_int(window_step, "window_step")
if num_channels is None:
num_channels = 32
num_channels = _execute.make_int(num_channels, "num_channels")
if upper_band_limit is None:
upper_band_limit = 7500
upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit")
if lower_band_limit is None:
lower_band_limit = 125
lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit")
if smoothing_bits is None:
smoothing_bits = 10
smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits")
if even_smoothing is None:
even_smoothing = 0.025
even_smoothing = _execute.make_float(even_smoothing, "even_smoothing")
if odd_smoothing is None:
odd_smoothing = 0.06
odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing")
if min_signal_remaining is None:
min_signal_remaining = 0.05
min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining")
if enable_pcan is None:
enable_pcan = False
enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan")
if pcan_strength is None:
pcan_strength = 0.95
pcan_strength = _execute.make_float(pcan_strength, "pcan_strength")
if pcan_offset is None:
pcan_offset = 80
pcan_offset = _execute.make_float(pcan_offset, "pcan_offset")
if gain_bits is None:
gain_bits = 21
gain_bits = _execute.make_int(gain_bits, "gain_bits")
if enable_log is None:
enable_log = True
enable_log = _execute.make_bool(enable_log, "enable_log")
if scale_shift is None:
scale_shift = 6
scale_shift = _execute.make_int(scale_shift, "scale_shift")
if left_context is None:
left_context = 0
left_context = _execute.make_int(left_context, "left_context")
if right_context is None:
right_context = 0
right_context = _execute.make_int(right_context, "right_context")
if frame_stride is None:
frame_stride = 1
frame_stride = _execute.make_int(frame_stride, "frame_stride")
if zero_padding is None:
zero_padding = False
zero_padding = _execute.make_bool(zero_padding, "zero_padding")
if out_scale is None:
out_scale = 1
out_scale = _execute.make_int(out_scale, "out_scale")
if out_type is None:
out_type = _dtypes.uint16
out_type = _execute.make_type(out_type, "out_type")
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"AudioMicrofrontend", audio=audio, sample_rate=sample_rate,
window_size=window_size,
window_step=window_step,
num_channels=num_channels,
upper_band_limit=upper_band_limit,
lower_band_limit=lower_band_limit,
smoothing_bits=smoothing_bits,
even_smoothing=even_smoothing,
odd_smoothing=odd_smoothing,
min_signal_remaining=min_signal_remaining,
enable_pcan=enable_pcan,
pcan_strength=pcan_strength,
pcan_offset=pcan_offset, gain_bits=gain_bits,
enable_log=enable_log, scale_shift=scale_shift,
left_context=left_context,
right_context=right_context,
frame_stride=frame_stride,
zero_padding=zero_padding, out_scale=out_scale,
out_type=out_type, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
audio_microfrontend, audio=audio, sample_rate=sample_rate,
window_size=window_size,
window_step=window_step,
num_channels=num_channels,
upper_band_limit=upper_band_limit,
lower_band_limit=lower_band_limit,
smoothing_bits=smoothing_bits,
even_smoothing=even_smoothing,
odd_smoothing=odd_smoothing,
min_signal_remaining=min_signal_remaining,
enable_pcan=enable_pcan,
pcan_strength=pcan_strength,
pcan_offset=pcan_offset, gain_bits=gain_bits,
enable_log=enable_log, scale_shift=scale_shift,
left_context=left_context,
right_context=right_context,
frame_stride=frame_stride,
zero_padding=zero_padding, out_scale=out_scale,
out_type=out_type, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("sample_rate", _op._get_attr_int("sample_rate"), "window_size",
_op._get_attr_int("window_size"), "window_step",
_op._get_attr_int("window_step"), "num_channels",
_op._get_attr_int("num_channels"), "upper_band_limit",
_op.get_attr("upper_band_limit"), "lower_band_limit",
_op.get_attr("lower_band_limit"), "smoothing_bits",
_op._get_attr_int("smoothing_bits"), "even_smoothing",
_op.get_attr("even_smoothing"), "odd_smoothing",
_op.get_attr("odd_smoothing"), "min_signal_remaining",
_op.get_attr("min_signal_remaining"), "enable_pcan",
_op._get_attr_bool("enable_pcan"), "pcan_strength",
_op.get_attr("pcan_strength"), "pcan_offset",
_op.get_attr("pcan_offset"), "gain_bits",
_op._get_attr_int("gain_bits"), "enable_log",
_op._get_attr_bool("enable_log"), "scale_shift",
_op._get_attr_int("scale_shift"), "left_context",
_op._get_attr_int("left_context"), "right_context",
_op._get_attr_int("right_context"), "frame_stride",
_op._get_attr_int("frame_stride"), "zero_padding",
_op._get_attr_bool("zero_padding"), "out_scale",
_op._get_attr_int("out_scale"), "out_type",
_op._get_attr_type("out_type"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"AudioMicrofrontend", _inputs_flat, _attrs, _result)
_result, = _result
return _result
AudioMicrofrontend = tf_export("raw_ops.AudioMicrofrontend")(_ops.to_raw_op(audio_microfrontend))
def audio_microfrontend_eager_fallback(audio, sample_rate, window_size, window_step, num_channels, upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing, odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength, pcan_offset, gain_bits, enable_log, scale_shift, left_context, right_context, frame_stride, zero_padding, out_scale, out_type, name, ctx):
if sample_rate is None:
sample_rate = 16000
sample_rate = _execute.make_int(sample_rate, "sample_rate")
if window_size is None:
window_size = 25
window_size = _execute.make_int(window_size, "window_size")
if window_step is None:
window_step = 10
window_step = _execute.make_int(window_step, "window_step")
if num_channels is None:
num_channels = 32
num_channels = _execute.make_int(num_channels, "num_channels")
if upper_band_limit is None:
upper_band_limit = 7500
upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit")
if lower_band_limit is None:
lower_band_limit = 125
lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit")
if smoothing_bits is None:
smoothing_bits = 10
smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits")
if even_smoothing is None:
even_smoothing = 0.025
even_smoothing = _execute.make_float(even_smoothing, "even_smoothing")
if odd_smoothing is None:
odd_smoothing = 0.06
odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing")
if min_signal_remaining is None:
min_signal_remaining = 0.05
min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining")
if enable_pcan is None:
enable_pcan = False
enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan")
if pcan_strength is None:
pcan_strength = 0.95
pcan_strength = _execute.make_float(pcan_strength, "pcan_strength")
if pcan_offset is None:
pcan_offset = 80
pcan_offset = _execute.make_float(pcan_offset, "pcan_offset")
if gain_bits is None:
gain_bits = 21
gain_bits = _execute.make_int(gain_bits, "gain_bits")
if enable_log is None:
enable_log = True
enable_log = _execute.make_bool(enable_log, "enable_log")
if scale_shift is None:
scale_shift = 6
scale_shift = _execute.make_int(scale_shift, "scale_shift")
if left_context is None:
left_context = 0
left_context = _execute.make_int(left_context, "left_context")
if right_context is None:
right_context = 0
right_context = _execute.make_int(right_context, "right_context")
if frame_stride is None:
frame_stride = 1
frame_stride = _execute.make_int(frame_stride, "frame_stride")
if zero_padding is None:
zero_padding = False
zero_padding = _execute.make_bool(zero_padding, "zero_padding")
if out_scale is None:
out_scale = 1
out_scale = _execute.make_int(out_scale, "out_scale")
if out_type is None:
out_type = _dtypes.uint16
out_type = _execute.make_type(out_type, "out_type")
audio = _ops.convert_to_tensor(audio, _dtypes.int16)
_inputs_flat = [audio]
_attrs = ("sample_rate", sample_rate, "window_size", window_size,
"window_step", window_step, "num_channels", num_channels,
"upper_band_limit", upper_band_limit, "lower_band_limit", lower_band_limit,
"smoothing_bits", smoothing_bits, "even_smoothing", even_smoothing,
"odd_smoothing", odd_smoothing, "min_signal_remaining",
min_signal_remaining, "enable_pcan", enable_pcan, "pcan_strength",
pcan_strength, "pcan_offset", pcan_offset, "gain_bits", gain_bits,
"enable_log", enable_log, "scale_shift", scale_shift, "left_context",
left_context, "right_context", right_context, "frame_stride", frame_stride,
"zero_padding", zero_padding, "out_scale", out_scale, "out_type", out_type)
_result = _execute.execute(b"AudioMicrofrontend", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"AudioMicrofrontend", _inputs_flat, _attrs, _result)
_result, = _result
return _result
| 49.987277 | 451 | 0.684093 |
793f4f713ae5c8bbbe662aa5a3676fc9662662a1 | 50,239 | py | Python | swift/obj/replicator.py | kipdoudou/swift | ade5b80952a72afce4333fe6d98285173d68af3e | [
"Apache-2.0"
] | null | null | null | swift/obj/replicator.py | kipdoudou/swift | ade5b80952a72afce4333fe6d98285173d68af3e | [
"Apache-2.0"
] | null | null | null | swift/obj/replicator.py | kipdoudou/swift | ade5b80952a72afce4333fe6d98285173d68af3e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import os
import errno
from os.path import isdir, isfile, join, dirname
import random
import shutil
import time
import itertools
from six import viewkeys
import six.moves.cPickle as pickle
from swift import gettext_ as _
import eventlet
from eventlet import GreenPool, queue, tpool, Timeout, sleep
from eventlet.green import subprocess
from swift.common.constraints import check_drive
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
compute_eta, get_logger, dump_recon_cache, \
rsync_module_interpolation, mkdirs, config_true_value, \
config_auto_int_value, storage_directory, \
load_recon_cache, PrefixLoggerAdapter, parse_override_options, \
distribute_evenly
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import get_data_dir, get_tmp_dir, DiskFileRouter
from swift.common.storage_policy import POLICIES, REPL_POLICY
DEFAULT_RSYNC_TIMEOUT = 900
def _do_listdir(partition, replication_cycle):
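    # True for roughly one partition in ten on any given cycle, so the full
    # directory listing (passed as do_listdir to _get_hashes) is refreshed for
    # a rotating subset of partitions instead of on every pass.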
return (((partition + replication_cycle) % 10) == 0)
class Stats(object):
fields = ['attempted', 'failure', 'hashmatch', 'remove', 'rsync',
'success', 'suffix_count', 'suffix_hash', 'suffix_sync',
'failure_nodes']
@classmethod
def from_recon(cls, dct):
return cls(**{k: v for k, v in dct.items() if k in cls.fields})
def to_recon(self):
return {k: getattr(self, k) for k in self.fields}
def __init__(self, attempted=0, failure=0, hashmatch=0, remove=0, rsync=0,
success=0, suffix_count=0, suffix_hash=0,
suffix_sync=0, failure_nodes=None):
self.attempted = attempted
self.failure = failure
self.hashmatch = hashmatch
self.remove = remove
self.rsync = rsync
self.success = success
self.suffix_count = suffix_count
self.suffix_hash = suffix_hash
self.suffix_sync = suffix_sync
self.failure_nodes = defaultdict(lambda: defaultdict(int),
(failure_nodes or {}))
def __add__(self, other):
total = type(self)()
total.attempted = self.attempted + other.attempted
total.failure = self.failure + other.failure
total.hashmatch = self.hashmatch + other.hashmatch
total.remove = self.remove + other.remove
total.rsync = self.rsync + other.rsync
total.success = self.success + other.success
total.suffix_count = self.suffix_count + other.suffix_count
total.suffix_hash = self.suffix_hash + other.suffix_hash
total.suffix_sync = self.suffix_sync + other.suffix_sync
        all_failed_ips = (set(self.failure_nodes) |
                          set(other.failure_nodes))
for ip in all_failed_ips:
self_devs = self.failure_nodes.get(ip, {})
other_devs = other.failure_nodes.get(ip, {})
this_ip_failures = {}
            for dev in set(self_devs) | set(other_devs):
this_ip_failures[dev] = (
self_devs.get(dev, 0) + other_devs.get(dev, 0))
total.failure_nodes[ip] = this_ip_failures
return total
def add_failure_stats(self, failures):
"""
Note the failure of one or more devices.
:param failures: a list of (ip, device-name) pairs that failed
"""
self.failure += len(failures)
for ip, device in failures:
self.failure_nodes[ip][device] += 1
class ObjectReplicator(Daemon):
"""
Replicate objects.
Encapsulates most logic and data needed by the object replication process.
Each call to .replicate() performs one replication pass. It's up to the
caller to do this in a loop.
"""
def __init__(self, conf, logger=None):
"""
:param conf: configuration object obtained from ConfigParser
:param logger: logging object
"""
self.conf = conf
self.logger = PrefixLoggerAdapter(
logger or get_logger(conf, log_route='object-replicator'), {})
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.bind_ip = conf.get('bind_ip', '0.0.0.0')
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6200))
self.concurrency = int(conf.get('concurrency', 1))
self.replicator_workers = int(conf.get('replicator_workers', 0))
self.stats_interval = int(conf.get('stats_interval', '300'))
self.ring_check_interval = int(conf.get('ring_check_interval', 15))
self.next_check = time.time() + self.ring_check_interval
self.replication_cycle = random.randint(0, 9)
self.partition_times = []
self.interval = int(conf.get('interval') or
conf.get('run_pause') or 30)
if 'run_pause' in conf and 'interval' not in conf:
self.logger.warning('Option object-replicator/run_pause '
'is deprecated and will be removed in a '
'future version. Update your configuration'
' to use option object-replicator/'
'interval.')
self.rsync_timeout = int(conf.get('rsync_timeout',
DEFAULT_RSYNC_TIMEOUT))
self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
self.rsync_compress = config_true_value(
conf.get('rsync_compress', 'no'))
self.rsync_module = conf.get('rsync_module', '').rstrip('/')
if not self.rsync_module:
self.rsync_module = '{replication_ip}::object'
self.http_timeout = int(conf.get('http_timeout', 60))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
self._next_rcache_update = time.time() + self.stats_interval
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.node_timeout = float(conf.get('node_timeout', 10))
self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.default_headers = {
'Content-Length': '0',
'user-agent': 'object-replicator %s' % os.getpid()}
self.rsync_error_log_line_length = \
int(conf.get('rsync_error_log_line_length', 0))
self.handoffs_first = config_true_value(conf.get('handoffs_first',
False))
self.handoff_delete = config_auto_int_value(
conf.get('handoff_delete', 'auto'), 0)
if any((self.handoff_delete, self.handoffs_first)):
self.logger.warning('Handoff only mode is not intended for normal '
'operation, please disable handoffs_first and '
'handoff_delete before the next '
'normal rebalance')
self.is_multiprocess_worker = None
self._df_router = DiskFileRouter(conf, self.logger)
self._child_process_reaper_queue = queue.LightQueue()
self.rsync_password = \
config_true_value(conf.get('rsync_password', 'no'))
self.rsync_password_file = conf.get('rsync_password_file', '/etc/swift/rsyncd-client.passwd')
def _zero_stats(self):
self.stats_for_dev = defaultdict(Stats)
@property
def total_stats(self):
return sum(self.stats_for_dev.values(), Stats())
def _emplace_log_prefix(self, worker_index):
self.logger.set_prefix("[worker %d/%d pid=%d] " % (
worker_index + 1, # use 1-based indexing for more readable logs
self.replicator_workers,
os.getpid()))
def _get_my_replication_ips(self):
my_replication_ips = set()
ips = whataremyips()
for policy in POLICIES:
self.load_object_ring(policy)
for local_dev in [dev for dev in policy.object_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] == self.port]:
my_replication_ips.add(local_dev['replication_ip'])
return list(my_replication_ips)
def _child_process_reaper(self):
"""
Consume processes from self._child_process_reaper_queue and wait() for
them
"""
procs = set()
done = False
while not done:
timeout = 60 if procs else None
try:
new_proc = self._child_process_reaper_queue.get(
timeout=timeout)
if new_proc is not None:
procs.add(new_proc)
else:
done = True
except queue.Empty:
pass
reaped_procs = set()
for proc in procs:
# this will reap the process if it has exited, but
# otherwise will not wait
if proc.poll() is not None:
reaped_procs.add(proc)
procs -= reaped_procs
def get_worker_args(self, once=False, **kwargs):
if self.replicator_workers < 1:
return []
override_opts = parse_override_options(once=once, **kwargs)
have_overrides = bool(override_opts.devices or override_opts.partitions
or override_opts.policies)
# save this off for ring-change detection later in is_healthy()
self.all_local_devices = self.get_local_devices()
if override_opts.devices:
devices_to_replicate = [
d for d in override_opts.devices
if d in self.all_local_devices]
else:
# The sort isn't strictly necessary since we're just trying to
# spread devices around evenly, but it makes testing easier.
devices_to_replicate = sorted(self.all_local_devices)
# Distribute devices among workers as evenly as possible
self.replicator_workers = min(self.replicator_workers,
len(devices_to_replicate))
return [{'override_devices': devs,
'override_partitions': override_opts.partitions,
'override_policies': override_opts.policies,
'have_overrides': have_overrides,
'multiprocess_worker_index': index}
for index, devs in enumerate(
distribute_evenly(devices_to_replicate,
self.replicator_workers))]
def is_healthy(self):
"""
Check whether our set of local devices remains the same.
If devices have been added or removed, then we return False here so
that we can kill off any worker processes and then distribute the
new set of local devices across a new set of workers so that all
devices are, once again, being worked on.
This function may also cause recon stats to be updated.
:returns: False if any local devices have been added or removed,
True otherwise
"""
# We update recon here because this is the only function we have in
# a multiprocess replicator that gets called periodically in the
# parent process.
if time.time() >= self._next_rcache_update:
update = self.aggregate_recon_update()
dump_recon_cache(update, self.rcache, self.logger)
return self.get_local_devices() == self.all_local_devices
def get_local_devices(self):
"""
Returns a set of all local devices in all replication-type storage
policies.
This is the device names, e.g. "sdq" or "d1234" or something, not
the full ring entries.
"""
ips = whataremyips(self.bind_ip)
local_devices = set()
for policy in POLICIES:
if policy.policy_type != REPL_POLICY:
continue
self.load_object_ring(policy)
for device in policy.object_ring.devs:
if device and is_local_device(
ips, self.port,
device['replication_ip'],
device['replication_port']):
local_devices.add(device['device'])
return local_devices
# Just exists for doc anchor point
def sync(self, node, job, suffixes, *args, **kwargs):
"""
Synchronize local suffix directories from a partition with a remote
node.
:param node: the "dev" entry for the remote node to sync with
:param job: information about the partition being synced
:param suffixes: a list of suffixes which need to be pushed
:returns: boolean and dictionary, boolean indicating success or failure
"""
return self.sync_method(node, job, suffixes, *args, **kwargs)
def load_object_ring(self, policy):
"""
Make sure the policy's rings are loaded.
:param policy: the StoragePolicy instance
:returns: appropriate ring object
"""
policy.load_ring(self.swift_dir)
return policy.object_ring
def _limit_rsync_log(self, line):
"""
If rsync_error_log_line_length is defined then
limit the error to that length
:param line: rsync log line
:return: If enabled the line limited to rsync_error_log_line_length
otherwise the initial line.
"""
if self.rsync_error_log_line_length:
return line[:self.rsync_error_log_line_length]
return line
def _rsync(self, args):
"""
Execute the rsync binary to replicate a partition.
:returns: return code of rsync process. 0 is successful
"""
start_time = time.time()
proc = None
try:
with Timeout(self.rsync_timeout):
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error(
self._limit_rsync_log(
_("Killing long-running rsync: %s") % str(args)))
if proc:
proc.kill()
try:
# Note: Python 2.7's subprocess.Popen class doesn't take
# any arguments for wait(), but Python 3's does.
# However, Eventlet's replacement Popen takes a timeout
# argument regardless of Python version, so we don't
# need any conditional code here.
proc.wait(timeout=1.0)
except subprocess.TimeoutExpired:
# Sometimes a process won't die immediately even after a
# SIGKILL. This can be due to failing disks, high load,
# or other reasons. We can't wait for it forever since
# we're taking up a slot in the (green)thread pool, so
# we send it over to another greenthread, not part of
# our pool, whose sole duty is to wait for child
# processes to exit.
self._child_process_reaper_queue.put(proc)
return 1 # failure response code
total_time = time.time() - start_time
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
if not ret_val:
self.logger.info(result)
else:
self.logger.error(result)
if ret_val:
self.logger.error(
self._limit_rsync_log(
_('Bad rsync return code: %(ret)d <- %(args)s') %
{'args': str(args), 'ret': ret_val}))
else:
log_method = self.logger.info if results else self.logger.debug
log_method(
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
"""
Uses rsync to implement the sync method. This was the first
sync method in Swift.
"""
if not os.path.exists(job['path']):
return False, {}
args = [
'rsync',
'--recursive',
'--whole-file',
'--human-readable',
'--xattrs',
'--itemize-changes',
'--ignore-existing',
'--timeout=%s' % self.rsync_io_timeout,
'--contimeout=%s' % self.rsync_io_timeout,
'--bwlimit=%s' % self.rsync_bwlimit,
'--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
]
if self.rsync_password:
passwd_opt = '--password-file=' + self.rsync_password_file
            args.append(passwd_opt)
if self.rsync_compress and \
job['region'] != node['region']:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
args.append('--compress')
rsync_module = rsync_module_interpolation(self.rsync_module, node)
had_any = False
for suffix in suffixes:
spath = join(job['path'], suffix)
if os.path.exists(spath):
args.append(spath)
had_any = True
if not had_any:
return False, {}
data_dir = get_data_dir(job['policy'])
args.append(join(rsync_module, node['device'],
data_dir, job['partition']))
return self._rsync(args) == 0, {}
def ssync(self, node, job, suffixes, remote_check_objs=None):
return ssync_sender.Sender(
self, node, job, suffixes, remote_check_objs)()
def check_ring(self, object_ring):
"""
Check to see if the ring has been updated
:param object_ring: the ring to check
:returns: boolean indicating whether or not the ring has changed
"""
if time.time() > self.next_check:
self.next_check = time.time() + self.ring_check_interval
if object_ring.has_changed():
return False
return True
def update_deleted(self, job):
"""
High-level method that replicates a single partition that doesn't
belong on this node.
:param job: a dict containing info about the partition to be replicated
"""
def tpool_get_suffixes(path):
return [suff for suff in os.listdir(path)
if len(suff) == 3 and isdir(join(path, suff))]
stats = self.stats_for_dev[job['device']]
stats.attempted += 1
self.logger.increment('partition.delete.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
failure_devs_info = set()
begin = time.time()
handoff_partition_deleted = False
try:
responses = []
suffixes = tpool.execute(tpool_get_suffixes, job['path'])
synced_remote_regions = {}
delete_objs = None
if suffixes:
for node in job['nodes']:
stats.rsync += 1
kwargs = {}
if node['region'] in synced_remote_regions and \
self.conf.get('sync_method', 'rsync') == 'ssync':
kwargs['remote_check_objs'] = \
synced_remote_regions[node['region']]
# candidates is a dict(hash=>timestamp) of objects
# for deletion
success, candidates = self.sync(
node, job, suffixes, **kwargs)
if success:
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'],
node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes), headers=headers)
conn.getresponse().read()
if node['region'] != job['region']:
synced_remote_regions[node['region']] = viewkeys(
candidates)
else:
failure_devs_info.add((node['replication_ip'],
node['device']))
responses.append(success)
for cand_objs in synced_remote_regions.values():
if delete_objs is None:
delete_objs = cand_objs
else:
delete_objs = delete_objs & cand_objs
if self.handoff_delete:
# delete handoff if we have had handoff_delete successes
delete_handoff = len([resp for resp in responses if resp]) >= \
self.handoff_delete
else:
# delete handoff if all syncs were successful
delete_handoff = len(responses) == len(job['nodes']) and \
all(responses)
if delete_handoff:
stats.remove += 1
if (self.conf.get('sync_method', 'rsync') == 'ssync' and
delete_objs is not None):
self.logger.info(_("Removing %s objects"),
len(delete_objs))
_junk, error_paths = self.delete_handoff_objs(
job, delete_objs)
# if replication works for a hand-off device and it failed,
# the remote devices which are target of the replication
# from the hand-off device will be marked. Because cleanup
# after replication failed means replicator needs to
# replicate again with the same info.
if error_paths:
failure_devs_info.update(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
else:
self.delete_partition(job['path'])
handoff_partition_deleted = True
elif not suffixes:
self.delete_partition(job['path'])
handoff_partition_deleted = True
except (Exception, Timeout):
self.logger.exception(_("Error syncing handoff partition"))
stats.add_failure_stats(failure_devs_info)
finally:
target_devs_info = set([(target_dev['replication_ip'],
target_dev['device'])
for target_dev in job['nodes']])
stats.success += len(target_devs_info - failure_devs_info)
if not handoff_partition_deleted:
self.handoffs_remaining += 1
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.delete.timing', begin)
def delete_partition(self, path):
self.logger.info(_("Removing partition: %s"), path)
tpool.execute(shutil.rmtree, path)
def delete_handoff_objs(self, job, delete_objs):
success_paths = []
error_paths = []
for object_hash in delete_objs:
object_path = storage_directory(job['obj_path'], job['partition'],
object_hash)
tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
suffix_dir = dirname(object_path)
try:
os.rmdir(suffix_dir)
success_paths.append(object_path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
error_paths.append(object_path)
self.logger.exception(
"Unexpected error trying to cleanup suffix dir:%r",
suffix_dir)
return success_paths, error_paths
def update(self, job):
"""
High-level method that replicates a single partition.
:param job: a dict containing info about the partition to be replicated
"""
stats = self.stats_for_dev[job['device']]
stats.attempted += 1
self.logger.increment('partition.update.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
target_devs_info = set()
failure_devs_info = set()
begin = time.time()
df_mgr = self._df_router[job['policy']]
try:
hashed, local_hash = tpool.execute(
df_mgr._get_hashes, job['device'],
job['partition'], job['policy'],
do_listdir=_do_listdir(
int(job['partition']),
self.replication_cycle))
stats.suffix_hash += hashed
self.logger.update_stats('suffix.hashes', hashed)
attempts_left = len(job['nodes'])
synced_remote_regions = set()
random.shuffle(job['nodes'])
nodes = itertools.chain(
job['nodes'],
job['policy'].object_ring.get_more_nodes(
int(job['partition'])))
while attempts_left > 0:
# If this throws StopIteration it will be caught way below
node = next(nodes)
target_devs_info.add((node['replication_ip'], node['device']))
attempts_left -= 1
# if we have already synced to this remote region,
# don't sync again on this replication pass
if node['region'] in synced_remote_regions:
continue
try:
with Timeout(self.http_timeout):
resp = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'', headers=headers).getresponse()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
self.logger.error(
_('%(replication_ip)s/%(device)s '
'responded as unmounted'), node)
attempts_left += 1
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
if resp.status != HTTP_OK:
self.logger.error(_("Invalid response %(resp)s "
"from %(ip)s"),
{'resp': resp.status,
'ip': node['replication_ip']})
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
remote_hash = pickle.loads(resp.read())
del resp
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
if not suffixes:
stats.hashmatch += 1
continue
hashed, recalc_hash = tpool.execute(
df_mgr._get_hashes,
job['device'], job['partition'], job['policy'],
recalculate=suffixes)
self.logger.update_stats('suffix.hashes', hashed)
local_hash = recalc_hash
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
stats.rsync += 1
success, _junk = self.sync(node, job, suffixes)
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes),
headers=headers)
conn.getresponse().read()
if not success:
failure_devs_info.add((node['replication_ip'],
node['device']))
# add only remote region when replicate succeeded
if success and node['region'] != job['region']:
synced_remote_regions.add(node['region'])
stats.suffix_sync += len(suffixes)
self.logger.update_stats('suffix.syncs', len(suffixes))
except (Exception, Timeout):
failure_devs_info.add((node['replication_ip'],
node['device']))
self.logger.exception(_("Error syncing with node: %s") %
node)
stats.suffix_count += len(local_hash)
except StopIteration:
self.logger.error('Ran out of handoffs while replicating '
'partition %s of policy %d',
job['partition'], int(job['policy']))
except (Exception, Timeout):
failure_devs_info.update(target_devs_info)
self.logger.exception(_("Error syncing partition"))
finally:
stats.add_failure_stats(failure_devs_info)
stats.success += len(target_devs_info - failure_devs_info)
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.update.timing', begin)
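    # Illustrative sketch of the suffix comparison performed in update() above,
    # using assumed hash values (not from the original module): only suffix
    # directories whose hash differs between the local recalculation and the
    # remote REPLICATE response get rsynced.
    #
    #   local_hash  = {'abc': 'd41d8cd9', '07a': 'ffca1234'}
    #   remote_hash = {'abc': 'd41d8cd9', '07a': '00000000'}
    #   suffixes = [s for s in local_hash
    #               if local_hash[s] != remote_hash.get(s, -1)]
    #   # -> ['07a']  (suffixes missing remotely also mismatch, since get() returns -1)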
def stats_line(self):
"""
Logs various stats for the currently running replication pass.
"""
stats = self.total_stats
replication_count = stats.attempted
if replication_count > self.last_replication_count:
self.last_replication_count = replication_count
elapsed = (time.time() - self.start) or 0.000001
rate = replication_count / elapsed
self.logger.info(
_("%(replicated)d/%(total)d (%(percentage).2f%%)"
" partitions replicated in %(time).2fs (%(rate).2f/sec, "
"%(remaining)s remaining)"),
{'replicated': replication_count, 'total': self.job_count,
'percentage': replication_count * 100.0 / self.job_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' % compute_eta(self.start,
replication_count,
self.job_count)})
self.logger.info(_('%(success)s successes, %(failure)s failures')
% dict(success=stats.success,
failure=stats.failure))
if stats.suffix_count:
self.logger.info(
_("%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced"),
{'checked': stats.suffix_count,
'hashed':
(stats.suffix_hash * 100.0) / stats.suffix_count,
'synced':
(stats.suffix_sync * 100.0) / stats.suffix_count})
self.partition_times.sort()
self.logger.info(
_("Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs"),
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info(
_("Nothing replicated for %s seconds."),
(time.time() - self.start))
def heartbeat(self):
"""
Loop that runs in the background during replication. It periodically
logs progress.
"""
while True:
eventlet.sleep(self.stats_interval)
self.stats_line()
def build_replication_jobs(self, policy, ips, override_devices=None,
override_partitions=None):
"""
        Helper function for collect_jobs to build jobs for replication
        using a replication-style storage policy.
"""
jobs = []
df_mgr = self._df_router[policy]
self.all_devs_info.update(
[(dev['replication_ip'], dev['device'])
for dev in policy.object_ring.devs if dev])
data_dir = get_data_dir(policy)
found_local = False
for local_dev in [dev for dev in policy.object_ring.devs
if (dev
and is_local_device(ips,
self.port,
dev['replication_ip'],
dev['replication_port'])
and (override_devices is None
or dev['device'] in override_devices))]:
found_local = True
local_dev_stats = self.stats_for_dev[local_dev['device']]
try:
dev_path = check_drive(self.devices_dir, local_dev['device'],
self.mount_check)
except ValueError as err:
local_dev_stats.add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
self.logger.warning("%s", err)
continue
obj_path = join(dev_path, data_dir)
tmp_path = join(dev_path, get_tmp_dir(policy))
unlink_older_than(tmp_path, time.time() -
df_mgr.reclaim_age)
if not os.path.exists(obj_path):
try:
mkdirs(obj_path)
except Exception:
self.logger.exception('ERROR creating %s' % obj_path)
continue
for partition in os.listdir(obj_path):
if (override_partitions is not None
and partition not in override_partitions):
continue
if (partition.startswith('auditor_status_') and
partition.endswith('.json')):
# ignore auditor status files
continue
part_nodes = None
try:
job_path = join(obj_path, partition)
part_nodes = policy.object_ring.get_part_nodes(
int(partition))
nodes = [node for node in part_nodes
if node['id'] != local_dev['id']]
jobs.append(
dict(path=job_path,
device=local_dev['device'],
obj_path=obj_path,
nodes=nodes,
delete=len(nodes) > len(part_nodes) - 1,
policy=policy,
partition=partition,
region=local_dev['region']))
except ValueError:
if part_nodes:
local_dev_stats.add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in nodes])
else:
local_dev_stats.add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
continue
if not found_local:
self.logger.error("Can't find itself in policy with index %d with"
" ips %s and with port %s in ring file, not"
" replicating",
int(policy), ", ".join(ips), self.port)
return jobs
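    # Illustrative shape of a single job dict assembled above; every value here
    # is hypothetical, not taken from the original module:
    #
    #   {'path': '/srv/node/sdb1/objects/1234', 'device': 'sdb1',
    #    'obj_path': '/srv/node/sdb1/objects', 'nodes': [...remote nodes...],
    #    'delete': False, 'policy': <POLICIES[0]>, 'partition': '1234',
    #    'region': 1}
    #
    # 'delete' is True only when the local device is not one of the partition's
    # primary nodes, i.e. the locally held partition is a handoff.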
def collect_jobs(self, override_devices=None, override_partitions=None,
override_policies=None):
"""
Returns a sorted list of jobs (dictionaries) that specify the
        partitions, nodes, etc. to be rsynced.
:param override_devices: if set, only jobs on these devices
will be returned
:param override_partitions: if set, only jobs on these partitions
will be returned
:param override_policies: if set, only jobs in these storage
policies will be returned
"""
jobs = []
ips = whataremyips(self.bind_ip)
for policy in POLICIES:
            # Skip replication if next_part_power is set. In this case
            # every object is hard-linked twice, but the replicator can't
            # detect that and would create a second copy of the file if it
            # does not yet exist - and this might double the amount of
            # transferred and stored data.
next_part_power = getattr(
policy.object_ring, 'next_part_power', None)
if next_part_power is not None:
self.logger.warning(
_("next_part_power set in policy '%s'. Skipping"),
policy.name)
continue
if policy.policy_type == REPL_POLICY:
if (override_policies is not None and
policy.idx not in override_policies):
continue
# ensure rings are loaded for policy
self.load_object_ring(policy)
jobs += self.build_replication_jobs(
policy, ips, override_devices=override_devices,
override_partitions=override_partitions)
random.shuffle(jobs)
if self.handoffs_first:
# Move the handoff parts to the front of the list
jobs.sort(key=lambda job: not job['delete'])
self.job_count = len(jobs)
return jobs
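    # Illustration of the handoffs_first ordering above, with made-up jobs
    # (not from the original module): sorting on `not job['delete']` moves
    # handoff jobs (delete=True, key False) ahead of primary jobs
    # (delete=False, key True); the sort is stable, so the preceding shuffle
    # keeps the order random within each group.
    #
    #   jobs = [{'delete': False}, {'delete': True}, {'delete': False}]
    #   jobs.sort(key=lambda job: not job['delete'])
    #   # -> [{'delete': True}, {'delete': False}, {'delete': False}]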
def replicate(self, override_devices=None, override_partitions=None,
override_policies=None, start_time=None):
"""Run a replication pass"""
if start_time is None:
start_time = time.time()
self.start = start_time
self.last_replication_count = 0
self.replication_cycle = (self.replication_cycle + 1) % 10
self.partition_times = []
self.my_replication_ips = self._get_my_replication_ips()
self.all_devs_info = set()
self.handoffs_remaining = 0
stats = eventlet.spawn(self.heartbeat)
eventlet.sleep() # Give spawns a cycle
current_nodes = None
dev_stats = None
num_jobs = 0
try:
self.run_pool = GreenPool(size=self.concurrency)
jobs = self.collect_jobs(override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
for job in jobs:
dev_stats = self.stats_for_dev[job['device']]
num_jobs += 1
current_nodes = job['nodes']
try:
check_drive(self.devices_dir, job['device'],
self.mount_check)
except ValueError as err:
dev_stats.add_failure_stats([
(failure_dev['replication_ip'], failure_dev['device'])
for failure_dev in job['nodes']])
self.logger.warning("%s", err)
continue
if self.handoffs_first and not job['delete']:
# in handoffs first mode, we won't process primary
# partitions until rebalance was successful!
if self.handoffs_remaining:
self.logger.warning(_(
"Handoffs first mode still has handoffs "
"remaining. Aborting current "
"replication pass."))
break
if not self.check_ring(job['policy'].object_ring):
self.logger.info(_("Ring change detected. Aborting "
"current replication pass."))
return
try:
if isfile(job['path']):
# Clean up any (probably zero-byte) files where a
# partition should be.
self.logger.warning(
'Removing partition directory '
'which was a file: %s', job['path'])
os.remove(job['path'])
continue
except OSError:
continue
if job['delete']:
self.run_pool.spawn(self.update_deleted, job)
else:
self.run_pool.spawn(self.update, job)
current_nodes = None
self.run_pool.waitall()
except (Exception, Timeout) as err:
if dev_stats:
if current_nodes:
dev_stats.add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in current_nodes])
else:
dev_stats.add_failure_stats(self.all_devs_info)
self.logger.exception(
_("Exception in top-level replication loop: %s"), err)
finally:
stats.kill()
self.stats_line()
def update_recon(self, total, end_time, override_devices):
# Called at the end of a replication pass to update recon stats.
if self.is_multiprocess_worker:
# If it weren't for the failure_nodes field, we could do this as
# a bunch of shared memory using multiprocessing.Value, which
# would be nice because it'd avoid dealing with existing data
# during an upgrade.
update = {
'object_replication_per_disk': {
od: {'replication_stats':
self.stats_for_dev[od].to_recon(),
'replication_time': total,
'replication_last': end_time,
'object_replication_time': total,
'object_replication_last': end_time}
for od in override_devices}}
else:
update = {'replication_stats': self.total_stats.to_recon(),
'replication_time': total,
'replication_last': end_time,
'object_replication_time': total,
'object_replication_last': end_time}
dump_recon_cache(update, self.rcache, self.logger)
def aggregate_recon_update(self):
per_disk_stats = load_recon_cache(self.rcache).get(
'object_replication_per_disk', {})
recon_update = {}
min_repl_last = float('inf')
min_repl_time = float('inf')
# If every child has reported some stats, then aggregate things.
if all(ld in per_disk_stats for ld in self.all_local_devices):
aggregated = Stats()
for device_name, data in per_disk_stats.items():
aggregated += Stats.from_recon(data['replication_stats'])
min_repl_time = min(
min_repl_time, data['object_replication_time'])
min_repl_last = min(
min_repl_last, data['object_replication_last'])
recon_update['replication_stats'] = aggregated.to_recon()
recon_update['replication_last'] = min_repl_last
recon_update['replication_time'] = min_repl_time
recon_update['object_replication_last'] = min_repl_last
recon_update['object_replication_time'] = min_repl_time
# Clear out entries for old local devices that we no longer have
devices_to_remove = set(per_disk_stats) - set(self.all_local_devices)
if devices_to_remove:
recon_update['object_replication_per_disk'] = {
dtr: {} for dtr in devices_to_remove}
return recon_update
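    # Hypothetical input for the aggregation above (values assumed, not from
    # the original module), with two local devices:
    #
    #   per_disk_stats = {
    #       'sda': {'replication_stats': {...}, 'object_replication_time': 14.2,
    #               'object_replication_last': 1600000300.0},
    #       'sdb': {'replication_stats': {...}, 'object_replication_time': 12.7,
    #               'object_replication_last': 1600000250.0}}
    #
    # The Stats objects are summed, while the *_time and *_last fields take the
    # minimum across disks, i.e. the most conservative "completed by" values.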
def run_once(self, multiprocess_worker_index=None,
have_overrides=False, *args, **kwargs):
if multiprocess_worker_index is not None:
self.is_multiprocess_worker = True
self._emplace_log_prefix(multiprocess_worker_index)
rsync_reaper = eventlet.spawn(self._child_process_reaper)
self._zero_stats()
self.logger.info(_("Running object replicator in script mode."))
override_opts = parse_override_options(once=True, **kwargs)
devices = override_opts.devices or None
partitions = override_opts.partitions or None
policies = override_opts.policies or None
start_time = time.time()
self.replicate(
override_devices=devices,
override_partitions=partitions,
override_policies=policies,
start_time=start_time)
end_time = time.time()
total = (end_time - start_time) / 60
self.logger.info(
_("Object replication complete (once). (%.02f minutes)"), total)
# If we've been manually run on a subset of
# policies/devices/partitions, then our recon stats are not
# representative of how replication is doing, so we don't publish
# them.
if self.is_multiprocess_worker:
# The main process checked for overrides and determined that
# there were none
should_update_recon = not have_overrides
else:
# We are single-process, so update recon only if we worked on
# everything
should_update_recon = not (partitions or devices or policies)
if should_update_recon:
self.update_recon(total, end_time, devices)
# Give rsync processes one last chance to exit, then bail out and
# let them be init's problem
self._child_process_reaper_queue.put(None)
rsync_reaper.wait()
def run_forever(self, multiprocess_worker_index=None,
override_devices=None, *args, **kwargs):
if multiprocess_worker_index is not None:
self.is_multiprocess_worker = True
self._emplace_log_prefix(multiprocess_worker_index)
self.logger.info(_("Starting object replicator in daemon mode."))
eventlet.spawn_n(self._child_process_reaper)
# Run the replicator continually
while True:
self._zero_stats()
self.logger.info(_("Starting object replication pass."))
# Run the replicator
start = time.time()
self.replicate(override_devices=override_devices)
end = time.time()
total = (end - start) / 60
self.logger.info(
_("Object replication complete. (%.02f minutes)"), total)
self.update_recon(total, end, override_devices)
self.logger.debug('Replication sleeping for %s seconds.',
self.interval)
sleep(self.interval)
def post_multiprocess_run(self):
# This method is called after run_once using multiple workers.
update = self.aggregate_recon_update()
dump_recon_cache(update, self.rcache, self.logger)
| 44.816236 | 101 | 0.544119 |
793f4fa9330aef58dd327dbb81b7deb52469c87b | 11,887 | py | Python | data/scripts/scenarios.py | natkaratkova/covid19_scenarios | 40e646ef23453a2b7555529523deb9f18cc5a88d | [
"MIT"
] | null | null | null | data/scripts/scenarios.py | natkaratkova/covid19_scenarios | 40e646ef23453a2b7555529523deb9f18cc5a88d | [
"MIT"
] | null | null | null | data/scripts/scenarios.py | natkaratkova/covid19_scenarios | 40e646ef23453a2b7555529523deb9f18cc5a88d | [
"MIT"
] | null | null | null | import sys
import csv
import os
import json
import numpy as np
import multiprocessing as multi
import yaml
from uuid import uuid4
sys.path.append('..')
import generated.types as schema
from datetime import datetime
from scipy.stats import linregress
from paths import TMP_CASES, BASE_PATH, JSON_DIR, FIT_PARAMETERS, SCHEMA_SCENARIOS
from scripts.tsv import parse as parse_tsv
from scripts.model import fit_population
from jsonschema import validate, FormatChecker
##
mitigation_colors = {
"School Closures": "#7fc97f",
"Social Distancing": "#beaed4",
"Lock-down": "#fdc086",
"Shut-down": "#ffff99",
"Case Isolation": "#386cb0",
"Contact Tracing": "#f0027f",
"Intervention #1": "#bf5b17",
"Intervention #2": "#666666",
}
# ------------------------------------------------------------------------
# Globals
SCENARIO_POPS = os.path.join(BASE_PATH, "populationData.tsv")
FIT_CASE_DATA = {}
from scripts.default_schema_values import DEFAULTS
# ------------------------------------------------------------------------
# Fallback data fitter
class Fitter:
doubling_time = 3.0
serial_interval = 6.0
fixed_slope = np.log(2)/doubling_time
cases_on_tMin = 10
under_reporting = 5
delay = 15
fatality_rate = 0.01
def slope_to_r0(self, slope):
return 1 + slope*self.serial_interval
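    # Worked example (not part of the original script) using the class
    # defaults above:
    #   doubling_time = 3.0 days  ->  fixed_slope = ln(2) / 3 ~ 0.231 per day
    #   slope_to_r0(0.231) = 1 + 0.231 * 6.0 ~ 2.39
    # i.e. a 3-day doubling time with a 6-day serial interval corresponds to
    # an R0 of roughly 2.4.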
def fit(self, pop):
# ----------------------------------
# Internal functions
def fit_cumulative(t, y):
good_ind = (y > 3) & (y < 500)
t_subset = t[good_ind]
logy_subset = np.log(y[good_ind])
num_data = good_ind.sum()
if num_data > 10:
res = linregress(t_subset, logy_subset)
return {"intercept" : res.intercept,
"slope" : res.slope,
'rvalue' : res.rvalue}
elif num_data > 4:
intercept = logy_subset.mean() - t_subset.mean()*self.fixed_slope
return {"intercept" : intercept,
"slope" : 1.0*self.fixed_slope,
'rvalue' : np.nan}
else:
return None
def to_ms(time):
return datetime.strptime(time[:10], "%Y-%m-%d").toordinal()
def from_ms(time):
d = datetime.fromordinal(int(time))
return f"{d.year:04d}-{d.month:02d}-{d.day:02d}"
# ----------------------------------
# Body
data = np.array([ ([to_ms(dp['time']), dp['cases'] or np.nan, dp['deaths'] or np.nan]) for dp in pop ])
        # Try to fit on deaths first
p = fit_cumulative(data[:,0], data[:,2])
if p and p["slope"] > 0:
tMin = (np.log(self.cases_on_tMin * self.fatality_rate) - p["intercept"]) / p["slope"] - self.delay
return {'tMin': from_ms(tMin), 'initialCases': self.cases_on_tMin, 'r0':self.slope_to_r0(p["slope"])}
else: # If no death, fit on case counts
p = fit_cumulative(data[:,0], data[:,1])
if p and p["slope"] > 0:
tMin = (np.log(self.cases_on_tMin)/self.under_reporting - p["intercept"]) / p["slope"]
return {'tMin': from_ms(tMin), 'initialCases': self.cases_on_tMin, 'r0':self.slope_to_r0(p["slope"])}
return None
# ------------------------------------------------------------------------
# Parameter class constructors (with defaults)
class DateRange(schema.DateRange):
def __init__(self, tMin, tMax):
return super(DateRange, self).__init__( \
t_min = tMin,
t_max = tMax)
class MitigationInterval(schema.MitigationInterval):
def __init__(self, name='Intervention', tMin=None, tMax=None, id='', color='#cccccc', mitigationValue=0):
return super(MitigationInterval, self).__init__( \
color = color,
id = id,
mitigation_value = mitigationValue,
name = name,
time_range = DateRange(tMin, tMax))
class PopulationParams(schema.PopulationData):
def __init__(self, region, country, population, beds, icus, cases_key):
return super(PopulationParams, self).__init__( \
cases=cases_key,
country=country,
hospital_beds=int(beds),
icu_beds=int(icus),
imports_per_day=0.1,
population_served=int(population),
initial_number_of_cases=int(round(FIT_CASE_DATA[region]['initialCases']
if region in FIT_CASE_DATA else Fitter.cases_on_tMin)))
class EpidemiologicalParams(schema.EpidemiologicalData):
def __init__(self, region, hemisphere):
default = DEFAULTS["EpidemiologicalData"]
if hemisphere:
if hemisphere == 'Northern':
default['seasonal_forcing'] = 0.0
default['peak_month'] = 0
elif hemisphere == 'Southern':
default['seasonal_forcing'] = 0.0
default['peak_month'] = 6
elif hemisphere == 'Tropical':
default['seasonal_forcing'] = 0.0
default['peak_month'] = 6
else:
print(f'Error: Could not parse hemisphere for {region} in scenarios.py')
return super(EpidemiologicalParams, self).__init__( \
infectious_period = default["infectiousPeriod"],
latency_time = default["latencyTime"],
length_hospital_stay = default["lengthHospitalStay"],
length_icu_stay = default["lengthICUStay"],
overflow_severity = default["overflowSeverity"],
peak_month = default["peakMonth"],
r0 = float(max(1, round(FIT_CASE_DATA[region]['r0'], 1)) if region in FIT_CASE_DATA else default["r0"]),
seasonal_forcing = default["seasonalForcing"])
class ContainmentParams(schema.ContainmentData):
def __init__(self):
default = DEFAULTS["ContainmentData"]
return super(ContainmentParams, self).__init__([], default["numberPoints"])
class SimulationParams(schema.SimulationData):
def __init__(self, region):
return super(SimulationParams, self).__init__( \
simulation_time_range = DateRange( \
datetime.strptime(FIT_CASE_DATA[region]['tMin'] if region in FIT_CASE_DATA else "2020-03-01", '%Y-%m-%d').date(),
datetime.strptime("2020-09-01", '%Y-%m-%d').date()),
number_stochastic_runs = 0.0)
# TODO: Region and country provide redundant information
# Condense the information into one field.
class AllParams(schema.AllParams):
def __init__(self, region, country, population, beds, icus, hemisphere, srcPopulation, srcHospitalBeds, srcICUBeds, cases_key):
#self.sources = {'populationServed': srcPopulation, 'hospitalBeds': srcHospitalBeds, 'ICUBeds': srcICUBeds }
return super(AllParams, self).__init__( \
ContainmentParams(),
EpidemiologicalParams(region, hemisphere),
PopulationParams(region, country, population, beds, icus, cases_key),
SimulationParams(region)
)
# ------------------------------------------------------------------------
# Functions
def marshalJSON(obj, wtr=None):
""" Validate and store data to .json file
Arguments:
- obj: a dict of allParams
"""
if wtr is None:
return json.dumps(obj, default=lambda x: x.__dict__, sort_keys=True, indent=0)
newdata = []
for k in obj:
newdata.append({'country': k, 'allParams': obj[k].to_dict()})
newdata.sort(key = lambda x:x['country'])
# Serialize into json
news = json.dumps(newdata, default=lambda x: x.__dict__, sort_keys=True, indent=0)
# Validate the dict based on the json
with open(os.path.join(BASE_PATH, SCHEMA_SCENARIOS), "r") as f:
schema = yaml.load(f, Loader=yaml.FullLoader)
validate(json.loads(news), schema, format_checker=FormatChecker())
return wtr.write(news)
def fit_one_case_data(args):
Params = Fitter()
region, data = args
r = fit_population(region)
if r is None:
return (region, Params.fit(data))
param = {"tMin": r['tMin'], "r0": np.exp(r['params'].rates.logR0), "initialCases": r["initialCases"]}
return (region, param)
def fit_all_case_data(num_procs=4):
pool = multi.Pool(num_procs)
case_counts = parse_tsv()
results = pool.map(fit_one_case_data, list(case_counts.items()))
for k, v in results:
if v is not None:
FIT_CASE_DATA[k] = v
def set_mitigation(cases, scenario):
valid_cases = [c for c in cases if c['cases'] is not None]
if len(valid_cases)==0:
scenario.containment.mitigation_intervals = []
return
case_counts = np.array([c['cases'] for c in valid_cases])
levelOne = np.where(case_counts > min(max(5, 1e-4*scenario.population.population_served),10000))[0]
levelTwo = np.where(case_counts > min(max(50, 1e-3*scenario.population.population_served),50000))[0]
levelOneVal = round(1 - np.minimum(0.8, 1.8/scenario.epidemiological.r0), 1)
levelTwoVal = round(1 - np.minimum(0.4, 0.5), 1)
for name, level, val in [("Intervention #1", levelOne, levelOneVal), ('Intervention #2', levelTwo, levelTwoVal)]:
if len(level):
level_idx = level[0]
cutoff_str = valid_cases[level_idx]["time"][:10]
cutoff = datetime.strptime(cutoff_str, '%Y-%m-%d').toordinal()
scenario.containment.mitigation_intervals.append(MitigationInterval(
name=name,
tMin=datetime.strptime(cutoff_str, '%Y-%m-%d').date(),
id=uuid4(),
tMax=scenario.simulation.simulation_time_range.t_max,
color=mitigation_colors.get(name, "#cccccc"),
mitigationValue=round(100*val)))
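# Worked example of the thresholds above (numbers assumed, not part of the
# original script), for a population of 1,000,000 served:
#   levelOne trigger: min(max(5, 1e-4 * 1e6), 10000) = 100 reported cases
#   levelTwo trigger: min(max(50, 1e-3 * 1e6), 50000) = 1000 reported cases
# With r0 = 2.4, levelOneVal = round(1 - min(0.8, 1.8 / 2.4), 1) = 0.2, so
# "Intervention #1" would be added with a mitigation value of 20 (percent).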
# ------------------------------------------------------------------------
# Main point of entry
def generate(output_json, num_procs=1, recalculate=False):
scenario = {}
fit_fname = os.path.join(BASE_PATH,FIT_PARAMETERS)
if recalculate or (not os.path.isfile(fit_fname)):
fit_all_case_data(num_procs)
with open(fit_fname, 'w') as fh:
json.dump(FIT_CASE_DATA, fh)
else:
with open(fit_fname, 'r') as fh:
tmp = json.load(fh)
for k,v in tmp.items():
FIT_CASE_DATA[k] = v
case_counts = parse_tsv()
with open(SCENARIO_POPS, 'r') as fd:
rdr = csv.reader(fd, delimiter='\t')
hdr = next(rdr)
idx = {'name' : hdr.index('name'),
'size' : hdr.index('populationServed'),
'ages' : hdr.index('ageDistribution'),
'beds' : hdr.index('hospitalBeds'),
'icus' : hdr.index('ICUBeds'),
'hemisphere' : hdr.index('hemisphere'),
'srcPopulation' : hdr.index('srcPopulation'),
'srcHospitalBeds' : hdr.index('srcHospitalBeds'),
'srcICUBeds' : hdr.index('srcICUBeds')}
args = ['name', 'ages', 'size', 'beds', 'icus', 'hemisphere', 'srcPopulation', 'srcHospitalBeds', 'srcICUBeds']
for region in rdr:
region_name = region[idx['name']]
entry = [region[idx[arg]] for arg in args]
scenario[region_name] = AllParams(*entry, region_name if region_name in case_counts else 'None')
if region_name in case_counts:
set_mitigation(case_counts[region_name], scenario[region_name])
else:
scenario[region_name].containment.mitigation_intervals = []
with open(output_json, "w+") as fd:
marshalJSON(scenario, fd)
if __name__ == '__main__':
generate()
| 38.846405 | 133 | 0.581223 |
793f516d4699261499753318831fcad4ce3feaad | 10,597 | py | Python | tests/morph_colors_extractor2/create_tests_white_dunes.py | duhnnie/3-dreams-of-black | 15aded97f57a82e5a4c95c4e74bcd603b3fc6e1e | [
"Apache-2.0"
] | 475 | 2015-01-02T07:49:46.000Z | 2022-03-17T04:01:47.000Z | tests/morph_colors_extractor2/create_tests_white_dunes.py | duhnnie/3-dreams-of-black | 15aded97f57a82e5a4c95c4e74bcd603b3fc6e1e | [
"Apache-2.0"
] | 3 | 2015-03-06T10:51:03.000Z | 2019-09-10T19:39:39.000Z | tests/morph_colors_extractor2/create_tests_white_dunes.py | duhnnie/3-dreams-of-black | 15aded97f57a82e5a4c95c4e74bcd603b3fc6e1e | [
"Apache-2.0"
] | 130 | 2015-01-15T02:08:21.000Z | 2021-12-20T19:15:22.000Z | import glob
import os.path
# ##################################################################
# Config
# ##################################################################
JSFILES = "results/*.js"
HTMLPATH = "html"
# ##################################################################
# Templates
# ##################################################################
TEMPLATE_HTML = """\
<!DOCTYPE HTML>
<html>
<head>
<title>three.js webgl - %(title)s - dunes</title>
<style type="text/css">
body {
font-family: Monospace;
background-color: #fff;
color: #000;
margin: 0px;
overflow: hidden;
}
</style>
<script type="text/javascript" src="js/Three.js"></script>
<script type="text/javascript" src="js/AnimalRandomSoup.js"></script>
<script type="text/javascript" src="js/Detector.js"></script>
<script type="text/javascript" src="js/RequestAnimationFrame.js"></script>
</head>
<body>
<script>
if ( ! Detector.webgl ) Detector.addGetWebGLMessage();
var container;
var camera, scene, renderer;
var morphObject;
var postprocessing = {};
var SCREEN_HEIGHT = window.innerHeight;
var SCREEN_WIDTH = window.innerWidth;
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
camera = new THREE.Camera( 45, window.innerWidth / window.innerHeight, 1, 2000 );
camera.position.y = 20;
camera.position.z = 150;
scene = new THREE.Scene();
scene.addLight( new THREE.AmbientLight( 0x333333 ) );
var light;
light = new THREE.DirectionalLight( 0xffffff, 1.25 );
light.position.set( 0, 1, 1 );
scene.addLight( light );
renderer = new THREE.WebGLRenderer( { antialias: true, clearColor: 0xffffff, clearAlpha: 0 } );
renderer.setSize( window.innerWidth, window.innerHeight );
renderer.autoClear = false;
container.appendChild( renderer.domElement );
initPostprocessingNoise( postprocessing );
var loader = new THREE.JSONLoader();
loader.load( { model: "../%(fname)s", callback: addAnimal } );
};
function addAnimal( geometry ) {
morphObject = ROME.Animal( geometry, true );
var mesh = morphObject.mesh;
mesh.rotation.set( 0, -0.75, 0 );
//mesh.position.set( 0, -100, 0 );
mesh.matrixAutoUpdate = false;
mesh.updateMatrix();
mesh.update();
scene.addChild( mesh );
cameraDistance = 500;
cameraHeight = 100;
cameraDistance = mesh.boundRadius * 3;
//cameraHeight = mesh.boundRadius * 0.1;
camera.position.set( 0, cameraHeight, cameraDistance );
camera.target.position.set( 0, 0, 0 );
var nameA = morphObject.availableAnimals[ 0 ],
nameB = morphObject.availableAnimals[ 0 ];
morphObject.play( nameA, nameB );
morphObject.animalA.timeScale = morphObject.animalB.timeScale = 0.05;
};
var delta, time, oldTime = new Date().getTime();
function updateMorph( delta ) {
if ( morphObject ) {
THREE.AnimationHandler.update( delta );
}
};
function animate() {
requestAnimationFrame( animate );
time = new Date().getTime();
delta = time - oldTime;
oldTime = time;
if ( morphObject ) {
//morphObject.mesh.rotation.y += -0.01;
//morphObject.mesh.updateMatrix();
}
updateMorph( delta );
render();
};
function initPostprocessingNoise( effect ) {
effect.type = "noise";
effect.scene = new THREE.Scene();
effect.camera = new THREE.Camera();
effect.camera.projectionMatrix = THREE.Matrix4.makeOrtho( SCREEN_WIDTH / - 2, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2, SCREEN_HEIGHT / - 2, -10000, 10000 );
effect.camera.position.z = 100;
effect.texture = new THREE.WebGLRenderTarget( SCREEN_WIDTH, SCREEN_HEIGHT, { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter } );
effect.texture2 = new THREE.WebGLRenderTarget( SCREEN_WIDTH, SCREEN_HEIGHT, { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter } );
var film_shader = THREE.ShaderUtils.lib["film"];
var film_uniforms = THREE.UniformsUtils.clone( film_shader.uniforms );
film_uniforms["tDiffuse"].texture = effect.texture;
effect.materialFilm = new THREE.MeshShaderMaterial( { uniforms: film_uniforms, vertexShader: film_shader.vertexShader, fragmentShader: film_shader.fragmentShader } );
effect.materialFilm.uniforms.grayscale.value = 0;
var heatUniforms = {
"map": { type: "t", value:0, texture: effect.texture },
"screenWidth": { type: "f", value: SCREEN_WIDTH },
"screenHeight": { type: "f", value: SCREEN_HEIGHT },
"vingenettingOffset": { type: "f", value: 1.2 },
"vingenettingDarkening": { type: "f", value: 0.64 },
"colorOffset": { type: "f", value: 0 },
"colorFactor": { type: "f", value: 0 },
"colorBrightness": { type: "f", value: 0 },
"sampleDistance": { type: "f", value: 0.4 },
"waveFactor": { type: "f", value: 0.00756 },
"colorA": { type: "v3", value: new THREE.Vector3( 1, 1, 1 ) },
"colorB": { type: "v3", value: new THREE.Vector3( 1, 1, 1 ) },
"colorC": { type: "v3", value: new THREE.Vector3( 1, 1, 1 ) }
};
effect.materialHeat = new THREE.MeshShaderMaterial( {
uniforms: heatUniforms,
vertexShader: [
"varying vec2 vUv;",
"void main() {",
"vUv = vec2( uv.x, 1.0 - uv.y );",
"gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );",
"}"
].join("\\n"),
fragmentShader: [
"uniform float screenWidth;",
"uniform float screenHeight;",
"uniform float vingenettingOffset;",
"uniform float vingenettingDarkening;",
"uniform float colorOffset;",
"uniform float colorFactor;",
"uniform float sampleDistance;",
"uniform float colorBrightness;",
"uniform float waveFactor;",
"uniform vec3 colorA;",
"uniform sampler2D map;",
"varying vec2 vUv;",
"void main() {",
"vec4 color, org, tmp, add;",
"float sample_dist, f;",
"vec2 vin;",
"vec2 uv = vUv;",
"add = color = org = texture2D( map, uv );",
"vin = (uv - vec2(0.5)) * vec2(4.0);",
"sample_dist =(dot( vin, vin ) * 2.0);",
"f = (1.86 + sample_dist) * sampleDistance * 0.5;",
"vec2 sampleSize = vec2( 1.0 / screenWidth, 1.0 / screenHeight ) * vec2(f);",
"add += tmp = texture2D( map, uv + vec2(0.111964, 0.993712) * sampleSize);",
"if( tmp.b < color.b ) color = tmp;",
"add += tmp = texture2D( map, uv + vec2(0.846724, 0.532032) * sampleSize);",
"if( tmp.b < color.b ) color = tmp;",
"add += tmp = texture2D( map, uv + vec2(0.943883, -0.330279) * sampleSize);",
"if( tmp.b < color.b ) color = tmp;",
"add += tmp = texture2D( map, uv + vec2(0.330279, -0.943883) * sampleSize);",
"if( tmp.b < color.b ) color = tmp;",
"add += tmp = texture2D( map, uv + vec2(-0.532032, -0.846724) * sampleSize);",
"if( tmp.b < color.b ) color = tmp;",
"add += tmp = texture2D( map, uv + vec2(-0.993712, -0.111964) * sampleSize);",
"if( tmp.b < color.b ) color = tmp;",
"add += tmp = texture2D( map, uv + vec2(-0.707107, 0.707107) * sampleSize);",
"if( tmp.b < color.b ) color = tmp;",
"uv = (uv - vec2(0.5)) * vec2( 0.94/* vingenettingOffset*/ );",
// "color = color + (add / vec4(8.0) - color) * (vec4(1.0) - vec4(sample_dist * 0.1));",
"color = (add / vec4(8.0));",
"gl_FragColor = vec4( mix(color.rgb, color.ggg * colorFactor - vec3( vingenettingDarkening ), vec3( dot( uv, uv ))), 1.0 );",
"gl_FragColor = vec4(1.0) - (vec4(1.0) - gl_FragColor) * (vec4(1.0) - gl_FragColor);",
"}"
].join("\\n")
} );
effect.quad = new THREE.Mesh( new THREE.Plane( SCREEN_WIDTH, SCREEN_HEIGHT ), effect.materialFilm );
effect.quad.position.z = -500;
effect.scene.addObject( effect.quad );
}
function render() {
renderer.clear();
//renderer.render( scene, camera );
renderer.render( scene, camera, postprocessing.texture, true );
postprocessing.materialFilm.uniforms.time.value += 0.01 * delta;
//postprocessing.materialHeat.uniforms.time.value += 0.01 * delta;
// HEAT => NOISE
postprocessing.quad.materials[ 0 ] = postprocessing.materialHeat;
postprocessing.materialHeat.uniforms.map.texture = postprocessing.texture;
renderer.render( postprocessing.scene, postprocessing.camera );
//renderer.render( postprocessing.scene, postprocessing.camera, postprocessing.texture2 );
postprocessing.quad.materials[ 0 ] = postprocessing.materialFilm;
postprocessing.materialFilm.uniforms.tDiffuse.texture = postprocessing.texture2;
}
</script>
</body>
</html>
"""
TEMPLATE_HTML_INDEX = """\
<!DOCTYPE HTML>
<html>
<head>
<title>rome - animals - dunes</title>
<style type="text/css">
body {
background-color: #fff;
color: #000;
margin: 0px;
padding: 1em;
text-align:left;
}
a { color:#000; font-size:1.25em; text-decoration:none }
#links { float:left; width:9%% }
#animals { border: 0; float:left; width:90%%; height:95%%; background:#fff }
</style>
</head>
<body>
<div id="links">
%(links)s
</div>
<iframe id="animals"></iframe>
</body>
</html>
"""
TEMPLATE_LINK = """<a href="#" onclick="document.getElementById('animals').src = 'white_dunes_%s.html';">%s</a>"""
# ##################################################################
# Utils
# ##################################################################
def write_file(name, content):
f = open(name, "w")
f.write(content)
f.close()
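# Illustration of the mapping performed by the main block below, with a
# hypothetical input file (not part of the original script):
#   results/parrot.js  ->  html/white_dunes_parrot.html  (link label "parrot")
# Finally, html/white_dunes.html is written as an index page with one link per
# generated test page.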
# ##################################################################
# Main
# ##################################################################
if __name__ == "__main__":
jsfiles = sorted(glob.glob(JSFILES))
links = []
for jsname in jsfiles:
fname = os.path.splitext(jsname)[0]
bname = os.path.basename(fname)
htmlname = "white_dunes_%s.html" % bname
htmlpath = os.path.join(HTMLPATH, htmlname)
content = TEMPLATE_HTML % {
"fname" : jsname.replace("\\","/"),
"title" : bname
}
write_file(htmlpath, content)
links.append( bname )
links_string = TEMPLATE_HTML_INDEX % {
"links" : "<br/>".join(TEMPLATE_LINK % (x, x) for x in links)
}
linkspath = os.path.join(HTMLPATH, "white_dunes.html")
write_file( linkspath, links_string ) | 26.426434 | 170 | 0.58224 |
793f518135929dcbaae53ea02568aef584a8727d | 3,160 | py | Python | tensorflow_model_analysis/eval_saved_model/example_trainers/fixed_prediction_estimator.py | robertwb/model-analysis | e081016a1c81f17057ad3714f867832632899504 | [
"Apache-2.0"
] | 1 | 2019-03-24T15:09:56.000Z | 2019-03-24T15:09:56.000Z | tensorflow_model_analysis/eval_saved_model/example_trainers/fixed_prediction_estimator.py | robertwb/model-analysis | e081016a1c81f17057ad3714f867832632899504 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_analysis/eval_saved_model/example_trainers/fixed_prediction_estimator.py | robertwb/model-analysis | e081016a1c81f17057ad3714f867832632899504 | [
"Apache-2.0"
] | 1 | 2020-04-12T14:29:27.000Z | 2020-04-12T14:29:27.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exports a simple "fixed prediction" estimator using tf.Learn.
This model always predicts the value of the "prediction" feature.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model.example_trainers import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
def simple_fixed_prediction_estimator(export_path, eval_export_path):
"""Exports a simple fixed prediction estimator."""
def model_fn(features, labels, mode, params):
"""Model function for custom estimator."""
del params
predictions = features['prediction']
predictions_dict = {
prediction_keys.PredictionKeys.PREDICTIONS: predictions,
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_dict,
export_outputs={
tf.saved_model.signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.RegressionOutput(predictions)
})
loss = tf.losses.mean_squared_error(predictions, labels)
train_op = tf.assign_add(tf.train.get_global_step(), 1)
eval_metric_ops = {
metric_keys.MetricKeys.LOSS_MEAN: tf.metrics.mean(loss),
}
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
predictions=predictions_dict,
eval_metric_ops=eval_metric_ops)
def train_input_fn():
"""Train input function."""
return {
'prediction': tf.constant([[1.0], [2.0], [3.0], [4.0]]),
}, tf.constant([[1.0], [2.0], [3.0], [4.0]]),
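  # Note (not part of the original module): with train_input_fn above the
  # 'prediction' feature equals the labels ([1, 2, 3, 4] vs [1, 2, 3, 4]), so
  # the mean squared error computed in model_fn is 0.0 and the single training
  # step only increments the global step; the exported model simply echoes its
  # 'prediction' input.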
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=train_input_fn, steps=1)
feature_spec = {'prediction': tf.FixedLenFeature([1], dtype=tf.float32)}
eval_feature_spec = {
'prediction': tf.FixedLenFeature([1], dtype=tf.float32),
'label': tf.FixedLenFeature([1], dtype=tf.float32),
}
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=(
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec)),
eval_input_receiver_fn=export.build_parsing_eval_input_receiver_fn(
eval_feature_spec, label_key='label'),
export_path=export_path,
eval_export_path=eval_export_path)
| 34.725275 | 76 | 0.721203 |
793f525142b70949e9399d08be8c605a23ae6c27 | 6,924 | py | Python | interp2D.py | jsasaki-utokyo/model2roms | f2dbe56cd4c3ecfbb88177ed0ff34db48c148b8d | [
"MIT"
] | null | null | null | interp2D.py | jsasaki-utokyo/model2roms | f2dbe56cd4c3ecfbb88177ed0ff34db48c148b8d | [
"MIT"
] | null | null | null | interp2D.py | jsasaki-utokyo/model2roms | f2dbe56cd4c3ecfbb88177ed0ff34db48c148b8d | [
"MIT"
] | 1 | 2021-11-27T06:37:53.000Z | 2021-11-27T06:37:53.000Z | from __future__ import print_function
import datetime
import logging
import extrapolate as ex
import numpy as np
try:
import ESMF
except ImportError:
logging.error("[M2R_interp2D] Could not find module ESMF")
pass
__author__ = 'Trond Kristiansen'
__email__ = '[email protected]'
__created__ = datetime.datetime(2008, 12, 4)
__modified__ = datetime.datetime(2021, 3, 26)
__version__ = "1.6"
__status__ = "Development"
def laplacefilter(field, threshold, toxi, toeta):
undef = 2.0e+35
tx = 0.9 * undef
critx = 0.01
cor = 1.6
mxs = 10
field = np.where(abs(field) > threshold, undef, field)
field = ex.extrapolate.fill(int(1), int(toxi),
int(1), int(toeta),
float(tx), float(critx), float(cor), float(mxs),
np.asarray(field, order='F'),
int(toxi),
int(toeta))
return field
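# Note (not part of the original module): rough picture of laplacefilter(),
# assuming extrapolate.fill behaves as its arguments suggest. Values with
# abs(value) > threshold are first set to undef (2.0e+35); the fill routine
# then iteratively (up to mxs = 10 sweeps, relaxation factor cor = 1.6)
# replaces those undefined cells with values extrapolated from their defined
# neighbours, so masked points carry smooth values before the caller applies
# the ROMS mask again.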
def do_hor_interpolation_regular_grid(confM2R, mydata, myvar):
if confM2R.show_progress is True:
try:
import progressbar
widgets = ['\rHorizontal interpolation:', progressbar.Percentage(), progressbar.Bar()]
progress = progressbar.ProgressBar(confM2R.grdMODEL.nlevels, widgets=widgets).start()
except ImportError:
logging.error("[M2R_interp2D] Could not find module progressbar")
confM2R.show_progress = False
pass
index_roms, toxi, toeta, mymask = setup_indexes(confM2R, myvar)
array1 = np.zeros(index_roms, dtype=np.float)
# 2D or 3D interpolation
depth_levels = confM2R.grdMODEL.nlevels
if myvar in ['ssh', 'ageice', 'uice', 'vice', 'aice', 'hice', 'snow_thick', 'hs']:
depth_levels = 1
for k in range(depth_levels):
if confM2R.use_esmf:
if depth_levels == 1:
indata = np.squeeze(mydata[:, :])
else:
indata = np.squeeze(mydata[k, :, :])
            # We interpolate to RHO fields for all variables and later interpolate the RHO points to U and V points.
            # But input data are read on U, V and RHO grids if they differ (as NorESM and GLORYS do).
if myvar in ['uice']:
confM2R.grdMODEL.fieldSrc_rho.data[:, :] = np.flipud(np.rot90(indata))
field = confM2R.grdROMS.regridSrc2Dst_u(confM2R.grdMODEL.fieldSrc_rho, confM2R.grdROMS.fieldDst_u)
elif myvar in ['vice']:
confM2R.grdMODEL.fieldSrc_rho.data[:, :] = np.flipud(np.rot90(indata))
field = confM2R.grdROMS.regridSrc2Dst_v(confM2R.grdMODEL.fieldSrc_rho, confM2R.grdROMS.fieldDst_v)
else:
confM2R.grdMODEL.fieldSrc_rho.data[:, :] = np.flipud(np.rot90(indata))
field = confM2R.grdROMS.regridSrc2Dst_rho(confM2R.grdMODEL.fieldSrc_rho, confM2R.grdROMS.fieldDst_rho)
# Since ESMF uses coordinates (x,y) we need to rotate and flip to get back to (y,x) order.
field = np.fliplr(np.rot90(field.data, 3))
if confM2R.use_filter and myvar not in ['aice','hice','ageice']:
field = laplacefilter(field, 1000, toxi, toeta)
field = field * mymask
array1[k, :, :] = field
if k in [2, 0] and False is True:
import plotData
import matplotlib.pyplot as plt
plotData.contourMap(confM2R.grdROMS, confM2R.grdROMS.lon_rho, confM2R.grdROMS.lat_rho, field,
str(k) + '_withfilter', myvar)
plotfilename = "test_{}_wfilter.png".format(myvar)
plt.savefig(plotfilename, dpi=150)
if confM2R.show_progress is True:
progress.update(k)
if confM2R.show_progress is True:
progress.finish()
return array1
def setup_indexes(confM2R, myvar):
if myvar in ["uice"]:
indexROMS_Z_ST = (confM2R.grdMODEL.nlevels, confM2R.grdROMS.eta_u, confM2R.grdROMS.xi_u)
toxi = confM2R.grdROMS.xi_u
toeta = confM2R.grdROMS.eta_u
mymask = confM2R.grdROMS.mask_u
elif myvar in ["vice"]:
indexROMS_Z_ST = (confM2R.grdMODEL.nlevels, confM2R.grdROMS.eta_v, confM2R.grdROMS.xi_v)
toxi = confM2R.grdROMS.xi_v
toeta = confM2R.grdROMS.eta_v
mymask = confM2R.grdROMS.mask_v
else:
indexROMS_Z_ST = (confM2R.grdMODEL.nlevels, confM2R.grdROMS.eta_rho, confM2R.grdROMS.xi_rho)
toxi = confM2R.grdROMS.xi_rho
toeta = confM2R.grdROMS.eta_rho
mymask = confM2R.grdROMS.mask_rho
return indexROMS_Z_ST, toxi, toeta, mymask
def setup_ESMF_interpolation_weights(confM2R):
if confM2R.use_esmf:
logging.info(
"[M2R_interp2D] => Creating the interpolation weights and indexes using ESMF (this may take some time....):")
logging.info("[M2R_interp2D] -> Source field src at RHO points")
confM2R.grdMODEL.fieldSrc_rho = ESMF.Field(confM2R.grdMODEL.esmfgrid, "fieldSrc",
staggerloc=ESMF.StaggerLoc.CENTER)
logging.info("[M2R_interp2D] -> Destination field src at RHO, u, and v points")
confM2R.grdROMS.fieldDst_rho = ESMF.Field(confM2R.grdROMS.esmfgrid, "fieldDst",
staggerloc=ESMF.StaggerLoc.CENTER)
confM2R.grdROMS.fieldDst_u = ESMF.Field(confM2R.grdROMS.esmfgrid_u, "fieldDst",
staggerloc=ESMF.StaggerLoc.CENTER)
confM2R.grdROMS.fieldDst_v = ESMF.Field(confM2R.grdROMS.esmfgrid_v, "fieldDst",
staggerloc=ESMF.StaggerLoc.CENTER)
logging.info("[M2R_interp2D] -> regridSrc2Dst from RHO to U, V and RHO points")
confM2R.grdROMS.regridSrc2Dst_rho = ESMF.Regrid(confM2R.grdMODEL.fieldSrc_rho,
confM2R.grdROMS.fieldDst_rho,
regrid_method=ESMF.RegridMethod.BILINEAR,
unmapped_action=ESMF.UnmappedAction.IGNORE)
confM2R.grdROMS.regridSrc2Dst_u = ESMF.Regrid(confM2R.grdMODEL.fieldSrc_rho,
confM2R.grdROMS.fieldDst_u,
regrid_method=ESMF.RegridMethod.BILINEAR,
unmapped_action=ESMF.UnmappedAction.IGNORE)
confM2R.grdROMS.regridSrc2Dst_v = ESMF.Regrid(confM2R.grdMODEL.fieldSrc_rho,
confM2R.grdROMS.fieldDst_v,
regrid_method=ESMF.RegridMethod.BILINEAR,
unmapped_action=ESMF.UnmappedAction.IGNORE)
| 43.275 | 121 | 0.589977 |
793f52634034db09a1026145b87cf7c97e8f6a23 | 3,411 | py | Python | components/studio/api/serializers.py | ScilifelabDataCentre/stackn | 00a65a16ff271f04548b3ff475c72dacbfd916df | [
"Apache-2.0"
] | null | null | null | components/studio/api/serializers.py | ScilifelabDataCentre/stackn | 00a65a16ff271f04548b3ff475c72dacbfd916df | [
"Apache-2.0"
] | null | null | null | components/studio/api/serializers.py | ScilifelabDataCentre/stackn | 00a65a16ff271f04548b3ff475c72dacbfd916df | [
"Apache-2.0"
] | null | null | null | from rest_framework.serializers import ModelSerializer
from models.models import Model, ModelLog, Metadata
from reports.models import Report, ReportGenerator
from projects.models import Project, Volume
from deployments.models import DeploymentInstance, DeploymentDefinition
from datasets.models import Dataset, FileModel
from experiments.models import Experiment
from labs.models import Session
from django.contrib.auth.models import User
class MLModelSerializer(ModelSerializer):
class Meta:
model = Model
fields = (
'id', 'uid', 'name', 'description', 'resource', 'url', 'uploaded_at', 'project', 'status', 'version')
class ModelLogSerializer(ModelSerializer):
class Meta:
model = ModelLog
fields = (
'id', 'run_id', 'trained_model', 'project', 'training_started_at', 'execution_time', 'code_version',
'current_git_repo', 'latest_git_commit', 'system_details', 'cpu_details', 'training_status')
class MetadataSerializer(ModelSerializer):
class Meta:
model = Metadata
fields = (
'id', 'run_id', 'trained_model', 'project', 'model_details', 'parameters', 'metrics')
class DeploymentDefinitionSerializer(ModelSerializer):
class Meta:
model = DeploymentDefinition
fields = (
'id', 'project','name', 'bucket','filename','path_predict')
class DeploymentInstanceSerializer(ModelSerializer):
class Meta:
model = DeploymentInstance
fields = ('id','deployment', 'model', 'access', 'path', 'endpoint', 'created_at')
class ReportSerializer(ModelSerializer):
class Meta:
model = Report
fields = (
'id', 'model', 'description', 'created_at', 'report', 'job_id', 'generator', 'status')
class ReportGeneratorSerializer(ModelSerializer):
class Meta:
model = ReportGenerator
fields = (
'id', 'project', 'description', 'generator', 'visualiser', 'created_at')
class ProjectSerializer(ModelSerializer):
class Meta:
model = Project
fields = (
'id', 'name', 'description', 'slug', 'owner', 'authorized', 'image', 'project_key', 'project_secret', 'updated_at',
'created_at', 'repository', 'repository_imported')
class LabSessionSerializer(ModelSerializer):
class Meta:
model = Session
fields = (
'id', 'name', 'slug', 'project', 'lab_session_owner', 'flavor_slug', 'environment_slug', 'status',
'created_at', 'updated_at')
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = ['id', 'username']
class DatasetSerializer(ModelSerializer):
class Meta:
model = Dataset
fields = ['id', 'name', 'version', 'release_type', 'description',
'bucket', 'project_slug', 'files', 'created_by', 'created_on', 'datasheet']
class FileModelSerializer(ModelSerializer):
class Meta:
model = FileModel
fields = ['id', 'name', 'bucket']
class VolumeSerializer(ModelSerializer):
class Meta:
model = Volume
fields = ['id', 'name', 'slug', 'size', 'settings', 'created_by', 'created_on', 'updated_on']
class ExperimentSerializer(ModelSerializer):
class Meta:
model = Experiment
fields = ['id', 'username', 'command', 'environment', 'project', 'schedule', 'created_at', 'uploaded_at'] | 34.11 | 127 | 0.652888 |
793f527ce99807276d93cb9dc120c52c326a7255 | 1,601 | py | Python | common/src/stack/command/stack/commands/remove/appliance/firewall/__init__.py | shivanshs9/stacki | 258740748281dfe89b0f566261eaf23102f91aa4 | [
"BSD-3-Clause"
] | null | null | null | common/src/stack/command/stack/commands/remove/appliance/firewall/__init__.py | shivanshs9/stacki | 258740748281dfe89b0f566261eaf23102f91aa4 | [
"BSD-3-Clause"
] | null | null | null | common/src/stack/command/stack/commands/remove/appliance/firewall/__init__.py | shivanshs9/stacki | 258740748281dfe89b0f566261eaf23102f91aa4 | [
"BSD-3-Clause"
] | null | null | null | # @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
from stack.exception import ArgRequired, CommandError
class Command(stack.commands.remove.appliance.command):
"""
Remove a firewall service rule for an appliance type.
To remove the rule, you must supply the name of the rule.
<arg type='string' name='appliance' repeat='1'>
Name of an appliance type (e.g., "backend").
</arg>
<param type='string' name='rulename' optional='0'>
Name of the Appliance-specific rule
</param>
"""
def run(self, params, args):
if len(args) == 0:
raise ArgRequired(self, 'appliance')
(rulename, ) = self.fillParams([ ('rulename', None, True) ])
for appliance in self.getApplianceNames(args):
# Make sure our rule exists
if self.db.count("""
(*) from appliance_firewall
where name=%s and appliance=(
select id from appliances where name=%s
)""", (rulename, appliance)
) == 0:
raise CommandError(
self,
f'firewall rule {rulename} does not '
f'exist for appliance {appliance}'
)
# It exists, so delete it
self.db.execute("""
delete from appliance_firewall
where name=%s and appliance=(
select id from appliances where name=%s
)
""", (rulename, appliance))
| 27.603448 | 71 | 0.686446 |
793f528d0e5cd75e4dfe98336de4cb68acf63e75 | 10,677 | py | Python | flair/models/sequence_tagger_utils/viterbi.py | adriensas/flair | f01b0e7ff9a87d3862acae50aeaffdc8e8b8ac21 | [
"MIT"
] | 1 | 2022-02-06T04:04:27.000Z | 2022-02-06T04:04:27.000Z | flair/models/sequence_tagger_utils/viterbi.py | adriensas/flair | f01b0e7ff9a87d3862acae50aeaffdc8e8b8ac21 | [
"MIT"
] | null | null | null | flair/models/sequence_tagger_utils/viterbi.py | adriensas/flair | f01b0e7ff9a87d3862acae50aeaffdc8e8b8ac21 | [
"MIT"
] | null | null | null | from typing import Tuple
import numpy as np
import torch
import torch.nn
from torch.nn.functional import softmax
from torch.nn.utils.rnn import pack_padded_sequence
import flair
from flair.data import Dictionary, Label, List
START_TAG: str = "<START>"
STOP_TAG: str = "<STOP>"
class ViterbiLoss(torch.nn.Module):
"""
Calculates the loss for each sequence up to its length t.
"""
def __init__(self, tag_dictionary: Dictionary):
"""
:param tag_dictionary: tag_dictionary of task
"""
super(ViterbiLoss, self).__init__()
self.tag_dictionary = tag_dictionary
self.tagset_size = len(tag_dictionary)
self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)
self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)
def forward(self, features_tuple: tuple, targets: torch.Tensor) -> torch.Tensor:
"""
Forward propagation of Viterbi Loss
:param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),
lengths of sentences in batch, transitions from CRF
:param targets: true tags for sentences which will be converted to matrix indices.
:return: average Viterbi Loss over batch size
"""
features, lengths, transitions = features_tuple
batch_size = features.size(0)
seq_len = features.size(1)
targets, targets_matrix_indices = self._format_targets(targets, lengths)
targets_matrix_indices = torch.tensor(targets_matrix_indices, dtype=torch.long).unsqueeze(2).to(flair.device)
# scores_at_targets[range(features.shape[0]), lengths.values -1]
# Squeeze crf scores matrices in 1-dim shape and gather scores at targets by matrix indices
scores_at_targets = torch.gather(features.view(batch_size, seq_len, -1), 2, targets_matrix_indices)
scores_at_targets = pack_padded_sequence(scores_at_targets, lengths, batch_first=True)[0]
transitions_to_stop = transitions[
np.repeat(self.stop_tag, features.shape[0]),
[target[length - 1] for target, length in zip(targets, lengths)],
]
gold_score = scores_at_targets.sum() + transitions_to_stop.sum()
scores_upto_t = torch.zeros(batch_size, self.tagset_size, device=flair.device)
for t in range(max(lengths)):
batch_size_t = sum(
[length > t for length in lengths]
) # since batch is ordered, we can save computation time by reducing our effective batch_size
if t == 0:
# Initially, get scores from <start> tag to all other tags
scores_upto_t[:batch_size_t] = (
scores_upto_t[:batch_size_t] + features[:batch_size_t, t, :, self.start_tag]
)
else:
# We add scores at current timestep to scores accumulated up to previous timestep, and log-sum-exp
# Remember, the cur_tag of the previous timestep is the prev_tag of this timestep
scores_upto_t[:batch_size_t] = self._log_sum_exp(
features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t].unsqueeze(1), dim=2
)
all_paths_scores = self._log_sum_exp(scores_upto_t + transitions[self.stop_tag].unsqueeze(0), dim=1).sum()
viterbi_loss = all_paths_scores - gold_score
return viterbi_loss
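    # Note (not part of the original module): forward() thus returns
    # all_paths_scores - gold_score, the log-partition over all tag paths
    # minus the score of the gold path, i.e. the negative log-likelihood of
    # the true tag sequences under the CRF, summed over the batch.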
@staticmethod
def _log_sum_exp(tensor, dim):
"""
Calculates the log-sum-exponent of a tensor's dimension in a numerically stable way.
:param tensor: tensor
:param dim: dimension to calculate log-sum-exp of
:return: log-sum-exp
"""
m, _ = torch.max(tensor, dim)
m_expanded = m.unsqueeze(dim).expand_as(tensor)
return m + torch.log(torch.sum(torch.exp(tensor - m_expanded), dim))
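    # Worked example (not part of the original module), assuming a 1-D tensor
    # t = [1000.0, 1001.0] reduced over dim 0: a naive log(sum(exp(t)))
    # overflows in float32, while taking m = 1001 gives
    # 1001 + log(exp(-1) + exp(0)) ~ 1001.3133.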
def _format_targets(self, targets: torch.Tensor, lengths: torch.IntTensor):
"""
Formats targets into matrix indices.
        CRF scores contain, per sentence and per token, a (tagset_size x tagset_size) matrix with the emission score
        for token j plus the transition score from the previous token i. This means that, if we think of our rows as
        "to tag" and our columns as "from tag", the matrix cell [10,5] would contain the emission score for tag 10
        plus the transition score from previous tag 5, and could directly be addressed through the 1-dim index
        (10 + tagset_size * 5) = 70, if our tagset consists of 12 tags.
:param targets: targets as in tag dictionary
:param lengths: lengths of sentences in batch
"""
targets_per_sentence = []
targets_list = targets.tolist()
for cut in lengths:
targets_per_sentence.append(targets_list[:cut])
targets_list = targets_list[cut:]
for t in targets_per_sentence:
t += [self.tag_dictionary.get_idx_for_item(STOP_TAG)] * (int(lengths.max().item()) - len(t))
matrix_indices = list(
map(
lambda s: [self.tag_dictionary.get_idx_for_item(START_TAG) + (s[0] * self.tagset_size)]
+ [s[i] + (s[i + 1] * self.tagset_size) for i in range(0, len(s) - 1)],
targets_per_sentence,
)
)
return targets_per_sentence, matrix_indices
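    # Worked example (not part of the original module), assuming
    # tagset_size = 12, a START_TAG index of 10 and a per-sentence target
    # sequence s = [3, 5, 7]:
    #   matrix_indices = [10 + 3 * 12, 3 + 5 * 12, 5 + 7 * 12] = [46, 63, 89]
    # i.e. each entry is previous_tag + current_tag * tagset_size, the position
    # of that pair in the flattened per-token score matrix, which lets
    # forward() gather the gold-path scores with a single torch.gather call.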
class ViterbiDecoder:
"""
Decodes a given sequence using the Viterbi algorithm.
"""
def __init__(self, tag_dictionary: Dictionary):
"""
:param tag_dictionary: Dictionary of tags for sequence labeling task
"""
self.tag_dictionary = tag_dictionary
self.tagset_size = len(tag_dictionary)
self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)
self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)
def decode(self, features_tuple: tuple, probabilities_for_all_classes: bool) -> Tuple[List, List]:
"""
Decoding function returning the most likely sequence of tags.
:param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),
lengths of sentence in batch, transitions of CRF
:param probabilities_for_all_classes: whether to return probabilities for all tags
:return: decoded sequences
"""
features, lengths, transitions = features_tuple
all_tags = []
batch_size = features.size(0)
seq_len = features.size(1)
# Create a tensor to hold accumulated sequence scores at each current tag
scores_upto_t = torch.zeros(batch_size, seq_len + 1, self.tagset_size).to(flair.device)
# Create a tensor to hold back-pointers
# i.e., indices of the previous_tag that corresponds to maximum accumulated score at current tag
# Let pads be the <end> tag index, since that was the last tag in the decoded sequence
backpointers = (
torch.ones((batch_size, seq_len + 1, self.tagset_size), dtype=torch.long, device=flair.device)
* self.stop_tag
)
for t in range(seq_len):
batch_size_t = sum([length > t for length in lengths]) # effective batch size (sans pads) at this timestep
terminates = [i for i, length in enumerate(lengths) if length == t + 1]
if t == 0:
scores_upto_t[:batch_size_t, t] = features[:batch_size_t, t, :, self.start_tag]
backpointers[:batch_size_t, t, :] = (
torch.ones((batch_size_t, self.tagset_size), dtype=torch.long) * self.start_tag
)
else:
# We add scores at current timestep to scores accumulated up to previous timestep, and
# choose the previous timestep that corresponds to the max. accumulated score for each current timestep
scores_upto_t[:batch_size_t, t], backpointers[:batch_size_t, t, :] = torch.max(
features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t, t - 1].unsqueeze(1), dim=2
)
# If sentence is over, add transition to STOP-tag
if terminates:
scores_upto_t[terminates, t + 1], backpointers[terminates, t + 1, :] = torch.max(
scores_upto_t[terminates, t].unsqueeze(1) + transitions[self.stop_tag].unsqueeze(0), dim=2
)
# Decode/trace best path backwards
decoded = torch.zeros((batch_size, backpointers.size(1)), dtype=torch.long, device=flair.device)
pointer = torch.ones((batch_size, 1), dtype=torch.long, device=flair.device) * self.stop_tag
for t in list(reversed(range(backpointers.size(1)))):
decoded[:, t] = torch.gather(backpointers[:, t, :], 1, pointer).squeeze(1)
pointer = decoded[:, t].unsqueeze(1)
# Sanity check
assert torch.equal(
decoded[:, 0], torch.ones((batch_size), dtype=torch.long, device=flair.device) * self.start_tag
)
# remove start-tag and backscore to stop-tag
scores_upto_t = scores_upto_t[:, :-1, :]
decoded = decoded[:, 1:]
        # Softmax over tag scores, then take the max, to get a confidence score for the predicted label of each token
scores = softmax(scores_upto_t, dim=2)
confidences = torch.max(scores, dim=2)
tags = []
for tag_seq, tag_seq_conf, length_seq in zip(decoded, confidences.values, lengths):
tags.append(
[
Label(self.tag_dictionary.get_item_for_index(tag), conf.item())
for tag, conf in list(zip(tag_seq, tag_seq_conf))[:length_seq]
]
)
if probabilities_for_all_classes:
all_tags = self._all_scores_for_token(scores, lengths)
return tags, all_tags
def _all_scores_for_token(self, scores: torch.Tensor, lengths: torch.IntTensor):
"""
Returns all scores for each tag in tag dictionary.
        :param scores: Softmax scores for each sentence in the batch.
        :param lengths: Lengths of the sentences in the batch.
        """
scores = scores.numpy()
prob_tags_per_sentence = []
for scores_sentence, length in zip(scores, lengths):
scores_sentence = scores_sentence[:length]
prob_tags_per_sentence.append(
[
[
Label(self.tag_dictionary.get_item_for_index(score_id), score)
for score_id, score in enumerate(score_dist)
]
for score_dist in scores_sentence
]
)
return prob_tags_per_sentence
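# Usage sketch added for illustration (the tensor shapes follow the decode()
# docstring above; the helper itself and its random inputs are editorial
# assumptions, not part of the original module). The tag dictionary must
# already contain START_TAG and STOP_TAG entries.
def _viterbi_decoder_usage_sketch(tag_dictionary: Dictionary):
    batch_size, seq_len = 2, 5
    tagset_size = len(tag_dictionary)
    # CRF scores: (batch size, seq len, tagset size, tagset size)
    features = torch.randn(batch_size, seq_len, tagset_size, tagset_size).to(flair.device)
    lengths = torch.tensor([5, 3], dtype=torch.int)
    transitions = torch.randn(tagset_size, tagset_size).to(flair.device)
    decoder = ViterbiDecoder(tag_dictionary)
    tags, all_tags = decoder.decode(
        (features, lengths, transitions), probabilities_for_all_classes=False)
    return tags, all_tags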
| 43.938272 | 119 | 0.631263 |
793f52dc26724b68376dd0adda737ab5129343a6 | 48 | py | Python | sitesearch/api/wsgi.py | lanceleonard/redis-sitesearch | 7115d5a5b470bf716d1e7d56c8e8fd311138b601 | ["MIT"] | 8,217 | 2015-03-06T19:30:57.000Z | 2022-03-30T14:54:36.000Z | sitesearch/api/wsgi.py | lanceleonard/redis-sitesearch | 7115d5a5b470bf716d1e7d56c8e8fd311138b601 | ["MIT"] | 1,637 | 2015-03-06T21:27:17.000Z | 2022-03-31T06:27:19.000Z | sitesearch/api/wsgi.py | lanceleonard/redis-sitesearch | 7115d5a5b470bf716d1e7d56c8e8fd311138b601 | ["MIT"] | 1,064 | 2015-03-07T15:32:24.000Z | 2022-03-25T17:23:40.000Z |
from .app import create_app
app = create_app()
| 12 | 27 | 0.75 |
793f53bb3a3c705d6ae8a4ef52204a87ed2beae2 | 1,258 | py | Python | setup.py | miss-tais/djsettings | d345a42f4fd565b1666cefcc0a39d6ef201c3452 | ["BSD-3-Clause"] | null | null | null | setup.py | miss-tais/djsettings | d345a42f4fd565b1666cefcc0a39d6ef201c3452 | ["BSD-3-Clause"] | null | null | null | setup.py | miss-tais/djsettings | d345a42f4fd565b1666cefcc0a39d6ef201c3452 | ["BSD-3-Clause"] | null | null | null |
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='djsettings',
version='0.1',
packages=[
'djsettings'
],
include_package_data=True,
license='BSD License',
description='Django app for changing settings in Admin panel.',
long_description=README,
url='https://github.com/miss-tais/djsettings',
author='Taisiya Astapenko',
author_email='[email protected]',
install_requires=[
'django>=2.2',
'six'
],
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
],
)
 | 30.682927 | 78 | 0.618442 |
793f53cec431c41e054ee8da73e0cea3274bbb8f | 15,249 | py | Python | template/rdchiral/main.py | sw32-seo/GTA | 86b102a14b78f6c8b50d742a56445c748e59b51e | ["MIT"] | 5 | 2021-09-30T16:28:48.000Z | 2022-03-30T05:20:27.000Z | template/rdchiral/main.py | sw32-seo/GTA | 86b102a14b78f6c8b50d742a56445c748e59b51e | ["MIT"] | null | null | null | template/rdchiral/main.py | sw32-seo/GTA | 86b102a14b78f6c8b50d742a56445c748e59b51e | ["MIT"] | null | null | null |
from __future__ import print_function
import sys
import os
import rdkit.Chem as Chem
import rdkit.Chem.AllChem as AllChem
from rdkit.Chem.rdchem import ChiralType, BondType, BondDir
from rdchiral.utils import vprint
from rdchiral.initialization import rdchiralReaction, rdchiralReactants
from rdchiral.chiral import template_atom_could_have_been_tetra, copy_chirality, atom_chirality_matches
from rdchiral.clean import canonicalize_outcome_smiles, combine_enantiomers_into_racemic
def rdchiralRunText(reaction_smarts, reactant_smiles, **kwargs):
'''Run from SMARTS string and SMILES string. This is NOT recommended
for library application, since initialization is pretty slow. You should
separately initialize the template and molecules and call run()'''
rxn = rdchiralReaction(reaction_smarts)
reactants = rdchiralReactants(reactant_smiles)
return rdchiralRun(rxn, reactants, **kwargs)
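def _rdchiral_reuse_sketch(reaction_smarts, reactant_smiles_list):
    '''Illustrative sketch added by the editor (not part of the original
    rdchiral API): it shows the recommended pattern from the docstring above --
    build the rdchiralReaction template once and reuse it across many reactant
    molecules, instead of re-parsing everything via rdchiralRunText per call.'''
    rxn = rdchiralReaction(reaction_smarts)        # parse the SMARTS once
    results = []
    for smi in reactant_smiles_list:
        reactants = rdchiralReactants(smi)         # cheap per-molecule setup
        results.append(rdchiralRun(rxn, reactants))
    return results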
def rdchiralRun(rxn, reactants, keep_isotopes=False, combine_enantiomers=True):
'''
    rxn = rdchiralReaction (rdkit reaction + auxiliary information)
    reactants = rdchiralReactants (rdkit mol + auxiliary information)
note: there is a fair amount of initialization (assigning stereochem), most
importantly assigning isotope numbers to the reactant atoms. It is
HIGHLY recommended to use the custom classes for initialization.
'''
final_outcomes = set()
# We need to keep track of what map numbers
# (i.e., isotopes) correspond to which atoms
# note: all reactant atoms must be mapped, so this is safe
atoms_r = reactants.atoms_r
# Copy reaction template so we can play around with isotopes
template_r, template_p = rxn.template_r, rxn.template_p
    # Get molAtomMapNum->atom dictionary for template reactants and products
atoms_rt_map = rxn.atoms_rt_map
atoms_pt_map = rxn.atoms_pt_map
###############################################################################
# Run naive RDKit on ACHIRAL version of molecules
outcomes = rxn.rxn.RunReactants((reactants.reactants_achiral,))
vprint(2, 'Using naive RunReactants, {} outcomes', len(outcomes))
if not outcomes:
return []
###############################################################################
for outcome in outcomes:
###############################################################################
# Look for new atoms in products that were not in
# reactants (e.g., LGs for a retro reaction)
vprint(2, 'Processing {}', str([Chem.MolToSmiles(x, True) for x in outcome]))
unmapped = 900
for m in outcome:
for a in m.GetAtoms():
# Assign "map" number via isotope
if not a.GetIsotope():
a.SetIsotope(unmapped)
unmapped += 1
vprint(2, 'Added {} map numbers to product', unmapped-900)
###############################################################################
###############################################################################
# Check to see if reactants should not have been matched (based on chirality)
# Define isotope -> reactant template atom map
atoms_rt = {a.GetIsotope(): atoms_rt_map[a.GetIntProp('old_mapno')] \
for m in outcome for a in m.GetAtoms() if a.HasProp('old_mapno')}
# Set isotopes of reactant template
# note: this is okay to do within the loop, because ALL atoms must be matched
# in the templates, so the isotopes will get overwritten every time
[a.SetIsotope(i) for (i, a) in atoms_rt.items()]
# Make sure each atom matches
if not all(atom_chirality_matches(atoms_rt[i], atoms_r[i]) for i in atoms_rt):
vprint(2, 'Chirality violated! Should not have gotten this match')
continue
vprint(2, 'Chirality matches! Just checked with atom_chirality_matches')
# Check bond chirality
#TODO: add bond chirality considerations to exclude improper matches
###############################################################################
###############################################################################
# Convert product(s) to single product so that all
# reactions can be treated as pseudo-intramolecular
# But! check for ring openings mistakenly split into multiple
# This can be diagnosed by duplicate map numbers (i.e., SMILES)
isotopes = [a.GetIsotope() for m in outcome for a in m.GetAtoms() if a.GetIsotope()]
if len(isotopes) != len(set(isotopes)): # duplicate?
vprint(1, 'Found duplicate isotopes in product - need to stitch')
# need to do a fancy merge
merged_mol = Chem.RWMol(outcome[0])
merged_iso_to_id = {a.GetIsotope(): a.GetIdx() for a in outcome[0].GetAtoms() if a.GetIsotope()}
for j in range(1, len(outcome)):
new_mol = outcome[j]
for a in new_mol.GetAtoms():
if a.GetIsotope() not in merged_iso_to_id:
merged_iso_to_id[a.GetIsotope()] = merged_mol.AddAtom(a)
for b in new_mol.GetBonds():
bi = b.GetBeginAtom().GetIsotope()
bj = b.GetEndAtom().GetIsotope()
                    vprint(10, 'stitching bond between {} and {} in stitch has chirality {}, {}'.format(
bi, bj, b.GetStereo(), b.GetBondDir()
))
if not merged_mol.GetBondBetweenAtoms(
merged_iso_to_id[bi], merged_iso_to_id[bj]):
merged_mol.AddBond(merged_iso_to_id[bi],
merged_iso_to_id[bj], b.GetBondType())
merged_mol.GetBondBetweenAtoms(
merged_iso_to_id[bi], merged_iso_to_id[bj]
).SetStereo(b.GetStereo())
merged_mol.GetBondBetweenAtoms(
merged_iso_to_id[bi], merged_iso_to_id[bj]
).SetBondDir(b.GetBondDir())
outcome = merged_mol.GetMol()
vprint(1, 'Merged editable mol, converted back to real mol, {}', Chem.MolToSmiles(outcome, True))
else:
new_outcome = outcome[0]
for j in range(1, len(outcome)):
new_outcome = AllChem.CombineMols(new_outcome, outcome[j])
outcome = new_outcome
vprint(2, 'Converted all outcomes to single molecules')
###############################################################################
###############################################################################
# Figure out which atoms were matched in the templates
# atoms_rt and atoms_p will be outcome-specific.
atoms_pt = {a.GetIsotope(): atoms_pt_map[a.GetIntProp('old_mapno')] \
for a in outcome.GetAtoms() if a.HasProp('old_mapno')}
atoms_p = {a.GetIsotope(): a for a in outcome.GetAtoms() if a.GetIsotope()}
# Set isotopes of product template
# note: this is okay to do within the loop, because ALL atoms must be matched
# in the templates, so the isotopes will get overwritten every time
# This makes it easier to check parity changes
[a.SetIsotope(i) for (i, a) in atoms_pt.items()]
###############################################################################
###############################################################################
# Check for missing bonds. These are bonds that are present in the reactants,
# not specified in the reactant template, and not in the product. Accidental
# fragmentation can occur for intramolecular ring openings
missing_bonds = []
for (i, j, b) in reactants.bonds_by_isotope:
if i in atoms_p and j in atoms_p:
# atoms from reactant bond show up in product
if not outcome.GetBondBetweenAtoms(atoms_p[i].GetIdx(), atoms_p[j].GetIdx()):
#...but there is not a bond in the product between those atoms
if i not in atoms_rt or j not in atoms_rt or not template_r.GetBondBetweenAtoms(atoms_rt[i].GetIdx(), atoms_rt[j].GetIdx()):
# the reactant template did not specify a bond between those atoms (e.g., intentionally destroy)
missing_bonds.append((i, j, b))
if missing_bonds:
vprint(1, 'Product is missing non-reacted bonds that were present in reactants!')
outcome = Chem.RWMol(outcome)
rwmol_iso_to_id = {a.GetIsotope(): a.GetIdx() for a in outcome.GetAtoms() if a.GetIsotope()}
for (i, j, b) in missing_bonds:
outcome.AddBond(rwmol_iso_to_id[i], rwmol_iso_to_id[j])
new_b = outcome.GetBondBetweenAtoms(rwmol_iso_to_id[i], rwmol_iso_to_id[j])
new_b.SetBondType(b.GetBondType())
new_b.SetBondDir(b.GetBondDir())
new_b.SetIsAromatic(b.GetIsAromatic())
outcome = outcome.GetMol()
else:
vprint(3, 'No missing bonds')
###############################################################################
# Now that we've fixed any bonds, connectivity is set. This is a good time
        # to update the property cache, since all that is left is fixing atom/bond
# stereochemistry.
try:
outcome.UpdatePropertyCache()
except ValueError as e:
vprint(1, '{}, {}'.format(Chem.MolToSmiles(outcome, True), e))
continue
###############################################################################
# Correct tetra chirality in the outcome
for a in outcome.GetAtoms():
# Participants in reaction core (from reactants) will have old_mapno
# Spectators present in reactants will have react_atom_idx
# ...so new atoms will have neither!
if not a.HasProp('old_mapno'):
# Not part of the reactants template
if not a.HasProp('react_atom_idx'):
# Atoms only appear in product template - their chirality
# should be properly instantiated by RDKit...hopefully...
vprint(4, 'Atom {} created by product template, should have right chirality', a.GetIsotope())
else:
vprint(4, 'Atom {} outside of template, copy chirality from reactants', a.GetIsotope())
copy_chirality(atoms_r[a.GetIsotope()], a)
else:
# Part of reactants and reaction core
if template_atom_could_have_been_tetra(atoms_rt[a.GetIsotope()]):
vprint(3, 'Atom {} was in rct template (could have been tetra)', a.GetIsotope())
if template_atom_could_have_been_tetra(atoms_pt[a.GetIsotope()]):
vprint(3, 'Atom {} in product template could have been tetra, too', a.GetIsotope())
# Was the product template specified?
if atoms_pt[a.GetIsotope()].GetChiralTag() == ChiralType.CHI_UNSPECIFIED:
# No, leave unspecified in product
vprint(3, '...but it is not specified in product, so destroy chirality')
a.SetChiralTag(ChiralType.CHI_UNSPECIFIED)
else:
# Yes
vprint(3, '...and product is specified')
# Was the reactant template specified?
if atoms_rt[a.GetIsotope()].GetChiralTag() == ChiralType.CHI_UNSPECIFIED:
# No, so the reaction introduced chirality
vprint(3, '...but reactant template was not, so copy from product template')
copy_chirality(atoms_pt[a.GetIsotope()], a)
else:
# Yes, so we need to check if chirality should be preserved or inverted
vprint(3, '...and reactant template was, too! copy from reactants')
copy_chirality(atoms_r[a.GetIsotope()], a)
if not atom_chirality_matches(atoms_pt[a.GetIsotope()], atoms_rt[a.GetIsotope()]):
vprint(3, 'but! reactant template and product template have opposite stereochem, so invert')
a.InvertChirality()
else:
# Reactant template chiral, product template not - the
# reaction is supposed to destroy chirality, so leave
# unspecified
vprint(3, 'If reactant template could have been ' +
                            'chiral, but the product template could not, then we don\'t need ' +
'to worry about specifying product atom chirality')
else:
vprint(3, 'Atom {} could not have been chiral in reactant template', a.GetIsotope())
if not template_atom_could_have_been_tetra(atoms_pt[a.GetIsotope()]):
vprint(3, 'Atom {} also could not have been chiral in product template', a.GetIsotope())
vprint(3, '...so, copy chirality from reactant instead')
copy_chirality(atoms_r[a.GetIsotope()], a)
else:
vprint(3, 'Atom could/does have product template chirality!', a.GetIsotope())
vprint(3, '...so, copy chirality from product template')
copy_chirality(atoms_pt[a.GetIsotope()], a)
vprint(3, 'New chiral tag {}', a.GetChiralTag())
vprint(2, 'After attempting to re-introduce chirality, outcome = {}',
Chem.MolToSmiles(outcome, True))
###############################################################################
###############################################################################
# Correct bond directionality in the outcome
# TODO
# Clear isotope
if not keep_isotopes:
[a.SetIsotope(0) for a in outcome.GetAtoms()]
# Canonicalize
smiles = canonicalize_outcome_smiles(outcome)
if smiles is not None:
final_outcomes.add(smiles)
###############################################################################
# One last fix for consolidating multiple stereospecified products...
if combine_enantiomers:
final_outcomes = combine_enantiomers_into_racemic(final_outcomes)
###############################################################################
return list(final_outcomes)
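# Option sketch (added for illustration; both keyword names come from the
# rdchiralRun signature above, everything else is hypothetical):
# keep_isotopes=True retains the atom-map-as-isotope labels on product atoms,
# and combine_enantiomers=False returns enantiomeric outcomes separately
# instead of merging them into a racemic SMILES.
#
#   rxn = rdchiralReaction(reaction_smarts)
#   reactants = rdchiralReactants(reactant_smiles)
#   mapped_outcomes = rdchiralRun(
#       rxn, reactants, keep_isotopes=True, combine_enantiomers=False)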
if __name__ == '__main__':
reaction_smarts = '[C:1][OH:2]>>[C:1][O:2][C]'
reactant_smiles = 'CC(=O)OCCCO'
outcomes = rdchiralRunText(reaction_smarts, reactant_smiles)
print(outcomes)
| 49.833333 | 144 | 0.543314 |
793f55e90cd151aa6bbf69518cca52fb63a369f6 | 146 | py | Python | helios/discounts/admin.py | panosl/helios | 22ceb736709aaa336def81d801797d72321a737e | ["BSD-3-Clause"] | 2 | 2021-01-26T02:37:19.000Z | 2021-12-02T14:15:22.000Z | helios/discounts/admin.py | panosl/helios | 22ceb736709aaa336def81d801797d72321a737e | ["BSD-3-Clause"] | null | null | null | helios/discounts/admin.py | panosl/helios | 22ceb736709aaa336def81d801797d72321a737e | ["BSD-3-Clause"] | null | null | null |
from django.contrib import admin
from helios.discounts.models import CategoryPercentageDiscount
admin.site.register(CategoryPercentageDiscount)
| 24.333333 | 62 | 0.876712 |
793f56a76698d707fef66084e47a3a2196add0ed | 15,501 | py | Python | apysc/_event/custom_event_interface.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | ["MIT"] | null | null | null | apysc/_event/custom_event_interface.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | ["MIT"] | null | null | null | apysc/_event/custom_event_interface.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | ["MIT"] | null | null | null |
"""Class implementation for the custom event interface.
"""
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Union
from apysc._event.custom_event_type import CustomEventType
from apysc._event.event import Event
from apysc._event.handler import HandlerData
from apysc._type.blank_object_interface import BlankObjectInterface
_CustomEventType = str
_HandlerName = str
_Handler = Callable[[Any, Any], None]
class CustomEventInterface(BlankObjectInterface):
_custom_event_handlers: Dict[
_CustomEventType,
Dict[_HandlerName, HandlerData]]
def _initialize_custom_event_handlers_if_not_initialized(
self, *, custom_event_type_str: str) -> None:
"""
Initialize the _custom_event_handlers data if it hasn't been
initialized yet.
Parameters
----------
custom_event_type_str : str
Target custom event type string.
"""
if not hasattr(self, '_custom_event_handlers'):
self._custom_event_handlers = {}
if custom_event_type_str not in self._custom_event_handlers:
self._custom_event_handlers[custom_event_type_str] = {}
def _get_custom_event_type_str(
self, *,
custom_event_type: Union[CustomEventType, str]) -> str:
"""
Get a custom event type string from a type value.
Parameters
----------
custom_event_type : CustomEventType or str
Target custom event type or string.
Returns
-------
custom_event_type_str : str
A custom event type string.
"""
if isinstance(custom_event_type, str):
return custom_event_type
custom_event_type_str: str = custom_event_type.value
return custom_event_type_str
def _set_custom_event_handler_data(
self, *, handler: _Handler,
custom_event_type_str: str,
options: Optional[Any]) -> None:
"""
Set a handler's data to the dictionary.
Parameters
----------
handler : _Handler
            Callable that will be called when an event is dispatched.
custom_event_type_str : str
Target custom event type string.
options : dict or None
Optional arguments dictionary to be passed to a handler.
"""
from apysc._event.handler import get_handler_name
name: str = get_handler_name(handler=handler, instance=self)
if options is None:
options = {}
self._custom_event_handlers[
custom_event_type_str][name] = { # type: ignore
'handler': handler,
'options': options,
}
def _unset_custom_event_handler_data(
self, *, handler: _Handler,
custom_event_type_str: str) -> None:
"""
Unset a handler's data from the dictionary.
Parameters
----------
handler : _Handler
            Callable that will be called when an event is dispatched.
custom_event_type_str : str
Target custom event type string.
"""
from apysc._event.handler import get_handler_name
if custom_event_type_str not in self._custom_event_handlers:
return
name: str = get_handler_name(handler=handler, instance=self)
if name not in self._custom_event_handlers[custom_event_type_str]:
return
del self._custom_event_handlers[custom_event_type_str][name]
def bind_custom_event(
self, custom_event_type: Union[CustomEventType, str],
handler: _Handler,
e: Event,
*,
options: Optional[Any] = None,
in_handler_head_expression: str = '') -> str:
"""
Add a custom event listener setting.
Parameters
----------
custom_event_type : CustomEventType or str
Target custom event type.
handler : _Handler
            A handler that will be called when the custom event is triggered.
e : Event
Event instance.
options : dict or None, default None
Optional arguments dictionary to be passed to a handler.
in_handler_head_expression : str, default ''
Optional expression to be added at the handler function's
head position.
Returns
-------
name : str
Handler's name.
References
----------
- Bind and trigger the custom event document
- https://bit.ly/3rky7VI
- About the handler options’ type document
- https://bit.ly/39tnYxC
Examples
--------
>>> import apysc as ap
>>> def on_custom_event(
... e: ap.Event[ap.Rectangle], options: dict) -> None:
... rectangle: ap.Rectangle = e.this
... rectangle.fill_color = ap.String('#f0a')
>>> stage: ap.Stage = ap.Stage()
>>> sprite: ap.Sprite = ap.Sprite()
>>> sprite.graphics.begin_fill(color='#0af')
>>> rectangle: ap.Rectangle = sprite.graphics.draw_rect(
... x=50, y=50, width=50, height=50)
>>> e: ap.Event = ap.Event(this=rectangle)
>>> _ = rectangle.bind_custom_event(
... custom_event_type='my_custom_event',
... handler=on_custom_event, e=e)
>>> # Do something here and then trigger the custom event
>>> rectangle.trigger_custom_event(
... custom_event_type='my_custom_event')
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.bind_custom_event, locals_=locals(),
module_name=__name__, class_=CustomEventInterface):
from apysc._event.handler import append_handler_expression
from apysc._event.handler import get_handler_name
custom_event_type_str: str = self._get_custom_event_type_str(
custom_event_type=custom_event_type)
self._initialize_custom_event_handlers_if_not_initialized(
custom_event_type_str=custom_event_type_str)
self._set_custom_event_handler_data(
handler=handler, custom_event_type_str=custom_event_type_str,
options=options)
name: str = get_handler_name(handler=handler, instance=self)
self._append_custom_event_binding_expression(
custom_event_type_str=custom_event_type_str, name=name)
handler_data: HandlerData = \
self._custom_event_handlers[custom_event_type_str][name]
append_handler_expression(
handler_data=handler_data, handler_name=name, e=e,
in_handler_head_expression=in_handler_head_expression)
return name
def _append_custom_event_binding_expression(
self, *, custom_event_type_str: str, name: str) -> None:
"""
Append a custom event binding expression.
Parameters
----------
custom_event_type_str : str
Target custom event type string.
name : str
Handler's name.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_custom_event_binding_expression,
locals_=locals(),
module_name=__name__, class_=CustomEventInterface):
blank_object_variable_name: str = self.blank_object_variable_name
expression: str = (
f'$({blank_object_variable_name})'
f'.off("{custom_event_type_str}", {name});'
f'\n$({blank_object_variable_name})'
f'.on("{custom_event_type_str}", {name});'
)
ap.append_js_expression(expression=expression)
def trigger_custom_event(
self, custom_event_type: Union[CustomEventType, str]) -> None:
"""
Add a custom event trigger setting.
Parameters
----------
custom_event_type : CustomEventType or str
Target custom event type.
References
----------
- Bind and trigger the custom event document
- https://bit.ly/3rky7VI
Examples
--------
>>> import apysc as ap
>>> def on_custom_event(
... e: ap.Event[ap.Rectangle], options: dict) -> None:
... rectangle: ap.Rectangle = e.this
... rectangle.fill_color = ap.String('#f0a')
>>> stage: ap.Stage = ap.Stage()
>>> sprite: ap.Sprite = ap.Sprite()
>>> sprite.graphics.begin_fill(color='#0af')
>>> rectangle: ap.Rectangle = sprite.graphics.draw_rect(
... x=50, y=50, width=50, height=50)
>>> e: ap.Event = ap.Event(this=rectangle)
>>> _ = rectangle.bind_custom_event(
... custom_event_type='my_custom_event',
... handler=on_custom_event, e=e)
>>> # Do something here and then trigger the custom event
>>> rectangle.trigger_custom_event(
... custom_event_type='my_custom_event')
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.trigger_custom_event, locals_=locals(),
module_name=__name__, class_=CustomEventInterface):
blank_object_variable_name: str = self.blank_object_variable_name
custom_event_type_str: str = self._get_custom_event_type_str(
custom_event_type=custom_event_type)
expression: str = (
f'$({blank_object_variable_name})'
f'.trigger("{custom_event_type_str}");'
)
ap.append_js_expression(expression=expression)
def unbind_custom_event(
self,
custom_event_type: Union[CustomEventType, str],
handler: _Handler) -> str:
"""
Unbind (remove) a custom event listener setting.
Parameters
----------
custom_event_type : CustomEventType or str
Target custom event type.
handler : _Handler
            A handler that will be called when the custom event is triggered.
Returns
-------
name : str
Handler's name.
Examples
--------
>>> import apysc as ap
>>> def on_custom_event(
... e: ap.Event[ap.Rectangle], options: dict) -> None:
... rectangle: ap.Rectangle = e.this
... rectangle.fill_color = ap.String('#f0a')
... rectangle.unbind_custom_event(
... custom_event_type='my_custom_event',
... handler=on_custom_event)
>>> stage: ap.Stage = ap.Stage()
>>> sprite: ap.Sprite = ap.Sprite()
>>> sprite.graphics.begin_fill(color='#0af')
>>> rectangle: ap.Rectangle = sprite.graphics.draw_rect(
... x=50, y=50, width=50, height=50)
>>> e: ap.Event = ap.Event(this=rectangle)
>>> _ = rectangle.bind_custom_event(
... custom_event_type='my_custom_event',
... handler=on_custom_event, e=e)
>>> # Do something here and then trigger the custom event
>>> rectangle.trigger_custom_event(
... custom_event_type='my_custom_event')
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.unbind_custom_event, locals_=locals(),
module_name=__name__, class_=CustomEventInterface):
from apysc._event.handler import get_handler_name
custom_event_type_str: str = self._get_custom_event_type_str(
custom_event_type=custom_event_type)
self._initialize_custom_event_handlers_if_not_initialized(
custom_event_type_str=custom_event_type_str)
self._unset_custom_event_handler_data(
handler=handler, custom_event_type_str=custom_event_type_str)
name: str = get_handler_name(handler=handler, instance=self)
self._append_custom_event_unbinding_expression(
custom_event_type_str=custom_event_type_str, name=name)
return name
def _append_custom_event_unbinding_expression(
self, *, custom_event_type_str: str, name: str) -> None:
"""
Add a custom event unbinding expression.
Parameters
----------
custom_event_type_str : str
Target custom event type string.
name : str
Handler's name.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_custom_event_unbinding_expression,
locals_=locals(),
module_name=__name__, class_=CustomEventInterface):
expression: str = (
f'$({self.blank_object_variable_name})'
f'.off("{custom_event_type_str}", {name});'
)
ap.append_js_expression(expression=expression)
def unbind_custom_event_all(
self, custom_event_type: Union[CustomEventType, str]) -> None:
"""
Unbind (remove) custom event listener settings.
Parameters
----------
custom_event_type : CustomEventType or str
Target custom event type.
Examples
--------
>>> import apysc as ap
>>> def on_custom_event(
... e: ap.Event[ap.Rectangle], options: dict) -> None:
... rectangle: ap.Rectangle = e.this
... rectangle.fill_color = ap.String('#f0a')
... rectangle.unbind_custom_event_all(
... custom_event_type='my_custom_event')
>>> stage: ap.Stage = ap.Stage()
>>> sprite: ap.Sprite = ap.Sprite()
>>> sprite.graphics.begin_fill(color='#0af')
>>> rectangle: ap.Rectangle = sprite.graphics.draw_rect(
... x=50, y=50, width=50, height=50)
>>> e: ap.Event = ap.Event(this=rectangle)
>>> _ = rectangle.bind_custom_event(
... custom_event_type='my_custom_event',
... handler=on_custom_event, e=e)
>>> # Do something here and then trigger the custom event
>>> rectangle.trigger_custom_event(
... custom_event_type='my_custom_event')
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.unbind_custom_event_all, locals_=locals(),
module_name=__name__, class_=CustomEventInterface):
custom_event_type_str: str = self._get_custom_event_type_str(
custom_event_type=custom_event_type)
self._initialize_custom_event_handlers_if_not_initialized(
custom_event_type_str=custom_event_type_str)
self._custom_event_handlers[custom_event_type_str] = {}
expression: str = (
f'$({self.blank_object_variable_name})'
f'.off("{custom_event_type_str}");'
)
ap.append_js_expression(expression=expression)
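# Lifecycle sketch (editorial illustration that only restates the docstring
# examples above in one place; `sprite` is assumed to be an ap.Sprite whose
# graphics have already begun a fill): bind a handler, trigger the custom
# event, then unbind it.
#
#   import apysc as ap
#
#   def on_custom_event(e: ap.Event, options: dict) -> None:
#       e.this.fill_color = ap.String('#f0a')
#
#   rectangle = sprite.graphics.draw_rect(x=50, y=50, width=50, height=50)
#   e = ap.Event(this=rectangle)
#   rectangle.bind_custom_event(
#       custom_event_type='my_custom_event', handler=on_custom_event, e=e)
#   rectangle.trigger_custom_event(custom_event_type='my_custom_event')
#   rectangle.unbind_custom_event(
#       custom_event_type='my_custom_event', handler=on_custom_event)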
| 39.746154 | 78 | 0.580156 |
793f56cec6e8ab34d3da15bfcb874e00c841072b | 668 | py | Python | mayan/apps/authentication/templatetags/authentication_tags.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/authentication/templatetags/authentication_tags.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/authentication/templatetags/authentication_tags.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from django.template import Library
from ..literals import (
USER_IMPERSONATE_VARIABLE_ID, USER_IMPERSONATE_VARIABLE_PERMANENT
)
register = Library()
@register.simple_tag(takes_context=True)
def authentication_impersonation_check(context):
request = getattr(context, 'request', None)
if request:
user_id = request.session.get(USER_IMPERSONATE_VARIABLE_ID)
impersonate_permanent_session = USER_IMPERSONATE_VARIABLE_PERMANENT in request.session
if user_id and not impersonate_permanent_session:
return context.request.user
else:
return False
else:
return False
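# Template usage sketch (an editorial assumption about how this simple_tag
# would typically be loaded and called from a Django template; not taken from
# the original file):
#
#   {% load authentication_tags %}
#   {% authentication_impersonation_check as impersonated_user %}
#   {% if impersonated_user %}Impersonating {{ impersonated_user }}{% endif %}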
| 27.833333 | 95 | 0.714072 |