Dataset schema (one row per source file; ranges are the minimum and maximum observed values):

| column | type | range |
|---|---|---|
| max_stars_repo_path | string | 4-286 chars |
| max_stars_repo_name | string | 5-119 chars |
| max_stars_count | int64 | 0-191k |
| id | string | 1-7 chars |
| content | string | 6-1.03M chars |
| content_cleaned | string | 6-1.03M chars |
| language | string (111 classes) | n/a |
| language_score | float64 | 0.03-1 |
| comments | string | 0-556k chars |
| edu_score | float64 | 0.32-5.03 |
| edu_int_score | int64 | 0-5 |
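
To work with these rows programmatically, one option is sketched below. The parquet file name and the use of pandas are illustrative assumptions only; neither is specified anywhere in this dump.

# Minimal sketch (assumed local export name and format): load the table and
# iterate over the code files it contains.
import pandas as pd

df = pd.read_parquet("code_files.parquet")  # hypothetical local export of this dataset

for _, row in df.iterrows():
    # Each row carries the file path, repository, star count, id and raw source text.
    print(row["max_stars_repo_path"], row["max_stars_repo_name"], row["max_stars_count"])
    source = row["content"]  # the file text reproduced in the rows below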

desktop/core/src/desktop/lib/idbroker/client.py | yetsun/hue | 5,079 stars | id 6632751 | language: en (score 0.856536) | edu_score: 1.678793 | edu_int_score: 2

# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import object
import logging
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.idbroker import conf
from desktop.lib.rest import http_client, resource
from hadoop.core_site import is_kerberos_enabled
LOG = logging.getLogger(__name__)
_KNOX_TOKEN_API = '/knoxtoken/api/v1/token'
_CAB_API_CREDENTIALS_GLOBAL = '/cab/api/v1/credentials'
class IDBroker(object):
@classmethod
def from_core_site(cls, fs=None, user=None):
security = {'type': None}
if is_kerberos_enabled():
security['type'] = 'kerberos'
elif conf.get_cab_username(fs):
security['type'] = 'basic'
security['params'] = {'username': conf.get_cab_username(fs), 'password': conf.get_cab_password(fs)}
return cls(
user,
conf.get_cab_address(fs),
conf.get_cab_dt_path(fs),
conf.get_cab_path(fs),
security
)
def __init__(self, user=None, address=None, dt_path=None, path=None, security=None):
self.user=user
self.address=address
self.dt_path = dt_path
self.path = path
self.security = security
self._client = http_client.HttpClient(self.address, logger=LOG)
self._root = resource.Resource(self._client)
def _knox_token_params(self):
if self.user:
if self.security['type'] == 'kerberos':
return { 'doAs': self.user }
else:
return { 'user.name': self.user }
else:
return None
def get_auth_token(self):
if self.security['type'] == 'kerberos':
self._client.set_kerberos_auth()
elif self.security['type'] == 'basic':
self._client.set_basic_auth(self.security['params']['username'], self.security['params']['password'])
try:
res = self._root.invoke("GET", self.dt_path + _KNOX_TOKEN_API, self._knox_token_params(), allow_redirects=True, log_response=False) # Can't log response because returns credentials
return res.get('access_token')
except Exception as e:
raise PopupException('Failed to authenticate to IDBroker with error: %s' % e)
def get_cab(self):
self._client.set_bearer_auth(self.get_auth_token())
try:
return self._root.invoke("GET", self.path + _CAB_API_CREDENTIALS_GLOBAL, allow_redirects=True, log_response=False) # Can't log response because returns credentials
except Exception as e:
raise PopupException('Failed to obtain storage credentials from IDBroker with error: %s' % e)
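
A short usage sketch for the client above. The filesystem key 's3a' and the user name are illustrative assumptions; only IDBroker.from_core_site, get_auth_token and get_cab come from the file itself.

# Hedged usage sketch for the IDBroker client defined above.
client = IDBroker.from_core_site(fs='s3a', user='hue')  # assumed fs key and user

token = client.get_auth_token()  # Knox delegation token string
credentials = client.get_cab()   # short-lived cloud storage credentials (parsed JSON)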

solvent_classification/vectorize_data.py | rmrmg/SuzukiConditions | 2 stars | id 6632752 | language: en (score 0.305895) | edu_score: 2.422082 | edu_int_score: 2

import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--datafile', type=str, default='heteroaryl_suzuki.csv')
parser.add_argument('--mol2vec_model', default='model_300dim.pkl')
args= parser.parse_args()
import numpy as np
from mol2vec.features import mol2alt_sentence, sentences2vec
from gensim.models import word2vec
from my_neural_fgp import *
from sklearn import preprocessing
from rdkit.Chem import Descriptors, AllChem
from rdkit import Chem
import gzip
import pickle
import time
import pandas as pd
import tqdm
import logging
from tqdm.contrib.concurrent import process_map
import multiprocessing
N_CPUS = multiprocessing.cpu_count()
#1. COMMON FILES
m2v_model = word2vec.Word2Vec.load(args.mol2vec_model)
literature_csv_file = args.datafile
#aga_solvents = pd.read_csv('../other/aga_smiles_solv.csv', sep=';')#DEL?
#aga_bases = pd.read_csv('../other/aga_smiles_base.csv', sep=';')#DEL?
#ligands = pd.read_csv('../other/suzuki_ligandy.csv', sep=';')#DEL?
#2. VECTORIZATIONS
#2a. Mol2Vec
def processor_mol2vec(line):
#s = '.'.join(line)#list(s)[0]
mol=Chem.MolFromSmiles(line)
return mol2alt_sentence(mol,1)
def embedd_m2v(smiles_list):
sentences = [processor_mol2vec(line) for line in smiles_list]
table = sentences2vec(sentences, m2v_model, unseen='UNK').astype(np.float32)
return table
#2b. RDKit descriptors
rdkit_desc = dict(Descriptors.descList)
rdkit_keys = list(rdkit_desc.keys())
rdkit_keys.sort()
N_WORKERS = min(10, N_CPUS)
def describe(smiles_list):
L = len(smiles_list)
result = np.zeros((L,len(rdkit_keys)))
with tqdm.tqdm(desc='rdkit', total=L) as pgbar:
for i,s in enumerate(smiles_list):
#if len(s)>1: raise ValueError('WTF: %i %s'%(i,str(s)))
if s=='':
pgbar.update()
continue
try:
mol=Chem.MolFromSmiles(s)
for j,k in enumerate(rdkit_keys):
d=rdkit_desc[k](mol)
if not np.isfinite(d): d=0
result[i,j]=d
except:
print(s, k)
raise
pgbar.update()
return result
#2c. Morgan Fingerprints
def morganize(smiles_list, rad=3, lenght=512, counts=True, clean_iso=True):
L = len(smiles_list)
result = np.zeros((L,lenght))
with tqdm.tqdm(desc='ecfp6', total=L) as pgbar:
for i,s in enumerate(smiles_list):
if s=='':
pgbar.update()
continue
try:
mol=Chem.MolFromSmiles(s)
if clean_iso:
for atom in mol.GetAtoms(): atom.SetIsotope(0)
#if len(s)>1:raise
fgp = AllChem.GetMorganFingerprint(mol,rad,useCounts=counts)
except:
print(i,s)
raise
details= fgp.GetNonzeroElements()
for d in details:
result[i,d%lenght]+=details[d]
pgbar.update()
return result
#2d. One-hot encodings
def str_one_hot(labels, v=False):
label_encoder = preprocessing.LabelEncoder()
onehot_encoder = preprocessing.OneHotEncoder(sparse=False)
nums = label_encoder.fit_transform(labels).reshape(-1, 1)
if v:
msg = f'Classes: {label_encoder.classes_}'
logging.info(msg)
return onehot_encoder.fit_transform(nums), label_encoder.classes_
#2e. Graphs
def make_graphs(all_smiles, filter_name='first_order'):
graphs = smiles_data_processor(all_smiles)
logging.info('SMILES processed for %s'%k)
graphs, input_shapes = align_and_make_filters(graphs, filter_name)
logging.info('Graphs done for %s'%k)
return (graphs, input_shapes)
#3. Some utils
def is_low(dc):
sml = list(dc['halogen'])[0]
n = Chem.MolFromSmiles(sml).GetNumHeavyAtoms()
return n<=100
def select_smallest(smiles_iter):
result, min_n='', 1000
do_wtf_check = 'O=Cc1ccc(-c2ccc(Br)c3nc4c5ccccc5c5ccccc5c4nc23)s1' in smiles_iter
for x in smiles_iter:
N = Chem.MolFromSmiles(x).GetNumHeavyAtoms()
if N<min_n:
min_n = N
result= x
if do_wtf_check:
logging.info('WTF check: %s'%str(smiles_iter))
logging.info('WTF check: %s'%result)
return [result]
def check_missing_data(df, null='', columns=['solvent_class', 'base_class', 'yield', 'bromide', 'boronate']):
missing_cols, missing_vals = [], []
for x in columns:
if x not in df.columns:
missing_cols.append(x)
else:
full = (df[x]!=null).all()
if not full:
missing_vals.append(x)
assert missing_cols==[], f'missing columns: {missing_cols}'
assert missing_vals==[], f'missing values in columns: {missing_vals}'
FILTER_NAME = 'first_order'
#===================================
logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s')
logging.info('START')
data = pd.read_csv(literature_csv_file, sep=';').fillna('')
check_missing_data(data)
weird_bases = [',,' in x for x in data.base_class]
logging.info(f'Converting more than one base class into "other" - {sum(weird_bases)}/{len(data)} ({np.mean(weird_bases):.1f})')
data['base_class'] = data.base_class.apply(lambda x: 'other' if ',,' in x else x)
output_vectors = {'yield':data['yield'].values}
for k in ['solvent_class','base_class']:
enc, labels = str_one_hot(data[k], True)
output_vectors[k+'_enc'] = enc
output_vectors[k+'_labels'] = labels
logging.info('%s converted to one-hot'%k)
input_vectors = {}
mismatch = []
func_names, funcs = ['ecfp6', 'rdkit', 'm2v', 'graph'], [morganize, describe, embedd_m2v, make_graphs]
for k in ['boronate', 'bromide']:
for n,func in zip(func_names, funcs):
input_vectors[f'{k}_{n}'] = func(data[k])
logging.info('%s converted to %s'%(k,n))
this_len = len(input_vectors[f'{k}_{n}']) if n!='graph' else len(input_vectors[f'{k}_{n}'][0]['X'])
if this_len!=len(data):
mismatch.append((f'{k}_{n}', this_len))
status = mismatch==[]
logging.info(f'All tables with len {len(data)}: {status}')
if not status: logging.info(f'incorrect lens: {mismatch}')
logging.info('Saving')
with gzip.open('vectorized_data.pkz', 'wb') as f:
pickle.dump((input_vectors, output_vectors),f)
logging.info('EOT, NCR')
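
A small, hedged sketch of what the featurisers defined above return, using two arbitrary example SMILES strings (they are illustrative only, not entries from the dataset):

# Hedged sketch: the SMILES below are arbitrary illustrative molecules.
toy_smiles = ['c1ccccc1Br', 'OB(O)c1ccccc1']  # an aryl bromide and a boronic acid

ecfp = morganize(toy_smiles)                     # (2, 512) folded ECFP6 count fingerprints
descs = describe(toy_smiles)                     # (2, len(rdkit_keys)) descriptor matrix
onehot, classes = str_one_hot(['dioxane', 'water'], v=True)  # (2, 2) one-hot plus label names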

sscanss/ui/commands/control.py | StephenNneji/SScanSS-2 | 0 stars | id 6632753 | language: en (score 0.731049) | edu_score: 2.552131 | edu_int_score: 3

from PyQt5 import QtWidgets
from sscanss.core.util import CommandID, toggleActionInGroup, Attributes
class LockJoint(QtWidgets.QUndoCommand):
"""Sets lock state of specified joint
:param positioner_name: name of positioner
:type positioner_name: str
:param index: joint index
:type index: int
:param value: indicates if joint is locked
:type value: bool
:param presenter: Mainwindow presenter instance
:type presenter: MainWindowPresenter
"""
def __init__(self, positioner_name, index, value, presenter):
super().__init__()
self.model = presenter.model
self.positioner_name = positioner_name
stack = self.model.instrument.getPositioner(self.positioner_name)
self.old_lock_state = [l.locked for l in stack.links]
self.new_lock_state = self.old_lock_state.copy()
self.new_lock_state[index] = value
self.setText(f'Locked Joint in {positioner_name}')
def redo(self):
self.changeLockState(self.new_lock_state)
def undo(self):
self.changeLockState(self.old_lock_state)
def changeLockState(self, lock_state):
stack = self.model.instrument.getPositioner(self.positioner_name)
for state, link in zip(lock_state, stack.links):
link.locked = state
self.model.instrument_controlled.emit(self.id())
def mergeWith(self, command):
"""Merges consecutive change main commands
:param command: command to merge
:type command: QUndoCommand
:return: True if merge was successful
:rtype: bool
"""
if self.positioner_name != command.positioner_name:
return False
if self.old_lock_state == command.new_lock_state:
self.setObsolete(True)
self.new_lock_state = command.new_lock_state
return True
def id(self):
"""Returns ID used for notifying of or merging commands"""
return CommandID.LockJoint
class IgnoreJointLimits(QtWidgets.QUndoCommand):
"""Sets joint limit ignore state of specified joint
:param positioner_name: name of positioner
:type positioner_name: str
:param index: joint index
:type index: int
:param value: indicates joint limit should be ignored
:type value: bool
:param presenter: Mainwindow presenter instance
:type presenter: MainWindowPresenter
"""
def __init__(self, positioner_name, index, value, presenter):
super().__init__()
self.model = presenter.model
self.positioner_name = positioner_name
stack = self.model.instrument.getPositioner(self.positioner_name)
self.old_ignore_state = [l.ignore_limits for l in stack.links]
self.new_ignore_state = self.old_ignore_state.copy()
self.new_ignore_state[index] = value
self.setText(f'Ignored Joint Limits in {positioner_name}')
def redo(self):
self.changeIgnoreLimitState(self.new_ignore_state)
def undo(self):
self.changeIgnoreLimitState(self.old_ignore_state)
def changeIgnoreLimitState(self, ignore_state):
stack = self.model.instrument.getPositioner(self.positioner_name)
for state, link in zip(ignore_state, stack.links):
link.ignore_limits = state
self.model.instrument_controlled.emit(self.id())
def mergeWith(self, command):
"""Merges consecutive change main commands
:param command: command to merge
:type command: QUndoCommand
:return: True if merge was successful
:rtype: bool
"""
if self.positioner_name != command.positioner_name:
return False
if self.old_ignore_state == command.new_ignore_state:
self.setObsolete(True)
self.new_ignore_state = command.new_ignore_state
return True
def id(self):
"""Returns ID used for notifying of or merging commands"""
return CommandID.IgnoreJointLimits
class MovePositioner(QtWidgets.QUndoCommand):
"""Moves the stack to specified configuration. The first move will be animated but any repeats (redo)
will be an instant move i.e. single step.
:param positioner_name: name of positioner
:type positioner_name: str
:param q: list of joint offsets to move to. The length must be equal to number of links
:type q: List[float]
:param ignore_locks: indicates that joint locks should be ignored
:type ignore_locks: bool
:param presenter: Mainwindow presenter instance
:type presenter: MainWindowPresenter
"""
def __init__(self, positioner_name, q, ignore_locks, presenter):
super().__init__()
self.model = presenter.model
self.view = presenter.view
self.positioner_name = positioner_name
stack = self.model.instrument.getPositioner(self.positioner_name)
self.move_from = stack.set_points
self.move_to = q
self.animate = True
self.ignore_locks = ignore_locks
self.setText(f'Moved {positioner_name}')
def redo(self):
stack = self.model.instrument.getPositioner(self.positioner_name)
if self.animate:
stack.set_points = self.move_to
self.model.moveInstrument(lambda q, s=stack: s.fkine(q, setpoint=False, ignore_locks=self.ignore_locks),
self.move_from, self.move_to, 500, 10)
self.animate = False
else:
stack.fkine(self.move_to, ignore_locks=self.ignore_locks)
self.model.notifyChange(Attributes.Instrument)
self.model.instrument_controlled.emit(self.id())
def undo(self):
if self.view.scenes.sequence.isRunning():
self.view.scenes.sequence.stop()
stack = self.model.instrument.getPositioner(self.positioner_name)
stack.set_points = self.move_from
stack.fkine(self.move_from, ignore_locks=self.ignore_locks)
self.model.notifyChange(Attributes.Instrument)
self.model.instrument_controlled.emit(self.id())
def mergeWith(self, command):
"""Merges consecutive change main commands
:param command: command to merge
:type command: QUndoCommand
:return: True if merge was successful
:rtype: bool
"""
if self.positioner_name != command.positioner_name or self.ignore_locks != command.ignore_locks:
return False
if self.move_from == command.move_to:
self.setObsolete(True)
self.move_to = command.move_to
return True
def id(self):
"""Returns ID used for notifying of or merging commands"""
return CommandID.MovePositioner
class ChangePositioningStack(QtWidgets.QUndoCommand):
"""Changes the active positioning stack of the instrument
:param stack_name: name of positioning stack
:type stack_name: str
:param presenter: Mainwindow presenter instance
:type presenter: MainWindowPresenter
"""
def __init__(self, stack_name, presenter):
super().__init__()
self.model = presenter.model
stack = self.model.instrument.positioning_stack
self.old_q = stack.set_points
self.link_state = [(l.locked, l.ignore_limits) for l in stack.links]
self.bases = [aux.base for aux in stack.auxiliary]
self.old_stack = self.model.instrument.positioning_stack.name
self.new_stack = stack_name
self.setText('Changed Positioning Stack to {}'.format(stack_name))
def redo(self):
self.model.instrument.loadPositioningStack(self.new_stack)
self.model.notifyChange(Attributes.Instrument)
self.model.instrument_controlled.emit(self.id())
def undo(self):
self.model.instrument.loadPositioningStack(self.old_stack)
stack = self.model.instrument.positioning_stack
for base, aux in zip(self.bases, stack.auxiliary):
aux.base = base
for s, l in zip(self.link_state, stack.links):
l.locked = s[0]
l.ignore_limits = s[1]
stack.fkine(self.old_q, True)
self.model.notifyChange(Attributes.Instrument)
self.model.instrument_controlled.emit(self.id())
def id(self):
"""Returns ID used for notifying of or merging commands"""
return CommandID.ChangePositioningStack
class ChangePositionerBase(QtWidgets.QUndoCommand):
"""Changes the base matrix of an auxiliary positioner
:param positioner: auxiliary positioner
:type positioner: SerialManipulator
:param matrix: new base matrix
:type matrix: Matrix44
:param presenter: Mainwindow presenter instance
:type presenter: MainWindowPresenter
"""
def __init__(self, positioner, matrix, presenter):
super().__init__()
self.model = presenter.model
self.aux = positioner
self.old_matrix = positioner.base
self.new_matrix = matrix
self.setText('Changed Base Matrix of {}'.format(positioner.name))
def redo(self):
self.changeBase(self.new_matrix)
def undo(self):
self.changeBase(self.old_matrix)
def changeBase(self, matrix):
self.model.instrument.positioning_stack.changeBaseMatrix(self.aux, matrix)
self.model.notifyChange(Attributes.Instrument)
self.model.instrument_controlled.emit(self.id())
def mergeWith(self, command):
"""Merges consecutive change main commands
:param command: command to merge
:type command: QUndoCommand
:return: True if merge was successful
:rtype: bool
"""
if self.aux is not command.aux:
return False
if self.old_matrix is command.new_matrix:
self.setObsolete(True)
self.new_matrix = command.new_matrix
return True
def id(self):
"""Returns ID used for notifying of or merging commands"""
return CommandID.ChangePositionerBase
class ChangeJawAperture(QtWidgets.QUndoCommand):
"""Sets the Jaws aperture
:param aperture: new aperture
:type aperture: List[float]
:param presenter: Mainwindow presenter instance
:type presenter: MainWindowPresenter
"""
def __init__(self, aperture, presenter):
super().__init__()
self.model = presenter.model
jaws = self.model.instrument.jaws
self.old_aperture = jaws.aperture.copy()
self.new_aperture = aperture
self.setText(f'Changed {jaws.name} Aperture')
def redo(self):
self.changeAperture(self.new_aperture)
def undo(self):
self.changeAperture(self.old_aperture)
def changeAperture(self, aperture):
self.model.instrument.jaws.aperture[0] = aperture[0]
self.model.instrument.jaws.aperture[1] = aperture[1]
self.model.notifyChange(Attributes.Instrument)
self.model.instrument_controlled.emit(self.id())
def mergeWith(self, command):
"""Merges consecutive change main commands
:param command: command to merge
:type command: QUndoCommand
:return: True if merge was successful
:rtype: bool
"""
if self.old_aperture == command.new_aperture:
self.setObsolete(True)
self.new_aperture = command.new_aperture
return True
def id(self):
"""Returns ID used for notifying of or merging commands"""
return CommandID.ChangeJawAperture
class ChangeCollimator(QtWidgets.QUndoCommand):
"""Changes the collimator of a given detector
:param detector_name: name of detector
:type detector_name: str
:param collimator_name: new collimator name
:type collimator_name: Union[str, None]
:param presenter: Mainwindow presenter instance
:type presenter: MainWindowPresenter
"""
def __init__(self, detector_name, collimator_name, presenter):
super().__init__()
self.model = presenter.model
self.detector_name = detector_name
detector = self.model.instrument.detectors[self.detector_name]
collimator = detector.current_collimator
self.old_collimator_name = None if collimator is None else collimator.name
self.new_collimator_name = collimator_name
self.action_group = presenter.view.collimator_action_groups[detector_name]
presenter.view.scenes.switchToInstrumentScene()
self.setText(f"Changed {detector_name} Detector's Collimator to {collimator_name}")
def redo(self):
self.changeCollimator(self.new_collimator_name)
def undo(self):
self.changeCollimator(self.old_collimator_name)
def changeCollimator(self, collimator_name):
detector = self.model.instrument.detectors[self.detector_name]
detector.current_collimator = collimator_name
self.model.notifyChange(Attributes.Instrument)
toggleActionInGroup(collimator_name, self.action_group)
def mergeWith(self, command):
"""Merges consecutive change main commands
:param command: command to merge
:type command: QUndoCommand
:return: True if merge was successful
:rtype: bool
"""
if self.detector_name != command.detector_name:
return False
if self.old_collimator_name == command.new_collimator_name:
self.setObsolete(True)
self.new_collimator_name = command.new_collimator_name
self.setText(f"Changed {self.detector_name} Detector's Collimator to {self.new_collimator_name}")
return True
def id(self):
"""Returns ID used for notifying of or merging commands"""
return CommandID.ChangeCollimator
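
The classes above are meant to be pushed onto a QUndoStack rather than executed directly; pushing a command runs redo() immediately, and a later undo triggers undo(). A hedged sketch, where 'presenter' is an assumed existing MainWindowPresenter and 'Positioning Table' an assumed positioner name:

# Hedged sketch: presenter and the positioner name are assumptions, not values from this file.
from PyQt5 import QtWidgets

undo_stack = QtWidgets.QUndoStack()
undo_stack.push(LockJoint('Positioning Table', index=0, value=True, presenter=presenter))
undo_stack.push(ChangeJawAperture([2.0, 2.0], presenter))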

epic/utils/cell_migration.py | AlphonsG/EPIC-BBox-Cell-Tracking | 0 stars | id 6632754 | language: en (score 0.853077) | edu_score: 1.832749 | edu_int_score: 2

# Copyright (c) 2021 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from epic.utils.misc import avg_bbox_length
import numpy as np
def bnd_ldg_es(det, leading_edges):
top_edge, bottom_edge = leading_edges
return det.centre[1] > bottom_edge or det.centre[1] < top_edge
def keep_sliding(curr_window_pos, idx, img_centre_y):
result = (curr_window_pos < img_centre_y if idx == 0 else
curr_window_pos > img_centre_y)
return result
def detect_leading_edges(img, dets, start_posn=24, std1=3, std2=2, stride=None,
window_height=None):
if len(dets) == 0:
return None
avg_len = avg_bbox_length(dets)
window_height = window_height if window_height is not None else avg_len
strides = [stride, -stride] if stride is not None else [avg_len, -avg_len]
img_centre_y = img.shape[0] / 2
window_pos = [[window_height, 0], [img.shape[0],
img.shape[0] - window_height]]
data = [[], []]
for i in range(0, 2):
window_lower_pos, window_upper_pos = window_pos[i]
while keep_sliding(window_pos[i][i], i, img_centre_y):
num_dets = 0
for tracklet in dets:
if tracklet.dets[0].centre[1] < window_lower_pos and (
tracklet.dets[0].centre[1] > window_upper_pos):
num_dets += 1
data[i].append((num_dets, window_upper_pos, window_lower_pos))
window_upper_pos += strides[i]
window_lower_pos = window_upper_pos + window_height
window_pos[i] = [window_lower_pos, window_upper_pos]
final_posns = [0, 0]
for i in range(0, 2):
dens, posns = [x[0] for x in data[i]], [int(x[2 - i]) for x in data[i]]
rolling_dens = dens[0: start_posn]
for posn, den in enumerate(dens[start_posn::], start=start_posn):
final_posns[i] = posns[posn]
if (np.mean(rolling_dens + [den]) - std1 * np.std(
rolling_dens + [den]) < 0 and den > rolling_dens[-1]):
continue
elif (den < np.mean(rolling_dens + [den]) - std2 * np.std(
rolling_dens + [den])):
break
else:
rolling_dens.append(den)
return final_posns
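
A hedged sketch of calling the function above; 'img' and 'tracklets' are assumed inputs (an image frame as a NumPy array and EPIC tracklet objects, which are not defined in this file):

# Hedged sketch: img is an image array and tracklets a list of objects whose
# first detection exposes a .centre = (x, y) attribute, as assumed by the code above.
edges = detect_leading_edges(img, tracklets)  # [top_edge_y, bottom_edge_y] or None
if edges is not None:
    beyond = [t for t in tracklets if bnd_ldg_es(t.dets[0], edges)]  # detections past either leading edge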

app/controllers/add_dataset.py | rodrigoasrodrigues/CS598-T7 | 0 stars | id 6632755 | language: en (score 0.746099) | edu_score: 2.363438 | edu_int_score: 2

import os
import uuid
import json
import hashlib
from flask import render_template, request, redirect, send_from_directory
from app import app
from app import db
from app.controllers.env_configs import EnvConf
from app.models.tables import (DatasetFile, LabelFile, Word2VecModel)
from threading import Thread
from app.controllers import model_manager
def word_count_dataset(hash):
filepath = os.path.abspath(EnvConf.dataset_dir+'/'+hash)
file = open(filepath, "rt")
data = file.read()
words = data.split()
wcount = len(words)
return wcount
def count_labels(hash):
filepath = os.path.abspath(EnvConf.label_dir+'/'+hash)
label_file = open(filepath, 'r')
Lines = label_file.readlines()
count = 0
positive = 0
# counts positive and total then calculate negatives
for line in Lines:
count = count+1
pos = line.strip().split('\t')[1] == '1'
if pos:
positive = positive + 1
negative = count - positive
#todo implement
return positive, negative
def save_file(file, folder):
fullpath_folder = os.path.abspath(folder)
temp_filename = 'temp_'+str(uuid.uuid4())
fullpath = fullpath_folder+'/'+temp_filename
file.save(fullpath)
with open(fullpath, "rb") as f:
file_hash = hashlib.md5()
while chunk := f.read(8192):
file_hash.update(chunk)
hashstr = file_hash.hexdigest()
dest = fullpath_folder+'/'+hashstr
if not os.path.exists(dest):
os.rename(fullpath,dest)
else:
os.remove(fullpath)
return hashstr
@app.route('/add_dataset')
def add_dataset():
dataset_list = DatasetFile.query.all()
label_list = LabelFile.query.all()
return render_template('add_dataset.html', dataset_list=dataset_list, label_list=label_list)
@app.route('/download/<filetypen>/<filehash>')
def download_file(filetypen=None,filehash=None):
if filetypen == 'ds':
folder = os.path.abspath(EnvConf.dataset_dir)
elif filetypen == 'lbl':
folder = os.path.abspath(EnvConf.label_dir)
return send_from_directory(directory=folder, filename=filehash, mimetype='text/plain')
@app.route('/train', methods=["POST"])
def train():
dataset_id = -1
label_id = -1
if request.files:
if request.files["fupDataset"] and request.form['datasetRadio'] == 'new':
dataset_file = request.files["fupDataset"]
dataset_hash = save_file(dataset_file,EnvConf.dataset_dir)
dataset_name = request.form['txtDatasetName']
print(f'dataset hash = {dataset_hash}')
nwords = word_count_dataset(dataset_hash)
print(f'words = {nwords}')
dataset = DatasetFile(dataset_hash,dataset_name,nwords)
db.session.add(dataset)
db.session.commit()
dataset_id = dataset.id
if request.files["fupLabel"] and request.form['labelRadio'] == 'new':
label_file = request.files["fupLabel"]
label_hash = save_file(label_file,EnvConf.label_dir)
label_name = request.form['txtLabelName']
print(f'label hash = {label_hash}')
positive, negative = count_labels(label_hash)
label = LabelFile(label_hash,label_name,positive, negative)
db.session.add(label)
db.session.commit()
label_id = label.id
if dataset_id == -1:
dataset_id = int(request.form['datasetRadio'])
if label_id == -1:
label_id = int(request.form['labelRadio'])
description = request.form['txtDescription']
w2v_model = Word2VecModel('training', description, dataset_id, label_id)
iterations = int(request.form['iterations'])
window_size = int(request.form['window_size'])
min_count = int(request.form['min_count'])
w2v_model.iterations = iterations
w2v_model.window = window_size
w2v_model.min_count = min_count
db.session.add(w2v_model)
db.session.commit()
print(f'dataset = {dataset_id} ; label = {label_id}')
return redirect(f'/training/{w2v_model.id}')
@app.route('/training/<model_id>')
def training(model_id):
model = Word2VecModel.query.get(model_id)
return render_template('training_model.html',model=model)
@app.route('/training_status/<model_id>')
def training_status(model_id):
model = Word2VecModel.query.get(model_id)
return json.dumps(model.file_hash != 'training')
@app.route('/training_exec/<model_id>')
def training_exec(model_id):
hash = model_manager.train(model_id)
return json.dumps(hash)
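
For reference, count_labels above expects one example per line with a tab-separated label in the second column ('1' marks a positive). A hedged sketch of producing and counting such a file (the sentences and file name are made-up examples):

# Hedged sketch: build a tiny label file in the expected format, then count it.
import os

os.makedirs(EnvConf.label_dir, exist_ok=True)
with open(os.path.join(EnvConf.label_dir, 'example_labels'), 'w') as f:
    f.write('this product is great\t1\n')  # positive example
    f.write('terrible experience\t0\n')    # negative example

positive, negative = count_labels('example_labels')  # -> (1, 1)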
docs/end-to-end/library/GeocontribConnectLibrary.py | hcharp/geocontrib | 3 | 6632756 | # Copyright (c) 2017-2021 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from utils import get_driver
def geocontrib_connect_superuser(username, password):
get_driver().find_element_by_link_text("Se Connecter").click()
get_driver().find_element_by_name("username").clear()
get_driver().find_element_by_name("username").send_keys(username)
get_driver().find_element_by_name("password").clear()
get_driver().find_element_by_name("password").send_keys(password)
get_driver().find_element_by_xpath("//button[@type='submit']").click()
def geocontrib_disconnect():
get_driver().find_element_by_xpath("//i").click()
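# --- Illustrative sketch only, not part of the original library: chains the two
# --- keywords above in one call. The underscore prefix keeps it out of the Robot
# --- Framework keyword namespace, and the credentials below are placeholders.
def _example_superuser_session(username="admin", password="change-me"):
    geocontrib_connect_superuser(username, password)
    geocontrib_disconnect()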
databin/parsers/__init__.py | LeTristanB/databin | 0 | 6632757 | from databin.parsers.util import ParseException
from databin.parsers.simple import parse_dsv, parse_tsv, parse_ssv, pandas_parser
from databin.parsers.psql import parse_psql
PARSERS = [
('Auto Detect', "pandas_table", pandas_parser),
('Excel copy & paste', 'excel', parse_tsv),
('PostgreSQL Shell', 'psql', parse_psql),
('MySQL Shell', 'mysql', parse_psql),
('Comma-Separated Values', 'csv', parse_dsv),
('Tab-Separated Values', 'tsv', parse_tsv),
('Space-Separated Values', 'ssv', parse_ssv)
]
def parse(format, data):
for name, key, func in PARSERS:
if key == format:
return func(data)
raise ParseException()
def get_parsers():
for name, key, func in PARSERS:
yield (key, name)
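# Illustrative usage sketch, not part of the original module; the CSV text below
# is made up for the example.
def _example_usage():
    sample = "name,age\nada,36\nlin,29"
    table = parse('csv', sample)      # dispatches to parse_dsv via PARSERS
    formats = dict(get_parsers())     # {'pandas_table': 'Auto Detect', ...}
    return table, formats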
main.py | yutongshen/DSAI-Subtractor | 0 | 6632758 | import numpy as np
import random
import pickle
import os.path
# Parameters Config
SUB = 0
SUB_ADD = 1
MULTIPLY = 2
DIGITS = 4
TRAINING_SIZE = 18000
VALIDATION_SIZE = 2000
TESTING_SIZE = 60000
ITERATION = 100
TOTAL_SIZE = TRAINING_SIZE + VALIDATION_SIZE + TESTING_SIZE
GEN_TYPE = None
MAXLEN = None
ANS_DIGITS = None
chars = None
ct = None
def set_gen_type(gen_type):
global GEN_TYPE
global ANS_DIGITS
global chars
global ct
GEN_TYPE = gen_type
ANS_DIGITS = {
SUB: DIGITS + 1,
SUB_ADD: DIGITS + 1,
MULTIPLY: 2 * DIGITS
}.get(GEN_TYPE, DIGITS + 1)
chars = {
SUB: '0123456789- ',
SUB_ADD: '0123456789+- ',
MULTIPLY: '0123456789* '
}.get(GEN_TYPE, '0123456789+-* ')
ct = CharacterTable(chars)
class CharacterTable:
def __init__(self, chars):
self.chars = list(chars)
self.len = len(chars)
self.encode = {}
for i, key in enumerate(self.chars):
self.encode[key] = np.zeros(self.len, np.float32)
self.encode[key][i] = 1.
def encoder(self, C):
result = np.zeros((len(C), self.len))
for i, c in enumerate(C):
            try:
                result[i] = self.encode[c]
            except KeyError:
                # characters not present in the table are left as an all-zero row
                pass
return result
def decoder(self, x):
x = x.argmax(axis=-1)
return ''.join(self.chars[i] for i in x)
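# Illustrative sketch (never called by the script): round-trips a padded query
# string through the one-hot CharacterTable once set_gen_type() has initialised
# the module-level table.
def _charactertable_demo():
    set_gen_type(SUB)
    one_hot = ct.encoder(' 128-  64')   # shape (9, len(chars))
    return ct.decoder(one_hot)          # -> ' 128-  64'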
# Data Generation
def generation(arg):
questions = []
expected = []
seen = set()
operator = {
SUB: ['-'],
SUB_ADD: ['-', '+'],
MULTIPLY: ['*']
}
ans_switcher = {
'+': lambda a, b: a + b,
'-': lambda a, b: a - b,
'*': lambda a, b: a * b
}
ops = operator.get(GEN_TYPE, [None])
print('Generating data...')
while len(questions) < TOTAL_SIZE:
f = lambda: random.choice(range(10 ** random.choice(range(1, DIGITS + 1))))
g = lambda: random.choice(ops)
a, b, op = f(), f(), g()
if op == '-':
a, b = sorted((a, b), reverse=True)
key = tuple((a, b, op))
if key in seen:
continue
seen.add(key)
# query = '{}{}{}'.format(a, op, b).ljust(MAXLEN)
query = str(a).rjust(DIGITS) + op + str(b).rjust(DIGITS)
ans_funct = ans_switcher.get(op, lambda a, b: float('NAN'))
ans = str(ans_funct(a, b)).rjust(ANS_DIGITS)
questions.append(query)
expected.append(ans)
print('Total addition questions:', len(questions))
print(questions[:10])
print(expected[:10])
# Processing
x = np.zeros((len(questions), MAXLEN, len(chars)), np.float32)
y = np.zeros((len(expected), ANS_DIGITS, len(chars)), np.float32)
for i, sentence in enumerate(questions):
x[i] = ct.encoder(sentence)
for i, sentence in enumerate(expected):
y[i] = ct.encoder(sentence)
data = {}
data['train_x'] = x[:TRAINING_SIZE]
data['train_y'] = y[:TRAINING_SIZE]
data['validation_x'] = x[TRAINING_SIZE:TRAINING_SIZE + VALIDATION_SIZE]
data['validation_y'] = y[TRAINING_SIZE:TRAINING_SIZE + VALIDATION_SIZE]
data['test_x'] = x[TRAINING_SIZE + VALIDATION_SIZE:]
data['test_y'] = y[TRAINING_SIZE + VALIDATION_SIZE:]
data['type'] = GEN_TYPE
with open(arg.d, 'wb') as f:
pickle.dump(data, f, -1)
print('Save file successfully.')
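# For the default '-t sub' run, the pickle written above therefore holds
# (shapes follow the constants at the top of this file):
#   train_x      (18000, 9, 12) one-hot    train_y      (18000, 5, 12) one-hot
#   validation_x ( 2000, 9, 12)            validation_y ( 2000, 5, 12)
#   test_x       (60000, 9, 12)            test_y       (60000, 5, 12)
#   type         SUB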
# Model
# - Using sequence to sequence model
# - Encoder: bi-directional LSTM
# - Decoder: LSTM
def train(arg):
if not os.path.exists(arg.d):
print('Training data not exist.')
return
else:
with open(arg.d, 'rb') as f:
data = pickle.load(f)
train_x = data['train_x']
train_y = data['train_y']
validation_x = data['validation_x']
validation_y = data['validation_y']
test_x = data['test_x']
test_y = data['test_y']
set_gen_type(data['type'])
HIDDEN_SIZE = 256
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
warnings.filterwarnings("ignore",category=UserWarning)
import keras as K
from keras.models import Sequential, Model
from keras.layers.core import Dense, Activation, Lambda
from keras.layers import Input, LSTM, TimeDistributed, RepeatVector, Reshape, Dropout, Bidirectional, Concatenate
from keras.layers.normalization import BatchNormalization
model = Sequential()
encoder_inputs = Input(shape=(MAXLEN, len(chars)))
encoder = Bidirectional(LSTM(HIDDEN_SIZE, return_state=True))
encoder_outputs, forward_h, forward_c, backward_h, backward_c = encoder(encoder_inputs)
state_h = Concatenate()([forward_h, backward_h])
state_c = Concatenate()([forward_c, backward_c])
states = [state_h, state_c]
# Set up the decoder, which will only process one timestep at a time.
decoder_inputs = Reshape((1, HIDDEN_SIZE * 2))
decoder_lstm = LSTM(HIDDEN_SIZE * 2, return_state=True)
all_outputs = []
inputs = decoder_inputs(encoder_outputs)
first_decoder = True
for _ in range(ANS_DIGITS):
# Run the decoder on one timestep
outputs, state_h, state_c = decoder_lstm(inputs,
initial_state=states)
# Reinject the outputs as inputs for the next loop iteration
# as well as update the states
states = [state_h, state_c]
# Store the current prediction (we will concatenate all predictions later)
outputs = Dense(len(chars), activation='softmax')(outputs)
all_outputs.append(outputs)
# Concatenate all predictions
decoder_outputs = Concatenate()(all_outputs)
decoder_outputs = Reshape((ANS_DIGITS, len(chars)))(decoder_outputs)
decoder_outputs = Lambda(lambda x: x[:, ::-1])(decoder_outputs)
# Define and compile model as previously
model = Model(encoder_inputs, decoder_outputs)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
batch_size = int(len(train_x) / 128 / 100) * 100
if batch_size == 0:
batch_size = 100
model.fit(train_x, train_y,
batch_size=batch_size, epochs=ITERATION,
verbose=1, validation_data=[validation_x, validation_y])
model.save(arg.m)
print('save model successfully')
def check_error(model, x, y):
err_list = []
pred = model.predict(x)
size = len(x)
for i in range(size):
y_str = ct.decoder(y[i])
pred_str = ct.decoder(pred[i])
if y_str != pred_str:
err_list.append(ct.decoder(x[i]) + ' = ' + pred_str + ' ' + y_str)
return err_list
def report(arg, obj):
if not os.path.exists(arg.d):
print('Data not exist.')
return
else:
with open(arg.d, 'rb') as f:
data = pickle.load(f)
if obj == 'acc':
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
from keras.models import load_model
if not os.path.exists(arg.m):
print('Model file not exist.')
return
else:
set_gen_type(data['type'])
model = load_model(arg.m)
eva = model.evaluate(data['train_x'], data['train_y'], verbose=False)
print('Training Data:')
print('Size:', len(data['train_x']))
print('Loss:', eva[0], 'Accuracy:', eva[1])
err_list = check_error(model, data['train_x'], data['train_y'])
print('ERROR:', len(err_list), '/', len(data['train_x']))
if len(err_list):
print('predict'.rjust(MAXLEN + ANS_DIGITS + 3), 'ans'.rjust(ANS_DIGITS))
print('\n'.join(err_list))
print()
eva = model.evaluate(data['validation_x'], data['validation_y'], verbose=False)
print('Validation Data:')
print('Size:', len(data['validation_x']))
print('Loss:', eva[0], 'Accuracy:', eva[1])
err_list = check_error(model, data['validation_x'], data['validation_y'])
print('ERROR:', len(err_list), '/', len(data['validation_x']))
if len(err_list):
print('predict'.rjust(MAXLEN + ANS_DIGITS + 3), 'ans'.rjust(ANS_DIGITS))
print('\n'.join(err_list))
print()
eva = model.evaluate(data['test_x'], data['test_y'], verbose=False)
print('Testing Data:')
print('Size:', len(data['test_x']))
print('Loss:', eva[0], 'Accuracy:', eva[1])
err_list = check_error(model, data['test_x'], data['test_y'])
print('ERROR:', len(err_list), '/', len(data['test_x']))
if len(err_list):
print('predict'.rjust(MAXLEN + ANS_DIGITS + 3), 'ans'.rjust(ANS_DIGITS))
print('\n'.join(err_list))
print()
else:
report_x = data[obj + '_x']
report_y = data[obj + '_y']
set_gen_type(data['type'])
for i in range(len(report_x)):
print(ct.decoder(report_x[i]), '=', ct.decoder(report_y[i]))
def test(arg):
from keras.models import load_model
if not os.path.exists(arg.m):
print('Model file not exist.')
return
else:
model = load_model(arg.m)
while True:
q = input('Please input test data or "exit": ')
if q.upper() == 'EXIT':
                break
q_padding = q.ljust(MAXLEN)[:MAXLEN]
test_x = ct.encoder(q_padding)
pred_y = model.predict(test_x.reshape(-1, MAXLEN, len(chars)))
print(q, '=', ct.decoder(pred_y[0]))
def help(arg = None):
print('usage: python main.py [-o OPTION] [-t TYPE] [-d DATA] [-m MODEL]')
print()
print('general options:')
print(' -h, --help show this help message and exit')
print('')
print('operational options:')
print(' -o gen data generation')
print(' -o train training model')
print(' -o report_training_data show all training data')
print(' -o report_validation_data show all validation data')
print(' -o report_testing_data show all testing data')
print(' -o report_accuracy show accuracy')
print(' -o test input formula by self')
print('')
print('calculational options: (default: -t sub)')
print(' -t sub subtraction')
print(' -t sub_add subtraction mix with addition')
print(' -t multiply multiplication')
print('')
print('advance options:')
print(' -d <DATA> input the path of training (or generation) data')
print(' (default: src/data.pkl)')
print(' -m <MODEL> input the path of model')
print(' (default: src/my_model.h5)')
print('')
import argparse
class MyParser(argparse.ArgumentParser):
def format_help(self):
help()
return
if __name__ == '__main__':
# parser = argparse.ArgumentParser()
parser = MyParser()
parser.add_argument('-o',
default='unknown',
help='input operation.')
parser.add_argument('-t',
default='sub',
help='input calculation type.')
parser.add_argument('-d',
default='src/data.pkl',
help='input data.')
parser.add_argument('-m',
default='src/my_model.h5',
help='input model.')
args = parser.parse_args()
GEN_TYPE = {
'sub': SUB,
'sub_add': SUB_ADD,
'multiply': MULTIPLY
}.get(args.t, -1)
if GEN_TYPE != -1:
MAXLEN = DIGITS + 1 + DIGITS
set_gen_type(GEN_TYPE)
switcher = {
'gen': lambda arg: generation(arg),
'train': lambda arg: train(arg),
'report_training_data': lambda arg: report(arg, 'train'),
'report_validation_data': lambda arg: report(arg, 'validation'),
'report_testing_data': lambda arg: report(arg, 'test'),
'report_accuracy': lambda arg: report(arg, 'acc'),
'test': lambda arg: test(arg)
}
func = switcher.get(args.o, lambda arg: help(arg))
func(args)
else:
help()
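# Example invocations (sketch; the file paths shown are simply the defaults
# declared by the argument parser above):
#   python main.py -o gen -t sub
#   python main.py -o train
#   python main.py -o report_accuracy -d src/data.pkl -m src/my_model.h5
#   python main.py -o test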
legacy/legacy/utils/evaluators/recall.py | csmithchicago/openrec | 0 | 6632759 |
import numpy as np
from openrec.legacy.utils.evaluators import Evaluator
class Recall(Evaluator):
def __init__(self, recall_at, name="Recall"):
self._recall_at = np.array(recall_at)
super(Recall, self).__init__(etype="rank", name=name)
def compute(self, rank_above, negative_num):
del negative_num
results = np.zeros(len(self._recall_at))
for rank in rank_above:
results += (rank <= self._recall_at).astype(np.float32)
return results / len(rank_above)
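# Minimal usage sketch (the numbers are made up): rank_above holds, for each
# positive test item, the rank it achieved among its sampled negatives.
def _example_recall_at_k():
    evaluator = Recall(recall_at=[5, 10])
    ranks = np.array([3, 12, 7, 1])
    return evaluator.compute(rank_above=ranks, negative_num=None)
    # -> array([0.5 , 0.75]): 2 of 4 ranks <= 5, 3 of 4 ranks <= 10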
ncrystal_python/__init__.py | highness-eu/ncrystal | 1 | 6632760 | #!/usr/bin/env python3
"""Python module for using the NCrystal library for thermal neutron transport in crystals
Please find more information about NCrystal at the website:
https://mctools.github.io/ncrystal/
In particular, a small example using the NCrystal python module can be found at:
https://github.com/mctools/ncrystal/blob/master/examples/ncrystal_example_py
A substantial effort went into developing NCrystal. If you use it for your work,
we would appreciate it if you would use the following reference in your work:
<NAME> and <NAME>, NCrystal: A library for thermal neutron
transport, Computer Physics Communications 246 (2020) 106851,
https://doi.org/10.1016/j.cpc.2019.07.015
For work benefitting from our inelastic physics, we furthermore request that you
additionally also use the following reference in your work:
<NAME>, <NAME>, et. al., "Rejection-based sampling of inelastic
neutron scattering", Journal of Computational Physics 380 (2019) 400-407,
https://doi.org/10.1016/j.jcp.2018.11.043
For detailed usage conditions and licensing of this open source project, see:
https://github.com/mctools/ncrystal/blob/master/NOTICE
https://github.com/mctools/ncrystal/blob/master/LICENSE
https://github.com/mctools/ncrystal/blob/master/ncrystal_extra/LICENSE
"""
################################################################################
## ##
## This file is part of NCrystal (see https://mctools.github.io/ncrystal/) ##
## ##
## Copyright 2015-2021 NCrystal developers ##
## ##
## Licensed under the Apache License, Version 2.0 (the "License"); ##
## you may not use this file except in compliance with the License. ##
## You may obtain a copy of the License at ##
## ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
## Unless required by applicable law or agreed to in writing, software ##
## distributed under the License is distributed on an "AS IS" BASIS, ##
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ##
## See the License for the specific language governing permissions and ##
## limitations under the License. ##
## ##
################################################################################
__license__ = "Apache 2.0, http://www.apache.org/licenses/LICENSE-2.0"
__version__ = '2.6.1'
__status__ = "Production"
__author__ = "NCrystal developers (<NAME>, <NAME>)"
__copyright__ = "Copyright 2015-2021 %s"%__author__
__maintainer__ = __author__
__email__ = "<EMAIL>"
#Only put the few most important items in __all__, to prevent cluttering on
#wildcard imports. Specifically this is the exceptions, the most important API
#classes, the factory functions, and the constants:
__all__ = [ 'NCException','NCFileNotFound','NCDataLoadError','NCMissingInfo','NCCalcError',
'NCLogicError','NCBadInput','RCBase','TextData','Info','CalcBase','Process',
'Absorption','Scatter','AtomData','FileListEntry','createTextData',
'createInfo','createScatter','createScatterIndependentRNG','createAbsorption',
'constant_c','constant_dalton2kg','constant_dalton2eVc2','constant_avogadro',
'constant_boltzmann','const_neutron_mass_amu','constant_planck']
import sys
pyversion = sys.version_info[0:3]
_minpyversion=(3,5,0)
if pyversion < (3,0,0):
raise SystemExit('NCrystal no longer supports Python2.')
if pyversion < _minpyversion:
    print('WARNING: Unsupported python version %i.%i.%i detected (recommended is %i.%i.%i or later).'%(pyversion+_minpyversion))
import numbers
import pathlib
import os
import copy
import ctypes
import weakref
###################################
#Convert cstr<->str:
def _str2cstr(s):
#converts any string (str,bytes,unicode,path) to bytes
if hasattr(s,'__fspath__'):
s=str(s)
try:
return s if isinstance(s,bytes) else s.encode('ascii')
except UnicodeEncodeError:
#Attempt with file-system encoding, in case of non-ASCII path names:
return s.encode(sys.getfilesystemencoding())
def _cstr2str(s):
#converts bytes object to str (unicode in py3, bytes in py2)
try:
return s if isinstance(s,str) else s.decode('ascii')
except UnicodeDecodeError:
return s.decode(sys.getfilesystemencoding())
###################################
#Same as NCRYSTAL_VERSION macro:
version_num = sum(int(i)*j for i,j in zip(__version__.split('.'),(1000000,1000,1)))
class NCException(RuntimeError):
"""Base class for all exceptions raised by NCrystal code"""
pass
class NCFileNotFound(NCException):
pass
class NCDataLoadError(NCException):
pass
class NCMissingInfo(NCException):
pass
class NCCalcError(NCException):
pass
class NCLogicError(NCException):
pass
class NCBadInput(NCException):
pass
#some constants (NB: Copied here from NCMath.hh - must keep synchronized!! Also,
#remember to include in __all__ list above):
constant_c = 299792458e10# speed of light in Aa/s
constant_dalton2kg = 1.660539040e-27# amu to kg
constant_dalton2eVc2 = 931494095.17# amu to eV/c^2
constant_avogadro = 6.022140857e23# mol^-1
constant_boltzmann = 8.6173303e-5# eV/K
const_neutron_mass_amu = 1.00866491588# [amu]
constant_planck = 4.135667662e-15 # [eV*s]
_k4Pidiv100 = 0.125663706143591729538505735331180115367886776
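def _example_wl2ekin_from_constants(wl_aa=1.8):
    """Small illustrative sketch (not part of the NCrystal API): convert a neutron
    wavelength in Angstrom to kinetic energy in eV using only the constants above,
    E = (h*c)^2 / (2*m*c^2*lambda^2). For 1.8 Aa this gives roughly 0.0252 eV."""
    mc2 = const_neutron_mass_amu * constant_dalton2eVc2
    return (constant_planck*constant_c)**2 / (2.0*mc2*wl_aa*wl_aa)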
def _find_nclib():
#If NCRYSTAL_LIB env var is set, we try that and only that:
override=os.environ.get('NCRYSTAL_LIB',None)
if override:
override = pathlib.Path(override)
if not override.exists() or override.is_dir():
raise NCFileNotFound('NCRYSTAL_LIB environment variable is set but does not point to an actual file.')
return override.absolute().resolve()
try:
if __name__ != '__main__':
#normal import
from . import _nclibpath
else:
#work if running as script:
sys.path.insert(0,str(pathlib.Path(__file__).absolute().parent))
import _nclibpath
sys.path.pop(0)
except ImportError:
raise NCFileNotFound('Autogenerated _nclibpath.py module not found (it should have been generated by'
+' CMake during installation). In this case you must set the environment variable'
+' NCRYSTAL_LIB to point at the compiled NCrystal library.')
_ = pathlib.Path(_nclibpath.liblocation)
if not _.is_absolute():
_ = (pathlib.Path(__file__).absolute().parent / _)
if not _.exists() or _.is_dir():
raise NCFileNotFound('Autogenerated _nclibpath.py module was found but no file exists in the indicated'
+' library location (%s). Either reinstall NCrystal or try to use the environment variable'%_
+' NCRYSTAL_LIB to point at the compiled NCrystal library.')
return _.resolve()
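# A typical override therefore looks like (path purely illustrative), exported in
# the shell before this module is imported:
#   export NCRYSTAL_LIB=/opt/ncrystal/lib/libNCrystal.so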
try:
import numpy as _np
except ImportError:
_np = None
def _ensure_numpy():
if not _np:
raise NCException("Numpy not available - array based functionality is unavailable")
_keepalive = []
def _load(nclib_filename):
_nclib = ctypes.CDLL(nclib_filename)
_int,_intp,_uint,_uintp,_dbl,_dblp,_cstr,_voidp = (ctypes.c_int, ctypes.POINTER(ctypes.c_int),
ctypes.c_uint,ctypes.POINTER(ctypes.c_uint), ctypes.c_double,
ctypes.POINTER(ctypes.c_double), ctypes.c_char_p, ctypes.c_void_p)
_ulong = ctypes.c_ulong
_charptr = ctypes.POINTER(ctypes.c_char)
_cstrp = ctypes.POINTER(_cstr)
_cstrpp = ctypes.POINTER(_cstrp)
_dblpp = ctypes.POINTER(_dblp)
ndarray_to_dblp = lambda a : a.ctypes.data_as(_dblp)
ndarray_to_uintp = lambda a : a.ctypes.data_as(_uintp)
def _create_numpy_double_array(n):
_ensure_numpy()
a=_np.empty(n,dtype=_dbl)
return a,ndarray_to_dblp(a)
def _create_numpy_unsigned_array(n):
_ensure_numpy()
a=_np.empty(n,dtype=_uint)
return a,ndarray_to_uintp(a)
class ncrystal_info_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
class ncrystal_process_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
class ncrystal_scatter_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
class ncrystal_absorption_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
class ncrystal_atomdata_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
functions = {}
#Exceptions:
_errmap = {'FileNotFound':NCFileNotFound,
'DataLoadError':NCDataLoadError,
'MissingInfo':NCMissingInfo,
'CalcError':NCCalcError,
'LogicError':NCLogicError,
'BadInput':NCBadInput}
def _raise_err():
assert _ncerror()#checks there was an error
tm=(_cstr2str(_ncerror_type()),_cstr2str(_ncerror_msg()))
_ncerror_clear()
#TODO: Provide line number / file as well?
e=_errmap.get(tm[0],NCException)(tm[1])
e.message = tm[1]#to avoid warnings in py 2.6
raise e
#helper class for exporting the functions:
def _wrap(fct_name,restype,argtypes,take_ref = False, hide=False, error_check=True):
assert isinstance(argtypes,tuple)
raw=getattr(_nclib,fct_name)
raw.argtypes=argtypes
raw.restype=restype
if take_ref:
assert len(argtypes)==1
fct = lambda arg : raw(ctypes.byref(arg))
else:
fct = lambda *args : raw(*args)
if error_check:
#NB: we should read about return types in the ctypes tutorial. Apparently one
#can just set an error checking function as the restype.
raw_fct = fct
def fcte(*aaa):
r = raw_fct(*aaa)
if _ncerror():
_raise_err()
return r
fct=fcte
if not hide:
functions[fct_name] = fct
return fct
lib_version = _cstr2str(_wrap('ncrystal_version_str',_cstr,tuple(),hide=True,error_check=False)())
if lib_version != __version__:
raise RuntimeError("ERROR: Version mismatch detected between NCrystal python code (v%s)"
" and loaded binary"" library (v%s). Control which NCrystal library"
" to load with the NCRYSTAL_LIB env var."%(__version__,lib_version))
_wrap('ncrystal_sethaltonerror',_int,(_int,),hide=True,error_check=False)(False)
_wrap('ncrystal_setquietonerror',_int,(_int,),hide=True,error_check=False)(True)
_ncerror = _wrap('ncrystal_error',_int,tuple(),hide=True,error_check=False)
_ncerror_msg = _wrap('ncrystal_lasterror',_cstr,tuple(),hide=True,error_check=False)
_ncerror_type = _wrap('ncrystal_lasterrortype',_cstr,tuple(),hide=True,error_check=False)
_ncerror_clear = _wrap('ncrystal_clearerror',None,tuple(),hide=True,error_check=False)
_wrap('ncrystal_refcount',_int,(_voidp,),take_ref=True)
_wrap('ncrystal_valid',_int,(_voidp,),take_ref=True)
#NB: For ncrystal_unref we use take_ref=False, so RCBase.__del__ can cache
#the result of ctypes.byref(rawobj). This is needed since the ctypes module
#might have been unloaded before RCBase.__del__ is called:
_wrap('ncrystal_unref',None,(_voidp,),take_ref=False)
_wrap('ncrystal_cast_scat2proc',ncrystal_process_t,(ncrystal_scatter_t,))
_wrap('ncrystal_cast_abs2proc',ncrystal_process_t,(ncrystal_absorption_t,))
_wrap('ncrystal_dump',None,(ncrystal_info_t,))
_wrap('ncrystal_ekin2wl',_dbl,(_dbl,))
_wrap('ncrystal_wl2ekin',_dbl,(_dbl,))
_wrap('ncrystal_isnonoriented',_int,(ncrystal_process_t,))
_wrap('ncrystal_name',_cstr,(ncrystal_process_t,))
_wrap('ncrystal_debyetemp2msd',_dbl,(_dbl,_dbl,_dbl))
_wrap('ncrystal_msd2debyetemp',_dbl,(_dbl,_dbl,_dbl))
_wrap('ncrystal_create_atomdata_fromdb',ncrystal_atomdata_t,(_uint,_uint))
_wrap('ncrystal_create_atomdata_fromdbstr',ncrystal_atomdata_t,(_cstr,))
_raw_atomdb_getn = _wrap('ncrystal_atomdatadb_getnentries',_uint,tuple(), hide=True )
_raw_atomdb_getall = _wrap('ncrystal_atomdatadb_getallentries',_uint,(_uintp,_uintp), hide=True )
def atomdb_getall_za():
n = _raw_atomdb_getn()
zvals,zvalsptr = _create_numpy_unsigned_array(n)
avals,avalsptr = _create_numpy_unsigned_array(n)
_raw_atomdb_getall(zvalsptr,avalsptr)
za=_np.stack((zvals,avals)).T
return za
functions['atomdb_getall_za']=atomdb_getall_za
_wrap('ncrystal_info_natominfo',_uint,(ncrystal_info_t,))
_wrap('ncrystal_info_hasatommsd',_int,(ncrystal_info_t,))
_raw_info_getatominfo = _wrap('ncrystal_info_getatominfo',None,(ncrystal_info_t,_uint,_uintp,_uintp,_dblp,_dblp),hide=True)
def ncrystal_info_getatominfo(nfo,iatom):
atomidx,n,dt,msd=_uint(),_uint(),_dbl(),_dbl()
_raw_info_getatominfo(nfo,iatom,atomidx,n,dt,msd)
return (atomidx.value,n.value,dt.value,msd.value)
functions['ncrystal_info_getatominfo'] = ncrystal_info_getatominfo
_raw_info_getatompos = _wrap('ncrystal_info_getatompos',None,(ncrystal_info_t,_uint,_uint,_dblp,_dblp,_dblp),hide=True)
def ncrystal_info_getatompos(nfo,iatom,ipos):
x,y,z=_dbl(),_dbl(),_dbl()
_raw_info_getatompos(nfo,iatom,ipos,x,y,z)
return x.value, y.value, z.value
functions['ncrystal_info_getatompos'] = ncrystal_info_getatompos
for s in ('temperature','xsectabsorption','xsectfree','density','numberdensity'):
_wrap('ncrystal_info_get%s'%s,_dbl,(ncrystal_info_t,))
_raw_info_getstruct = _wrap('ncrystal_info_getstructure',_int,(ncrystal_info_t,_uintp,_dblp,_dblp,_dblp,_dblp,_dblp,_dblp,_dblp,_uintp))
def ncrystal_info_getstructure(nfo):
sg,natom=_uint(),_uint()
a,b,c,alpha,beta,gamma,vol = _dbl(),_dbl(),_dbl(),_dbl(),_dbl(),_dbl(),_dbl(),
if _raw_info_getstruct(nfo,sg,a,b,c,alpha,beta,gamma,vol,natom) == 0:
return {}
return dict(spacegroup=int(sg.value),a=a.value,b=b.value,c=c.value,alpha=alpha.value,
beta=beta.value,gamma=gamma.value,volume=vol.value,n_atoms=int(natom.value))
functions['ncrystal_info_getstructure'] = ncrystal_info_getstructure
_wrap('ncrystal_info_nhkl',_int,(ncrystal_info_t,))
_wrap('ncrystal_info_hkl_dlower',_dbl,(ncrystal_info_t,))
_wrap('ncrystal_info_hkl_dupper',_dbl,(ncrystal_info_t,))
_wrap('ncrystal_info_gethkl',None,(ncrystal_info_t,_int,_intp,_intp,_intp,_intp,_dblp,_dblp))
_wrap('ncrystal_info_dspacing_from_hkl',_dbl,(ncrystal_info_t,_int,_int,_int))
functions['ncrystal_info_gethkl_setuppars'] = lambda : (_int(),_int(),_int(),_int(),_dbl(),_dbl())
_wrap('ncrystal_info_ndyninfo',_uint,(ncrystal_info_t,))
_raw_di_base = _wrap('ncrystal_dyninfo_base',None,(ncrystal_info_t,_uint,_dblp,_uintp,_dblp,_uintp),hide=True)
_raw_di_scatknl = _wrap('ncrystal_dyninfo_extract_scatknl',None,(ncrystal_info_t,_uint,_uint,_dblp,_uintp,_uintp,_uintp,
_dblpp,_dblpp,_dblpp,_dblpp),hide=True)
_raw_di_vdos = _wrap('ncrystal_dyninfo_extract_vdos',None,(ncrystal_info_t,_uint,_dblp,_dblp,_uintp,_dblpp),hide=True)
_raw_di_vdosdebye = _wrap('ncrystal_dyninfo_extract_vdosdebye',None,(ncrystal_info_t,_uint,_dblp),hide=True)
_raw_di_vdos_input = _wrap('ncrystal_dyninfo_extract_vdos_input',None,(ncrystal_info_t,_uint,_uintp,_dblpp,_uintp,_dblpp),hide=True)
def ncrystal_dyninfo_base(key):
infoobj,dynidx = key
fr,tt,atomindex,ditype=_dbl(),_dbl(),_uint(),_uint()
_raw_di_base(infoobj,dynidx,fr,atomindex,tt,ditype)
return (fr.value,tt.value,atomindex.value,ditype.value)
def ncrystal_dyninfo_extract_scatknl(key,vdoslux):
infoobj,dynidx = key
sugEmax,ne,na,nb,e,a,b,sab = _dbl(),_uint(),_uint(),_uint(),_dblp(),_dblp(),_dblp(),_dblp()
_raw_di_scatknl(infoobj,dynidx,vdoslux,sugEmax,ne,na,nb,
ctypes.byref(e),ctypes.byref(a),ctypes.byref(b),ctypes.byref(sab))
return (sugEmax.value,ne.value,na.value,nb.value,e,a,b,sab)
def ncrystal_dyninfo_extract_vdos(key):
infoobj,dynidx = key
egrid_min,egrid_max,ndensity,densityptr = _dbl(),_dbl(),_uint(),_dblp()
_raw_di_vdos(infoobj,dynidx,egrid_min,egrid_max,ndensity,ctypes.byref(densityptr))
return (egrid_min.value,egrid_max.value,ndensity.value,densityptr)
def ncrystal_dyninfo_extract_vdosdebye(key):
infoobj,dynidx = key
td=_dbl()
_raw_di_vdosdebye(infoobj,dynidx,td)
return td.value
def ncrystal_dyninfo_extract_vdos_input(key):
infoobj,dynidx = key
negrid,egridptr,ndensity,densityptr = _uint(),_dblp(),_uint(),_dblp()
_raw_di_vdos_input(infoobj,dynidx,negrid,ctypes.byref(egridptr),ndensity,ctypes.byref(densityptr));
return (negrid.value,egridptr,ndensity.value,densityptr)
functions['ncrystal_dyninfo_base'] = ncrystal_dyninfo_base
functions['ncrystal_dyninfo_extract_scatknl'] = ncrystal_dyninfo_extract_scatknl
functions['ncrystal_dyninfo_extract_vdos'] = ncrystal_dyninfo_extract_vdos
functions['ncrystal_dyninfo_extract_vdosdebye'] = ncrystal_dyninfo_extract_vdosdebye
functions['ncrystal_dyninfo_extract_vdos_input'] = ncrystal_dyninfo_extract_vdos_input
_wrap('ncrystal_info_ncomponents',_uint,(ncrystal_info_t,))
_raw_info_getcomp=_wrap('ncrystal_info_getcomponent',None,(ncrystal_info_t,_uint,_uintp,_dblp),hide=True)
def ncrystal_info_getcomp(nfo,icomp):
aidx,fraction=_uint(),_dbl()
_raw_info_getcomp(nfo,icomp,aidx,fraction)
return aidx.value,fraction.value
functions['ncrystal_info_getcomp']=ncrystal_info_getcomp
_wrap('ncrystal_create_atomdata',ncrystal_atomdata_t,(ncrystal_info_t,_uint))
_raw_atomdata_subcomp = _wrap('ncrystal_create_atomdata_subcomp',ncrystal_atomdata_t,
(ncrystal_atomdata_t,_uint,_dblp),hide=True)
_raw_atomdata_getfields=_wrap('ncrystal_atomdata_getfields',None,(ncrystal_atomdata_t,_cstrp,_cstrp,
_dblp,_dblp,_dblp,_dblp,
_uintp,_uintp,_uintp),hide=True)
def ncrystal_atomdata_createsubcomp(ad,icomp):
fraction = _dbl()
comp_ad = _raw_atomdata_subcomp(ad,icomp,fraction)
return (comp_ad,fraction.value)
functions['ncrystal_atomdata_createsubcomp']=ncrystal_atomdata_createsubcomp
def ncrystal_atomdata_getfields(ad):
mass_amu,sigma_inc,scatlen_coh,sigma_abs=_dbl(),_dbl(),_dbl(),_dbl()
dl,descr=_cstr(),_cstr()
ncomp,zval,aval = _uint(),_uint(),_uint()
_raw_atomdata_getfields(ad,ctypes.byref(dl),ctypes.byref(descr),
mass_amu,sigma_inc,scatlen_coh,sigma_abs,
ncomp,zval,aval)
return dict(m=mass_amu.value,incxs=sigma_inc.value,cohsl_fm=scatlen_coh.value,absxs=sigma_abs.value,
dl=_cstr2str(dl.value),descr=_cstr2str(descr.value),
ncomp=ncomp.value,z=zval.value,a=aval.value)
functions['ncrystal_atomdata_getfields'] = ncrystal_atomdata_getfields
_raw_ncustom = _wrap('ncrystal_info_ncustomsections',_uint,(ncrystal_info_t,),hide=True)
_raw_csec_name = _wrap('ncrystal_info_customsec_name',_cstr,(ncrystal_info_t,_uint),hide=True)
_raw_csec_nlines = _wrap('ncrystal_info_customsec_nlines',_uint,(ncrystal_info_t,_uint),hide=True)
_raw_csec_nparts = _wrap('ncrystal_info_customline_nparts',_uint,(ncrystal_info_t,_uint,_uint),hide=True)
_raw_csec_part = _wrap('ncrystal_info_customline_getpart',_cstr,(ncrystal_info_t,_uint,_uint,_uint),hide=True)
def ncrystal_info_getcustomsections(nfo):
n=_raw_ncustom(nfo)
if n==0:
return tuple()
out=[]
for isec in range(n):
lines=[]
secname = _cstr2str(_raw_csec_name(nfo,isec))
nlines = _raw_csec_nlines(nfo,isec)
for iline in range(nlines):
nparts=_raw_csec_nparts(nfo,isec,iline)
parts=[]
for ipart in range(nparts):
parts.append(_cstr2str(_raw_csec_part(nfo,isec,iline,ipart)))
lines.append(tuple(parts))
out.append((secname,tuple(lines)))
return tuple(out)
functions['ncrystal_info_getcustomsections'] = ncrystal_info_getcustomsections
_raw_reginmemfd = _wrap('ncrystal_register_in_mem_file_data',None,(_cstr,_cstr),hide=True)
def ncrystal_register_in_mem_file_data(virtual_filename,data):
_raw_reginmemfd(_str2cstr(virtual_filename),
_str2cstr(data))
functions['ncrystal_register_in_mem_file_data']=ncrystal_register_in_mem_file_data
def _prepare_many(ekin,repeat):
if _np is None and not repeat is None:
raise NCBadInput('Can not use "repeat" parameter when Numpy is absent on the system')
if repeat is None and not hasattr(ekin,'__len__'):
return None#scalar case, array interface not triggered
repeat = 1 if repeat is None else repeat
ekin = (ekin if hasattr(ekin,'ctypes') else _np.asfarray(ekin) ) if hasattr(ekin,'__len__') else _np.ones(1)*ekin
#NB: returning the ekin object itself is important in order to keep a reference to it after the call:
return ndarray_to_dblp(ekin),len(ekin),repeat,ekin
_raw_xs_no = _wrap('ncrystal_crosssection_nonoriented',None,(ncrystal_process_t,_dbl,_dblp),hide=True)
_raw_xs_no_many = _wrap('ncrystal_crosssection_nonoriented_many',None,(ncrystal_process_t,_dblp,_ulong,
_ulong,_dblp),hide=True)
def ncrystal_crosssection_nonoriented(scat,ekin,repeat=None):
many = _prepare_many(ekin,repeat)
if many is None:
res = _dbl()
_raw_xs_no(scat,ekin,res)
return res.value
else:
ekin_ct,n_ekin,repeat,ekin_nparr = many
xs, xs_ct = _create_numpy_double_array(n_ekin*repeat)
_raw_xs_no_many(scat,ekin_ct,n_ekin,repeat,xs_ct)
return xs
functions['ncrystal_crosssection_nonoriented'] = ncrystal_crosssection_nonoriented
_raw_domain = _wrap('ncrystal_domain',None,(ncrystal_process_t,_dblp,_dblp),hide=True)
def ncrystal_domain(proc):
a,b = _dbl(),_dbl()
_raw_domain(proc,a,b)
return (a.value,b.value)
functions['ncrystal_domain'] = ncrystal_domain
_raw_samplesct_iso =_wrap('ncrystal_samplescatterisotropic',None,(ncrystal_scatter_t,_dbl,_dblp,_dblp),hide=True)
_raw_samplesct_iso_many =_wrap('ncrystal_samplescatterisotropic_many',None,
(ncrystal_scatter_t,_dblp,_ulong,_ulong,_dblp,_dblp),hide=True)
_raw_samplescat = _wrap('ncrystal_samplescatter',None,( ncrystal_scatter_t, _dbl,_dbl*3,_dblp,_dbl*3),hide=True)
_raw_samplescat_many = _wrap('ncrystal_samplescatter_many',None,( ncrystal_scatter_t,_dbl,_dbl*3,_ulong,
_dblp,_dblp,_dblp,_dblp),hide=True)
def ncrystal_samplesct_iso(scat,ekin,repeat=None):
many = _prepare_many(ekin,repeat)
if many is None:
ekin_final,mu = _dbl(),_dbl()
_raw_samplesct_iso(scat,ekin,ekin_final,mu)
return ekin_final.value,mu.value
else:
ekin_ct,n_ekin,repeat,ekin_nparr = many
ekin_final, ekin_final_ct = _create_numpy_double_array(n_ekin*repeat)
mu, mu_ct = _create_numpy_double_array(n_ekin*repeat)
_raw_samplesct_iso_many(scat,ekin_ct,n_ekin,repeat,ekin_final_ct,mu_ct)
return ekin_final,mu
functions['ncrystal_samplesct_iso'] = ncrystal_samplesct_iso
def ncrystal_samplesct(scat, ekin, direction, repeat):
cdir = (_dbl * 3)(*direction)
if not repeat:
res_dir = (_dbl * 3)(0,0,0)
res_ekin = _dbl()
_raw_samplescat(scat,ekin,cdir,res_ekin,res_dir)
return res_ekin.value,(res_dir[0],res_dir[1],res_dir[2])
else:
assert repeat>=1
res_ekin, res_ekin_ct = _create_numpy_double_array(repeat)
res_ux, res_ux_ct = _create_numpy_double_array(repeat)
res_uy, res_uy_ct = _create_numpy_double_array(repeat)
res_uz, res_uz_ct = _create_numpy_double_array(repeat)
_raw_samplescat_many(scat,ekin,cdir,repeat,res_ekin_ct,res_ux_ct,res_uy_ct,res_uz_ct)
return res_ekin,(res_ux,res_uy,res_uz)
functions['ncrystal_samplesct']=ncrystal_samplesct
_raw_xs = _wrap('ncrystal_crosssection',None,(ncrystal_process_t,_dbl,_dbl*3,_dblp),hide=True)
def ncrystal_crosssection( proc, ekin, direction):
res = _dbl()
cdir = (_dbl * 3)(*direction)
_raw_xs(proc,ekin,cdir,res)
return res.value
functions['ncrystal_crosssection'] = ncrystal_crosssection
#Obsolete:
_raw_gs_no = _wrap('ncrystal_genscatter_nonoriented',None,(ncrystal_scatter_t,_dbl,_dblp,_dblp),hide=True)
_raw_gs_no_many = _wrap('ncrystal_genscatter_nonoriented_many',None,(ncrystal_scatter_t,_dblp,_ulong,
_ulong,_dblp,_dblp),hide=True)
def ncrystal_genscatter_nonoriented(scat,ekin,repeat=None):
many = _prepare_many(ekin,repeat)
if many is None:
angle,de = _dbl(),_dbl()
_raw_gs_no(scat,ekin,angle,de)
return angle.value,de.value
else:
ekin_ct,n_ekin,repeat,ekin_nparr = many
angle, angle_ct = _create_numpy_double_array(n_ekin*repeat)
de, de_ct = _create_numpy_double_array(n_ekin*repeat)
_raw_gs_no_many(scat,ekin_ct,n_ekin,repeat,angle_ct,de_ct)
return angle,de
functions['ncrystal_genscatter_nonoriented'] = ncrystal_genscatter_nonoriented
_raw_gs = _wrap('ncrystal_genscatter',None,(ncrystal_scatter_t,_dbl,_dbl*3,_dbl*3,_dblp),hide=True)
_raw_gs_many = _wrap('ncrystal_genscatter_many',None,(ncrystal_scatter_t,_dbl,_dbl*3,
_ulong,_dblp,_dblp,_dblp,_dblp),hide=True)
def ncrystal_genscatter(scat, ekin, direction, repeat):
cdir = (_dbl * 3)(*direction)
if not repeat:
res_dir = (_dbl * 3)(0,0,0)
res_de = _dbl()
_raw_gs(scat,ekin,cdir,res_dir,res_de)
return (res_dir[0],res_dir[1],res_dir[2]),res_de.value
else:
assert repeat>=1
res_ux, res_ux_ct = _create_numpy_double_array(repeat)
res_uy, res_uy_ct = _create_numpy_double_array(repeat)
res_uz, res_uz_ct = _create_numpy_double_array(repeat)
res_de, res_de_ct = _create_numpy_double_array(repeat)
_raw_gs_many(scat,ekin,cdir,repeat,res_ux_ct,res_uy_ct,res_uz_ct,res_de_ct)
return (res_ux,res_uy,res_uz),res_de
functions['ncrystal_genscatter']=ncrystal_genscatter
_wrap('ncrystal_create_info',ncrystal_info_t,(_cstr,))
_wrap('ncrystal_create_scatter',ncrystal_scatter_t,(_cstr,))
_wrap('ncrystal_create_scatter_builtinrng',ncrystal_scatter_t,(_cstr,_ulong))
_wrap('ncrystal_create_absorption',ncrystal_absorption_t,(_cstr,))
_raw_multicreate_direct = _wrap('ncrystal_multicreate_direct',None,
( _cstr, _cstr, _cstr,
ctypes.POINTER(ncrystal_info_t),
ctypes.POINTER(ncrystal_scatter_t),
ctypes.POINTER(ncrystal_absorption_t) ),hide=True)
nullptr_ncrystal_info_t = ctypes.cast(None, ctypes.POINTER(ncrystal_info_t))
nullptr_ncrystal_scatter_t = ctypes.cast(None, ctypes.POINTER(ncrystal_scatter_t))
nullptr_ncrystal_absorption_t = ctypes.cast(None, ctypes.POINTER(ncrystal_absorption_t))
def multicreate_direct(data,dataType,cfg_params,doI,doS,doA):
rawi = ncrystal_info_t() if doI else None
raws = ncrystal_scatter_t() if doS else None
rawa = ncrystal_absorption_t() if doA else None
_raw_multicreate_direct( _str2cstr(data),_str2cstr(dataType or "" ),_str2cstr(cfg_params or ""),
ctypes.byref(rawi) if rawi else nullptr_ncrystal_info_t,
ctypes.byref(raws) if raws else nullptr_ncrystal_scatter_t,
ctypes.byref(rawa) if rawa else nullptr_ncrystal_absorption_t )
return rawi,raws,rawa
functions['multicreate_direct'] = multicreate_direct
_wrap('ncrystal_setbuiltinrandgen',None,tuple())
_RANDGENFCTTYPE = ctypes.CFUNCTYPE( _dbl )
_raw_setrand = _wrap('ncrystal_setrandgen',None,(_RANDGENFCTTYPE,),hide=True)
def ncrystal_setrandgen(randfct):
#Set random function, keeping references as needed (otherwise fct ptrs
#kept on C++ side will suddenly stop working!) and casting None to a null-ptr.
if not randfct:
keepalive=(None,ctypes.cast(None, _RANDGENFCTTYPE))
else:
keepalive=(randfct,_RANDGENFCTTYPE(randfct))#keep refs!
_keepalive.append(keepalive)
_raw_setrand(keepalive[1])
functions['ncrystal_setrandgen'] = ncrystal_setrandgen
_wrap('ncrystal_clone_absorption',ncrystal_absorption_t,(ncrystal_absorption_t,))
_wrap('ncrystal_clone_scatter',ncrystal_scatter_t,(ncrystal_scatter_t,))
_wrap('ncrystal_clone_scatter_rngbyidx',ncrystal_scatter_t,(ncrystal_scatter_t,_ulong))
_wrap('ncrystal_clone_scatter_rngforcurrentthread',ncrystal_scatter_t,(ncrystal_scatter_t,))
_wrap('ncrystal_decodecfg_packfact',_dbl,(_cstr,))
_wrap('ncrystal_decodecfg_vdoslux',_uint,(_cstr,))
_wrap('ncrystal_disable_caching',None,tuple())
_wrap('ncrystal_enable_caching',None,tuple())
_wrap('ncrystal_has_factory',_int,(_cstr,))
_wrap('ncrystal_clear_caches',None,tuple())
_wrap('ncrystal_rngsupportsstatemanip_ofscatter',_int,( ncrystal_scatter_t, ))
_wrap('ncrystal_setrngstate_ofscatter',None,(ncrystal_scatter_t, _cstr))
_raw_getrngstate_scat = _wrap('ncrystal_getrngstate_ofscatter',_charptr,( ncrystal_scatter_t,),hide=True)
def nc_getrngstate_scat(rawscatobj):
rawstate = _raw_getrngstate_scat(rawscatobj)
if not rawstate:
#null ptr, i.e. state manipulation is not supported
return None
state=_cstr2str(ctypes.cast(rawstate,_cstr).value)
_raw_deallocstr(rawstate)
return state
functions['nc_getrngstate_scat']=nc_getrngstate_scat
_raw_gettextdata = _wrap('ncrystal_get_text_data',_cstrp,(_cstr,),hide=True)
_raw_deallocstr = _wrap('ncrystal_dealloc_string',None,(_charptr,),hide=True)
def nc_gettextdata(name):
l = _raw_gettextdata(_str2cstr(str(name)))
assert l is not None
n = 5
res = [l[i].decode() for i in range(n)]
assert isinstance(res[0],str)
_raw_deallocstrlist(n,l)
return res
functions['nc_gettextdata'] = nc_gettextdata
_raw_getfilelist = _wrap('ncrystal_get_file_list',None,(_uintp,_cstrpp),hide=True)
_raw_deallocstrlist = _wrap('ncrystal_dealloc_stringlist',None,(_uint,_cstrp),hide=True)
def ncrystal_get_filelist():
n,l = _uint(),_cstrp()
_raw_getfilelist(n,ctypes.byref(l))
assert n.value%4==0
res=[]
for i in range(n.value//4):
res += [ (l[i*4].decode(),l[i*4+1].decode(),l[i*4+2].decode(),l[i*4+3].decode()) ]
_raw_deallocstrlist(n,l)
return res
functions['ncrystal_get_filelist'] = ncrystal_get_filelist
_raw_getpluginlist = _wrap('ncrystal_get_plugin_list',None,(_uintp,_cstrpp),hide=True)
def ncrystal_get_pluginlist():
n,l = _uint(),_cstrp()
_raw_getpluginlist(n,ctypes.byref(l))
assert n.value%3==0
res=[]
for i in range(n.value//3):
pluginname,filename,plugintype=l[i*3].decode(),l[i*3+1].decode(),l[i*3+2].decode()
res+=[(pluginname,filename,plugintype)]
_raw_deallocstrlist(n,l)
return res
functions['ncrystal_get_pluginlist'] = ncrystal_get_pluginlist
_wrap('ncrystal_add_custom_search_dir',None,(_cstr,))
_wrap('ncrystal_remove_custom_search_dirs',None,tuple())
_wrap('ncrystal_enable_abspaths',None,(_int,))
_wrap('ncrystal_enable_relpaths',None,(_int,))
_wrap('ncrystal_enable_stddatalib',None,(_int,_cstr))
_wrap('ncrystal_enable_stdsearchpath',None,(_int,))
_wrap('ncrystal_remove_all_data_sources',None,tuple())
return functions
_rawfct = _load(_find_nclib())
def decodecfg_packfact(cfgstr):
"""Extract packfact value from cfgstr"""
return float(_rawfct['ncrystal_decodecfg_packfact'](_str2cstr(cfgstr)))
def decodecfg_vdoslux(cfgstr):
"""Extract vdoslux value from cfgstr"""
return int(_rawfct['ncrystal_decodecfg_vdoslux'](_str2cstr(cfgstr)))
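def _example_decodecfg():
    # Illustrative sketch (not part of the API): both helpers above take an
    # ordinary NCrystal cfg-string; the file name and parameters are examples.
    lux = decodecfg_vdoslux('Al_sg225.ncmat;vdoslux=2')      # -> 2
    pf  = decodecfg_packfact('Al_sg225.ncmat;packfact=0.9')  # -> 0.9
    return lux, pf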
def createVDOSDebye(debye_temperature):
"""Create simplified VDOS according to the Debye model"""
_ensure_numpy()
#NB: Must keep function exactly synchronised with createVDOSDebye function
#in .cc src (although leaving out temperature,boundXS,elementMassAMU args
#here):
debye_energy = constant_boltzmann*debye_temperature;
vdos_egrid = _np.linspace(0.5*debye_energy,debye_energy,20);
scale = 1.0 / (debye_energy*debye_energy);
vdos_density = scale * (vdos_egrid**2)
#Actual returned egrid should contain only first and last value:
return (_np.asarray([vdos_egrid[0],vdos_egrid[-1]]) ,vdos_density)
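# For instance, a 300K Debye temperature yields a quadratic density sampled on a
# 20-point grid; only the two grid end points are returned, as noted above:
#   egrid, density = createVDOSDebye(300.0)
#   egrid         # ~[0.0129, 0.0259] eV, i.e. [0.5*kB*T_D, kB*T_D]
#   len(density)  # 20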
class RCBase:
"""Base class for all NCrystal objects"""
def __init__(self, rawobj):
"""internal usage only"""
self._rawobj = rawobj
#do not ref here, since ncrystal_create_xxx functions in C-interface already did so.
self._rawunref = _rawfct['ncrystal_unref']#keep fct reference
self.__rawobj_byref = ctypes.byref(rawobj)#keep byref(rawobj), since ctypes might
#disappear before __del__ is called.
def __del__(self):
if hasattr(self,'_rawunref') and self._rawunref:
self._rawunref(self.__rawobj_byref)
def refCount(self):
"""Access reference count of wrapped C++ object"""
return _rawfct['ncrystal_refcount'](self._rawobj)
def nc_assert(b,msg=""):
"""Assertion which throws NCLogicError on failure"""
if not bool(b):
raise NCLogicError(msg if msg else 'assertion failed')
class AtomData(RCBase):
"""Class providing physical constants related to a particular mix of
isotopes. This can be used to represent elements (i.e. all isotopes having
same Z) in either natural or enriched form, but can also be used to
represent atoms in doped crystals. E.g. if a small fraction (0.1%) of
Cr-ions replace some Al-ions in an Al2O3 lattice, the AtomData could
represent a mix of 0.1% Cr and 99.9% Al.
"""
def __init__(self,rawobj):
"""internal usage only"""
super(AtomData, self).__init__(rawobj)
f=_rawfct['ncrystal_atomdata_getfields'](rawobj)
self.__m = f['m']
self.__incxs = f['incxs']
self.__cohsl_fm = f['cohsl_fm']
self.__absxs = f['absxs']
self.__dl = f['dl']
self.__descr = f['descr']
self.__ncomp = f['ncomp']
self.__z = f['z']
self.__a = f['a']
self.__b2f = (self.__m/(self.__m+const_neutron_mass_amu))**2
self.__comp = [None]*self.__ncomp
self.__compalldone = (self.__ncomp==0)
def averageMassAMU(self):
"""Atomic mass in Daltons (averaged appropriately over constituents)"""
return self.__m
def coherentScatLen(self):
"""Coherent scattering length in sqrt(barn)=10fm"""
return self.__cohsl_fm*0.1#0.1 is sqrt(barn)/fm
def coherentScatLenFM(self):
"""Coherent scattering length in fm"""
return self.__cohsl_fm
def coherentXS(self):
"""Bound coherent cross section in barn. Same as 4*pi*coherentScatLen()**2"""
return _k4Pidiv100*self.__cohsl_fm**2
def incoherentXS(self):
"""Bound incoherent cross section in barn"""
return self.__incxs
def scatteringXS(self):
"""Bound scattering cross section in barn (same as coherentXS()+incoherentXS())"""
return self.__incxs+self.coherentXS()
def captureXS(self):
"""Absorption cross section in barn"""
return self.__absxs
def freeScatteringXS(self):
"""Free scattering cross section in barn (same as freeCoherentXS()+freeIncoherentXS())"""
return self.__b2f * self.scatteringXS()
def freeCoherentXS(self):
"""Free coherent cross section in barn."""
return self.__b2f * self.coherentXS()
def freeIncoherentXS(self):
"""Free incoherent cross section in barn."""
return self.__b2f * self.incoherentXS()
def isNaturalElement(self):
"""Natural element with no composition."""
return self.__z!=0 and self.__ncomp==0 and self.__a==0
def isSingleIsotope(self):
"""Single isotope with no composition."""
return self.__a!=0
def isComposite(self):
"""Composite definition. See nComponents(), getComponent() and components property"""
return self.__ncomp!=0
def isElement(self):
"""If number of protons per nuclei is well defined. This is true for natural
elements, single isotopes, and composites where all components
have the same number of protons per nuclei."""
return self.__z!=0
def Z(self):
"""Number of protons per nuclei (0 if not well defined)."""
return self.__z
def elementName(self):
"""If Z()!=0, this returns the corresponding element name ('H', 'He', ...).
Returns empty string when Z() is 0."""
if not self.__z:
return ''
#NB: We are relying on natural elements to return their element names in
#description(false). This is promised by a comment in NCAtomData.hh!
if self.isNaturalElement():
return self.__descr
return atomDB(self.__z).description(False)
def A(self):
"""Number of nucleons per nuclei (0 if not well defined or natural element)."""
return self.__a
class Component:
def __init__(self,fr,ad):
"""internal usage only"""
self.__fr = fr
self.__ad = ad
assert not ad.isTopLevel()
@property
def fraction(self):
"""Fraction (by count) of component in mixture"""
return self.__fr
@property
def data(self):
"""AtomData of component"""
return self.__ad
def __str__(self):
return '%g*AtomData(%s)'%(self.__fr,self.__ad.description(True))
def nComponents(self):
"""Number of sub-components in a mixture"""
return self.__ncomp
def getComponent(self,icomponent):
"""Get component in a mixture"""
c=self.__comp[icomponent]
if c:
return c
rawobj_subc,fraction=_rawfct['ncrystal_atomdata_createsubcomp'](self._rawobj,icomponent)
ad = AtomData(rawobj_subc)
c = AtomData.Component(fraction,ad)
self.__comp[icomponent] = c
return c
def getAllComponents(self):
"""Get list of all components"""
if self.__compalldone:
return self.__comp
for i,c in enumerate(self.__comp):
if not c:
self.getComponent(i)
self.__compalldone=True
return self.__comp
components = property(getAllComponents)
def displayLabel(self):
"""Short label which unique identifies an atom role within a particular material."""
return self.__dl
def isTopLevel(self):
"""Whether or not AtomData appears directly on an Info object (if not, it must
be a component (direct or indirect) of a top-level AtomData object)."""
return bool(self.__dl)
def description(self,includeValues=True):
"""Returns description of material as a string, with or without values."""
if includeValues:
zstr=' Z=%i'%self.__z if self.__z else ''
astr=' A=%i'%self.__a if self.__a else ''
_=(self.__descr,self.__cohsl_fm,self.coherentXS(),self.__incxs,
self.__absxs,self.__m,zstr,astr)
return'%s(cohSL=%gfm cohXS=%gbarn incXS=%gbarn absXS=%gbarn mass=%gamu%s%s)'%_
return self.__descr
def __str__(self):
descr=self.description()
return '%s=%s'%(self.__dl,descr) if self.__dl else descr
class Info(RCBase):
"""Class representing information about a given material"""
def __init__(self, cfgstr):
"""create Info object based on cfg-string (same as using createInfo(cfgstr))"""
if isinstance(cfgstr,tuple) and len(cfgstr)==2 and cfgstr[0]=='_rawobj_':
#Already got an ncrystal_info_t object:
rawobj = cfgstr[1]
else:
rawobj = _rawfct['ncrystal_create_info'](_str2cstr(cfgstr))
super(Info, self).__init__(rawobj)
self.__dyninfo=None
self.__atominfo=None
self.__custom=None
self.__atomdatas=[]
self.__comp=None
def _initComp(self):
assert self.__comp is None
nc = _rawfct['ncrystal_info_ncomponents'](self._rawobj)
self.__comp = []
for icomp in range(nc):
atomidx,fraction = _rawfct['ncrystal_info_getcomp'](self._rawobj,icomp)
self.__comp += [(fraction,self._provideAtomData(atomidx))]
return self.__comp
def hasComposition(self):
"""Whether basic composition is available."""
return bool(self._initComp() if self.__comp is None else self.__comp)
def getComposition(self):
"""Get basic composition as list of (fraction,AtomData). The list is empty when
no composition is available, and is always consistent with AtomInfo/DynInfo (if
present). """
return self._initComp() if self.__comp is None else self.__comp
composition=property(getComposition)
def dump(self):
"""Dump contained information to standard output"""
sys.stdout.flush()
sys.stderr.flush()
_rawfct['ncrystal_dump'](self._rawobj)
def hasTemperature(self):
"""Whether or not material has a temperature available"""
return _rawfct['ncrystal_info_gettemperature'](self._rawobj)>-1
def getTemperature(self):
"""Material temperature (in kelvin)"""
t=_rawfct['ncrystal_info_gettemperature'](self._rawobj)
nc_assert(t>-1)
return t
def hasGlobalDebyeTemperature(self):
"""OBSOLETE FUNCTION: The concept of global versus per-element Debye
temperatures has been removed. Please iterate over AtomInfo objects
instead (see getAtomInfos() function) and get the Debye Temperature
from those. This function will be removed in a future release.
"""
return False
def getGlobalDebyeTemperature(self):
"""OBSOLETE FUNCTION: The concept of global versus per-element Debye
temperatures has been removed. Please iterate over AtomInfo objects
instead (see getAtomInfos() function) and get the Debye Temperature
from those. Calling this function will always result in an exception
thrown for now, and the function will be removed in a future release.
"""
raise NCLogicError('The concept of global Debye temperatures has been removed. Iterate over'
+' AtomInfo objects instead and get the Debye temperature values from those.')
return None
def hasAtomDebyeTemp(self):
"""Whether AtomInfo objects are present and have Debye temperatures available
(they will either all have them available, or none of them will have
them available).
"""
if self.__atominfo is None:
self.__initAtomInfo()
return self.__atominfo[3]
def hasDebyeTemperature(self):
"""Alias for hasAtomDebyeTemp()."""
return self.hasAtomDebyeTemp()
def hasAnyDebyeTemperature(self):
"""OBSOLETE FUNCTION which will be removed in a future release. Please
call hasDebyeTemperature() instead.
"""
return self.hasAtomDebyeTemp()
def getDebyeTemperatureByElement(self,atomdata):
"""OBSOLETE FUNCTION which will be removed in a future release. Please access
the AtomInfo objects instead and query the Debye temperature there.
"""
if atomdata.isTopLevel():
for ai in self.atominfos:
if atomdata is ai.atomData:
return ai.debyeTemperature
raise NCBadInput('Invalid atomdata object passed to Info.getDebyeTemperatureByElement'
+' (must be top-level AtomData from the same Info object)')
def hasDensity(self):
"""Whether or not material has density available"""
return _rawfct['ncrystal_info_getdensity'](self._rawobj)>-1
def getDensity(self):
"""Get density in g/cm^3. See also getNumberDensity()."""
t=_rawfct['ncrystal_info_getdensity'](self._rawobj)
nc_assert(t>-1)
return t
def hasNumberDensity(self):
"""Whether or not material has number density available"""
return _rawfct['ncrystal_info_getnumberdensity'](self._rawobj)>-1
def getNumberDensity(self):
"""Get number density in atoms/angstrom^3. See also getDensity()."""
t=_rawfct['ncrystal_info_getnumberdensity'](self._rawobj)
nc_assert(t>-1)
return t
def hasXSectAbsorption(self):
"""Whether or not material has absorption cross section available"""
return _rawfct['ncrystal_info_getxsectabsorption'](self._rawobj)>-1
def getXSectAbsorption(self):
"""Absorption cross section in barn (at 2200m/s)"""
t=_rawfct['ncrystal_info_getxsectabsorption'](self._rawobj)
nc_assert(t>-1)
return t
def hasXSectFree(self):
"""Whether or not material has free scattering cross section available"""
return _rawfct['ncrystal_info_getxsectfree'](self._rawobj)>-1
def getXSectFree(self):
"""Saturated (free) scattering cross section in barn in the high-E limit"""
t=_rawfct['ncrystal_info_getxsectfree'](self._rawobj)
nc_assert(t>-1)
return t
def hasStructureInfo(self):
"""Whether or not material has crystal structure information available."""
return bool(_rawfct['ncrystal_info_getstructure'](self._rawobj))
def getStructureInfo(self):
"""Information about crystal structure."""
d=_rawfct['ncrystal_info_getstructure'](self._rawobj)
nc_assert(d)
return d
def _provideAtomData(self,atomindex):
if atomindex >= len(self.__atomdatas):
assert atomindex < 100000#sanity check
self.__atomdatas.extend([None,]*(atomindex+1-len(self.__atomdatas)))
obj = self.__atomdatas[atomindex]
if obj:
return obj
raw_ad = _rawfct['ncrystal_create_atomdata'](self._rawobj,atomindex)
obj = AtomData(raw_ad)
assert obj.isTopLevel()
self.__atomdatas[atomindex] = obj
return obj
class AtomInfo:
"""Class with information about a particular atom in a unit cell, including the
composition of atoms, positions, Debye temperature, and mean-squared-displacements.
"""
def __init__(self,theinfoobj,atomidx,n,dt,msd,pos):
"""For internal usage only."""
assert dt is None or ( isinstance(dt,float) and dt > 0.0 )
assert msd is None or ( isinstance(msd,float) and msd > 0.0 )
self._info_wr = weakref.ref(theinfoobj)
self._atomidx,self.__n,self.__dt,self.__msd = atomidx,n,dt,msd
self.__pos = tuple(pos)#tuple, since it is immutable
self.__atomdata = None
self.__correspDI_wp = None
def correspondingDynamicInfo(self):
"""Get corresponding DynamicInfo object from the same Info object. Returns None if Info object does not have dynamic info available"""
if self.__correspDI_wp is not None:
if self.__correspDI_wp == False:
return None
di = self.__correspDI_wp()
nc_assert(di is not None,"AtomInfo.correspondingDynamicInfo can not be used after associated Info object is deleted")
return di
_info = self._info_wr()
nc_assert(_info is not None,"AtomInfo.correspondingDynamicInfo can not be used after associated Info object is deleted")
if not _info.hasDynamicInfo():
self.__correspDI_wp = False
return None
for di in _info.dyninfos:
if di._atomidx == self._atomidx:
self.__correspDI_wp = weakref.ref(di)
return di
nc_assert(False,"AtomInfo.correspondingDynamicInfo: inconsistent internal state (bug?)")
dyninfo = property(correspondingDynamicInfo)
@property
def atomData(self):
"""Return AtomData object with details about composition and relevant physics constants"""
if self.__atomdata is None:
_info = self._info_wr()
nc_assert(_info is not None,"AtomInfo.atomData can not be used after associated Info object is deleted")
self.__atomdata = _info._provideAtomData(self._atomidx)
assert self.__atomdata.isTopLevel()
return self.__atomdata
@property
def count(self):
"""Number of atoms of this type per unit cell"""
return self.__n
@property
def debyeTemperature(self):
"""The Debye Temperature of the atom (kelvin). Returns None if not available."""
return self.__dt
@property
def meanSquaredDisplacement(self):
"""The mean-squared-displacement of the atom (angstrom^2). Returns None if not
available.
"""
return self.__msd
msd=meanSquaredDisplacement#alias
@property
def positions(self):
"""List (tuple actually) of positions of this atom in the unit cell. Each
entry is given as a tuple of three values, (x,y,z)"""
return self.__pos
@property
def atomIndex(self):
"""Index of atom on this material"""
return self._atomidx
def __str__(self):
l=[str(self.atomData.displayLabel()),str(self.__n)]
l.append('DebyeT=%gK'%self.__dt if self.__dt is not None else 'DebyeT=n/a')
l.append('MSD=%gAa^2'%self.__msd if self.__msd is not None else 'MSD=n/a')
l.append('hasPositions=%s'%('yes' if self.__pos else 'no'))
return 'AtomInfo(%s)'%(', '.join(l))
def hasAtomInfo(self):
"""Whether or no getAtomInfo()/atominfos are available"""
if self.__atominfo is None:
self.__initAtomInfo()
return self.__atominfo[0]
def hasAtomMSD(self):
"""Whether AtomInfo objects have mean-square-displacements available"""
if self.__atominfo is None:
self.__initAtomInfo()
return self.__atominfo[1]
def hasAtomPositions(self):
"""OBSOLETE FUNCTION: AtomInfo objects now always have positions
available. Returns same as hasAtomInfo(). Will be removed in a future
release.
"""
return self.hasAtomInfo()
def hasPerElementDebyeTemperature(self):
"""OBSOLETE FUNCTION which will be removed in a future
release. Please use hasAtomDebyeTemp() instead.
"""
return self.hasAtomDebyeTemp()
def getAtomInfo(self):
"""Get list of AtomInfo objects, one for each atom. Returns empty list if unavailable."""
if self.__atominfo is None:
self.__initAtomInfo()
return self.__atominfo[2]
atominfos = property(getAtomInfo)
def __initAtomInfo(self):
assert self.__atominfo is None
natoms = _rawfct['ncrystal_info_natominfo'](self._rawobj)
hasmsd = bool(_rawfct['ncrystal_info_hasatommsd'](self._rawobj))
hasperelemdt=False
l=[]
for iatom in range(natoms):
atomidx,n,dt,msd = _rawfct['ncrystal_info_getatominfo'](self._rawobj,iatom)
if dt:
hasperelemdt=True
assert hasmsd == (msd>0.0)
pos=[]
for ipos in range(n):
pos.append( _rawfct['ncrystal_info_getatompos'](self._rawobj,iatom,ipos) )
l.append( Info.AtomInfo(self,atomidx, n,
( dt if ( dt and dt>0.0) else None),
(msd if (msd and msd>0.0) else None),
pos) )
self.__atominfo = ( natoms>0, hasmsd, l, hasperelemdt )
def hasHKLInfo(self):
"""Whether or not material has lists of HKL-plane info available"""
return bool(_rawfct['ncrystal_info_nhkl'](self._rawobj)>-1)
def nHKL(self):
"""Number of HKL planes available (grouped into families with similar d-spacing and f-squared)"""
return int(_rawfct['ncrystal_info_nhkl'](self._rawobj))
def hklDLower(self):
"""Lower d-spacing cutoff (angstrom)."""
return float(_rawfct['ncrystal_info_hkl_dlower'](self._rawobj))
def hklDUpper(self):
"""Upper d-spacing cutoff (angstrom)."""
return float(_rawfct['ncrystal_info_hkl_dupper'](self._rawobj))
def hklList(self):
"""Iterator over HKL info, yielding tuples in the format
(h,k,l,multiplicity,dspacing,fsquared)"""
nc_assert(self.hasHKLInfo())
h,k,l,mult,dsp,fsq = _rawfct['ncrystal_info_gethkl_setuppars']()
for idx in range(self.nHKL()):
_rawfct['ncrystal_info_gethkl'](self._rawobj,idx,h,k,l,mult,dsp,fsq)
yield h.value,k.value,l.value,mult.value,dsp.value,fsq.value
def dspacingFromHKL(self, h, k, l):
"""Convenience method, calculating the d-spacing of a given Miller
index. Calling this incurs the overhead of creating a reciprocal lattice
matrix from the structure info."""
return float(_rawfct['ncrystal_info_dspacing_from_hkl'](self._rawobj,h,k,l))
class DynamicInfo:
"""Class representing dynamic information (related to inelastic scattering)
about a given atom"""
def __init__(self,theinfoobj,fr,atomidx,tt,key):
"""internal usage only"""
self._info_wr,self.__atomdata = weakref.ref(theinfoobj), None
self.__fraction, self._atomidx, self._key, self.__tt = fr,atomidx,key,tt
self.__correspAtomInfo_wp = None
def correspondingAtomInfo(self):
"""Get corresponding AtomInfo object from the same Info object. Returns None if Info object does not have AtomInfo available"""
if self.__correspAtomInfo_wp is not None:
if self.__correspAtomInfo_wp == False:
return None
ai = self.__correspAtomInfo_wp()
nc_assert(ai is not None,"DynamicInfo.correspondingAtomInfo can not be used after associated Info object is deleted")
return ai
_info = self._info_wr()
nc_assert(_info is not None,"DynamicInfo.correspondingAtomInfo can not be used after associated Info object is deleted")
if not _info.hasAtomInfo():
self.__correspAtomInfo_wp = False
return None
for ai in _info.atominfos:
if ai._atomidx == self._atomidx:
self.__correspAtomInfo_wp = weakref.ref(ai)
return ai
nc_assert(False,"DynamicInfo.correspondingAtomInfo: inconsistent internal state (bug?)")
atominfo = property(correspondingAtomInfo)
@property
def atomIndex(self):
"""Index of atom on this material"""
return self._atomidx
@property
def fraction(self):
"""Atom fraction in material (all fractions must add up to unity)"""
return self.__fraction
@property
def temperature(self):
"""Material temperature (same value as on associated Info object)"""
return self.__tt
@property
def atomData(self):
"""Return AtomData object with details about composition and relevant physics constants"""
if self.__atomdata is None:
_info = self._info_wr()
nc_assert(_info is not None,"DynamicInfo.atomData can not be used after associated Info object is deleted")
self.__atomdata = _info._provideAtomData(self._atomidx)
assert self.__atomdata.isTopLevel()
return self.__atomdata
def _np(self):
_ensure_numpy()
return _np
def _copy_cptr_2_nparray(self,cptr,n):
np = self._np()
return np.copy(np.ctypeslib.as_array(cptr, shape=(n,)))
def __str__(self):
n=self.__class__.__name__
if n.startswith('DI_'):
n=n[3:]
s=', %s'%self._extradescr() if hasattr(self,'_extradescr') else ''
return ('DynamicInfo(%s, fraction=%.4g%%, type=%s%s)'%(self.atomData.displayLabel(),
self.__fraction*100.0,
n,s))
class DI_Sterile(DynamicInfo):
"""Class indicating atoms for which inelastic neutron scattering is absent
or disabled."""
pass
class DI_FreeGas(DynamicInfo):
"""Class indicating atoms for which inelastic neutron scattering should be
modelled as scattering on a free gas."""
pass
class DI_ScatKnl(DynamicInfo):
"""Base class indicating atoms for which inelastic neutron scattering will
be, directly or indirectly, described by a scattering kernel,
S(alpha,beta). This is an abstract class, and derived classes provide
actual access to the kernels.
"""
def __init__(self,theinfoobj,fr,atomidx,tt,key):
"""internal usage only"""
super(Info.DI_ScatKnl, self).__init__(theinfoobj,fr,atomidx,tt,key)
self.__lastknl,self.__lastvdoslux = None,None
def _loadKernel( self, vdoslux = 3 ):
assert isinstance(vdoslux,numbers.Integral) and 0<=vdoslux<=5
vdoslux=int(vdoslux)
if self.__lastvdoslux != vdoslux:
sugEmax,ne,na,nb,eptr,aptr,bptr,sabptr = _rawfct['ncrystal_dyninfo_extract_scatknl'](self._key,vdoslux)
self.__lastvdoslux = vdoslux
res={}
assert ne>=0
res['suggestedEmax'] = float(sugEmax)
res['egrid'] = self._copy_cptr_2_nparray(eptr,ne) if ne > 0 else self._np().zeros(0)
assert na>1 and nb>1
res['alpha'] = self._copy_cptr_2_nparray(aptr,na)
res['beta'] = self._copy_cptr_2_nparray(bptr,nb)
res['sab'] = self._copy_cptr_2_nparray(sabptr,na*nb)
self.__lastknl = res
assert self.__lastknl is not None
return self.__lastknl
class DI_ScatKnlDirect(DI_ScatKnl):
"""Pre-calculated scattering kernel which at most needs a (hidden) conversion to
S(alpha,beta) format before it is available."""
def loadKernel( self ):
"""Prepares and returns the scattering kernel in S(alpha,beta) format"""
return self._loadKernel(vdoslux=3)#vdoslux value not actually used
class DI_VDOS(DI_ScatKnl):
"""Solid state material with a phonon spectrum in the form of a Vibrational
Density Of State (VDOS) parameterisation. This can be expanded into a
full scattering kernel. How luxurious this expansion will be is
controlled by an optional vdoslux parameter in the loadKernel call (must
be an integer from 0 to 5).
"""
def __init__(self,theinfoobj,fr,atomidx,tt,key):
"""internal usage only"""
super(Info.DI_VDOS, self).__init__(theinfoobj,fr,atomidx,tt,key)
self.__vdosdata = None
self.__vdosegrid_expanded = None
self.__vdosorig = None
def _extradescr(self):
return 'npts=%i'%len(self.vdosOrigDensity())
def vdosData(self):
"""Access the VDOS as ([egrid_min,egrid_max],vdos_density)"""
if self.__vdosdata is None:
emin,emax,nd,dptr = _rawfct['ncrystal_dyninfo_extract_vdos'](self._key)
vdos_egrid = (emin,emax)
vdos_density = self._copy_cptr_2_nparray(dptr,nd)
self.__vdosdata = (vdos_egrid,vdos_density)
return self.__vdosdata
def __loadVDOSOrig(self):
if self.__vdosorig is None:
neg,egptr,nds,dsptr = _rawfct['ncrystal_dyninfo_extract_vdos_input'](self._key)
self.__vdosorig = ( self._copy_cptr_2_nparray(egptr,neg),
self._copy_cptr_2_nparray(dsptr,nds) )
return self.__vdosorig
def vdosOrigEgrid(self):
"""Access the original un-regularised VDOS energy grid"""
return self.__loadVDOSOrig()[0]
def vdosOrigDensity(self):
"""Access the original un-regularised VDOS energy grid"""
return self.__loadVDOSOrig()[1]
@property
def vdos_egrid(self):
"""Access the VDOS energy grid as [egrid_min,egrid_max]"""
return self.vdosData()[0]
@property
def vdos_egrid_expanded(self):
"""Access the egrid expanded into all actual egrid points"""
if self.__vdosegrid_expanded is None:
_ = self.vdosData()
self.__vdosegrid_expanded = self._np().linspace(_[0][0],_[0][1],len(_[1]))
return self.__vdosegrid_expanded
@property
def vdos_density(self):
"""Access the VDOS density array"""
return self.vdosData()[1]
def loadKernel( self, vdoslux = 3 ):
"""Converts VDOS to S(alpha,beta) kernel with a luxury level given by the vdoslux parameter."""
return self._loadKernel(vdoslux=vdoslux)
class DI_VDOSDebye(DI_ScatKnl):
"""Similarly to DI_VDOS, but instead of using a phonon VDOS spectrum provided
externally, an idealised spectrum is used for lack of better
options. This spectrum is based on the Debye Model, in which the
spectrum rises quadratically with phonon energy below a cutoff value,
kT, where T is the Debye temperature.
"""
def __init__(self,theinfoobj,fr,atomidx,tt,key):
"""internal usage only"""
super(Info.DI_VDOSDebye, self).__init__(theinfoobj,fr,atomidx,tt,key)
self.__vdosdata = None
self.__debyetemp = None
self.__vdosegrid_expanded = None
def vdosData(self):
"""Access the idealised VDOS as ([egrid_min,egrid_max],vdos_density)"""
if self.__vdosdata is None:
self.__vdosdata = createVDOSDebye(self.debyeTemperature())
return self.__vdosdata
def debyeTemperature(self):
"""The Debye temperature of the atom"""
if self.__debyetemp is None:
self.__debyetemp = _rawfct['ncrystal_dyninfo_extract_vdosdebye'](self._key)
return self.__debyetemp
def _extradescr(self):
return 'TDebye=%gK'%self.debyeTemperature()
@property
def vdos_egrid(self):
"""Access the VDOS energy grid as [egrid_min,egrid_max]"""
return self.vdosData()[0]
@property
def vdos_egrid_expanded(self):
"""Access the egrid expanded into all actual egrid points"""
if self.__vdosegrid_expanded is None:
_ = self.vdosData()
self.__vdosegrid_expanded = self._np().linspace(_[0][0],_[0][1],len(_[1]))
return self.__vdosegrid_expanded
@property
def vdos_density(self):
"""Access the VDOS density array"""
return self.vdosData()[1]
def loadKernel( self, vdoslux = 3 ):
"""Converts VDOS to S(alpha,beta) kernel with a luxury level given by the
vdoslux parameter, which is similar to the vdoslux parameter used
in DI_VDOS. Notice that the vdoslux parameter specified here on
DI_VDOSDebye will be reduced internally by 3 (but not less than
0), since the Debye model is anyway only a crude approximation
and it accordingly does not need the same level of precise
treatment as a full externally specified VDOS.
"""
return self._loadKernel(vdoslux=vdoslux)
def hasDynamicInfo(self):
"""Whether or not dynamic information for each atom is present"""
return int(_rawfct['ncrystal_info_ndyninfo'](self._rawobj))>0 if self.__dyninfo is None else bool(self.__dyninfo)
def getDynamicInfoList(self):
"""Get list of DynamicInfo objects (if available). One for each atom."""
if self.__dyninfo is None:
self.__dyninfo = []
for idx in range(int(_rawfct['ncrystal_info_ndyninfo'](self._rawobj))):
key = (self._rawobj,idx)
fr,tt,atomidx,ditype = _rawfct['ncrystal_dyninfo_base'](key)
args=(self,fr,atomidx,tt,key)
if ditype==0:
di = Info.DI_Sterile(*args)
elif ditype==1:
di = Info.DI_FreeGas(*args)
elif ditype==2:
di = Info.DI_ScatKnlDirect(*args)
elif ditype==3:
di = Info.DI_VDOS(*args)
elif ditype==4:
di = Info.DI_VDOSDebye(*args)
else:
raise AssertionError('Unknown DynInfo type id (%i)'%ditype.value)
self.__dyninfo += [ di ]
return self.__dyninfo
dyninfos = property(getDynamicInfoList)
def getAllCustomSections(self):
"""Custom information for which the core NCrystal code does not have any
specific treatment. This is usually intended for usage by developers adding new
experimental physics models."""
if self.__custom is None:
self.__custom = _rawfct['ncrystal_info_getcustomsections'](self._rawobj)
return self.__custom
customsections = property(getAllCustomSections)
class CalcBase(RCBase):
"""Base class for all calculators"""
def getCalcName(self):
"""Calculator name"""
return _cstr2str(_rawfct['ncrystal_name'](self._rawobj))
@property
def name(self):
"""Calculator name as property"""
return self.getCalcName()
class Process(CalcBase):
"""Base class for calculations of processes in materials.
Note that kinetic energies are in electronvolt and direction vectors are
tuples of 3 numbers.
"""
def domain(self):
"""Domain where process has non-vanishing cross section.
Returns the domain as (ekin_low,ekin_high). Outside this range of
neutron kinetic energy, the process can be assumed to have vanishing
cross sections. Thus, processes present at all energies will return
(0.0,infinity).
"""
return _rawfct['ncrystal_domain'](self._rawobj)
def isNonOriented(self):
"""opposite of isOriented()"""
return bool(_rawfct['ncrystal_isnonoriented'](self._rawobj))
def isOriented(self):
"""Check if process is oriented and results depend on the incident direction of the neutron"""
return not self.isNonOriented()
def crossSection( self, ekin, direction ):
"""Access cross sections."""
return _rawfct['ncrystal_crosssection'](self._rawobj,ekin, direction)
def crossSectionNonOriented( self, ekin, repeat = None ):
"""Access cross sections (should not be called for oriented processes).
For efficiency it is possible to provide the ekin parameter as a numpy
array of numbers and get a corresponding array of cross sections
back. Likewise, the repeat parameter can be set to a positive number,
causing the ekin value(s) to be reused that many times and a numpy array
with results returned.
"""
return _rawfct['ncrystal_crosssection_nonoriented'](self._rawobj,ekin,repeat)
def xsect(self,ekin=None,direction=None,wl=None,repeat=None):
"""Convenience function which redirects calls to either crossSectionNonOriented
or crossSection depending on whether or not a direction is given. It can
also accept wavelengths instead of kinetic energies via the wl
parameter. The repeat parameter is currently only supported when
direction is not provided.
"""
ekin = Process._parseekin( ekin, wl )
if direction is None:
return self.crossSectionNonOriented( ekin, repeat )
else:
if repeat is None:
return self.crossSection( ekin, direction )
else:
raise NCBadInput('The repeat parameter is not currently supported when the direction parameter is also provided.')
@staticmethod
def _parseekin(ekin,wl):
if wl is None:
if ekin is None:
raise NCBadInput('Please provide either one of the "ekin" or "wl" parameters.')
return ekin
else:
if ekin is not None:
raise NCBadInput('Do not provide both "ekin" and "wl" parameters')
return wl2ekin(wl)
class Absorption(Process):
"""Base class for calculations of absorption in materials"""
def __init__(self, cfgstr):
"""create Absorption object based on cfg-string (same as using createAbsorption(cfgstr))"""
if isinstance(cfgstr,tuple) and len(cfgstr)==2 and cfgstr[0]=='_rawobj_':
#Cloning:
rawobj_abs = cfgstr[1]
else:
rawobj_abs = _rawfct['ncrystal_create_absorption'](_str2cstr(cfgstr))
self._rawobj_abs = rawobj_abs
rawobj_proc = _rawfct['ncrystal_cast_abs2proc'](rawobj_abs)
super(Absorption, self).__init__(rawobj_proc)
def clone(self):
"""Clone object. The clone will be using the same physics models and sharing any
read-only data with the original, but will be using its own private copy of any
mutable caches. All in all, this means that the objects are safe to use
concurrently in multi-threaded programming, as long as each thread gets
its own clone. Return value is the new Absorption object.
"""
newrawobj = _rawfct['ncrystal_clone_absorption'](self._rawobj_abs)
return Absorption( ('_rawobj_',newrawobj) )
class Scatter(Process):
"""Base class for calculations of scattering in materials.
Note that kinetic energies are in electronvolt and direction vectors are
tuples of 3 numbers.
"""
def __init__(self, cfgstr):
"""create Scatter object based on cfg-string (same as using createScatter(cfgstr))"""
if isinstance(cfgstr,tuple) and len(cfgstr)==2 and cfgstr[0]=='_rawobj_':
#Already got an ncrystal_scatter_t object:
self._rawobj_scat = cfgstr[1]
else:
self._rawobj_scat = _rawfct['ncrystal_create_scatter'](_str2cstr(cfgstr))
rawobj_proc = _rawfct['ncrystal_cast_scat2proc'](self._rawobj_scat)
super(Scatter, self).__init__(rawobj_proc)
def clone(self,rng_stream_index=None,for_current_thread=False):
"""Clone object. The clone will be using the same physics models and sharing any
read-only data with the original, but will be using its own private copy
of any mutable caches and will get an independent RNG stream. All in
all, this means that the objects are safe to use concurrently in
multi-threaded programming, as long as each thread gets its own
clone. Return value is the new Scatter object.
If greater control over RNG streams is needed, it is optionally allowed
to either set rng_stream_index to a non-negative integral value, or set
for_current_thread=True.
If rng_stream_index is set, the resulting object will use a specific
rngstream index. All objects with the same index will share the same
RNG state, so a sensible strategy is to use the same index for all
scatter objects which are to be used in the same thread.
If setting for_current_thread=True, the resulting object will use a
specific rngstream which has been set aside for the current thread. Thus
this function can be called from a given work-thread, in order to get a
thread-safe scatter handle, with all objects cloned within the same
thread sharing RNG state.
"""
if rng_stream_index is not None:
if for_current_thread:
raise NCBadInput('Scatter.clone(..): do not set both rng_stream_index and for_current_thread parameters')
if not isinstance(rng_stream_index, numbers.Integral) or not 0 <= rng_stream_index <= 4294967295:
raise NCBadInput('Scatter.clone(..): rng_stream_index must be integral and in range [0,4294967295]')
newrawobj = _rawfct['ncrystal_clone_scatter_rngbyidx'](self._rawobj_scat,int(rng_stream_index))
elif for_current_thread:
newrawobj = _rawfct['ncrystal_clone_scatter_rngforcurrentthread'](self._rawobj_scat)
else:
newrawobj = _rawfct['ncrystal_clone_scatter'](self._rawobj_scat)
return Scatter( ('_rawobj_',newrawobj) )
def sampleScatter( self, ekin, direction, repeat = None ):
"""Randomly generate scatterings.
Assuming a scattering took place, generate final state of neutron based
on current kinetic energy and direction. Returns
tuple(ekin_final,direction_final) where direct_final is itself a tuple
(ux,uy,uz). The repeat parameter can be set to a positive number,
causing the scattering to be sampled that many times and numpy arrays
with results returned.
"""
return _rawfct['ncrystal_samplesct'](self._rawobj_scat,ekin,direction,repeat)
def sampleScatterIsotropic( self, ekin, repeat = None ):
"""Randomly generate scatterings (should not be called for oriented processes).
Assuming a scattering took place, generate final state of
neutron. Returns tuple(ekin_final,mu) where mu is the cosine of the
scattering angle. For efficiency it is possible to provide the ekin
parameter as a numpy array of numbers and get corresponding arrays of
angles and energy transfers back. Likewise, the repeat parameter can be
set to a positive number, causing the ekin value(s) to be reused that
many times and numpy arrays with results returned.
"""
return _rawfct['ncrystal_samplesct_iso'](self._rawobj_scat,ekin,repeat)
def generateScattering( self, ekin, direction, repeat = None ):
"""WARNING: Deprecated method. Please use the sampleScatter method instead.
Randomly generate scatterings.
Assuming a scattering took place, generate energy transfer (delta_ekin)
and new neutron direction based on current kinetic energy and direction
and return tuple(new_direction,delta_ekin). The repeat parameter can be
set to a positive number, causing the scattering to be sampled that many
times and numpy arrays with results returned.
"""
return _rawfct['ncrystal_genscatter'](self._rawobj_scat,ekin,direction,repeat)
def generateScatteringNonOriented( self, ekin, repeat = None ):
"""WARNING: Deprecated method. Please use the sampleScatterIsotropic method instead.
Randomly generate scatterings (should not be called for oriented processes).
Assuming a scattering took place, generate energy transfer (delta_ekin)
and scatter angle in radians and return tuple(scatter_angle,delta_ekin)
(this method should not be invoked on oriented processes). For
efficiency it is possible to provide the ekin parameter as a numpy array
of numbers and get corresponding arrays of angles and energy transfers
back. Likewise, the repeat parameter can be set to a positive number,
causing the ekin value(s) to be reused that many times and numpy arrays
with results returned.
"""
return _rawfct['ncrystal_genscatter_nonoriented'](self._rawobj_scat,ekin,repeat)
def scatter(self,ekin=None,direction=None,wl=None,repeat=None):
"""Convenience function which redirects calls to either
sampleScatterIsotropic or sampleScatter depending on whether
or not a direction is given. It can also accept wavelengths instead of
kinetic energies via the wl parameter.
"""
ekin = Process._parseekin( ekin, wl )
return self.sampleScatterIsotropic( ekin, repeat ) if direction is None else self.sampleScatter( ekin, direction, repeat )
def genscat(self,ekin=None,direction=None,wl=None,repeat=None):
"""WARNING: Deprecated method. Please use the "scatter" method instead.
Convenience function which redirects calls to either
generateScatteringNonOriented or generateScattering depending on whether
or not a direction is given. It can also accept wavelengths instead of
kinetic energies via the wl parameter.
"""
ekin = Process._parseekin( ekin, wl )
return self.generateScatteringNonOriented( ekin, repeat ) if direction is None else self.generateScattering( ekin, direction, repeat )
def rngSupportsStateManipulation(self):
"""Query whether associated RNG stream supports state manipulation"""
return bool(_rawfct['ncrystal_rngsupportsstatemanip_ofscatter'](self._rawobj_scat))
def getRNGState(self):
"""Get current RNG state (as printable hex-string with RNG type info
embedded). This function returns None if RNG stream does not support
state manipulation
"""
return _rawfct['nc_getrngstate_scat'](self._rawobj_scat)
def setRNGState(self,state):
"""Set current RNG state.
Note that setting the rng state will affect all objects sharing the
RNG stream with the given scatter object (and those subsequently cloned
from any of those).
Note that if the provided state originates in (the current version
of) NCrystal's builtin RNG algorithm, it can always be used here,
even if the current RNG uses a different algorithm (it will simply be
replaced). Otherwise, a mismatch of RNG stream algorithms will result
in an error.
"""
_rawfct['ncrystal_setrngstate_ofscatter']( self._rawobj_scat,
_str2cstr(state) )
def createInfo(cfgstr):
"""Construct Info object based on provided configuration (using available factories)"""
return Info(cfgstr)
def createScatter(cfgstr):
"""Construct Scatter object based on provided configuration (using available factories)"""
return Scatter(cfgstr)
def createScatterIndependentRNG(cfgstr,seed = 0):
"""Construct Scatter object based on provided configuration (using available
factories) and with its own independent RNG stream (using the builtin RNG
generator and the provided seed)"""
rawobj = _rawfct['ncrystal_create_scatter_builtinrng'](_str2cstr(cfgstr),seed)
return Scatter(('_rawobj_',rawobj))
def createAbsorption(cfgstr):
"""Construct Absorption object based on provided configuration (using available factories)"""
return Absorption(cfgstr)
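#Example (illustrative sketch using the factory functions above; the cfg-string
#mirrors the one used in the test function further down in this module):
#
#  info = createInfo('stdlib::Al_sg225.ncmat;dcutoff=1.4')
#  scat = createScatter('stdlib::Al_sg225.ncmat;dcutoff=1.4')
#  absn = createAbsorption('stdlib::Al_sg225.ncmat;dcutoff=1.4')
#  print( info.getDensity(), scat.xsect(wl=1.8), absn.xsect(wl=1.8) )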
def directMultiCreate( data, cfg_params='', *, dtype='',
doInfo = True, doScatter = True, doAbsorption = True ):
"""Convenience function which creates Info, Scatter, and Absorption objects
directly from a data string rather than an on-disk or in-memory
file. Such usage obviously precludes proper caching behind the scenes,
and is intended for scenarios where the same data should not be used
repeatedly.
"""
if not dtype and not data.startswith('NCMAT') and 'NCMAT' in data:
if data.strip().startswith('NCMAT'):
raise NCBadInput('NCMAT data must have "NCMAT" as the first 5 characters (must not be preceded by whitespace)')
rawi,raws,rawa = _rawfct['multicreate_direct'](data,dtype,cfg_params,doInfo,doScatter,doAbsorption)
info = Info( ('_rawobj_',rawi) ) if rawi else None
scatter = Scatter( ('_rawobj_',raws) ) if raws else None
absorption = Absorption( ('_rawobj_',rawa) ) if rawa else None
class MultiCreated:
def __init__(self,i,s,a):
self.__i,self.__s,self.__a = i,s,a
@property
def info(self):
"""Info object (None if not present)."""
return self.__i
@property
def scatter(self):
"""Scatter object (None if not present)."""
return self.__s
@property
def absorption(self):
"""Absorption object (None if not present)."""
return self.__a
def __str__(self):
fmt = lambda x : str(x) if x else 'n/a'
return 'MultiCreated(Info=%s, Scatter=%s, Absorption=%s)'%(fmt(self.__i),
fmt(self.__s),
fmt(self.__a))
return MultiCreated(info,scatter,absorption)
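#Example (illustrative sketch; "my_ncmat_string" is a placeholder for actual
#NCMAT data, which must begin with the 5 characters "NCMAT"):
#
#  mc = directMultiCreate( my_ncmat_string )
#  if mc.info:
#      mc.info.dump()
#  xs = mc.scatter.xsect(wl=1.8) if mc.scatter else None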
def registerInMemoryFileData(virtual_filename,data):
"""Register in-memory file data. This needs a "filename" and the content of this
virtual file. After registering such in-memory "files", they can be used
as file names in cfg strings or MatCfg objects. Registering the same
filename more than once will simply override the content.
As a special case, data can be specified as "ondisk://<path>",
which will instead create a virtual alias for an on-disk file.
"""
if ( isinstance(data,str) and data.startswith('ondisk://')):
data = 'ondisk://'+str(pathlib.Path(data[9:]).resolve())
_rawfct['ncrystal_register_in_mem_file_data'](virtual_filename,data)
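#Example (illustrative sketch; the virtual filename and content are placeholders):
#
#  registerInMemoryFileData( 'myvirtual.ncmat', my_ncmat_string )
#  info = createInfo('myvirtual.ncmat')
#  #...or alias an on-disk file instead of embedding the content:
#  registerInMemoryFileData( 'alias.ncmat', 'ondisk:///some/path/file.ncmat' )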
#numpy compatible wl2ekin and ekin2wl
_c_wl2ekin = float(_rawfct['ncrystal_wl2ekin'](1.0))
_c_ekin2wl = float(_rawfct['ncrystal_ekin2wl'](1.0))
def wl2ekin(wl):
"""Convert neutron wavelength in Angstrom to kinetic energy in electronvolt"""
if _np and hasattr(wl,'__len__'):
#reciprocals without zero division:
wlnonzero = wl != 0.0
wlinv = 1.0 / _np.where( wlnonzero, wl, 1.0)#fallback 1.0 won't be used
return _c_wl2ekin * _np.square(_np.where( wlnonzero, wlinv, _np.inf))
else:
return _rawfct['ncrystal_wl2ekin'](wl)
def ekin2wl(ekin):
"""Convert neutron kinetic energy in electronvolt to wavelength in Angstrom"""
if _np and hasattr(ekin,'__len__'):
#reciprocals without zero division:
ekinnonzero = ekin != 0.0
ekininv = 1.0 / _np.where( ekinnonzero, ekin, 1.0)#fallback 1.0 won't be used
return _c_ekin2wl * _np.sqrt(_np.where( ekinnonzero, ekininv, _np.inf))
else:
return _rawfct['ncrystal_ekin2wl'](ekin)
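#Example (illustrative sketch of the unit conversions above; they work on scalars
#and, when numpy is available, on arrays):
#
#  ekin = wl2ekin( 1.8 )    #angstrom -> eV
#  wl   = ekin2wl( ekin )   #eV -> angstrom (recovers ~1.8)
#  #With numpy: wl2ekin( _np.array([0.5, 1.8, 4.0]) ) returns an array.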
def clearCaches():
"""Clear various caches"""
_rawfct['ncrystal_clear_caches']()
def clearInfoCaches():
"""Deprecated. Does the same as clearCaches()"""
clearCaches()
def disableCaching():
"""Disable caching of Info objects in factory infrastructure"""
_rawfct['ncrystal_disable_caching']()
def enableCaching():
"""Enable caching of Info objects in factory infrastructure"""
_rawfct['ncrystal_enable_caching']()
def hasFactory(name):
"""Check if a factory of a given name exists"""
return bool(_rawfct['ncrystal_has_factory'](_str2cstr(name)))
#Helper function, for scripts creating ncmat files:
def formatVectorForNCMAT(name,values):
"""Utility function for help in python scripts composing .ncmat files,
transforming an array of values into a properly formatted text string,
with word-wrapping, usage of <val>r<n> syntax, etc. Returns list of lines
(strings) for .ncmat files.
"""
v,res,l,indent,efmt_prev,efmt_nrepeat=values.flatten(),' %s'%name,'','',None,0
if not len(v):
return res
ilast,nv=len(v)-1,len(v)
def _fmtnum(num):
_ = '%g'%num if num else '0'#avoid 0.0, -0, etc.
if _.startswith('0.'):
_=_[1:]
return _
i=0
while i<nv:
fmt_vi=_fmtnum(v[i])
#check if is repeated:
irepeat=i
while irepeat+1<nv:
if _fmtnum(v[irepeat+1])==fmt_vi:
irepeat+=1
else:
break
#Write:
s = ' %sr%i'%(fmt_vi,1+irepeat-i) if irepeat>i else ' %s'%fmt_vi
l+=(s if l else (indent+s))
i=irepeat+1#advance
if i>=nv or len(l)>80:
#Flush line
res += '%s\n'%l
l,indent='',' '
return res
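#Example (illustrative sketch; assumes numpy is available and imported as np):
#
#  text = formatVectorForNCMAT( 'egrid', np.linspace(0.0, 0.5, 1000) )
#  #The result can be pasted into e.g. a @DYNINFO section of an .ncmat file,
#  #with repeated values compressed via the <val>r<n> syntax.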
#Accept custom random generator:
def setDefaultRandomGenerator(rg, keepalive=True):
"""Set the default random generator for CalcBase classes.
Note that this only changes the random generator for those CalcBase
instances that did not already use random numbers. The default generator when
using the NCrystal python interface is the scientifically sound
random.random stream from the python standard library (a Mersenne Twister).
To ensure Python does not clean up the passed function object prematurely,
the NCrystal python module will keep a reference to it eternally. To avoid
this, call with keepalive=False. But in that case the caller is responsible
for keeping a reference to the object for as long as NCrystal might use it
to generate random numbers.
"""
_rawfct['ncrystal_setrandgen'](rg)
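#Example (illustrative sketch; any callable returning uniform numbers in [0,1)
#can be used as the generator):
#
#  import random
#  myrng = random.Random( 123456 )
#  setDefaultRandomGenerator( myrng.random )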
__atomdb={}
def atomDB(Z,A=None,throwOnErrors=True):
"""Access internal database with data for isotopes and natural elements.
If A is provided, both A and Z must be integers, thus defining a specific isotope.
If Z is an integer and A is 0 or None, the corresponding natural element is provided.
Finally, the function can be called with a string identifying either natural
elements or isotopes: atomDB("Al"), atomDB("He3"), ...
In all cases, in case of errors or missing entries in the database, either
an NCBadInput exception is thrown (throwOnErrors==True) or None is
returned (when throwOnErrors==False).
"""
global __atomdb
if isinstance(Z,numbers.Integral):
Z=int(Z)
key=(Z,int(A or 0))
strkey=False
else:
assert A is None,"Do not supply two arguments unless the first argument is an integer"
assert isinstance(Z,str),"The first argument to the function must either be of int or str type"
key=Z
strkey=True
obj=__atomdb.get(key,None)
if obj:
return obj
if strkey:
rawatomdata=_rawfct['ncrystal_create_atomdata_fromdbstr'](_str2cstr(key))
else:
rawatomdata=_rawfct['ncrystal_create_atomdata_fromdb'](*key)
if not _rawfct['ncrystal_valid'](rawatomdata):
if not throwOnErrors:
return None
if strkey:
s='key="%s"'%key
else:
if key[1]==0:
s='Z=%i'%key[0]
else:
s='Z=%i,A=%i'%key
raise NCBadInput('atomDB: Could not find entry for key (%s)'%s)
ad = AtomData(rawatomdata)
assert ad.isElement()
Z,A = ad.Z(), (ad.A() if ad.isSingleIsotope() else 0)
keys=[ (Z,A)]
if Z==1 and A==2:
keys+=['H2','D']
elif Z==1 and A==3:
keys+=['H3','T']
else:
assert ad.isNaturalElement() or ad.isSingleIsotope()
keys += [ ad.description(False) ]#guaranteed to give just symbol for natelem/singleisotope!
assert key in keys#Should always be true unless we forgot some keys above
assert ad.description(False) in keys#Should also be true, given guarantees for AtomData::description(false)
for k in keys:
__atomdb[k] = ad
return ad
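#Example (illustrative sketch of the database lookups above):
#
#  al  = atomDB('Al')   #natural aluminium
#  he3 = atomDB(2,3)    #the He-3 isotope via (Z,A)
#  print( al.coherentScatLenFM(), he3.captureXS() )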
def iterateAtomDB(objects=True):
"""Iterate over all entries in the internal database with data for isotopes and
natural elements. If objects=True, AtomData objects are returned. If
objects=False, (Z,A) values are returned (A=0 indicates a natural
element)."""
for z,a in _rawfct['atomdb_getall_za']():
yield atomDB(z,a) if objects else (int(z),int(a))
class FileListEntry:
"""Entry in list returned by browseFiles."""
def __init__(self,*,name,source,factName,priority):
self.__n = name or None
self.__f = factName or None
self.__p = int(priority) if priority.isdigit() else priority
self.__s = source or None
@property
def name(self):
"""The (possibly virtual) filename needed to select this entry"""
return self.__n
@property
def source(self):
"""Description (such as the parent directory in case of on-disk files)"""
return self.__s
@property
def factName(self):
"""Name of the factory delivering entry."""
return self.__f
@property
def priority(self):
"""The priority value of the entry (important in case multiple factories
deliver content with the same name). Can be 'Unable',
'OnlyOnExplicitRequest' or an integer priority value (entries with
higher values will be preferred).
"""
return self.__p
@property
def fullKey(self):
"""The string '%s::%s'%(self.factName,self.name), which can be used to
explicitly request this entry without interference from similarly
named entries in other factories.
"""
return '%s::%s'%(self.__f,self.__n)
def __str__(self):
l=[]
if self.__n:
l+=['name=%s'%self.__n]
if self.__s:
l+=['source=%s'%self.__s]
if self.__f:
l+=['factory=%s'%self.__f]
l+=['priority=%s'%self.__p]
return 'FileListEntry(%s)'%(', '.join(l))
def browseFiles(dump=False,factory=None):
"""Browse list of available input files (virtual or on-disk). The list is not
guaranteed to be exhaustive, but will usually include all supported
files in the most obvious locations (the NCrystal data
directory and other directories of the standard search path, the current
working directory, virtual files embedded in the NCrystal library, or
files registered dynamically).
Returns a list of FileListEntry objects. If the dump flag is set to True,
the list will also be printed to stdout in a human readable form.
Setting the factory parameter will restrict the returned / printed entries
to those from the factory of that name.
"""
res=[]
def sortkey(e):
praw = e.priority
if praw=='Unable':
p=-2
elif isinstance(praw,int):
p=praw
else:
assert praw=='OnlyOnExplicitRequest'
p=-1
return (-p, e.factName,e.source,e.name)
for n,s,f,p in _rawfct['ncrystal_get_filelist']():
res.append( FileListEntry(name=n,source=s,factName=f,priority=p) )
res.sort(key=sortkey)
if dump:
seen_names=set()
groupfct = lambda e : (e.factName,e.source,e.priority)
lastgroup = None
pending=[]
def print_pending():
if not pending:
return
if factory is not None and lastgroup[0]!=factory:
pending.clear()
return
n=len(pending) - 1
pending[0] = pending[0]%('%s files'%n if n!=1 else '%s file'%n )
for line in pending:
print (line)
pending.clear()
for e in res:
group = groupfct(e)
if lastgroup != group:
print_pending()
lastgroup = group
pending.append('==> %%s from "%s" (%s, priority=%s):'%group)
hidden = e.name in seen_names
seen_names.add(e.name)
extra=''
prname=e.name
if e.priority=='OnlyOnExplicitRequest':
prname='%s::%s'%(e.factName,e.name)
elif hidden:
extra=' <--- Hidden by higher priority entries (select as "%s::%s")'%(e.factName,e.name)
pending.append( ' %s%s'%(prname,extra))
print_pending()
if factory is None:
return res
return [e for e in res if e.factName==factory]
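#Example (illustrative sketch; 'stdlib' is just an example factory name, matching
#the prefix used in cfg-strings such as 'stdlib::Al_sg225.ncmat'):
#
#  for e in browseFiles( factory='stdlib' ):
#      print( e.fullKey, e.priority )
#  #Or simply print everything in human readable form:
#  #browseFiles( dump=True )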
class TextData:
"""Text data accessible line by line, with associated meta-data. This always
include a UID (useful for comparison and downstream caching purposes) and
the data type (e.g. "ncmat"). Optionally available is the last known
on-disk path to a file with the same content, which might be useful in
case the data needs to be passed to 3rd party software which can only
work with physical files.
Text data objects are easily line-iterable, easily providing lines
(without newline characters): for( auto& line : mytextdata ) {...}. Of
course, the raw underlying data buffer can also be accessed if needed.
The raw data must be ASCII or UTF-8 text, with line endings \n=CR=0x0A
(Unix) or \r\n=LF+CR=0x0D0A (Windows/dos). Other encodings might work
only if 0x00, 0x0A, 0x0D bytes do not occur in them outside of line
endings.
Notice that ancient pre-OSX Mac line-endings \r=LF=0x0D are not
supported, and iterators will actually throw an error upon encountering
them. This is done on purpose, since files with \r on unix might hide
content when inspected in a terminal can be either confusing, a potential
security issue, or both.
"""
def __init__(self,name):
"""create TextData object based on string (same as using createTextData(name))"""
l=_rawfct['nc_gettextdata'](name)
assert len(l)==5
self.__rd = l[0]
self.__uid = int(l[1])
self.__descr = l[2]
self.__datatype= l[3]
self.__rp = pathlib.Path(l[4]) if l[4] else None
@property
def uid(self):
"""Unique identifier. Objects only have identical UID if all contents and
metadata are identical."""
return self.__uid
@property
def dataType(self):
"""Data type ("ncmat", "nxs", ...)."""
return self.__datatype
@property
def description(self):
"""Short description. This might for instance be a filename."""
return self.__descr
@property
def rawData(self):
"""Raw access to underlying data."""
return self.__rd
@property
def lastKnownOnDiskLocation(self):
"""Last known on-disk location (returns None if unavailable). Note that there
is no guarantee against the file having been removed or modified since the
TextData object was created.
"""
return self.__rp
def __str__(self):
return 'TextData(%s, uid=%i, %i chars)'%(self.__descr,self.__uid,len(self.__rd))
def __iter__(self):
"""Line-iteration, yielding lines without terminating newline characters"""
from io import StringIO
def chomp(x):
return x[:-2] if x.endswith('\r\n') else (x[:-1] if x.endswith('\n') else x)
for l in StringIO(self.__rd):
yield chomp(l)
def createTextData(name):
"""creates TextData objects based on requested name"""
return TextData(name)
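#Example (illustrative sketch; the name refers to the standard data library entry
#also used in the test function below):
#
#  td = createTextData( 'stdlib::Al_sg225.ncmat' )
#  print( td.dataType, td.uid )
#  for line in td:
#      pass #process each line (newline characters already stripped)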
def getFileContents(name):
"""OBSOLETE FUNCTION: Use createTextData(..).rawData instead."""
return createTextData(name).rawData
def addCustomSearchDirectory(dirpath):
"""Register custom directories to be monitored for data files."""
_rawfct['ncrystal_add_custom_search_dir'](_str2cstr(str(pathlib.Path(dirpath).resolve())))
def removeCustomSearchDirectories():
"""Remove all search directories added with addCustomSearchDirectory."""
_rawfct['ncrystal_remove_custom_search_dirs']()
def removeAllDataSources():
"""Disable all standard data sources, remove all TextData factories as well,
clear all registered virtual files and custom search directories. Finish
by calling global clearCaches function ("Ripley: I say we take off and
nuke the entire site from orbit. It's the only way to be sure.").
"""
_rawfct['ncrystal_remove_all_data_sources']()
def enableAbsolutePaths( enable = True ):
"""Whether or not absolute file paths are allowed."""
_rawfct['ncrystal_enable_abspaths'](1 if enable else 0)
def enableRelativePaths( enable = True ):
"""Whether or not paths relative to current working directory are allowed."""
_rawfct['ncrystal_enable_relpaths'](1 if enable else 0)
def enableStandardSearchPath( enable = True ):
"""Whether or not the standard search path should be searched. This standard
search path is by default searched *after* the standard data library,
and is built by concatenating entries in the NCRYSTAL_DATA_PATH
environment variable with entries in the compile time definition of the
same name (in that order).
"""
_rawfct['ncrystal_enable_stdsearchpath'](1 if enable else 0)
def enableStandardDataLibrary( enable = True, dirpath_override = None ):
"""Whether or not the standard data library shipped with NCrystal should be
searched.
Unless NCrystal is configured to have the standard data library embedded
into the binary at compilation time, the location (directory path) of the
standard data library is taken from the NCRYSTAL_DATADIR environment
variable. If the environment variable is not set, the location is taken
from the compile time definition of the same name. If neither is set, and
data was not embedded at compilation time, the standard data library will
be disabled by default and the location must be provided before it can be
enabled. In all cases, the location can be overridden if explicitly
provided by the user as the second parameter to this function.
"""
d = _str2cstr(str(pathlib.Path(dirpath_override).resolve())) if dirpath_override else ctypes.cast(None, ctypes.c_char_p)
_rawfct['ncrystal_enable_stddatalib'](1 if enable else 0, d)
def browsePlugins(dump=False):
"""Return list of plugins [(pluginname,filename,plugintype),...].
If the dump flag is set to True, the list will not be returned. Instead it
will be printed to stdout.
"""
l=_rawfct['ncrystal_get_pluginlist']()
if not dump:
return l
print('NCrystal has %i plugins loaded.'%len(l))
for i in range(len(l)):
pluginname, filename, plugintype = l[i]
print('==> %s (%s%s)'%(pluginname,plugintype,
' from %s'%filename if filename else ''))
def debyeIsotropicMSD( *, debye_temperature, temperature, mass ):
"""Estimate (isotropic, harmonic) atomic mean-squared-displacement using the
Debye Model (eq. 11+12 in <NAME>, Phys. Rev. Vol98 num 6,
1955). Unit of returned MSD value is Aa^2. Input temperatures should be
in Kelvin, and input atomic mass should be in amu.
"""
return float(_rawfct['ncrystal_debyetemp2msd'](debye_temperature, temperature, mass))
def debyeTempFromIsotropicMSD( *, msd, temperature, mass ):
"""The inverse of debyeIsotropicMSD (implemented via root-finding), allowing to
get the Debye temperature which will give rise to a given
mean-squared-displacement.
"""
return float(_rawfct['ncrystal_msd2debyetemp'](msd, temperature, mass))
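#Example (illustrative sketch showing that the two functions above are inverses;
#the values are arbitrary but physically reasonable for aluminium):
#
#  msd = debyeIsotropicMSD( debye_temperature=410.4, temperature=293.15, mass=26.98 )
#  dt  = debyeTempFromIsotropicMSD( msd=msd, temperature=293.15, mass=26.98 )
#  #dt recovers ~410.4K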
def test():
"""Quick test that NCrystal works as expected in the current installation."""
_actualtest()
print("Tests completed succesfully")
def _actualtest():
def require(b):
if not b:
raise RuntimeError('check failed')
def flteq(a,b,rtol=1.0e-6,atol=1.0e-6):
return abs(a-b) <= 0.5 * rtol * (abs(a) + abs(b)) + atol
def require_flteq(a,b):
if not flteq(a,b):
raise RuntimeError('check failed (%.16g != %.16g, diff %g)'%(a,b,a-b))
return True
al = createInfo('stdlib::Al_sg225.ncmat;dcutoff=1.4')
require(hasFactory('stdncmat'))
require(al.hasTemperature() and require_flteq(al.getTemperature(),293.15))
require(al.hasXSectFree() and require_flteq(al.getXSectFree(),1.39667))
require(al.hasXSectAbsorption() and require_flteq(al.getXSectAbsorption(),0.231))
require(al.hasDensity() and require_flteq(al.getDensity(),2.69864547673))
require(al.hasNumberDensity() and require_flteq(al.getNumberDensity(),0.06023238256131625))
require(al.hasDebyeTemperature())
require(al.hasStructureInfo())
si=al.getStructureInfo()
require( si['spacegroup'] == 225 )
require_flteq(si['a'],4.04958)
require_flteq(si['b'],4.04958)
require_flteq(si['c'],4.04958)
require( si['alpha'] == 90.0 )
require( si['beta'] == 90.0 )
require( si['gamma'] == 90.0 )
require( si['n_atoms'] == 4 )
require_flteq(si['volume'],66.4094599932)
require( al.hasHKLInfo() )
require( al.nHKL() == 3 )
require_flteq(al.hklDLower(),1.4)
require( al.hklDUpper() > 1e36 )
expected_hkl = { 0 : (1, -1, -1, 8, 2.3380261031049243, 1.773159275925474),
1 : (0, 0, 2, 6, 2.02479, 1.731788590086223),
2 : (0, 2, -2, 12, 1.4317427394787094, 1.575735707233723) }
for idx,hkl in enumerate(al.hklList()):
h,k,l,mult,dsp,fsq = hkl
require(idx<len(expected_hkl))
e = expected_hkl[idx]
require( list(e)[0:4] == [h,k,l,mult] )
require_flteq(dsp, e[4])
require_flteq(fsq, e[5])
#We do all createScatter... here with independent RNG, for reproducibility
#and to avoid consuming random numbers from other streams.
alpc = createScatterIndependentRNG('stdlib::Al_sg225.ncmat;dcutoff=1.4;incoh_elas=0;inelas=0')
require( alpc.name == 'PCBragg' )
require( isinstance(alpc.name,str) )
require( alpc.refCount() in (1,2) and type(alpc.refCount()) == int )
require( alpc.isNonOriented() )
#print(alpc.xsect(wl=4.0))
require_flteq(1.632435821586171,alpc.crossSectionNonOriented(wl2ekin(4.0)) )
require_flteq(1.632435821586171,alpc.crossSection(wl2ekin(4.0),(1,0,0)))
require( alpc.crossSectionNonOriented(wl2ekin(5.0)) == 0.0 )
require( alpc.rngSupportsStateManipulation() )
require(alpc.getRNGState()=='a79fd777407ba03b3d9d242b2b2a2e58b067bd44')
alpc.setRNGState('deadbeefdeadbeefdeadbeefdeadbeefb067bd44')
require(alpc.getRNGState()=='deadbeefdeadbeefdeadbeefdeadbeefb067bd44')
alpc_clone = alpc.clone()
require(alpc.getRNGState()=='deadbeefdeadbeefdeadbeefdeadbeefb067bd44')
require(alpc_clone.getRNGState()=='e0fd16d42a2aced7706cffa08536d869b067bd44')
alpc_clone2 = alpc_clone.clone(for_current_thread=True)
require(alpc_clone2.getRNGState()=='cc762bb1160a0be514300da860f6d160b067bd44')
alpc_clone3 = alpc_clone.clone(rng_stream_index = 12345 )
require(alpc_clone3.getRNGState()=='3a20660a10fd581bd7cddef8fc3f32a2b067bd44')
#Pick Nickel at 1.2 angstrom, to also both vdos + incoherent-elastic + coherent-elastic:
nipc = createScatterIndependentRNG('stdlib::Ni_sg225.ncmat;dcutoff=0.6;vdoslux=2',2543577)
nipc_testwl = 1.2
#print(nipc.xsect(wl=nipc_testwl),nipc.xsect(wl=5.0))
require_flteq(16.763384236274295,nipc.xsect(wl=nipc_testwl))
require_flteq(16.763384236274295,nipc.xsect(wl=nipc_testwl,direction=(1,0,0)))
require_flteq(5.958248153134731,nipc.xsect(wl=5.0))
require( nipc.name == 'ProcComposition' )
expected = [ ( 0.056808478892590906, 0.5361444826572668 ),
( 0.056808478892590906, 0.5361444826572668 ),
( 0.056808478892590906, 0.3621986636537414 ),
( 0.056808478892590906, 0.8391056916029316 ),
( 0.04306081224691682, -0.8967361422557182 ),
( 0.056808478892590906, 0.0028144544046340148 ),
( 0.056808478892590906, -0.10165685368899191 ),
( 0.056808478892590906, -0.15963879335683306 ),
( 0.056808478892590906, 0.8260541809964751 ),
( 0.07817729397575277, -0.47118801199817245 ),
( 0.05401955085824633, -0.04621874653928581 ),
( 0.056808478892590906, 0.8260541809964751 ),
( 0.041670508464271845, -0.1503940486202929 ),
( 0.056808478892590906, -0.10165685368899191 ),
( 0.056808478892590906, -0.10165685368899191 ),
( 0.056808478892590906, 0.5361444826572668 ),
( 0.056808478892590906, -0.3915665520281999 ),
( 0.056808478892590906, 0.3621986636537414 ),
( 0.05803278720551066, -0.4542597828355841 ),
( 0.056808478892590906, 0.3621986636537414 ),
( 0.08136537448918617, -0.9123985306524791 ),
( 0.056808478892590906, -0.5655123710317247 ),
( 0.058619077301964584, -0.8633498900743867 ),
( 0.056808478892590906, 0.3042167239859003 ),
( 0.056808478892590906, 0.7378808571510718 ),
( 0.056808478892590906, -0.10165685368899191 ),
( 0.08060018281822305, -0.7370163968204352 ),
( 0.056808478892590906, -0.5655123710317247 ),
( 0.06972487412469042, 0.11577553721283067 ),
( 0.04057037479922572, -0.8570248794863715 ) ]
if _np is None:
ekin,mu=[],[]
for i in range(30):
_ekin,_mu=nipc.sampleScatterIsotropic(wl2ekin(nipc_testwl))
mu += [_mu]
ekin += [_ekin]
else:
ekin,mu = nipc.sampleScatterIsotropic(wl2ekin(nipc_testwl),repeat=30)
for i in range(len(ekin)):
#print ( f' ( {ekin[i]}, {mu[i]} ),');continue
require_flteq(ekin[i],expected[i][0])
require_flteq(mu[i],expected[i][1])
expected = [ ( 0.008416101633014865, (-0.757659986585644, -0.6452758602889546, -0.09782846648798732) ),
( 0.056808478892590906, (0.07228896531453344, -0.5190173207165885, 0.8517014302500192) ),
( 0.056808478892590906, (-0.9249112255344181, -0.32220112076758217, -0.20180600252850442) ),
( 0.056808478892590906, (-0.15963879335683306, -0.8486615569734178, 0.5042707778277745) ),
( 0.055175282123031216, (-0.9137234740275523, -0.01697993608051868, -0.4059816434048743) ),
( 0.056808478892590906, (0.3621986636537414, 0.9195336880770101, -0.15254482796521104) ),
( 0.056808478892590906, (-0.8667699444876275, 0.3952682020937969, -0.30409359043960893) ),
( 0.056808478892590906, (-0.10165685368899191, -0.8869759070713323, -0.4504882066969593) ),
( 0.056808478892590906, (0.07228896531453344, -0.39741541395284924, -0.914787021249449) ),
( 0.056808478892590906, (-0.10165685368899191, -0.9768880366798581, -0.1880309758785167) ),
( 0.02606330473669421, (-0.7582323992671114, -0.6516002612972578, 0.022376956428100652) ),
( 0.056808478892590906, (0.8260541809964751, 0.539797243436807, 0.16202909009269678) ),
( 0.04566678251268602, (-0.9534434024097809, 0.17205470782622773, -0.2476748996488988) ),
( 0.056808478892590906, (-0.5655123710317247, 0.15884349419072655, 0.8092987721252006) ),
( 0.056808478892590906, (0.5361444826572668, 0.7795115518549292, 0.32389941994528487) ),
( 0.056808478892590906, (0.07228896531453344, 0.746175597107444, 0.6618128767069312) ),
( 0.056808478892590906, (-0.10165685368899191, -0.4247181868490453, 0.8996001033001911) ),
( 0.056808478892590906, (0.5361444826572668, 0.555576061106532, -0.6355189486093414) ),
( 0.05789302012683443, (-0.12097277426532965, -0.6903199617139779, 0.7133189597548643) ),
( 0.056808478892590906, (0.3042167239859003, -0.8706122815482211, -0.3866347631352975) ),
( 0.056808478892590906, (-0.7384733804796917, 0.6322144258925643, -0.23443972789660028) ),
( 0.056808478892590906, (-0.15963879335683306, 0.21525619037302965, -0.9634211063505222) ),
( 0.056808478892590906, (0.41359447569500096, 0.4927058865194684, 0.7656242675514158) ),
( 0.056808478892590906, (0.25796367721315083, 0.48520231047621615, 0.8354839670198411) ),
( 0.056808478892590906, (0.5785005938702705, 0.8104481067271115, -0.09225469740985966) ),
( 0.04368024428240841, (0.020347983057095325, -0.49560493792245525, 0.8683096827125603) ),
( 0.05481910288975998, (-0.29870118895121683, -0.9272663989579163, 0.22573131170209382) ),
( 0.056808478892590906, (0.3621986636537414, -0.8822186430862218, 0.3008361577978115) ),
( 0.056808478892590906, (0.7680722413286334, 0.5975216576265994, -0.23028873347945303) ),
( 0.056808478892590906, (0.32922859149927786, -0.9426419619170849, 0.0550878042084668) ) ]
for i in range(30):
out_ekin,outdir = nipc.sampleScatter(wl2ekin(nipc_testwl),(1.0,0.0,0.0))
#print ( f' ( {out_ekin}, {outdir} ),');continue
require_flteq(out_ekin,expected[i][0])
require_flteq(outdir[0],expected[i][1][0])
require_flteq(outdir[1],expected[i][1][1])
require_flteq(outdir[2],expected[i][1][2])
gesc = createScatterIndependentRNG("""stdlib::Ge_sg227.ncmat;dcutoff=0.5;mos=40.0arcsec
;dir1=@crys_hkl:5,1,1@lab:0,0,1
;dir2=@crys_hkl:0,-1,1@lab:0,1,0""",3453455)
require_flteq(591.0256514168468,gesc.crossSection(wl2ekin(1.540),( 0., 1., 1. )))
require_flteq(1.666903965431398,gesc.crossSection(wl2ekin(1.540),( 1., 1., 0. )))
| #!/usr/bin/env python3
"""Python module for using the NCrystal library for thermal neutron transport in crystals
Please find more information about NCrystal at the website:
https://mctools.github.io/ncrystal/
In particular, a small example using the NCrystal python module can be found at:
https://github.com/mctools/ncrystal/blob/master/examples/ncrystal_example_py
A substantial effort went into developing NCrystal. If you use it for your work,
we would appreciate it if you would cite the following reference:
<NAME> and <NAME>, NCrystal: A library for thermal neutron
transport, Computer Physics Communications 246 (2020) 106851,
https://doi.org/10.1016/j.cpc.2019.07.015
For work benefitting from our inelastic physics, we furthermore request that you
also cite the following reference:
<NAME>, <NAME>, et. al., "Rejection-based sampling of inelastic
neutron scattering", Journal of Computational Physics 380 (2019) 400-407,
https://doi.org/10.1016/j.jcp.2018.11.043
For detailed usage conditions and licensing of this open source project, see:
https://github.com/mctools/ncrystal/blob/master/NOTICE
https://github.com/mctools/ncrystal/blob/master/LICENSE
https://github.com/mctools/ncrystal/blob/master/ncrystal_extra/LICENSE
"""
################################################################################
## ##
## This file is part of NCrystal (see https://mctools.github.io/ncrystal/) ##
## ##
## Copyright 2015-2021 NCrystal developers ##
## ##
## Licensed under the Apache License, Version 2.0 (the "License"); ##
## you may not use this file except in compliance with the License. ##
## You may obtain a copy of the License at ##
## ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
## Unless required by applicable law or agreed to in writing, software ##
## distributed under the License is distributed on an "AS IS" BASIS, ##
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ##
## See the License for the specific language governing permissions and ##
## limitations under the License. ##
## ##
################################################################################
__license__ = "Apache 2.0, http://www.apache.org/licenses/LICENSE-2.0"
__version__ = '2.6.1'
__status__ = "Production"
__author__ = "NCrystal developers (<NAME>, <NAME>)"
__copyright__ = "Copyright 2015-2021 %s"%__author__
__maintainer__ = __author__
__email__ = "<EMAIL>"
#Only put the few most important items in __all__, to prevent cluttering on
#wildcard imports. Specifically this is the exceptions, the most important API
#classes, the factory functions, and the constants:
__all__ = [ 'NCException','NCFileNotFound','NCDataLoadError','NCMissingInfo','NCCalcError',
'NCLogicError','NCBadInput','RCBase','TextData','Info','CalcBase','Process',
'Absorption','Scatter','AtomData','FileListEntry','createTextData',
'createInfo','createScatter','createScatterIndependentRNG','createAbsorption',
'constant_c','constant_dalton2kg','constant_dalton2eVc2','constant_avogadro',
'constant_boltzmann','const_neutron_mass_amu','constant_planck']
import sys
pyversion = sys.version_info[0:3]
_minpyversion=(3,5,0)
if pyversion < (3,0,0):
raise SystemExit('NCrystal no longer supports Python2.')
if pyversion < _minpyversion:
print('WARNING: Unsupported python version %i.%i.%i detected (recommended is %i.%i.%i or later).'%(pyversion+_minpyversion))
import numbers
import pathlib
import os
import copy
import ctypes
import weakref
###################################
#Convert cstr<->str:
def _str2cstr(s):
#converts any string (str,bytes,unicode,path) to bytes
if hasattr(s,'__fspath__'):
s=str(s)
try:
return s if isinstance(s,bytes) else s.encode('ascii')
except UnicodeEncodeError:
#Attempt with file-system encoding, in case of non-ASCII path names:
return s.encode(sys.getfilesystemencoding())
def _cstr2str(s):
#converts bytes object to str (unicode in py3, bytes in py2)
try:
return s if isinstance(s,str) else s.decode('ascii')
except UnicodeDecodeError:
return s.decode(sys.getfilesystemencoding())
###################################
#Same as NCRYSTAL_VERSION macro:
version_num = sum(int(i)*j for i,j in zip(__version__.split('.'),(1000000,1000,1)))
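#Worked example of the encoding above: __version__ = '2.6.1' gives
#version_num = 2*1000000 + 6*1000 + 1 = 2006001.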
class NCException(RuntimeError):
"""Base class for all exceptions raised by NCrystal code"""
pass
class NCFileNotFound(NCException):
pass
class NCDataLoadError(NCException):
pass
class NCMissingInfo(NCException):
pass
class NCCalcError(NCException):
pass
class NCLogicError(NCException):
pass
class NCBadInput(NCException):
pass
#some constants (NB: Copied here from NCMath.hh - must keep synchronized!! Also,
#remember to include in __all__ list above):
constant_c = 299792458e10# speed of light in Aa/s
constant_dalton2kg = 1.660539040e-27# amu to kg
constant_dalton2eVc2 = 931494095.17# amu to eV/c^2
constant_avogadro = 6.022140857e23# mol^-1
constant_boltzmann = 8.6173303e-5# eV/K
const_neutron_mass_amu = 1.00866491588# [amu]
constant_planck = 4.135667662e-15 # [eV*s]
_k4Pidiv100 = 0.125663706143591729538505735331180115367886776
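#NB: _k4Pidiv100 is 4*pi/100, i.e. the factor converting a squared scattering
#length in fm^2 into a 4*pi*b^2 cross section in barn (1 barn = 100 fm^2). It is
#used in AtomData.coherentXS() further below.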
def _find_nclib():
#If NCRYSTAL_LIB env var is set, we try that and only that:
override=os.environ.get('NCRYSTAL_LIB',None)
if override:
override = pathlib.Path(override)
if not override.exists() or override.is_dir():
raise NCFileNotFound('NCRYSTAL_LIB environment variable is set but does not point to an actual file.')
return override.absolute().resolve()
try:
if __name__ != '__main__':
#normal import
from . import _nclibpath
else:
#work if running as script:
sys.path.insert(0,str(pathlib.Path(__file__).absolute().parent))
import _nclibpath
sys.path.pop(0)
except ImportError:
raise NCFileNotFound('Autogenerated _nclibpath.py module not found (it should have been generated by'
+' CMake during installation). In this case you must set the environment variable'
+' NCRYSTAL_LIB to point at the compiled NCrystal library.')
_ = pathlib.Path(_nclibpath.liblocation)
if not _.is_absolute():
_ = (pathlib.Path(__file__).absolute().parent / _)
if not _.exists() or _.is_dir():
raise NCFileNotFound('Autogenerated _nclibpath.py module was found but no file exists in the indicated'
+' library location (%s). Either reinstall NCrystal or try to use the environment variable'%_
+' NCRYSTAL_LIB to point at the compiled NCrystal library.')
return _.resolve()
try:
import numpy as _np
except ImportError:
_np = None
def _ensure_numpy():
if not _np:
raise NCException("Numpy not available - array based functionality is unavailable")
_keepalive = []
def _load(nclib_filename):
_nclib = ctypes.CDLL(nclib_filename)
_int,_intp,_uint,_uintp,_dbl,_dblp,_cstr,_voidp = (ctypes.c_int, ctypes.POINTER(ctypes.c_int),
ctypes.c_uint,ctypes.POINTER(ctypes.c_uint), ctypes.c_double,
ctypes.POINTER(ctypes.c_double), ctypes.c_char_p, ctypes.c_void_p)
_ulong = ctypes.c_ulong
_charptr = ctypes.POINTER(ctypes.c_char)
_cstrp = ctypes.POINTER(_cstr)
_cstrpp = ctypes.POINTER(_cstrp)
_dblpp = ctypes.POINTER(_dblp)
ndarray_to_dblp = lambda a : a.ctypes.data_as(_dblp)
ndarray_to_uintp = lambda a : a.ctypes.data_as(_uintp)
def _create_numpy_double_array(n):
_ensure_numpy()
a=_np.empty(n,dtype=_dbl)
return a,ndarray_to_dblp(a)
def _create_numpy_unsigned_array(n):
_ensure_numpy()
a=_np.empty(n,dtype=_uint)
return a,ndarray_to_uintp(a)
class ncrystal_info_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
class ncrystal_process_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
class ncrystal_scatter_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
class ncrystal_absorption_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
class ncrystal_atomdata_t(ctypes.Structure):
_fields_ = [('internal', _voidp)]
functions = {}
#Exceptions:
_errmap = {'FileNotFound':NCFileNotFound,
'DataLoadError':NCDataLoadError,
'MissingInfo':NCMissingInfo,
'CalcError':NCCalcError,
'LogicError':NCLogicError,
'BadInput':NCBadInput}
def _raise_err():
assert _ncerror()#checks there was an error
tm=(_cstr2str(_ncerror_type()),_cstr2str(_ncerror_msg()))
_ncerror_clear()
#TODO: Provide line number / file as well?
e=_errmap.get(tm[0],NCException)(tm[1])
e.message = tm[1]#to avoid warnings in py 2.6
raise e
#helper class for exporting the functions:
def _wrap(fct_name,restype,argtypes,take_ref = False, hide=False, error_check=True):
assert isinstance(argtypes,tuple)
raw=getattr(_nclib,fct_name)
raw.argtypes=argtypes
raw.restype=restype
if take_ref:
assert len(argtypes)==1
fct = lambda arg : raw(ctypes.byref(arg))
else:
fct = lambda *args : raw(*args)
if error_check:
#NB: ctypes also supports attaching an error-checking callable directly via the
#function's errcheck attribute, which could replace this manual wrapping.
raw_fct = fct
def fcte(*aaa):
r = raw_fct(*aaa)
if _ncerror():
_raise_err()
return r
fct=fcte
if not hide:
functions[fct_name] = fct
return fct
lib_version = _cstr2str(_wrap('ncrystal_version_str',_cstr,tuple(),hide=True,error_check=False)())
if lib_version != __version__:
raise RuntimeError("ERROR: Version mismatch detected between NCrystal python code (v%s)"
" and loaded binary"" library (v%s). Control which NCrystal library"
" to load with the NCRYSTAL_LIB env var."%(__version__,lib_version))
_wrap('ncrystal_sethaltonerror',_int,(_int,),hide=True,error_check=False)(False)
_wrap('ncrystal_setquietonerror',_int,(_int,),hide=True,error_check=False)(True)
_ncerror = _wrap('ncrystal_error',_int,tuple(),hide=True,error_check=False)
_ncerror_msg = _wrap('ncrystal_lasterror',_cstr,tuple(),hide=True,error_check=False)
_ncerror_type = _wrap('ncrystal_lasterrortype',_cstr,tuple(),hide=True,error_check=False)
_ncerror_clear = _wrap('ncrystal_clearerror',None,tuple(),hide=True,error_check=False)
_wrap('ncrystal_refcount',_int,(_voidp,),take_ref=True)
_wrap('ncrystal_valid',_int,(_voidp,),take_ref=True)
#NB: For ncrystal_unref we use take_ref=False, so RCBase.__del__ can cache
#the result of ctypes.byref(rawobj). This is needed since the ctypes module
#might have been unloaded before RCBase.__del__ is called:
_wrap('ncrystal_unref',None,(_voidp,),take_ref=False)
_wrap('ncrystal_cast_scat2proc',ncrystal_process_t,(ncrystal_scatter_t,))
_wrap('ncrystal_cast_abs2proc',ncrystal_process_t,(ncrystal_absorption_t,))
_wrap('ncrystal_dump',None,(ncrystal_info_t,))
_wrap('ncrystal_ekin2wl',_dbl,(_dbl,))
_wrap('ncrystal_wl2ekin',_dbl,(_dbl,))
_wrap('ncrystal_isnonoriented',_int,(ncrystal_process_t,))
_wrap('ncrystal_name',_cstr,(ncrystal_process_t,))
_wrap('ncrystal_debyetemp2msd',_dbl,(_dbl,_dbl,_dbl))
_wrap('ncrystal_msd2debyetemp',_dbl,(_dbl,_dbl,_dbl))
_wrap('ncrystal_create_atomdata_fromdb',ncrystal_atomdata_t,(_uint,_uint))
_wrap('ncrystal_create_atomdata_fromdbstr',ncrystal_atomdata_t,(_cstr,))
_raw_atomdb_getn = _wrap('ncrystal_atomdatadb_getnentries',_uint,tuple(), hide=True )
_raw_atomdb_getall = _wrap('ncrystal_atomdatadb_getallentries',_uint,(_uintp,_uintp), hide=True )
def atomdb_getall_za():
n = _raw_atomdb_getn()
zvals,zvalsptr = _create_numpy_unsigned_array(n)
avals,avalsptr = _create_numpy_unsigned_array(n)
_raw_atomdb_getall(zvalsptr,avalsptr)
za=_np.stack((zvals,avals)).T
return za
functions['atomdb_getall_za']=atomdb_getall_za
_wrap('ncrystal_info_natominfo',_uint,(ncrystal_info_t,))
_wrap('ncrystal_info_hasatommsd',_int,(ncrystal_info_t,))
_raw_info_getatominfo = _wrap('ncrystal_info_getatominfo',None,(ncrystal_info_t,_uint,_uintp,_uintp,_dblp,_dblp),hide=True)
def ncrystal_info_getatominfo(nfo,iatom):
atomidx,n,dt,msd=_uint(),_uint(),_dbl(),_dbl()
_raw_info_getatominfo(nfo,iatom,atomidx,n,dt,msd)
return (atomidx.value,n.value,dt.value,msd.value)
functions['ncrystal_info_getatominfo'] = ncrystal_info_getatominfo
_raw_info_getatompos = _wrap('ncrystal_info_getatompos',None,(ncrystal_info_t,_uint,_uint,_dblp,_dblp,_dblp),hide=True)
def ncrystal_info_getatompos(nfo,iatom,ipos):
x,y,z=_dbl(),_dbl(),_dbl()
_raw_info_getatompos(nfo,iatom,ipos,x,y,z)
return x.value, y.value, z.value
functions['ncrystal_info_getatompos'] = ncrystal_info_getatompos
for s in ('temperature','xsectabsorption','xsectfree','density','numberdensity'):
_wrap('ncrystal_info_get%s'%s,_dbl,(ncrystal_info_t,))
_raw_info_getstruct = _wrap('ncrystal_info_getstructure',_int,(ncrystal_info_t,_uintp,_dblp,_dblp,_dblp,_dblp,_dblp,_dblp,_dblp,_uintp))
def ncrystal_info_getstructure(nfo):
sg,natom=_uint(),_uint()
a,b,c,alpha,beta,gamma,vol = _dbl(),_dbl(),_dbl(),_dbl(),_dbl(),_dbl(),_dbl()
if _raw_info_getstruct(nfo,sg,a,b,c,alpha,beta,gamma,vol,natom) == 0:
return {}
return dict(spacegroup=int(sg.value),a=a.value,b=b.value,c=c.value,alpha=alpha.value,
beta=beta.value,gamma=gamma.value,volume=vol.value,n_atoms=int(natom.value))
functions['ncrystal_info_getstructure'] = ncrystal_info_getstructure
_wrap('ncrystal_info_nhkl',_int,(ncrystal_info_t,))
_wrap('ncrystal_info_hkl_dlower',_dbl,(ncrystal_info_t,))
_wrap('ncrystal_info_hkl_dupper',_dbl,(ncrystal_info_t,))
_wrap('ncrystal_info_gethkl',None,(ncrystal_info_t,_int,_intp,_intp,_intp,_intp,_dblp,_dblp))
_wrap('ncrystal_info_dspacing_from_hkl',_dbl,(ncrystal_info_t,_int,_int,_int))
functions['ncrystal_info_gethkl_setuppars'] = lambda : (_int(),_int(),_int(),_int(),_dbl(),_dbl())
_wrap('ncrystal_info_ndyninfo',_uint,(ncrystal_info_t,))
_raw_di_base = _wrap('ncrystal_dyninfo_base',None,(ncrystal_info_t,_uint,_dblp,_uintp,_dblp,_uintp),hide=True)
_raw_di_scatknl = _wrap('ncrystal_dyninfo_extract_scatknl',None,(ncrystal_info_t,_uint,_uint,_dblp,_uintp,_uintp,_uintp,
_dblpp,_dblpp,_dblpp,_dblpp),hide=True)
_raw_di_vdos = _wrap('ncrystal_dyninfo_extract_vdos',None,(ncrystal_info_t,_uint,_dblp,_dblp,_uintp,_dblpp),hide=True)
_raw_di_vdosdebye = _wrap('ncrystal_dyninfo_extract_vdosdebye',None,(ncrystal_info_t,_uint,_dblp),hide=True)
_raw_di_vdos_input = _wrap('ncrystal_dyninfo_extract_vdos_input',None,(ncrystal_info_t,_uint,_uintp,_dblpp,_uintp,_dblpp),hide=True)
def ncrystal_dyninfo_base(key):
infoobj,dynidx = key
fr,tt,atomindex,ditype=_dbl(),_dbl(),_uint(),_uint()
_raw_di_base(infoobj,dynidx,fr,atomindex,tt,ditype)
return (fr.value,tt.value,atomindex.value,ditype.value)
def ncrystal_dyninfo_extract_scatknl(key,vdoslux):
infoobj,dynidx = key
sugEmax,ne,na,nb,e,a,b,sab = _dbl(),_uint(),_uint(),_uint(),_dblp(),_dblp(),_dblp(),_dblp()
_raw_di_scatknl(infoobj,dynidx,vdoslux,sugEmax,ne,na,nb,
ctypes.byref(e),ctypes.byref(a),ctypes.byref(b),ctypes.byref(sab))
return (sugEmax.value,ne.value,na.value,nb.value,e,a,b,sab)
def ncrystal_dyninfo_extract_vdos(key):
infoobj,dynidx = key
egrid_min,egrid_max,ndensity,densityptr = _dbl(),_dbl(),_uint(),_dblp()
_raw_di_vdos(infoobj,dynidx,egrid_min,egrid_max,ndensity,ctypes.byref(densityptr))
return (egrid_min.value,egrid_max.value,ndensity.value,densityptr)
def ncrystal_dyninfo_extract_vdosdebye(key):
infoobj,dynidx = key
td=_dbl()
_raw_di_vdosdebye(infoobj,dynidx,td)
return td.value
def ncrystal_dyninfo_extract_vdos_input(key):
infoobj,dynidx = key
negrid,egridptr,ndensity,densityptr = _uint(),_dblp(),_uint(),_dblp()
_raw_di_vdos_input(infoobj,dynidx,negrid,ctypes.byref(egridptr),ndensity,ctypes.byref(densityptr))
return (negrid.value,egridptr,ndensity.value,densityptr)
functions['ncrystal_dyninfo_base'] = ncrystal_dyninfo_base
functions['ncrystal_dyninfo_extract_scatknl'] = ncrystal_dyninfo_extract_scatknl
functions['ncrystal_dyninfo_extract_vdos'] = ncrystal_dyninfo_extract_vdos
functions['ncrystal_dyninfo_extract_vdosdebye'] = ncrystal_dyninfo_extract_vdosdebye
functions['ncrystal_dyninfo_extract_vdos_input'] = ncrystal_dyninfo_extract_vdos_input
_wrap('ncrystal_info_ncomponents',_uint,(ncrystal_info_t,))
_raw_info_getcomp=_wrap('ncrystal_info_getcomponent',None,(ncrystal_info_t,_uint,_uintp,_dblp),hide=True)
def ncrystal_info_getcomp(nfo,icomp):
aidx,fraction=_uint(),_dbl()
_raw_info_getcomp(nfo,icomp,aidx,fraction)
return aidx.value,fraction.value
functions['ncrystal_info_getcomp']=ncrystal_info_getcomp
_wrap('ncrystal_create_atomdata',ncrystal_atomdata_t,(ncrystal_info_t,_uint))
_raw_atomdata_subcomp = _wrap('ncrystal_create_atomdata_subcomp',ncrystal_atomdata_t,
(ncrystal_atomdata_t,_uint,_dblp),hide=True)
_raw_atomdata_getfields=_wrap('ncrystal_atomdata_getfields',None,(ncrystal_atomdata_t,_cstrp,_cstrp,
_dblp,_dblp,_dblp,_dblp,
_uintp,_uintp,_uintp),hide=True)
def ncrystal_atomdata_createsubcomp(ad,icomp):
fraction = _dbl()
comp_ad = _raw_atomdata_subcomp(ad,icomp,fraction)
return (comp_ad,fraction.value)
functions['ncrystal_atomdata_createsubcomp']=ncrystal_atomdata_createsubcomp
def ncrystal_atomdata_getfields(ad):
mass_amu,sigma_inc,scatlen_coh,sigma_abs=_dbl(),_dbl(),_dbl(),_dbl()
dl,descr=_cstr(),_cstr()
ncomp,zval,aval = _uint(),_uint(),_uint()
_raw_atomdata_getfields(ad,ctypes.byref(dl),ctypes.byref(descr),
mass_amu,sigma_inc,scatlen_coh,sigma_abs,
ncomp,zval,aval)
return dict(m=mass_amu.value,incxs=sigma_inc.value,cohsl_fm=scatlen_coh.value,absxs=sigma_abs.value,
dl=_cstr2str(dl.value),descr=_cstr2str(descr.value),
ncomp=ncomp.value,z=zval.value,a=aval.value)
functions['ncrystal_atomdata_getfields'] = ncrystal_atomdata_getfields
_raw_ncustom = _wrap('ncrystal_info_ncustomsections',_uint,(ncrystal_info_t,),hide=True)
_raw_csec_name = _wrap('ncrystal_info_customsec_name',_cstr,(ncrystal_info_t,_uint),hide=True)
_raw_csec_nlines = _wrap('ncrystal_info_customsec_nlines',_uint,(ncrystal_info_t,_uint),hide=True)
_raw_csec_nparts = _wrap('ncrystal_info_customline_nparts',_uint,(ncrystal_info_t,_uint,_uint),hide=True)
_raw_csec_part = _wrap('ncrystal_info_customline_getpart',_cstr,(ncrystal_info_t,_uint,_uint,_uint),hide=True)
def ncrystal_info_getcustomsections(nfo):
n=_raw_ncustom(nfo)
if n==0:
return tuple()
out=[]
for isec in range(n):
lines=[]
secname = _cstr2str(_raw_csec_name(nfo,isec))
nlines = _raw_csec_nlines(nfo,isec)
for iline in range(nlines):
nparts=_raw_csec_nparts(nfo,isec,iline)
parts=[]
for ipart in range(nparts):
parts.append(_cstr2str(_raw_csec_part(nfo,isec,iline,ipart)))
lines.append(tuple(parts))
out.append((secname,tuple(lines)))
return tuple(out)
functions['ncrystal_info_getcustomsections'] = ncrystal_info_getcustomsections
_raw_reginmemfd = _wrap('ncrystal_register_in_mem_file_data',None,(_cstr,_cstr),hide=True)
def ncrystal_register_in_mem_file_data(virtual_filename,data):
_raw_reginmemfd(_str2cstr(virtual_filename),
_str2cstr(data))
functions['ncrystal_register_in_mem_file_data']=ncrystal_register_in_mem_file_data
def _prepare_many(ekin,repeat):
if _np is None and repeat is not None:
raise NCBadInput('Can not use "repeat" parameter when Numpy is absent on the system')
if repeat is None and not hasattr(ekin,'__len__'):
return None#scalar case, array interface not triggered
repeat = 1 if repeat is None else repeat
ekin = (ekin if hasattr(ekin,'ctypes') else _np.asfarray(ekin) ) if hasattr(ekin,'__len__') else _np.ones(1)*ekin
#NB: returning the ekin object itself is important in order to keep a reference to it after the call:
return ndarray_to_dblp(ekin),len(ekin),repeat,ekin
_raw_xs_no = _wrap('ncrystal_crosssection_nonoriented',None,(ncrystal_process_t,_dbl,_dblp),hide=True)
_raw_xs_no_many = _wrap('ncrystal_crosssection_nonoriented_many',None,(ncrystal_process_t,_dblp,_ulong,
_ulong,_dblp),hide=True)
def ncrystal_crosssection_nonoriented(scat,ekin,repeat=None):
many = _prepare_many(ekin,repeat)
if many is None:
res = _dbl()
_raw_xs_no(scat,ekin,res)
return res.value
else:
ekin_ct,n_ekin,repeat,ekin_nparr = many
xs, xs_ct = _create_numpy_double_array(n_ekin*repeat)
_raw_xs_no_many(scat,ekin_ct,n_ekin,repeat,xs_ct)
return xs
functions['ncrystal_crosssection_nonoriented'] = ncrystal_crosssection_nonoriented
_raw_domain = _wrap('ncrystal_domain',None,(ncrystal_process_t,_dblp,_dblp),hide=True)
def ncrystal_domain(proc):
a,b = _dbl(),_dbl()
_raw_domain(proc,a,b)
return (a.value,b.value)
functions['ncrystal_domain'] = ncrystal_domain
_raw_samplesct_iso =_wrap('ncrystal_samplescatterisotropic',None,(ncrystal_scatter_t,_dbl,_dblp,_dblp),hide=True)
_raw_samplesct_iso_many =_wrap('ncrystal_samplescatterisotropic_many',None,
(ncrystal_scatter_t,_dblp,_ulong,_ulong,_dblp,_dblp),hide=True)
_raw_samplescat = _wrap('ncrystal_samplescatter',None,( ncrystal_scatter_t, _dbl,_dbl*3,_dblp,_dbl*3),hide=True)
_raw_samplescat_many = _wrap('ncrystal_samplescatter_many',None,( ncrystal_scatter_t,_dbl,_dbl*3,_ulong,
_dblp,_dblp,_dblp,_dblp),hide=True)
def ncrystal_samplesct_iso(scat,ekin,repeat=None):
many = _prepare_many(ekin,repeat)
if many is None:
ekin_final,mu = _dbl(),_dbl()
_raw_samplesct_iso(scat,ekin,ekin_final,mu)
return ekin_final.value,mu.value
else:
ekin_ct,n_ekin,repeat,ekin_nparr = many
ekin_final, ekin_final_ct = _create_numpy_double_array(n_ekin*repeat)
mu, mu_ct = _create_numpy_double_array(n_ekin*repeat)
_raw_samplesct_iso_many(scat,ekin_ct,n_ekin,repeat,ekin_final_ct,mu_ct)
return ekin_final,mu
functions['ncrystal_samplesct_iso'] = ncrystal_samplesct_iso
def ncrystal_samplesct(scat, ekin, direction, repeat):
cdir = (_dbl * 3)(*direction)
if not repeat:
res_dir = (_dbl * 3)(0,0,0)
res_ekin = _dbl()
_raw_samplescat(scat,ekin,cdir,res_ekin,res_dir)
return res_ekin.value,(res_dir[0],res_dir[1],res_dir[2])
else:
assert repeat>=1
res_ekin, res_ekin_ct = _create_numpy_double_array(repeat)
res_ux, res_ux_ct = _create_numpy_double_array(repeat)
res_uy, res_uy_ct = _create_numpy_double_array(repeat)
res_uz, res_uz_ct = _create_numpy_double_array(repeat)
_raw_samplescat_many(scat,ekin,cdir,repeat,res_ekin_ct,res_ux_ct,res_uy_ct,res_uz_ct)
return res_ekin,(res_ux,res_uy,res_uz)
functions['ncrystal_samplesct']=ncrystal_samplesct
_raw_xs = _wrap('ncrystal_crosssection',None,(ncrystal_process_t,_dbl,_dbl*3,_dblp),hide=True)
def ncrystal_crosssection( proc, ekin, direction):
res = _dbl()
cdir = (_dbl * 3)(*direction)
_raw_xs(proc,ekin,cdir,res)
return res.value
functions['ncrystal_crosssection'] = ncrystal_crosssection
#Obsolete:
_raw_gs_no = _wrap('ncrystal_genscatter_nonoriented',None,(ncrystal_scatter_t,_dbl,_dblp,_dblp),hide=True)
_raw_gs_no_many = _wrap('ncrystal_genscatter_nonoriented_many',None,(ncrystal_scatter_t,_dblp,_ulong,
_ulong,_dblp,_dblp),hide=True)
def ncrystal_genscatter_nonoriented(scat,ekin,repeat=None):
many = _prepare_many(ekin,repeat)
if many is None:
angle,de = _dbl(),_dbl()
_raw_gs_no(scat,ekin,angle,de)
return angle.value,de.value
else:
ekin_ct,n_ekin,repeat,ekin_nparr = many
angle, angle_ct = _create_numpy_double_array(n_ekin*repeat)
de, de_ct = _create_numpy_double_array(n_ekin*repeat)
_raw_gs_no_many(scat,ekin_ct,n_ekin,repeat,angle_ct,de_ct)
return angle,de
functions['ncrystal_genscatter_nonoriented'] = ncrystal_genscatter_nonoriented
_raw_gs = _wrap('ncrystal_genscatter',None,(ncrystal_scatter_t,_dbl,_dbl*3,_dbl*3,_dblp),hide=True)
_raw_gs_many = _wrap('ncrystal_genscatter_many',None,(ncrystal_scatter_t,_dbl,_dbl*3,
_ulong,_dblp,_dblp,_dblp,_dblp),hide=True)
def ncrystal_genscatter(scat, ekin, direction, repeat):
cdir = (_dbl * 3)(*direction)
if not repeat:
res_dir = (_dbl * 3)(0,0,0)
res_de = _dbl()
_raw_gs(scat,ekin,cdir,res_dir,res_de)
return (res_dir[0],res_dir[1],res_dir[2]),res_de.value
else:
assert repeat>=1
res_ux, res_ux_ct = _create_numpy_double_array(repeat)
res_uy, res_uy_ct = _create_numpy_double_array(repeat)
res_uz, res_uz_ct = _create_numpy_double_array(repeat)
res_de, res_de_ct = _create_numpy_double_array(repeat)
_raw_gs_many(scat,ekin,cdir,repeat,res_ux_ct,res_uy_ct,res_uz_ct,res_de_ct)
return (res_ux,res_uy,res_uz),res_de
functions['ncrystal_genscatter']=ncrystal_genscatter
_wrap('ncrystal_create_info',ncrystal_info_t,(_cstr,))
_wrap('ncrystal_create_scatter',ncrystal_scatter_t,(_cstr,))
_wrap('ncrystal_create_scatter_builtinrng',ncrystal_scatter_t,(_cstr,_ulong))
_wrap('ncrystal_create_absorption',ncrystal_absorption_t,(_cstr,))
_raw_multicreate_direct = _wrap('ncrystal_multicreate_direct',None,
( _cstr, _cstr, _cstr,
ctypes.POINTER(ncrystal_info_t),
ctypes.POINTER(ncrystal_scatter_t),
ctypes.POINTER(ncrystal_absorption_t) ),hide=True)
nullptr_ncrystal_info_t = ctypes.cast(None, ctypes.POINTER(ncrystal_info_t))
nullptr_ncrystal_scatter_t = ctypes.cast(None, ctypes.POINTER(ncrystal_scatter_t))
nullptr_ncrystal_absorption_t = ctypes.cast(None, ctypes.POINTER(ncrystal_absorption_t))
def multicreate_direct(data,dataType,cfg_params,doI,doS,doA):
rawi = ncrystal_info_t() if doI else None
raws = ncrystal_scatter_t() if doS else None
rawa = ncrystal_absorption_t() if doA else None
_raw_multicreate_direct( _str2cstr(data),_str2cstr(dataType or "" ),_str2cstr(cfg_params or ""),
ctypes.byref(rawi) if rawi else nullptr_ncrystal_info_t,
ctypes.byref(raws) if raws else nullptr_ncrystal_scatter_t,
ctypes.byref(rawa) if rawa else nullptr_ncrystal_absorption_t )
return rawi,raws,rawa
functions['multicreate_direct'] = multicreate_direct
_wrap('ncrystal_setbuiltinrandgen',None,tuple())
_RANDGENFCTTYPE = ctypes.CFUNCTYPE( _dbl )
_raw_setrand = _wrap('ncrystal_setrandgen',None,(_RANDGENFCTTYPE,),hide=True)
def ncrystal_setrandgen(randfct):
#Set random function, keeping references as needed (otherwise fct ptrs
#kept on C++ side will suddenly stop working!) and casting None to a null-ptr.
if not randfct:
keepalive=(None,ctypes.cast(None, _RANDGENFCTTYPE))
else:
keepalive=(randfct,_RANDGENFCTTYPE(randfct))#keep refs!
_keepalive.append(keepalive)
_raw_setrand(keepalive[1])
functions['ncrystal_setrandgen'] = ncrystal_setrandgen
_wrap('ncrystal_clone_absorption',ncrystal_absorption_t,(ncrystal_absorption_t,))
_wrap('ncrystal_clone_scatter',ncrystal_scatter_t,(ncrystal_scatter_t,))
_wrap('ncrystal_clone_scatter_rngbyidx',ncrystal_scatter_t,(ncrystal_scatter_t,_ulong))
_wrap('ncrystal_clone_scatter_rngforcurrentthread',ncrystal_scatter_t,(ncrystal_scatter_t,))
_wrap('ncrystal_decodecfg_packfact',_dbl,(_cstr,))
_wrap('ncrystal_decodecfg_vdoslux',_uint,(_cstr,))
_wrap('ncrystal_disable_caching',None,tuple())
_wrap('ncrystal_enable_caching',None,tuple())
_wrap('ncrystal_has_factory',_int,(_cstr,))
_wrap('ncrystal_clear_caches',None,tuple())
_wrap('ncrystal_rngsupportsstatemanip_ofscatter',_int,( ncrystal_scatter_t, ))
_wrap('ncrystal_setrngstate_ofscatter',None,(ncrystal_scatter_t, _cstr))
_raw_getrngstate_scat = _wrap('ncrystal_getrngstate_ofscatter',_charptr,( ncrystal_scatter_t,),hide=True)
def nc_getrngstate_scat(rawscatobj):
rawstate = _raw_getrngstate_scat(rawscatobj)
if not rawstate:
#null ptr, i.e. state manipulation is not supported
return None
state=_cstr2str(ctypes.cast(rawstate,_cstr).value)
_raw_deallocstr(rawstate)
return state
functions['nc_getrngstate_scat']=nc_getrngstate_scat
_raw_gettextdata = _wrap('ncrystal_get_text_data',_cstrp,(_cstr,),hide=True)
_raw_deallocstr = _wrap('ncrystal_dealloc_string',None,(_charptr,),hide=True)
def nc_gettextdata(name):
l = _raw_gettextdata(_str2cstr(str(name)))
assert l is not None
n = 5
res = [l[i].decode() for i in range(n)]
assert isinstance(res[0],str)
_raw_deallocstrlist(n,l)
return res
functions['nc_gettextdata'] = nc_gettextdata
_raw_getfilelist = _wrap('ncrystal_get_file_list',None,(_uintp,_cstrpp),hide=True)
_raw_deallocstrlist = _wrap('ncrystal_dealloc_stringlist',None,(_uint,_cstrp),hide=True)
def ncrystal_get_filelist():
n,l = _uint(),_cstrp()
_raw_getfilelist(n,ctypes.byref(l))
assert n.value%4==0
res=[]
for i in range(n.value//4):
res += [ (l[i*4].decode(),l[i*4+1].decode(),l[i*4+2].decode(),l[i*4+3].decode()) ]
_raw_deallocstrlist(n,l)
return res
functions['ncrystal_get_filelist'] = ncrystal_get_filelist
_raw_getpluginlist = _wrap('ncrystal_get_plugin_list',None,(_uintp,_cstrpp),hide=True)
def ncrystal_get_pluginlist():
n,l = _uint(),_cstrp()
_raw_getpluginlist(n,ctypes.byref(l))
assert n.value%3==0
res=[]
for i in range(n.value//3):
pluginname,filename,plugintype=l[i*3].decode(),l[i*3+1].decode(),l[i*3+2].decode()
res+=[(pluginname,filename,plugintype)]
_raw_deallocstrlist(n,l)
return res
functions['ncrystal_get_pluginlist'] = ncrystal_get_pluginlist
_wrap('ncrystal_add_custom_search_dir',None,(_cstr,))
_wrap('ncrystal_remove_custom_search_dirs',None,tuple())
_wrap('ncrystal_enable_abspaths',None,(_int,))
_wrap('ncrystal_enable_relpaths',None,(_int,))
_wrap('ncrystal_enable_stddatalib',None,(_int,_cstr))
_wrap('ncrystal_enable_stdsearchpath',None,(_int,))
_wrap('ncrystal_remove_all_data_sources',None,tuple())
return functions
_rawfct = _load(_find_nclib())
def decodecfg_packfact(cfgstr):
"""Extract packfact value from cfgstr"""
return float(_rawfct['ncrystal_decodecfg_packfact'](_str2cstr(cfgstr)))
def decodecfg_vdoslux(cfgstr):
"""Extract vdoslux value from cfgstr"""
return int(_rawfct['ncrystal_decodecfg_vdoslux'](_str2cstr(cfgstr)))
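#Minimal usage sketch of the two decode helpers above (the cfg-string below is
#an assumed example, not a requirement):
#
#    decodecfg_vdoslux('Al_sg225.ncmat;vdoslux=1')   # -> 1
#    decodecfg_packfact('Al_sg225.ncmat')            # -> packfact value (default when not set)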
def createVDOSDebye(debye_temperature):
"""Create simplified VDOS according to the Debye model"""
_ensure_numpy()
#NB: Must keep function exactly synchronised with createVDOSDebye function
#in .cc src (although leaving out temperature,boundXS,elementMassAMU args
#here):
debye_energy = constant_boltzmann*debye_temperature
vdos_egrid = _np.linspace(0.5*debye_energy,debye_energy,20)
scale = 1.0 / (debye_energy*debye_energy)
vdos_density = scale * (vdos_egrid**2)
#Actual returned egrid should contain only first and last value:
return (_np.asarray([vdos_egrid[0],vdos_egrid[-1]]) ,vdos_density)
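#Illustrative sketch of the helper above (the Debye temperature value is an
#assumed example):
#
#    egrid, density = createVDOSDebye(300.0)
#    #egrid holds only (emin,emax) while density holds all 20 grid values,
#    #rising quadratically with energy as in the Debye model.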
class RCBase:
"""Base class for all NCrystal objects"""
def __init__(self, rawobj):
"""internal usage only"""
self._rawobj = rawobj
#do not ref here, since ncrystal_create_xxx functions in C-interface already did so.
self._rawunref = _rawfct['ncrystal_unref']#keep fct reference
self.__rawobj_byref = ctypes.byref(rawobj)#keep byref(rawobj), since ctypes might
#disappear before __del__ is called.
def __del__(self):
if hasattr(self,'_rawunref') and self._rawunref:
self._rawunref(self.__rawobj_byref)
def refCount(self):
"""Access reference count of wrapped C++ object"""
return _rawfct['ncrystal_refcount'](self._rawobj)
def nc_assert(b,msg=""):
"""Assertion which throws NCLogicError on failure"""
if not bool(b):
raise NCLogicError(msg if msg else 'assertion failed')
class AtomData(RCBase):
"""Class providing physical constants related to a particular mix of
isotopes. This can be used to represent elements (i.e. all isotopes having
same Z) in either natural or enriched form, but can also be used to
represent atoms in doped crystals. E.g. if a small fraction (0.1%) of
Cr-ions replace some Al-ions in an Al2O3 lattice, the AtomData could
represent a mix of 0.1% Cr and 99.9% Al.
"""
def __init__(self,rawobj):
"""internal usage only"""
super(AtomData, self).__init__(rawobj)
f=_rawfct['ncrystal_atomdata_getfields'](rawobj)
self.__m = f['m']
self.__incxs = f['incxs']
self.__cohsl_fm = f['cohsl_fm']
self.__absxs = f['absxs']
self.__dl = f['dl']
self.__descr = f['descr']
self.__ncomp = f['ncomp']
self.__z = f['z']
self.__a = f['a']
self.__b2f = (self.__m/(self.__m+const_neutron_mass_amu))**2
self.__comp = [None]*self.__ncomp
self.__compalldone = (self.__ncomp==0)
def averageMassAMU(self):
"""Atomic mass in Daltons (averaged appropriately over constituents)"""
return self.__m
def coherentScatLen(self):
"""Coherent scattering length in sqrt(barn)=10fm"""
return self.__cohsl_fm*0.1#factor 0.1 converts fm to sqrt(barn), since 1 sqrt(barn) = 10 fm
def coherentScatLenFM(self):
"""Coherent scattering length in fm"""
return self.__cohsl_fm
def coherentXS(self):
"""Bound coherent cross section in barn. Same as 4*pi*coherentScatLen()**2"""
return _k4Pidiv100*self.__cohsl_fm**2
def incoherentXS(self):
"""Bound incoherent cross section in barn"""
return self.__incxs
def scatteringXS(self):
"""Bound scattering cross section in barn (same as coherentXS()+incoherentXS())"""
return self.__incxs+self.coherentXS()
def captureXS(self):
"""Absorption cross section in barn"""
return self.__absxs
def freeScatteringXS(self):
"""Free scattering cross section in barn (same as freeCoherentXS()+freeIncoherentXS())"""
return self.__b2f * self.scatteringXS()
def freeCoherentXS(self):
"""Free coherent cross section in barn."""
return self.__b2f * self.coherentXS()
def freeIncoherentXS(self):
"""Free incoherent cross section in barn."""
return self.__b2f * self.incoherentXS()
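#NB: The free cross sections above follow from the bound values via the factor
#(M/(M+m_n))^2 stored in self.__b2f, i.e. approximately sigma_free = sigma_bound*(A/(A+1))^2
#for an atom of mass number A.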
def isNaturalElement(self):
"""Natural element with no composition."""
return self.__z!=0 and self.__ncomp==0 and self.__a==0
def isSingleIsotope(self):
"""Single isotope with no composition."""
return self.__a!=0
def isComposite(self):
"""Composite definition. See nComponents(), getComponent() and components property"""
return self.__ncomp!=0
def isElement(self):
"""If number of protons per nuclei is well defined. This is true for natural
elements, single isotopes, and composites where all components
have the same number of protons per nuclei."""
return self.__z!=0
def Z(self):
"""Number of protons per nuclei (0 if not well defined)."""
return self.__z
def elementName(self):
"""If Z()!=0, this returns the corresponding element name ('H', 'He', ...).
Returns empty string when Z() is 0."""
if not self.__z:
return ''
#NB: We are relying on natural elements to return their element names in
#description(false). This is promised by a comment in NCAtomData.hh!
if self.isNaturalElement():
return self.__descr
return atomDB(self.__z).description(False)
def A(self):
"""Number of nucleons per nuclei (0 if not well defined or natural element)."""
return self.__a
class Component:
def __init__(self,fr,ad):
"""internal usage only"""
self.__fr = fr
self.__ad = ad
assert not ad.isTopLevel()
@property
def fraction(self):
"""Fraction (by count) of component in mixture"""
return self.__fr
@property
def data(self):
"""AtomData of component"""
return self.__ad
def __str__(self):
return '%g*AtomData(%s)'%(self.__fr,self.__ad.description(True))
def nComponents(self):
"""Number of sub-components in a mixture"""
return self.__ncomp
def getComponent(self,icomponent):
"""Get component in a mixture"""
c=self.__comp[icomponent]
if c:
return c
rawobj_subc,fraction=_rawfct['ncrystal_atomdata_createsubcomp'](self._rawobj,icomponent)
ad = AtomData(rawobj_subc)
c = AtomData.Component(fraction,ad)
self.__comp[icomponent] = c
return c
def getAllComponents(self):
"""Get list of all components"""
if self.__compalldone:
return self.__comp
for i,c in enumerate(self.__comp):
if not c:
self.getComponent(i)
self.__compalldone=True
return self.__comp
components = property(getAllComponents)
def displayLabel(self):
"""Short label which unique identifies an atom role within a particular material."""
return self.__dl
def isTopLevel(self):
"""Whether or not AtomData appears directly on an Info object (if not, it must
be a component (direct or indirect) of a top-level AtomData object)."""
return bool(self.__dl)
def description(self,includeValues=True):
"""Returns description of material as a string, with or without values."""
if includeValues:
zstr=' Z=%i'%self.__z if self.__z else ''
astr=' A=%i'%self.__a if self.__a else ''
_=(self.__descr,self.__cohsl_fm,self.coherentXS(),self.__incxs,
self.__absxs,self.__m,zstr,astr)
return'%s(cohSL=%gfm cohXS=%gbarn incXS=%gbarn absXS=%gbarn mass=%gamu%s%s)'%_
return self.__descr
def __str__(self):
descr=self.description()
return '%s=%s'%(self.__dl,descr) if self.__dl else descr
class Info(RCBase):
"""Class representing information about a given material"""
def __init__(self, cfgstr):
"""create Info object based on cfg-string (same as using createInfo(cfgstr))"""
if isinstance(cfgstr,tuple) and len(cfgstr)==2 and cfgstr[0]=='_rawobj_':
#Already got an ncrystal_info_t object:
rawobj = cfgstr[1]
else:
rawobj = _rawfct['ncrystal_create_info'](_str2cstr(cfgstr))
super(Info, self).__init__(rawobj)
self.__dyninfo=None
self.__atominfo=None
self.__custom=None
self.__atomdatas=[]
self.__comp=None
def _initComp(self):
assert self.__comp is None
nc = _rawfct['ncrystal_info_ncomponents'](self._rawobj)
self.__comp = []
for icomp in range(nc):
atomidx,fraction = _rawfct['ncrystal_info_getcomp'](self._rawobj,icomp)
self.__comp += [(fraction,self._provideAtomData(atomidx))]
return self.__comp
def hasComposition(self):
"""Whether basic composition is available."""
return bool(self._initComp() if self.__comp is None else self.__comp)
def getComposition(self):
"""Get basic composition as list of (fraction,AtomData). The list is empty when
no composition is available, and is always consistent with AtomInfo/DynInfo (if
present). """
return self._initComp() if self.__comp is None else self.__comp
composition=property(getComposition)
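#Minimal usage sketch of the composition accessors above (the cfg-string is an
#assumed example):
#
#    info = createInfo('Al_sg225.ncmat')
#    for fraction, atomdata in info.composition:
#        print( '%5.1f%% %s'%(fraction*100.0, atomdata.description(False)) )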
def dump(self):
"""Dump contained information to standard output"""
sys.stdout.flush()
sys.stderr.flush()
_rawfct['ncrystal_dump'](self._rawobj)
def hasTemperature(self):
"""Whether or not material has a temperature available"""
return _rawfct['ncrystal_info_gettemperature'](self._rawobj)>-1
def getTemperature(self):
"""Material temperature (in kelvin)"""
t=_rawfct['ncrystal_info_gettemperature'](self._rawobj)
nc_assert(t>-1)
return t
def hasGlobalDebyeTemperature(self):
"""OBSOLETE FUNCTION: The concept of global versus per-element Debye
temperatures has been removed. Please iterate over AtomInfo objects
instead (see getAtomInfos() function) and get the Debye Temperature
from those. This function will be removed in a future release.
"""
return False
def getGlobalDebyeTemperature(self):
"""OBSOLETE FUNCTION: The concept of global versus per-element Debye
temperatures has been removed. Please iterate over AtomInfo objects
instead (see getAtomInfos() function) and get the Debye Temperature
from those. Calling this function will always result in an exception
thrown for now, and the function will be removed in a future release.
"""
raise NCLogicError('The concept of global Debye temperatures has been removed. Iterate over'
+' AtomInfo objects instead and get the Debye temperature values from those.')
return None
def hasAtomDebyeTemp(self):
"""Whether AtomInfo objects are present and have Debye temperatures available
(they will either all have them available, or none of them will have
them available).
"""
if self.__atominfo is None:
self.__initAtomInfo()
return self.__atominfo[3]
def hasDebyeTemperature(self):
"""Alias for hasAtomDebyeTemp()."""
return self.hasAtomDebyeTemp()
def hasAnyDebyeTemperature(self):
"""OBSOLETE FUNCTION which will be removed in a future release. Please
call hasDebyeTemperature() instead.
"""
return self.hasAtomDebyeTemp()
def getDebyeTemperatureByElement(self,atomdata):
"""OBSOLETE FUNCTION which will be removed in a future release. Please access
the AtomInfo objects instead and query the Debye temperature there.
"""
if atomdata.isTopLevel():
for ai in self.atominfos:
if atomdata is ai.atomData:
return ai.debyeTemperature
raise NCBadInput('Invalid atomdata object passed to Info.getDebyeTemperatureByElement'
+' (must be top-level AtomData from the same Info object)')
def hasDensity(self):
"""Whether or not material has density available"""
return _rawfct['ncrystal_info_getdensity'](self._rawobj)>-1
def getDensity(self):
"""Get density in g/cm^3. See also getNumberDensity()."""
t=_rawfct['ncrystal_info_getdensity'](self._rawobj)
nc_assert(t>-1)
return t
def hasNumberDensity(self):
"""Whether or not material has number density available"""
return _rawfct['ncrystal_info_getnumberdensity'](self._rawobj)>-1
def getNumberDensity(self):
"""Get number density in atoms/angstrom^3. See also getDensity()."""
t=_rawfct['ncrystal_info_getnumberdensity'](self._rawobj)
nc_assert(t>-1)
return t
def hasXSectAbsorption(self):
"""Whether or not material has absorption cross section available"""
return _rawfct['ncrystal_info_getxsectabsorption'](self._rawobj)>-1
def getXSectAbsorption(self):
"""Absorption cross section in barn (at 2200m/s)"""
t=_rawfct['ncrystal_info_getxsectabsorption'](self._rawobj)
nc_assert(t>-1)
return t
def hasXSectFree(self):
"""Whether or not material has free scattering cross section available"""
return _rawfct['ncrystal_info_getxsectfree'](self._rawobj)>-1
def getXSectFree(self):
"""Saturated (free) scattering cross section in barn in the high-E limit"""
t=_rawfct['ncrystal_info_getxsectfree'](self._rawobj)
nc_assert(t>-1)
return t
def hasStructureInfo(self):
"""Whether or not material has crystal structure information available."""
return bool(_rawfct['ncrystal_info_getstructure'](self._rawobj))
def getStructureInfo(self):
"""Information about crystal structure."""
d=_rawfct['ncrystal_info_getstructure'](self._rawobj)
nc_assert(d)
return d
def _provideAtomData(self,atomindex):
if atomindex >= len(self.__atomdatas):
assert atomindex < 100000#sanity check
self.__atomdatas.extend([None,]*(atomindex+1-len(self.__atomdatas)))
obj = self.__atomdatas[atomindex]
if obj:
return obj
raw_ad = _rawfct['ncrystal_create_atomdata'](self._rawobj,atomindex)
obj = AtomData(raw_ad)
assert obj.isTopLevel()
self.__atomdatas[atomindex] = obj
return obj
class AtomInfo:
"""Class with information about a particular atom in a unit cell, including the
composition of atoms, positions, Debye temperature, and mean-squared-displacements.
"""
def __init__(self,theinfoobj,atomidx,n,dt,msd,pos):
"""For internal usage only."""
assert dt is None or ( isinstance(dt,float) and dt > 0.0 )
assert msd is None or ( isinstance(msd,float) and msd > 0.0 )
self._info_wr = weakref.ref(theinfoobj)
self._atomidx, self.__n, self.__dt, self.__msd = atomidx, n, dt, msd
self.__pos = tuple(pos)#tuple, since it is immutable
self.__atomdata = None
self.__correspDI_wp = None
def correspondingDynamicInfo(self):
"""Get corresponding DynamicInfo object from the same Info object. Returns None if Info object does not have dynamic info available"""
if self.__correspDI_wp is not None:
if self.__correspDI_wp == False:
return None
di = self.__correspDI_wp()
nc_assert(di is not None,"AtomInfo.correspondingDynamicInfo can not be used after associated Info object is deleted")
return di
_info = self._info_wr()
nc_assert(_info is not None,"AtomInfo.correspondingDynamicInfo can not be used after associated Info object is deleted")
if not _info.hasDynamicInfo():
self.__correspDI_wp = False
return None
for di in _info.dyninfos:
if di._atomidx == self._atomidx:
self.__correspDI_wp = weakref.ref(di)
return di
nc_assert(False,"AtomInfo.correspondingDynamicInfo: inconsistent internal state (bug?)")
dyninfo = property(correspondingDynamicInfo)
@property
def atomData(self):
"""Return AtomData object with details about composition and relevant physics constants"""
if self.__atomdata is None:
_info = self._info_wr()
nc_assert(_info is not None,"AtomInfo.atomData can not be used after associated Info object is deleted")
self.__atomdata = _info._provideAtomData(self._atomidx)
assert self.__atomdata.isTopLevel()
return self.__atomdata
@property
def count(self):
"""Number of atoms of this type per unit cell"""
return self.__n
@property
def debyeTemperature(self):
"""The Debye Temperature of the atom (kelvin). Returns None if not available."""
return self.__dt
@property
def meanSquaredDisplacement(self):
"""The mean-squared-displacement of the atom (angstrom^2). Returns None if not
available.
"""
return self.__msd
msd=meanSquaredDisplacement#alias
@property
def positions(self):
"""List (tuple actually) of positions of this atom in the unit cell. Each
entry is given as a tuple of three values, (x,y,z)"""
return self.__pos
@property
def atomIndex(self):
"""Index of atom on this material"""
return self._atomidx
def __str__(self):
l=[str(self.atomData.displayLabel()),str(self.__n)]
if self.__dt is not None:
l.append('DebyeT=%gK'%self.__dt)
if self.__msd is not None:
l.append('MSD=%gAa^2'%self.__msd)
l.append('hasPositions=%s'%('yes' if self.__pos else 'no'))
return 'AtomInfo(%s)'%(', '.join(l))
def hasAtomInfo(self):
"""Whether or no getAtomInfo()/atominfos are available"""
if self.__atominfo is None:
self.__initAtomInfo()
return self.__atominfo[0]
def hasAtomMSD(self):
"""Whether AtomInfo objects have mean-square-displacements available"""
if self.__atominfo is None:
self.__initAtomInfo()
return self.__atominfo[1]
def hasAtomPositions(self):
"""OBSOLETE FUNCTION: AtomInfo objects now always have positions
available. Returns same as hasAtomInfo(). Will be removed in a future
release.
"""
return self.hasAtomInfo()
def hasPerElementDebyeTemperature(self):
"""OBSOLETE FUNCTION which will be removed in a future
release. Please use hasAtomDebyeTemp() instead.
"""
return self.hasAtomDebyeTemp()
def getAtomInfo(self):
"""Get list of AtomInfo objects, one for each atom. Returns empty list if unavailable."""
if self.__atominfo is None:
self.__initAtomInfo()
return self.__atominfo[2]
atominfos = property(getAtomInfo)
def __initAtomInfo(self):
assert self.__atominfo is None
natoms = _rawfct['ncrystal_info_natominfo'](self._rawobj)
hasmsd = bool(_rawfct['ncrystal_info_hasatommsd'](self._rawobj))
hasperelemdt=False
l=[]
for iatom in range(natoms):
atomidx,n,dt,msd = _rawfct['ncrystal_info_getatominfo'](self._rawobj,iatom)
if dt:
hasperelemdt=True
assert hasmsd == (msd>0.0)
pos=[]
for ipos in range(n):
pos.append( _rawfct['ncrystal_info_getatompos'](self._rawobj,iatom,ipos) )
l.append( Info.AtomInfo(self,atomidx, n,
( dt if ( dt and dt>0.0) else None),
(msd if (msd and msd>0.0) else None),
pos) )
self.__atominfo = ( natoms>0, hasmsd, l, hasperelemdt )
def hasHKLInfo(self):
"""Whether or not material has lists of HKL-plane info available"""
return bool(_rawfct['ncrystal_info_nhkl'](self._rawobj)>-1)
def nHKL(self):
"""Number of HKL planes available (grouped into families with similar d-spacing and f-squared)"""
return int(_rawfct['ncrystal_info_nhkl'](self._rawobj))
def hklDLower(self):
"""Lower d-spacing cutoff (angstrom)."""
return float(_rawfct['ncrystal_info_hkl_dlower'](self._rawobj))
def hklDUpper(self):
"""Upper d-spacing cutoff (angstrom)."""
return float(_rawfct['ncrystal_info_hkl_dupper'](self._rawobj))
def hklList(self):
"""Iterator over HKL info, yielding tuples in the format
(h,k,l,multiplicity,dspacing,fsquared)"""
nc_assert(self.hasHKLInfo())
h,k,l,mult,dsp,fsq = _rawfct['ncrystal_info_gethkl_setuppars']()
for idx in range(self.nHKL()):
_rawfct['ncrystal_info_gethkl'](self._rawobj,idx,h,k,l,mult,dsp,fsq)
yield h.value,k.value,l.value,mult.value,dsp.value,fsq.value
def dspacingFromHKL(self, h, k, l):
"""Convenience method, calculating the d-spacing of a given Miller
index. Calling this incurs the overhead of creating a reciprocal lattice
matrix from the structure info."""
return float(_rawfct['ncrystal_info_dspacing_from_hkl'](self._rawobj,h,k,l))
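#Minimal usage sketch of the HKL accessors above (the cfg-string is an assumed
#example):
#
#    info = createInfo('Al_sg225.ncmat;dcutoff=0.5')
#    if info.hasHKLInfo():
#        for h,k,l,mult,dsp,fsq in info.hklList():
#            print( (h,k,l), mult, dsp, fsq )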
class DynamicInfo:
"""Class representing dynamic information (related to inelastic scattering)
about a given atom"""
def __init__(self,theinfoobj,fr,atomidx,tt,key):
"""internal usage only"""
self._info_wr,self.__atomdata = weakref.ref(theinfoobj), None
self.__fraction, self._atomidx, self._key, self.__tt = fr,atomidx,key,tt
self.__correspAtomInfo_wp = None
def correspondingAtomInfo(self):
"""Get corresponding AtomInfo object from the same Info object. Returns None if Info object does not have AtomInfo available"""
if self.__correspAtomInfo_wp is not None:
if self.__correspAtomInfo_wp == False:
return None
ai = self.__correspAtomInfo_wp()
nc_assert(ai is not None,"DynamicInfo.correspondingAtomInfo can not be used after associated Info object is deleted")
return ai
_info = self._info_wr()
nc_assert(_info is not None,"DynamicInfo.correspondingAtomInfo can not be used after associated Info object is deleted")
if not _info.hasAtomInfo():
self.__correspAtomInfo_wp = False
return None
for ai in _info.atominfos:
if ai._atomidx == self._atomidx:
self.__correspAtomInfo_wp = weakref.ref(ai)
return ai
nc_assert(False,"DynamicInfo.correspondingAtomInfo: inconsistent internal state (bug?)")
atominfo = property(correspondingAtomInfo)
@property
def atomIndex(self):
"""Index of atom on this material"""
return self._atomidx
@property
def fraction(self):
"""Atom fraction in material (all fractions must add up to unity)"""
return self.__fraction
@property
def temperature(self):
"""Material temperature (same value as on associated Info object)"""
return self.__tt
@property
def atomData(self):
"""Return AtomData object with details about composition and relevant physics constants"""
if self.__atomdata is None:
_info = self._info_wr()
nc_assert(_info is not None,"DynamicInfo.atomData can not be used after associated Info object is deleted")
self.__atomdata = _info._provideAtomData(self._atomidx)
assert self.__atomdata.isTopLevel()
return self.__atomdata
def _np(self):
_ensure_numpy()
return _np
def _copy_cptr_2_nparray(self,cptr,n):
np = self._np()
return np.copy(np.ctypeslib.as_array(cptr, shape=(n,)))
def __str__(self):
n=self.__class__.__name__
if n.startswith('DI_'):
n=n[3:]
s=', %s'%self._extradescr() if hasattr(self,'_extradescr') else ''
return ('DynamicInfo(%s, fraction=%.4g%%, type=%s%s)'%(self.atomData.displayLabel(),
self.__fraction*100.0,
n,s))
class DI_Sterile(DynamicInfo):
"""Class indicating atoms for which inelastic neutron scattering is absent
or disabled."""
pass
class DI_FreeGas(DynamicInfo):
"""Class indicating atoms for which inelastic neutron scattering should be
modelled as scattering on a free gas."""
pass
class DI_ScatKnl(DynamicInfo):
"""Base class indicating atoms for which inelastic neutron scattering will
be, directly or indirectly, described by a scattering kernel,
S(alpha,beta). This is an abstract class, and derived classes provide
actual access to the kernels.
"""
def __init__(self,theinfoobj,fr,atomidx,tt,key):
"""internal usage only"""
super(Info.DI_ScatKnl, self).__init__(theinfoobj,fr,atomidx,tt,key)
self.__lastknl,self.__lastvdoslux = None,None
def _loadKernel( self, vdoslux = 3 ):
assert isinstance(vdoslux,numbers.Integral) and 0<=vdoslux<=5
vdoslux=int(vdoslux)
if self.__lastvdoslux != vdoslux:
sugEmax,ne,na,nb,eptr,aptr,bptr,sabptr = _rawfct['ncrystal_dyninfo_extract_scatknl'](self._key,vdoslux)
self.__lastvdoslux = vdoslux
res={}
assert ne>=0
res['suggestedEmax'] = float(sugEmax)
res['egrid'] = self._copy_cptr_2_nparray(eptr,ne) if ne > 0 else self._np().zeros(0)
assert na>1 and nb>1
res['alpha'] = self._copy_cptr_2_nparray(aptr,na)
res['beta'] = self._copy_cptr_2_nparray(bptr,nb)
res['sab'] = self._copy_cptr_2_nparray(sabptr,na*nb)
self.__lastknl = res
assert self.__lastknl is not None
return self.__lastknl
class DI_ScatKnlDirect(DI_ScatKnl):
"""Pre-calculated scattering kernel which at most needs a (hidden) conversion to
S(alpha,beta) format before it is available."""
def loadKernel( self ):
"""Prepares and returns the scattering kernel in S(alpha,beta) format"""
return self._loadKernel(vdoslux=3)#vdoslux value not actually used
class DI_VDOS(DI_ScatKnl):
"""Solid state material with a phonon spectrum in the form of a Vibrational
Density Of State (VDOS) parameterisation. This can be expanded into a
full scattering kernel. How luxurious this expansion will be is
controlled by an optional vdoslux parameter in the loadKernel call (must
be integer from 0 to 5)
"""
def __init__(self,theinfoobj,fr,atomidx,tt,key):
"""internal usage only"""
super(Info.DI_VDOS, self).__init__(theinfoobj,fr,atomidx,tt,key)
self.__vdosdata = None
self.__vdosegrid_expanded = None
self.__vdosorig = None
def _extradescr(self):
return 'npts=%i'%len(self.vdosOrigDensity())
def vdosData(self):
"""Access the VDOS as ([egrid_min,egrid_max],vdos_density)"""
if self.__vdosdata is None:
emin,emax,nd,dptr = _rawfct['ncrystal_dyninfo_extract_vdos'](self._key)
vdos_egrid = (emin,emax)
vdos_density = self._copy_cptr_2_nparray(dptr,nd)
self.__vdosdata = (vdos_egrid,vdos_density)
return self.__vdosdata
def __loadVDOSOrig(self):
if self.__vdosorig is None:
neg,egptr,nds,dsptr = _rawfct['ncrystal_dyninfo_extract_vdos_input'](self._key)
self.__vdosorig = ( self._copy_cptr_2_nparray(egptr,neg),
self._copy_cptr_2_nparray(dsptr,nds) )
return self.__vdosorig
def vdosOrigEgrid(self):
"""Access the original un-regularised VDOS energy grid"""
return self.__loadVDOSOrig()[0]
def vdosOrigDensity(self):
"""Access the original un-regularised VDOS energy grid"""
return self.__loadVDOSOrig()[1]
@property
def vdos_egrid(self):
"""Access the VDOS energy grid as [egrid_min,egrid_max]"""
return self.vdosData()[0]
@property
def vdos_egrid_expanded(self):
"""Access the egrid expanded into all actual egrid points"""
if self.__vdosegrid_expanded is None:
_ = self.vdosData()
self.__vdosegrid_expanded = self._np().linspace(_[0][0],_[0][1],len(_[1]))
return self.__vdosegrid_expanded
@property
def vdos_density(self):
"""Access the VDOS density array"""
return self.vdosData()[1]
def loadKernel( self, vdoslux = 3 ):
"""Converts VDOS to S(alpha,beta) kernel with a luxury level given by the vdoslux parameter."""
return self._loadKernel(vdoslux=vdoslux)
class DI_VDOSDebye(DI_ScatKnl):
"""Similarly to DI_VDOS, but instead of using a phonon VDOS spectrum provided
externally, an idealised spectrum is used for lack of better
options. This spectrum is based on the Debye Model, in which the
spectrum rises quadratically with phonon energy below a cutoff value,
kT, where T is the Debye temperature
"""
def __init__(self,theinfoobj,fr,atomidx,tt,key):
"""internal usage only"""
super(Info.DI_VDOSDebye, self).__init__(theinfoobj,fr,atomidx,tt,key)
self.__vdosdata = None
self.__debyetemp = None
self.__vdosegrid_expanded = None
def vdosData(self):
"""Access the idealised VDOS as ([egrid_min,egrid_max],vdos_density)"""
if self.__vdosdata is None:
self.__vdosdata = createVDOSDebye(self.debyeTemperature())
return self.__vdosdata
def debyeTemperature(self):
"""The Debye temperature of the atom"""
if self.__debyetemp is None:
self.__debyetemp = _rawfct['ncrystal_dyninfo_extract_vdosdebye'](self._key)
return self.__debyetemp
def _extradescr(self):
return 'TDebye=%gK'%self.debyeTemperature()
@property
def vdos_egrid(self):
"""Access the VDOS energy grid as [egrid_min,egrid_max]"""
return self.vdosData()[0]
@property
def vdos_egrid_expanded(self):
"""Access the egrid expanded into all actual egrid points"""
if self.__vdosegrid_expanded is None:
_ = self.vdosData()
self.__vdosegrid_expanded = self._np().linspace(_[0][0],_[0][1],len(_[1]))
return self.__vdosegrid_expanded
@property
def vdos_density(self):
"""Access the VDOS density array"""
return self.vdosData()[1]
def loadKernel( self, vdoslux = 3 ):
"""Converts VDOS to S(alpha,beta) kernel with a luxury level given by the
vdoslux parameter, which is similar to the vdoslux parameter used
in DI_VDOS. Notice that the vdoslux parameter specified here on
DI_VDOSDebye will be reduced internally by 3 (but not less than
0), since the Debye model is anyway only a crude approximation
and it accordingly does not need the same level of precise
treatment as a full externally specified VDOS.
"""
return self._loadKernel(vdoslux=vdoslux)
def hasDynamicInfo(self):
"""Whether or not dynamic information for each atom is present"""
return int(_rawfct['ncrystal_info_ndyninfo'](self._rawobj))>0 if self.__dyninfo is None else bool(self.__dyninfo)
def getDynamicInfoList(self):
"""Get list of DynamicInfo objects (if available). One for each atom."""
if self.__dyninfo is None:
self.__dyninfo = []
for idx in range(int(_rawfct['ncrystal_info_ndyninfo'](self._rawobj))):
key = (self._rawobj,idx)
fr,tt,atomidx,ditype = _rawfct['ncrystal_dyninfo_base'](key)
args=(self,fr,atomidx,tt,key)
if ditype==0:
di = Info.DI_Sterile(*args)
elif ditype==1:
di = Info.DI_FreeGas(*args)
elif ditype==2:
di = Info.DI_ScatKnlDirect(*args)
elif ditype==3:
di = Info.DI_VDOS(*args)
elif ditype==4:
di = Info.DI_VDOSDebye(*args)
else:
raise AssertionError('Unknown DynInfo type id (%s)'%str(ditype))
self.__dyninfo += [ di ]
return self.__dyninfo
dyninfos = property(getDynamicInfoList)
def getAllCustomSections(self):
"""Custom information for which the core NCrystal code does not have any
specific treatment. This is usually intended for usage by developers adding new
experimental physics models."""
if self.__custom is None:
self.__custom = _rawfct['ncrystal_info_getcustomsections'](self._rawobj)
return self.__custom
customsections = property(getAllCustomSections)
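#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself). It shows how an Info object is typically queried for basic
#properties and per-atom dynamic info, assuming the standard data library
#shipped with NCrystal is available so that "Al_sg225.ncmat" can be loaded.
def _example_query_info():
    """Hypothetical helper illustrating typical Info usage."""
    info = createInfo('Al_sg225.ncmat;dcutoff=0.5')
    if info.hasTemperature():
        print( 'Temperature [K]  : %g'%info.getTemperature() )
    print( 'Density [g/cm^3] : %g'%info.getDensity() )
    #One DynamicInfo object is available per atom role in the material:
    for di in info.dyninfos:
        print( '  %s has fraction %.4g%%'%(di.atomData.displayLabel(),di.fraction*100.0) )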
class CalcBase(RCBase):
"""Base class for all calculators"""
def getCalcName(self):
"""Calculator name"""
return _cstr2str(_rawfct['ncrystal_name'](self._rawobj))
@property
def name(self):
"""Calculator name as property"""
return self.getCalcName()
class Process(CalcBase):
"""Base class for calculations of processes in materials.
Note that kinetic energies are in electronvolt and direction vectors are
tuples of 3 numbers.
"""
def domain(self):
"""Domain where process has non-vanishing cross section.
Returns the domain as (ekin_low,ekin_high). Outside this range of
neutron kinetic energy, the process can be assumed to have vanishing
cross sections. Thus, processes present at all energies will return
(0.0,infinity).
"""
return _rawfct['ncrystal_domain'](self._rawobj)
def isNonOriented(self):
"""opposite of isOriented()"""
return bool(_rawfct['ncrystal_isnonoriented'](self._rawobj))
def isOriented(self):
"""Check if process is oriented and results depend on the incident direction of the neutron"""
return not self.isNonOriented()
def crossSection( self, ekin, direction ):
"""Access cross sections."""
return _rawfct['ncrystal_crosssection'](self._rawobj,ekin, direction)
def crossSectionNonOriented( self, ekin, repeat = None ):
"""Access cross sections (should not be called for oriented processes).
For efficiency it is possible to provide the ekin parameter as a numpy
array of numbers and get a corresponding array of cross sections
back. Likewise, the repeat parameter can be set to a positive number,
causing the ekin value(s) to be reused that many times and a numpy array
with results returned.
"""
return _rawfct['ncrystal_crosssection_nonoriented'](self._rawobj,ekin,repeat)
def xsect(self,ekin=None,direction=None,wl=None,repeat=None):
"""Convenience function which redirects calls to either crossSectionNonOriented
or crossSection depending on whether or not a direction is given. It can
also accept wavelengths instead of kinetic energies via the wl
parameter. The repeat parameter is currently only supported when
direction is not provided.
"""
ekin = Process._parseekin( ekin, wl )
if direction is None:
return self.crossSectionNonOriented( ekin, repeat )
else:
if repeat is None:
return self.crossSection( ekin, direction )
else:
raise NCBadInput('The repeat parameter is not currently supported when the direction parameter is also provided.')
@staticmethod
def _parseekin(ekin,wl):
if wl is None:
if ekin is None:
raise NCBadInput('Please provide either one of the "ekin" or "wl" parameters.')
return ekin
else:
if ekin is not None:
raise NCBadInput('Do not provide both "ekin" and "wl" parameters')
return wl2ekin(wl)
class Absorption(Process):
"""Base class for calculations of absorption in materials"""
def __init__(self, cfgstr):
"""create Absorption object based on cfg-string (same as using createAbsorption(cfgstr))"""
if isinstance(cfgstr,tuple) and len(cfgstr)==2 and cfgstr[0]=='_rawobj_':
#Cloning:
rawobj_abs = cfgstr[1]
else:
rawobj_abs = _rawfct['ncrystal_create_absorption'](_str2cstr(cfgstr))
self._rawobj_abs = rawobj_abs
rawobj_proc = _rawfct['ncrystal_cast_abs2proc'](rawobj_abs)
super(Absorption, self).__init__(rawobj_proc)
def clone(self):
"""Clone object. The clone will be using the same physics models and sharing any
read-only data with the original, but will be using its own private copy of any
mutable caches. All in all, this means that the objects are safe to use
concurrently in multi-threaded programming, as long as each thread gets
its own clone. Return value is the new Absorption object.
"""
newrawobj = _rawfct['ncrystal_clone_absorption'](self._rawobj_abs)
return Absorption( ('_rawobj_',newrawobj) )
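#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): basic usage of the Absorption class above, assuming the
#standard "Al_sg225.ncmat" data file is available.
def _example_absorption_usage():
    """Hypothetical helper illustrating Absorption usage."""
    absn = createAbsorption('Al_sg225.ncmat')
    #Absorption processes are non-oriented, so the xsect convenience method can
    #be called with just a neutron wavelength (angstrom), returning barn/atom:
    return absn.xsect(wl=1.8)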
class Scatter(Process):
"""Base class for calculations of scattering in materials.
Note that kinetic energies are in electronvolt and direction vectors are
tuples of 3 numbers.
"""
def __init__(self, cfgstr):
"""create Scatter object based on cfg-string (same as using createScatter(cfgstr))"""
if isinstance(cfgstr,tuple) and len(cfgstr)==2 and cfgstr[0]=='_rawobj_':
#Already got an ncrystal_scatter_t object:
self._rawobj_scat = cfgstr[1]
else:
self._rawobj_scat = _rawfct['ncrystal_create_scatter'](_str2cstr(cfgstr))
rawobj_proc = _rawfct['ncrystal_cast_scat2proc'](self._rawobj_scat)
super(Scatter, self).__init__(rawobj_proc)
def clone(self,rng_stream_index=None,for_current_thread=False):
"""Clone object. The clone will be using the same physics models and sharing any
read-only data with the original, but will be using its own private copy
of any mutable caches and will get an independent RNG stream. All in
all, this means that the objects are safe to use concurrently in
multi-threaded programming, as long as each thread gets its own
clone. Return value is the new Scatter object.
If greater control over RNG streams are needed, it is optionally allowed
to either set rng_stream_index to a non-negative integral value, or set
for_current_thread=True.
If rng_stream_index is set, the resulting object will use a specific
rngstream index. All objects with the same index will share the same
RNG state, so a sensible strategy is to use the same index for all
scatter objects which are to be used in the same thread.
If setting for_current_thread=True, the resulting object will use a
specific rngstream which has been set aside for the current thread. Thus
this function can be called from a given work-thread, in order to get a
thread-safe scatter handle, with all objects cloned within the same
thread sharing RNG state.
"""
if rng_stream_index is not None:
if for_current_thread:
raise NCBadInput('Scatter.clone(..): do not set both rng_stream_index and for_current_thread parameters')
if not isinstance(rng_stream_index, numbers.Integral) or not 0 <= rng_stream_index <= 4294967295:
raise NCBadInput('Scatter.clone(..): rng_stream_index must be integral and in range [0,4294967295]')
newrawobj = _rawfct['ncrystal_clone_scatter_rngbyidx'](self._rawobj_scat,int(rng_stream_index))
elif for_current_thread:
newrawobj = _rawfct['ncrystal_clone_scatter_rngforcurrentthread'](self._rawobj_scat)
else:
newrawobj = _rawfct['ncrystal_clone_scatter'](self._rawobj_scat)
return Scatter( ('_rawobj_',newrawobj) )
def sampleScatter( self, ekin, direction, repeat = None ):
"""Randomly generate scatterings.
Assuming a scattering took place, generate final state of neutron based
on current kinetic energy and direction. Returns
tuple(ekin_final,direction_final) where direction_final is itself a tuple
(ux,uy,uz). The repeat parameter can be set to a positive number,
causing the scattering to be sampled that many times and numpy arrays
with results returned.
"""
return _rawfct['ncrystal_samplesct'](self._rawobj_scat,ekin,direction,repeat)
def sampleScatterIsotropic( self, ekin, repeat = None ):
"""Randomly generate scatterings (should not be called for oriented processes).
Assuming a scattering took place, generate final state of
neutron. Returns tuple(ekin_final,mu) where mu is the cosine of the
scattering angle. For efficiency it is possible to provide the ekin
parameter as a numpy array of numbers and get corresponding arrays of
angles and energy transfers back. Likewise, the repeat parameter can be
set to a positive number, causing the ekin value(s) to be reused that
many times and numpy arrays with results returned.
"""
return _rawfct['ncrystal_samplesct_iso'](self._rawobj_scat,ekin,repeat)
def generateScattering( self, ekin, direction, repeat = None ):
"""WARNING: Deprecated method. Please use the sampleScatter method instead.
Randomly generate scatterings.
Assuming a scattering took place, generate energy transfer (delta_ekin)
and new neutron direction based on current kinetic energy and direction
and return tuple(new_direction,delta_ekin). The repeat parameter can be
set to a positive number, causing the scattering to be sampled that many
times and numpy arrays with results returned.
"""
return _rawfct['ncrystal_genscatter'](self._rawobj_scat,ekin,direction,repeat)
def generateScatteringNonOriented( self, ekin, repeat = None ):
"""WARNING: Deprecated method. Please use the sampleScatterIsotropic method instead.
Randomly generate scatterings (should not be called for oriented processes).
Assuming a scattering took place, generate energy transfer (delta_ekin)
and scatter angle in radians and return tuple(scatter_angle,delta_ekin)
(this method should not be invoked on oriented processes). For
efficiency it is possible to provide the ekin parameter as a numpy array
of numbers and get corresponding arrays of angles and energy transfers
back. Likewise, the repeat parameter can be set to a positive number,
causing the ekin value(s) to be reused that many times and numpy arrays
with results returned.
"""
return _rawfct['ncrystal_genscatter_nonoriented'](self._rawobj_scat,ekin,repeat)
def scatter(self,ekin=None,direction=None,wl=None,repeat=None):
"""Convenience function which redirects calls to either
sampleScatterIsotropic or sampleScatter depending on whether
or not a direction is given. It can also accept wavelengths instead of
kinetic energies via the wl parameter.
"""
ekin = Process._parseekin( ekin, wl )
return self.sampleScatterIsotropic( ekin, repeat ) if direction is None else self.sampleScatter( ekin, direction, repeat )
def genscat(self,ekin=None,direction=None,wl=None,repeat=None):
"""WARNING: Deprecated method. Please use the "scatter" method instead.
Convenience function which redirects calls to either
generateScatteringNonOriented or generateScattering depending on whether
or not a direction is given. It can also accept wavelengths instead of
kinetic energies via the wl parameter.
"""
ekin = Process._parseekin( ekin, wl )
return self.generateScatteringNonOriented( ekin, repeat ) if direction is None else self.generateScattering( ekin, direction, repeat )
def rngSupportsStateManipulation(self):
"""Query whether associated RNG stream supports state manipulation"""
return bool(_rawfct['ncrystal_rngsupportsstatemanip_ofscatter'](self._rawobj_scat))
def getRNGState(self):
"""Get current RNG state (as printable hex-string with RNG type info
embedded). This function returns None if RNG stream does not support
state manipulation
"""
return _rawfct['nc_getrngstate_scat'](self._rawobj_scat)
def setRNGState(self,state):
"""Set current RNG state.
Note that setting the rng state will affect all objects sharing the
RNG stream with the given scatter object (and those subsequently cloned
from any of those).
Note that if the provided state originates in (the current version
of) NCrystal's builtin RNG algorithm, it can always be used here,
even if the current RNG uses a different algorithm (it will simply be
replaced). Otherwise, a mismatch of RNG stream algorithms will result
in an error.
"""
_rawfct['ncrystal_setrngstate_ofscatter']( self._rawobj_scat,
_str2cstr(state) )
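#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): using Scatter.clone(..) to prepare thread-safe handles,
#assuming the standard "Al_sg225.ncmat" data file is available.
def _example_scatter_clones_for_threads( nthreads = 4 ):
    """Hypothetical helper creating one Scatter clone per worker thread."""
    master = createScatter('Al_sg225.ncmat')
    #Each worker thread should get its own clone. The clones share physics
    #models and read-only data, but not mutable caches, and each gets its own
    #RNG stream (here selected explicitly by index):
    return [ master.clone( rng_stream_index = i ) for i in range(nthreads) ]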
def createInfo(cfgstr):
"""Construct Info object based on provided configuration (using available factories)"""
return Info(cfgstr)
def createScatter(cfgstr):
"""Construct Scatter object based on provided configuration (using available factories)"""
return Scatter(cfgstr)
def createScatterIndependentRNG(cfgstr,seed = 0):
"""Construct Scatter object based on provided configuration (using available
factories) and with its own independent RNG stream (using the builtin RNG
generator and the provided seed)"""
rawobj = _rawfct['ncrystal_create_scatter_builtinrng'](_str2cstr(cfgstr),seed)
return Scatter(('_rawobj_',rawobj))
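#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): reproducible sampling via an independent, seeded RNG
#stream, assuming the standard "Al_sg225.ncmat" data file is available.
def _example_reproducible_sampling( seed = 12345 ):
    """Hypothetical helper sampling a few scatterings reproducibly."""
    sc = createScatterIndependentRNG( 'Al_sg225.ncmat', seed )
    #Re-running with the same seed gives the same (ekin_final,mu) sequence:
    return [ sc.scatter(wl=1.8) for _ in range(5) ]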
def createAbsorption(cfgstr):
"""Construct Absorption object based on provided configuration (using available factories)"""
return Absorption(cfgstr)
def directMultiCreate( data, cfg_params='', *, dtype='',
doInfo = True, doScatter = True, doAbsorption = True ):
"""Convenience function which creates Info, Scatter, and Absorption objects
directly from a data string rather than an on-disk or in-memory
file. Such usage obviously precludes proper caching behind the scenes,
and is intended for scenarios where the same data should not be used
repeatedly.
"""
if not dtype and not data.startswith('NCMAT') and 'NCMAT' in data:
if data.strip().startswith('NCMAT'):
raise NCBadInput('NCMAT data must have "NCMAT" as the first 5 characters (must not be preceded by whitespace)')
rawi,raws,rawa = _rawfct['multicreate_direct'](data,dtype,cfg_params,doInfo,doScatter,doAbsorption)
info = Info( ('_rawobj_',rawi) ) if rawi else None
scatter = Scatter( ('_rawobj_',raws) ) if raws else None
absorption = Absorption( ('_rawobj_',rawa) ) if rawa else None
class MultiCreated:
def __init__(self,i,s,a):
self.__i,self.__s,self.__a = i,s,a
@property
def info(self):
"""Info object (None if not present)."""
return self.__i
@property
def scatter(self):
"""Scatter object (None if not present)."""
return self.__s
@property
def absorption(self):
"""Absorption object (None if not present)."""
return self.__a
def __str__(self):
fmt = lambda x : str(x) if x else 'n/a'
return 'MultiCreated(Info=%s, Scatter=%s, Absorption=%s)'%(fmt(self.__i),
fmt(self.__s),
fmt(self.__a))
return MultiCreated(info,scatter,absorption)
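#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): feeding raw data directly to directMultiCreate. To avoid
#hard-coding NCMAT syntax here, the raw content of a standard data file is
#simply loaded first (assumes "Al_sg225.ncmat" is available).
def _example_directmulticreate():
    """Hypothetical helper illustrating directMultiCreate."""
    rawncmat = createTextData('Al_sg225.ncmat').rawData
    mc = directMultiCreate( rawncmat, 'dcutoff=0.5' )
    return mc.info, mc.scatter, mc.absorption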
def registerInMemoryFileData(virtual_filename,data):
"""Register in-memory file data. This needs a "filename" and the content of this
virtual file. After registering such in-memory "files", they can be used
as file names in cfg strings or MatCfg objects. Registering the same
filename more than once will simply override the content.
As a special case, data can be specified as "ondisk://<path>",
which will instead create a virtual alias for an on-disk file.
"""
if ( isinstance(data,str) and data.startswith('ondisk://')):
data = 'ondisk://'+str(pathlib.Path(data[9:]).resolve())
_rawfct['ncrystal_register_in_mem_file_data'](virtual_filename,data)
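#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): registering a virtual in-memory file and then referring
#to it by name in a cfg-string. The content is copied from a standard data
#file (assumes "Al_sg225.ncmat" is available); the virtual name
#"myvirtual.ncmat" is just an arbitrary example.
def _example_register_virtual_file():
    """Hypothetical helper illustrating registerInMemoryFileData."""
    content = createTextData('Al_sg225.ncmat').rawData
    registerInMemoryFileData( 'myvirtual.ncmat', content )
    #The virtual filename can now be used like any other input file name:
    return createInfo('myvirtual.ncmat;dcutoff=0.5')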
#numpy compatible wl2ekin and ekin2wl
_c_wl2ekin = float(_rawfct['ncrystal_wl2ekin'](1.0))
_c_ekin2wl = float(_rawfct['ncrystal_ekin2wl'](1.0))
def wl2ekin(wl):
"""Convert neutron wavelength in Angstrom to kinetic energy in electronvolt"""
if _np and hasattr(wl,'__len__'):
#reciprocals without zero division:
wlnonzero = wl != 0.0
wlinv = 1.0 / _np.where( wlnonzero, wl, 1.0)#fallback 1.0 won't be used
return _c_wl2ekin * _np.square(_np.where( wlnonzero, wlinv, _np.inf))
else:
return _rawfct['ncrystal_wl2ekin'](wl)
def ekin2wl(ekin):
"""Convert neutron kinetic energy in electronvolt to wavelength in Angstrom"""
if _np and hasattr(ekin,'__len__'):
#reciprocals without zero division:
ekinnonzero = ekin != 0.0
ekininv = 1.0 / _np.where( ekinnonzero, ekin, 1.0)#fallback 1.0 won't be used
return _c_ekin2wl * _np.sqrt(_np.where( ekinnonzero, ekininv, _np.inf))
else:
return _rawfct['ncrystal_ekin2wl'](ekin)
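#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): wl2ekin and ekin2wl are each other's inverses and accept
#scalars as well as (when numpy is available) numpy arrays.
def _example_wl_ekin_roundtrip():
    """Hypothetical helper demonstrating a wl2ekin/ekin2wl round trip."""
    wl = 1.8#angstrom
    ekin = wl2ekin(wl)#eV
    assert abs( ekin2wl(ekin) - wl ) < 1e-9
    return ekin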
def clearCaches():
"""Clear various caches"""
_rawfct['ncrystal_clear_caches']()
def clearInfoCaches():
"""Deprecated. Does the same as clearCaches()"""
clearCaches()
def disableCaching():
"""Disable caching of Info objects in factory infrastructure"""
_rawfct['ncrystal_disable_caching']()
def enableCaching():
"""Enable caching of Info objects in factory infrastructure"""
_rawfct['ncrystal_enable_caching']()
def hasFactory(name):
"""Check if a factory of a given name exists"""
return bool(_rawfct['ncrystal_has_factory'](_str2cstr(name)))
#Helper function, for scripts creating ncmat files:
def formatVectorForNCMAT(name,values):
"""Utility function for help in python scripts composing .ncmat files,
transforming an array of values into a properly formatted text string,
with word-wrapping, usage of <val>r<n> syntax, etc. Returns the resulting
(possibly multi-line) text string, ready for inclusion in .ncmat files.
"""
v,res,l,indent,efmt_prev,efmt_nrepeat=values.flatten(),' %s'%name,'','',None,0
if not len(v):
return res
ilast,nv=len(v)-1,len(v)
def _fmtnum(num):
_ = '%g'%num if num else '0'#avoid 0.0, -0, etc.
if _.startswith('0.'):
_=_[1:]
return _
i=0
while i<nv:
fmt_vi=_fmtnum(v[i])
#check if is repeated:
irepeat=i
while irepeat+1<nv:
if _fmtnum(v[irepeat+1])==fmt_vi:
irepeat+=1
else:
break
#Write:
s = ' %sr%i'%(fmt_vi,1+irepeat-i) if irepeat>i else ' %s'%fmt_vi
l+=(s if l else (indent+s))
i=irepeat+1#advance
if i>=nv or len(l)>80:
#Flush line
res += '%s\n'%l
l,indent='',' '
return res
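#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): formatting a numpy array with formatVectorForNCMAT. The
#field name "egrid" is merely an example; requires numpy.
def _example_format_vector():
    """Hypothetical helper demonstrating formatVectorForNCMAT."""
    import numpy as np
    values = np.array( [0.1]*10 + [0.25, 0.35] )
    #Repeated values get compressed via the <val>r<n> syntax:
    return formatVectorForNCMAT( 'egrid', values )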
#Accept custom random generator:
def setDefaultRandomGenerator(rg, keepalive=True):
"""Set the default random generator for CalcBase classes.
Note that this can only change the random generator for those CalcBase
instances that did not already use random numbers. Default generator when
using the NCrystal python interface is the scientifically sound
random.random stream from the python standard library (a Mersenne Twister).
To ensure Python does not clean up the passed function object prematurely,
the NCrystal python module will keep a reference to it eternally. To avoid
this, call with keepalive=False. But in that case the caller is responsible
for keeping a reference to the object for as long as NCrystal might use it
to generate random numbers.
"""
_rawfct['ncrystal_setrandgen'](rg)
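#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): installing a custom random stream. Any callable returning
#uniform numbers in [0,1) will do; here the standard library generator is used
#with a fixed seed.
def _example_set_custom_rng( seed = 123456 ):
    """Hypothetical helper demonstrating setDefaultRandomGenerator."""
    import random
    rng = random.Random(seed)
    setDefaultRandomGenerator( rng.random )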
__atomdb={}
def atomDB(Z,A=None,throwOnErrors=True):
"""Access internal database with data for isotopes and natural elements.
If A is provided, both A and Z must be integers, thus defining a specific isotope.
If Z is an integer and A is 0 or None, the corresponding natural element is provided.
Finally, the function can be called with a string identifying either natural
elements or isotopes: atomDB("Al"), atomDB("He3"), ...
In all cases, in case of errors or missing entries in the database, either
an NCBadInput exception is thrown (throwOnErrors==True) or None is
returned (when throwOnErrors==False).
"""
global __atomdb
if isinstance(Z,numbers.Integral):
Z=int(Z)
key=(Z,int(A or 0))
strkey=False
else:
assert A is None,"Do not supply two arguments unless the first argument is an integer"
assert isinstance(Z,str),"The first argument to the function must either be of int or str type"
key=Z
strkey=True
obj=__atomdb.get(key,None)
if obj:
return obj
if strkey:
rawatomdata=_rawfct['ncrystal_create_atomdata_fromdbstr'](_str2cstr(key))
else:
rawatomdata=_rawfct['ncrystal_create_atomdata_fromdb'](*key)
if not _rawfct['ncrystal_valid'](rawatomdata):
if not throwOnErrors:
return None
if strkey:
s='key="%s"'%key
else:
if key[1]==0:
s='Z=%i'%key[0]
else:
s='Z=%i,A=%i'%key
raise NCBadInput('atomDB: Could not find entry for key (%s)'%s)
ad = AtomData(rawatomdata)
assert ad.isElement()
Z,A = ad.Z(), (ad.A() if ad.isSingleIsotope() else 0)
keys=[ (Z,A)]
if Z==1 and A==2:
keys+=['H2','D']
elif Z==1 and A==3:
keys+=['H3','T']
else:
assert ad.isNaturalElement() or ad.isSingleIsotope()
keys += [ ad.description(False) ]#guaranteed to give just symbol for natelem/singleisotope!
assert key in keys#Should always be true unless we forgot some keys above
assert ad.description(False) in keys#Should also be true, given guarantees for AtomData::description(false)
for k in keys:
__atomdb[k] = ad
return ad
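#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): looking up entries in the internal atom database, either
#by string key or by (Z,A) numbers.
def _example_atomdb_lookup():
    """Hypothetical helper demonstrating atomDB lookups."""
    al = atomDB('Al')#natural aluminium
    he3 = atomDB(2,3)#the He-3 isotope
    return al.description(False), he3.A()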
def iterateAtomDB(objects=True):
"""Iterate over all entries in the internal database with data for isotopes and
natural elements. If objects=True, AtomData objects are returned. If
objects=False, (Z,A) values are returned (A=0 indicates a natural
element)."""
for z,a in _rawfct['atomdb_getall_za']():
yield atomDB(z,a) if objects else (int(z),int(a))
class FileListEntry:
"""Entry in list returned by browseFiles."""
def __init__(self,*,name,source,factName,priority):
self.__n = name or None
self.__f = factName or None
self.__p = int(priority) if priority.isdigit() else priority
self.__s = source or None
@property
def name(self):
"""The (possibly virtual) filename needed to select this entry"""
return self.__n
@property
def source(self):
"""Description (such as the parent directory in case of on-disk files)"""
return self.__s
@property
def factName(self):
"""Name of the factory delivering entry."""
return self.__f
@property
def priority(self):
"""The priority value of the entry (important in case multiple factories
deliver content with the same name). Can be 'Unable',
'OnlyOnExplicitRequest' or an integer priority value (entries with
higher values will be preferred).
"""
return self.__p
@property
def fullKey(self):
"""The string '%s::%s'%(self.factName,self.name), which can be used to
explicitly request this entry without interference from similarly
named entries in other factories.
"""
return '%s::%s'%(self.__f,self.__n)
def __str__(self):
l=[]
if self.__n:
l+=['name=%s'%self.__n]
if self.__s:
l+=['source=%s'%self.__s]
if self.__f:
l+=['factory=%s'%self.__f]
l+=['priority=%s'%self.__p]
return 'FileListEntry(%s)'%(', '.join(l))
def browseFiles(dump=False,factory=None):
"""Browse list of available input files (virtual or on-disk). The list is not
guaranteed to be exhaustive, but will usually include all supported files
in the most obvious locations (the NCrystal data directory and other
directories of the standard search path, the current working directory,
virtual files embedded in the NCrystal library, and files registered
dynamically).
Returns a list of FileListEntry objects. If the dump flag is set to True,
the list will also be printed to stdout in a human readable form.
Setting factory parameter will only return / print entries from the
factory of that name.
"""
res=[]
def sortkey(e):
praw = e.priority
if praw=='Unable':
p=-2
elif isinstance(praw,int):
p=praw
else:
assert praw=='OnlyOnExplicitRequest'
p=-1
return (-p, e.factName,e.source,e.name)
for n,s,f,p in _rawfct['ncrystal_get_filelist']():
res.append( FileListEntry(name=n,source=s,factName=f,priority=p) )
res.sort(key=sortkey)
if dump:
seen_names=set()
groupfct = lambda e : (e.factName,e.source,e.priority)
lastgroup = None
pending=[]
def print_pending():
if not pending:
return
if factory is not None and lastgroup[0]!=factory:
pending.clear()
return
n=len(pending) - 1
pending[0] = pending[0]%('%s files'%n if n!=1 else '%s file'%n )
for line in pending:
print (line)
pending.clear()
for e in res:
group = groupfct(e)
if lastgroup != group:
print_pending()
lastgroup = group
pending.append('==> %%s from "%s" (%s, priority=%s):'%group)
hidden = e.name in seen_names
seen_names.add(e.name)
extra=''
prname=e.name
if e.priority=='OnlyOnExplicitRequest':
prname='%s::%s'%(e.factName,e.name)
elif hidden:
extra=' <--- Hidden by higher priority entries (select as "%s::%s")'%(e.factName,e.name)
pending.append( ' %s%s'%(prname,extra))
print_pending()
if factory is None:
return res
return [e for e in res if e.factName==factory]
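#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): programmatically selecting file list entries delivered by
#a particular factory (the factory name "stdlib" is assumed here, as also seen
#in cfg-strings like "stdlib::Al_sg225.ncmat" elsewhere in this file).
def _example_list_stdlib_files():
    """Hypothetical helper demonstrating browseFiles."""
    return [ e.fullKey for e in browseFiles(factory='stdlib') ]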
class TextData:
"""Text data accessible line by line, with associated meta-data. This always
include a UID (useful for comparison and downstream caching purposes) and
the data type (e.g. "ncmat"). Optionally available is the last known
on-disk path to a file with the same content, which might be useful in
case the data needs to be passed to 3rd party software which can only
work with physical files.
Text data objects are easily line-iterable, providing lines (without
newline characters): for line in mytextdata: ... Of course, the raw
underlying data buffer can also be accessed if needed.
The raw data must be ASCII or UTF-8 text, with line endings \n=LF=0x0A
(Unix) or \r\n=CR+LF=0x0D0A (Windows/DOS). Other encodings might work
only if 0x00, 0x0A, 0x0D bytes do not occur in them outside of line
endings.
Notice that ancient pre-OSX Mac line-endings \r=CR=0x0D are not
supported, and iterators will actually throw an error upon encountering
them. This is done on purpose, since files with \r on unix might hide
content when inspected in a terminal, which can be either confusing, a
potential security issue, or both.
"""
def __init__(self,name):
"""create TextData object based on string (same as using createTextData(name))"""
l=_rawfct['nc_gettextdata'](name)
assert len(l)==5
self.__rd = l[0]
self.__uid = int(l[1])
self.__descr = l[2]
self.__datatype= l[3]
self.__rp = pathlib.Path(l[4]) if l[4] else None
@property
def uid(self):
"""Unique identifier. Objects only have identical UID if all contents and
metadata are identical."""
return self.__uid
@property
def dataType(self):
"""Data type ("ncmat", "nxs", ...)."""
return self.__datatype
@property
def description(self):
"""Short description. This might for instance be a filename."""
return self.__descr
@property
def rawData(self):
"""Raw access to underlying data."""
return self.__rd
@property
def lastKnownOnDiskLocation(self):
"""Last known on-disk location (returns None if unavailable). Note that there
is no guarantee against the file having been removed or modified since the
TextData object was created.
"""
return self.__rp
def __str__(self):
return 'TextData(%s, uid=%i, %i chars)'%(self.__descr,self.__uid,len(self.__rd))
def __iter__(self):
"""Line-iteration, yielding lines without terminating newline characters"""
from io import StringIO
def chomp(x):
return x[:-2] if x.endswith('\r\n') else (x[:-1] if x.endswith('\n') else x)
for l in StringIO(self.__rd):
yield chomp(l)
def createTextData(name):
"""creates TextData objects based on requested name"""
return TextData(name)
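#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): line-wise iteration over a TextData object (assumes the
#standard "Al_sg225.ncmat" data file is available).
def _example_textdata_iteration():
    """Hypothetical helper demonstrating TextData usage."""
    td = createTextData('Al_sg225.ncmat')
    n_lines = sum( 1 for _line in td )#iterate lines without newline characters
    return td.dataType, td.uid, n_lines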
def getFileContents(name):
"""OBSOLETE FUNCTION: Use createTextData(..).rawData instead."""
return createTextData(name).rawData
def addCustomSearchDirectory(dirpath):
"""Register custom directories to be monitored for data files."""
_rawfct['ncrystal_add_custom_search_dir'](_str2cstr(str(pathlib.Path(dirpath).resolve())))
def removeCustomSearchDirectories():
"""Remove all search directories added with addCustomSearchDirectory."""
_rawfct['ncrystal_remove_custom_search_dirs']()
def removeAllDataSources():
"""Disable all standard data sources, remove all TextData factories as well,
clear all registered virtual files and custom search directories. Finish
by calling the global clearCaches function ("Ripley: I say we take off and
nuke the entire site from orbit. It's the only way to be sure.").
"""
_rawfct['ncrystal_remove_all_data_sources']()
def enableAbsolutePaths( enable = True ):
"""Whether or not absolute file paths are allowed."""
_rawfct['ncrystal_enable_abspaths'](1 if enable else 0)
def enableRelativePaths( enable = True ):
"""Whether or not paths relative to current working directory are allowed."""
_rawfct['ncrystal_enable_relpaths'](1 if enable else 0)
def enableStandardSearchPath( enable = True ):
"""Whether or not the standard search path should be searched. This standard
search path is by default searched *after* the standard data library,
and is built by concatenating entries in the NCRYSTAL_DATA_PATH
environment variable with entries in the compile time definition of the
same name (in that order).
"""
_rawfct['ncrystal_enable_stdsearchpath'](1 if enable else 0)
def enableStandardDataLibrary( enable = True, dirpath_override = None ):
"""Whether or not the standard data library shipped with NCrystal should be
searched.
Unless NCrystal is configured to have the standard data library embedded
into the binary at compilation time, the location (directory path) of the
standard data library is taken from the NCRYSTAL_DATADIR environment
variable. If the environment variable is not set, the location is taken
from the compile time definition of the same name. If neither is set, and
data was not embedded at compilation time, the standard data library will
be disabled by default and the location must be provided before it can be
enabled. In all cases, the location can be overridden if explicitly
provided by the user as the second parameter to this function.
"""
d = _str2cstr(str(pathlib.Path(dirpath_override).resolve())) if dirpath_override else ctypes.cast(None, ctypes.c_char_p)
_rawfct['ncrystal_enable_stddatalib'](1 if enable else 0, d)
def browsePlugins(dump=False):
"""Return list of plugins [(pluginname,filename,plugintype),...].
If the dump flag is set to True, the list will not be returned. Instead it
will be printed to stdout.
"""
l=_rawfct['ncrystal_get_pluginlist']()
if not dump:
return l
print('NCrystal has %i plugins loaded.'%len(l))
for i in range(len(l)):
pluginname, filename, plugintype = l[i]
print('==> %s (%s%s)'%(pluginname,plugintype,
' from %s'%filename if filename else ''))
def debyeIsotropicMSD( *, debye_temperature, temperature, mass ):
"""Estimate (isotropic, harmonic) atomic mean-squared-displacement using the
Debye Model (eq. 11+12 in <NAME>, Phys. Rev. Vol98 num 6,
1955). Unit of returned MSD value is Aa^2. Input temperatures should be
in Kelvin, and input atomic mass should be in amu.
"""
return float(_rawfct['ncrystal_debyetemp2msd'](debye_temperature, temperature, mass))
def debyeTempFromIsotropicMSD( *, msd, temperature, mass ):
"""The inverse of debyeIsotropicMSD (implemented via root-finding), allowing to
get the Debye temperature which will give rise to a given
mean-squared-displacement.
"""
return float(_rawfct['ncrystal_msd2debyetemp'](msd, temperature, mass))
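#Illustrative usage sketch (added for documentation only; not used anywhere by
#the module itself): the two functions above are numerical inverses of each
#other. The numbers used (Debye temperature 410K, temperature 293.15K, mass
#26.98 amu) are merely illustrative values.
def _example_debye_msd_roundtrip():
    """Hypothetical helper checking the MSD <-> Debye temperature round trip."""
    msd = debyeIsotropicMSD( debye_temperature=410.0, temperature=293.15, mass=26.98 )
    tdebye = debyeTempFromIsotropicMSD( msd=msd, temperature=293.15, mass=26.98 )
    assert abs( tdebye - 410.0 ) < 1.0#root-finding, so only approximately exact
    return msd, tdebye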
def test():
"""Quick test that NCrystal works as expected in the current installation."""
_actualtest()
print("Tests completed succesfully")
def _actualtest():
def require(b):
if not b:
raise RuntimeError('check failed')
def flteq(a,b,rtol=1.0e-6,atol=1.0e-6):
return abs(a-b) <= 0.5 * rtol * (abs(a) + abs(b)) + atol
def require_flteq(a,b):
if not flteq(a,b):
raise RuntimeError('check failed (%.16g != %.16g, diff %g)'%(a,b,a-b))
return True
al = createInfo('stdlib::Al_sg225.ncmat;dcutoff=1.4')
require(hasFactory('stdncmat'))
require(al.hasTemperature() and require_flteq(al.getTemperature(),293.15))
require(al.hasXSectFree() and require_flteq(al.getXSectFree(),1.39667))
require(al.hasXSectAbsorption() and require_flteq(al.getXSectAbsorption(),0.231))
require(al.hasDensity() and require_flteq(al.getDensity(),2.69864547673))
require(al.hasNumberDensity() and require_flteq(al.getNumberDensity(),0.06023238256131625))
require(al.hasDebyeTemperature())
require(al.hasStructureInfo())
si=al.getStructureInfo()
require( si['spacegroup'] == 225 )
require_flteq(si['a'],4.04958)
require_flteq(si['b'],4.04958)
require_flteq(si['c'],4.04958)
require( si['alpha'] == 90.0 )
require( si['beta'] == 90.0 )
require( si['gamma'] == 90.0 )
require( si['n_atoms'] == 4 )
require_flteq(si['volume'],66.4094599932)
require( al.hasHKLInfo() )
require( al.nHKL() == 3 )
require_flteq(al.hklDLower(),1.4)
require( al.hklDUpper() > 1e36 )
expected_hkl = { 0 : (1, -1, -1, 8, 2.3380261031049243, 1.773159275925474),
1 : (0, 0, 2, 6, 2.02479, 1.731788590086223),
2 : (0, 2, -2, 12, 1.4317427394787094, 1.575735707233723) }
for idx,hkl in enumerate(al.hklList()):
h,k,l,mult,dsp,fsq = hkl
require(idx<len(expected_hkl))
e = expected_hkl[idx]
require( list(e)[0:4] == [h,k,l,mult] )
require_flteq(dsp, e[4])
require_flteq(fsq, e[5])
#We do all createScatter... here with independent RNG, for reproducibility
#and to avoid consuming random numbers from other streams.
alpc = createScatterIndependentRNG('stdlib::Al_sg225.ncmat;dcutoff=1.4;incoh_elas=0;inelas=0')
require( alpc.name == 'PCBragg' )
require( isinstance(alpc.name,str) )
require( alpc.refCount() in (1,2) and type(alpc.refCount()) == int )
require( alpc.isNonOriented() )
#print(alpc.xsect(wl=4.0))
require_flteq(1.632435821586171,alpc.crossSectionNonOriented(wl2ekin(4.0)) )
require_flteq(1.632435821586171,alpc.crossSection(wl2ekin(4.0),(1,0,0)))
require( alpc.crossSectionNonOriented(wl2ekin(5.0)) == 0.0 )
require( alpc.rngSupportsStateManipulation() )
require(alpc.getRNGState()=='a79fd777407ba03b3d9d242b2b2a2e58b067bd44')
alpc.setRNGState('deadbeefdeadbeefdeadbeefdeadbeefb067bd44')
require(alpc.getRNGState()=='deadbeefdeadbeefdeadbeefdeadbeefb067bd44')
alpc_clone = alpc.clone()
require(alpc.getRNGState()=='deadbeefdeadbeefdeadbeefdeadbeefb067bd44')
require(alpc_clone.getRNGState()=='e0fd16d42a2aced7706cffa08536d869b067bd44')
alpc_clone2 = alpc_clone.clone(for_current_thread=True)
require(alpc_clone2.getRNGState()=='cc762bb1160a0be514300da860f6d160b067bd44')
alpc_clone3 = alpc_clone.clone(rng_stream_index = 12345 )
require(alpc_clone3.getRNGState()=='3a20660a10fd581bd7cddef8fc3f32a2b067bd44')
#Pick Nickel at 1.2 angstrom, to also cover vdos + incoherent-elastic + coherent-elastic:
nipc = createScatterIndependentRNG('stdlib::Ni_sg225.ncmat;dcutoff=0.6;vdoslux=2',2543577)
nipc_testwl = 1.2
#print(nipc.xsect(wl=nipc_testwl),nipc.xsect(wl=5.0))
require_flteq(16.763384236274295,nipc.xsect(wl=nipc_testwl))
require_flteq(16.763384236274295,nipc.xsect(wl=nipc_testwl,direction=(1,0,0)))
require_flteq(5.958248153134731,nipc.xsect(wl=5.0))
require( nipc.name == 'ProcComposition' )
expected = [ ( 0.056808478892590906, 0.5361444826572668 ),
( 0.056808478892590906, 0.5361444826572668 ),
( 0.056808478892590906, 0.3621986636537414 ),
( 0.056808478892590906, 0.8391056916029316 ),
( 0.04306081224691682, -0.8967361422557182 ),
( 0.056808478892590906, 0.0028144544046340148 ),
( 0.056808478892590906, -0.10165685368899191 ),
( 0.056808478892590906, -0.15963879335683306 ),
( 0.056808478892590906, 0.8260541809964751 ),
( 0.07817729397575277, -0.47118801199817245 ),
( 0.05401955085824633, -0.04621874653928581 ),
( 0.056808478892590906, 0.8260541809964751 ),
( 0.041670508464271845, -0.1503940486202929 ),
( 0.056808478892590906, -0.10165685368899191 ),
( 0.056808478892590906, -0.10165685368899191 ),
( 0.056808478892590906, 0.5361444826572668 ),
( 0.056808478892590906, -0.3915665520281999 ),
( 0.056808478892590906, 0.3621986636537414 ),
( 0.05803278720551066, -0.4542597828355841 ),
( 0.056808478892590906, 0.3621986636537414 ),
( 0.08136537448918617, -0.9123985306524791 ),
( 0.056808478892590906, -0.5655123710317247 ),
( 0.058619077301964584, -0.8633498900743867 ),
( 0.056808478892590906, 0.3042167239859003 ),
( 0.056808478892590906, 0.7378808571510718 ),
( 0.056808478892590906, -0.10165685368899191 ),
( 0.08060018281822305, -0.7370163968204352 ),
( 0.056808478892590906, -0.5655123710317247 ),
( 0.06972487412469042, 0.11577553721283067 ),
( 0.04057037479922572, -0.8570248794863715 ) ]
if _np is None:
ekin,mu=[],[]
for i in range(30):
_ekin,_mu=nipc.sampleScatterIsotropic(wl2ekin(nipc_testwl))
mu += [_mu]
ekin += [_ekin]
else:
ekin,mu = nipc.sampleScatterIsotropic(wl2ekin(nipc_testwl),repeat=30)
for i in range(len(ekin)):
#print ( f' ( {ekin[i]}, {mu[i]} ),');continue
require_flteq(ekin[i],expected[i][0])
require_flteq(mu[i],expected[i][1])
expected = [ ( 0.008416101633014865, (-0.757659986585644, -0.6452758602889546, -0.09782846648798732) ),
( 0.056808478892590906, (0.07228896531453344, -0.5190173207165885, 0.8517014302500192) ),
( 0.056808478892590906, (-0.9249112255344181, -0.32220112076758217, -0.20180600252850442) ),
( 0.056808478892590906, (-0.15963879335683306, -0.8486615569734178, 0.5042707778277745) ),
( 0.055175282123031216, (-0.9137234740275523, -0.01697993608051868, -0.4059816434048743) ),
( 0.056808478892590906, (0.3621986636537414, 0.9195336880770101, -0.15254482796521104) ),
( 0.056808478892590906, (-0.8667699444876275, 0.3952682020937969, -0.30409359043960893) ),
( 0.056808478892590906, (-0.10165685368899191, -0.8869759070713323, -0.4504882066969593) ),
( 0.056808478892590906, (0.07228896531453344, -0.39741541395284924, -0.914787021249449) ),
( 0.056808478892590906, (-0.10165685368899191, -0.9768880366798581, -0.1880309758785167) ),
( 0.02606330473669421, (-0.7582323992671114, -0.6516002612972578, 0.022376956428100652) ),
( 0.056808478892590906, (0.8260541809964751, 0.539797243436807, 0.16202909009269678) ),
( 0.04566678251268602, (-0.9534434024097809, 0.17205470782622773, -0.2476748996488988) ),
( 0.056808478892590906, (-0.5655123710317247, 0.15884349419072655, 0.8092987721252006) ),
( 0.056808478892590906, (0.5361444826572668, 0.7795115518549292, 0.32389941994528487) ),
( 0.056808478892590906, (0.07228896531453344, 0.746175597107444, 0.6618128767069312) ),
( 0.056808478892590906, (-0.10165685368899191, -0.4247181868490453, 0.8996001033001911) ),
( 0.056808478892590906, (0.5361444826572668, 0.555576061106532, -0.6355189486093414) ),
( 0.05789302012683443, (-0.12097277426532965, -0.6903199617139779, 0.7133189597548643) ),
( 0.056808478892590906, (0.3042167239859003, -0.8706122815482211, -0.3866347631352975) ),
( 0.056808478892590906, (-0.7384733804796917, 0.6322144258925643, -0.23443972789660028) ),
( 0.056808478892590906, (-0.15963879335683306, 0.21525619037302965, -0.9634211063505222) ),
( 0.056808478892590906, (0.41359447569500096, 0.4927058865194684, 0.7656242675514158) ),
( 0.056808478892590906, (0.25796367721315083, 0.48520231047621615, 0.8354839670198411) ),
( 0.056808478892590906, (0.5785005938702705, 0.8104481067271115, -0.09225469740985966) ),
( 0.04368024428240841, (0.020347983057095325, -0.49560493792245525, 0.8683096827125603) ),
( 0.05481910288975998, (-0.29870118895121683, -0.9272663989579163, 0.22573131170209382) ),
( 0.056808478892590906, (0.3621986636537414, -0.8822186430862218, 0.3008361577978115) ),
( 0.056808478892590906, (0.7680722413286334, 0.5975216576265994, -0.23028873347945303) ),
( 0.056808478892590906, (0.32922859149927786, -0.9426419619170849, 0.0550878042084668) ) ]
for i in range(30):
out_ekin,outdir = nipc.sampleScatter(wl2ekin(nipc_testwl),(1.0,0.0,0.0))
#print ( f' ( {out_ekin}, {outdir} ),');continue
require_flteq(out_ekin,expected[i][0])
require_flteq(outdir[0],expected[i][1][0])
require_flteq(outdir[1],expected[i][1][1])
require_flteq(outdir[2],expected[i][1][2])
gesc = createScatterIndependentRNG("""stdlib::Ge_sg227.ncmat;dcutoff=0.5;mos=40.0arcsec
;dir1=@crys_hkl:5,1,1@lab:0,0,1
;dir2=@crys_hkl:0,-1,1@lab:0,1,0""",3453455)
require_flteq(591.0256514168468,gesc.crossSection(wl2ekin(1.540),( 0., 1., 1. )))
require_flteq(1.666903965431398,gesc.crossSection(wl2ekin(1.540),( 1., 1., 0. )))
| en | 0.804147 | #!/usr/bin/env python3 Python module for using the NCrystal library for thermal neutron transport in crystals Please find more information about NCrystal at the website: https://mctools.github.io/ncrystal/ In particular, a small example using the NCrystal python module can be found at: https://github.com/mctools/ncrystal/blob/master/examples/ncrystal_example_py A substantial effort went into developing NCrystal. If you use it for your work, we would appreciate it if you would use the following reference in your work: <NAME> and <NAME>, NCrystal: A library for thermal neutron transport, Computer Physics Communications 246 (2020) 106851, https://doi.org/10.1016/j.cpc.2019.07.015 For work benefitting from our inelastic physics, we furthermore request that you additionally also use the following reference in your work: <NAME>, <NAME>, et. al., "Rejection-based sampling of inelastic neutron scattering", Journal of Computational Physics 380 (2019) 400-407, https://doi.org/10.1016/j.jcp.2018.11.043 For detailed usage conditions and licensing of this open source project, see: https://github.com/mctools/ncrystal/blob/master/NOTICE https://github.com/mctools/ncrystal/blob/master/LICENSE https://github.com/mctools/ncrystal/blob/master/ncrystal_extra/LICENSE ################################################################################ ## ## ## This file is part of NCrystal (see https://mctools.github.io/ncrystal/) ## ## ## ## Copyright 2015-2021 NCrystal developers ## ## ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## ## you may not use this file except in compliance with the License. ## ## You may obtain a copy of the License at ## ## ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## ## ## Unless required by applicable law or agreed to in writing, software ## ## distributed under the License is distributed on an "AS IS" BASIS, ## ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## ## See the License for the specific language governing permissions and ## ## limitations under the License. ## ## ## ################################################################################ #Only put the few most important items in __all__, to prevent cluttering on #wildcard imports. Specifically this is the exceptions, the most important API #classes, the factory functions, and the constants: ################################### #Convert cstr<->str: #converts any string (str,bytes,unicode,path) to bytes #Attempt with file-system encoding, in case of non-ASCII path names: #converts bytes object to str (unicode in py3, bytes in py2) ################################### #Same as NCRYSTAL_VERSION macro: Base class for all exceptions raised by NCrystal code #some constants (NB: Copied here from NCMath.hh - must keep synchronized!! Also, #remember to include in __all__ list above): # speed of light in Aa/s # amu to kg # amu to eV/c^2 # mol^-1 # eV/K # [amu] # [eV*s] #If NCRYSTAL_LIB env var is set, we try that and only that: #normal import #work if running as script: #Exceptions: #checks there was an error #TODO: Provide line number / file as well? #to avoid warnings in py 2.6 #helper class for exporting the functions: #NB: we should read about return types in the ctypes tutorial. Apparently one #can just set an error checking function as the restype. #NB: For ncrystal_unref we use take_ref=False, so RCBase.__del__ can cache #the result of ctypes.byref(rawobj). 
This is needed since the ctypes module #might have been unloaded before RCBase.__del__ is called: #scalar case, array interface not triggered #NB: returning the ekin object itself is important in order to keep a reference to it after the call: #Obsolete: #Set random function, keeping references as needed (otherwise fct ptrs #kept on C++ side will suddenly stop working!) and casting None to a null-ptr. #keep refs! #null ptr, i.e. state manipulation is not supported Extract packfact value from cfgstr Extract vdoslux value from cfgstr Create simplified VDOS according to the Debye model #NB: Must keep function exactly synchronised with createVDOSDebye function #in .cc src (although leaving out temperature,boundXS,elementMassAMU args #here): #Actual returned egrid should contain only first and last value: Base class for all NCrystal objects internal usage only #do not ref here, since ncrystal_create_xxx functions in C-interface already did so. #keep fct reference #keep byref(rawobj), since ctypes might #disappear before __del__ is called. Access reference count of wrapped C++ object Assertion which throws NCLogicError on failure Class providing physical constants related to a particular mix of isotopes. This can be used to represent elements (i.e. all isotopes having same Z) in either natural or enriched form, but can also be used to represent atoms in doped crystals. E.g. if a small fraction (0.1%) of Cr-ions replace some Al-ions in a Al2O3 lattice, the AtomData could represent a mix of 0.1% Cr and 99.9% Al. internal usage only Atomic mass in Daltons (averaged appropriately over constituents) Coherent scattering length in sqrt(barn)=10fm #0.1 is fm/sqrt(barn) Coherent scattering length in fm Bound coherent cross section in barn. Same as 4*pi*coherentScatLen()**2 Bound incoherent cross section in barn Bound scattering cross section in barn (same as coherentXS()+incoherentXS()) Absorption cross section in barn Free scattering cross section in barn (same as freeCoherentXS()+freeIncoherentXS()) Free coherent cross section in barn. Free incoherent cross section in barn. Natural element with no composition. Single isotope with no composition. Composite definition. See nComponents(), getComponent() and components property If number of protons per nuclei is well defined. This is true for natural elements, single isotopes, and composites where all components have the same number of protons per nuclei. Number of protons per nuclei (0 if not well defined). If Z()!=0, this returns the corresponding element name ('H', 'He', ...). Returns empty string when Z() is 0. #NB: We are relying on natural elements to return their element names in #description(false). This is promised by a comment in NCAtomData.hh! Number of nucleons per nuclei (0 if not well defined or natural element). internal usage only Fraction (by count) of component in mixture AtomData of component Number of sub-components in a mixture Get component in a mixture Get list of all components Short label which unique identifies an atom role within a particular material. Whether or not AtomData appears directly on an Info object (if not, it must be a component (direct or indirect) of a top level AtomData object Returns description of material as a string, with or without values. Class representing information about a given material create Info object based on cfg-string (same as using createInfo(cfgstr)) #Already got an ncrystal_info_t object: Whether basic composition is available. Get basic composition as list of (fraction,AtomData). 
The list is empty when no composition is available, and is always consistent with AtomInfo/DynInfo (if present). Dump contained information to standard output Whether or not material has a temperature available Material temperature (in kelvin) OBSOLETE FUNCTION: The concept of global versus per-element Debye temperatures has been removed. Please iterate over AtomInfo objects instead (see getAtomInfos() function) and get the Debye Temperature from those. This function will be removed in a future release. OBSOLETE FUNCTION: The concept of global versus per-element Debye temperatures has been removed. Please iterate over AtomInfo objects instead (see getAtomInfos() function) and get the Debye Temperature from those. Calling this function will always result in an exception thrown for now, and the function will be removed in a future release.. Whether AtomInfo objects are present and have Debye temperatures available (they will either all have them available, or none of them will have them available). Alias for hasAtomDebyeTemp(). OBSOLETE FUNCTION which will be removed in a future release. Please call hasDebyeTemperature() instead. OBSOLETE FUNCTION which will be removed in a future release. Please access the AtomInfo objects instead and query the Debye temperature there. Whether or not material has density available Get density in g/cm^3. See also getNumberDensity(). Whether or not material has number density available Get number density in atoms/angstrom^3. See also getDensity(). Whether or not material has absorption cross section available Absorption cross section in barn (at 2200m/s) Whether or not material has free scattering cross section available Saturated (free) scattering cross section in barn in the high-E limit Whether or not material has crystal structure information available. Information about crystal structure. #sanity check Class with information about a particular atom in a unit cell, including the composition of atoms, positions, Debye temperature, and mean-squared-displacements. For internal usage only. #tuple, since it is immutable Get corresponding DynamicInfo object from the same Info object. Returns None if Info object does not have dynamic info available Return AtomData object with details about composition and relevant physics constants Number of atoms of this type per unit cell The Debye Temperature of the atom (kelvin). Returns None if not available. The mean-squared-displacement of the atom (angstrom^2). Returns None if not available. #alias List (tuple actually) of positions of this atom in the unit cell. Each entry is given as a tuple of three values, (x,y,z) Index of atom on this material Whether or no getAtomInfo()/atominfos are available Whether AtomInfo objects have mean-square-displacements available OBSOLETE FUNCTION: AtomInfo objects now always have positions available. Returns same as hasAtomInfo(). Will be removed in a future release. OBSOLETE FUNCTION which will be removed in a future release. Please use hasAtomDebyeTemp() instead. Get list of AtomInfo objects, one for each atom. Returns empty list if unavailable. Whether or not material has lists of HKL-plane info available Number of HKL planes available (grouped into families with similar d-spacing and f-squared) Lower d-spacing cutoff (angstrom). Upper d-spacing cutoff (angstrom). Iterator over HKL info, yielding tuples in the format (h,k,l,multiplicity,dspacing,fsquared) Convenience method, calculating the d-spacing of a given Miller index. 
Calling this incurs the overhead of creating a reciprocal lattice matrix from the structure info. Class representing dynamic information (related to inelastic scattering) about a given atom internal usage only Get corresponding AtomInfo object from the same Info object. Returns None if Info object does not have AtomInfo available Index of atom on this material Atom fraction in material (all fractions must add up to unity) Material temperature (same value as on associated Info object) Return AtomData object with details about composition and relevant physics constants Class indicating atoms for which inelastic neutron scattering is absent or disabled. Class indicating atoms for which inelastic neutron scattering should be modelled as scattering on a free gas. Base class indicating atoms for which inelastic neutron scattering will be, directly or indirectly, described by a scattering kernel, S(alpha,beta). This is an abstract class, and derived classes provide actual access to the kernels. internal usage only Pre-calculated scattering kernel which at most needs a (hidden) conversion to S(alpha,beta) format before it is available. Prepares and returns the scattering kernel in S(alpha,beta) format #vdoslux value not actually used Solid state material with a phonon spectrum in the form of a Vibrational Density Of State (VDOS) parameterisation. This can be expanded into a full scattering kernel. How luxurious this expansion will be is controlled by an optional vdoslux parameter in the loadKernel call (must be integer from 0 to 5) internal usage only Access the VDOS as ([egrid_min,egrid_max],vdos_density) Access the original un-regularised VDOS energy grid Access the original un-regularised VDOS energy grid Access the VDOS energy grid as [egrid_min,egrid_max] Access the egrid expanded into all actual egrid points Access the VDOS density array Converts VDOS to S(alpha,beta) kernel with a luxury level given by the vdoslux parameter. Similarly to DI_VDOS, but instead of using a phonon VDOS spectrum provided externally, an idealised spectrum is used for lack of better options. This spectrum is based on the Debye Model, in which the spectrum rises quadratically with phonon energy below a cutoff value, kT, where T is the Debye temperature internal usage only Access the idealised VDOS as ([egrid_min,egrid_max],vdos_density) The Debye temperature of the atom Access the VDOS energy grid as [egrid_min,egrid_max] Access the egrid expanded into all actual egrid points Access the VDOS density array Converts VDOS to S(alpha,beta) kernel with a luxury level given by the vdoslux parameter, which is similar to the vdoslux parameter used in DI_VDOS. Notice that the vdoslux parameter specified here on DI_VDOSDebye will be reduced internally by 3 (but not less than 0), since the Debye model is anyway only a crude approximation and it accordingly does not need the same level of precise treatment as a full externally specified VDOS. Whether or not dynamic information for each atom is present Get list of DynamicInfo objects (if available). One for each atom. Custom information for which the core NCrystal code does not have any specific treatment. This is usually intended for usage by developers adding new experimental physics models. Base class for all calculators Calculator name Calculator name as property Base class for calculations of processes in materials. Note that kinetic energies are in electronvolt and direction vectors are tuples of 3 numbers. Domain where process has non-vanishing cross section. 
Returns the domain as (ekin_low,ekin_high). Outside this range of neutron kinetic energy, the process can be assumed to have vanishing cross sections. Thus, processes present at all energies will return (0.0,infinity). opposite of isOriented() Check if process is oriented and results depend on the incident direction of the neutron Access cross sections. Access cross sections (should not be called for oriented processes). For efficiency it is possible to provide the ekin parameter as a numpy array of numbers and get a corresponding array of cross sections back. Likewise, the repeat parameter can be set to a positive number, causing the ekin value(s) to be reused that many times and a numpy array with results returned. Convenience function which redirects calls to either crossSectionNonOriented or crossSection depending on whether or not a direction is given. It can also accept wavelengths instead of kinetic energies via the wl parameter. The repeat parameter is currently only supported when direction is not provided. Base class for calculations of absorption in materials create Absorption object based on cfg-string (same as using createAbsorption(cfgstr)) #Cloning: Clone object. The clone will be using the same physics models and sharing any read-only data with the original, but will be using its own private copy of any mutable caches. All in all, this means that the objects are safe to use concurrently in multi-threaded programming, as long as each thread gets its own clone. Return value is the new Absorption object. Base class for calculations of scattering in materials. Note that kinetic energies are in electronvolt and direction vectors are tuples of 3 numbers. create Scatter object based on cfg-string (same as using createScatter(cfgstr)) #Already got an ncrystal_scatter_t object: Clone object. The clone will be using the same physics models and sharing any read-only data with the original, but will be using its own private copy of any mutable caches and will get an independent RNG stream. All in all, this means that the objects are safe to use concurrently in multi-threaded programming, as long as each thread gets its own clone. Return value is the new Scatter object. If greater control over RNG streams are needed, it is optionally allowed to either set rng_stream_index to a non-negative integral value, or set for_current_thread=True. If rng_stream_index is set, the resulting object will use a specific rngstream index. All objects with the same indeed will share the same RNG state, so a sensible strategy is to use the same index for all scatter objects which are to be used in the same thread: If setting for_current_thread=True, the resulting object will use a specific rngstream which has been set aside for the current thread. Thus this function can be called from a given work-thread, in order to get thread-safe scatter handle, with all objects cloned within the same thread sharing RNG state. Randomly generate scatterings. Assuming a scattering took place, generate final state of neutron based on current kinetic energy and direction. Returns tuple(ekin_final,direction_final) where direct_final is itself a tuple (ux,uy,uz). The repeat parameter can be set to a positive number, causing the scattering to be sampled that many times and numpy arrays with results returned. Randomly generate scatterings (should not be called for oriented processes). Assuming a scattering took place, generate final state of neutron. Returns tuple(ekin_final,mu) where mu is the cosine of the scattering angle. 
For efficiency it is possible to provide the ekin parameter as a numpy array of numbers and get corresponding arrays of angles and energy transfers back. Likewise, the repeat parameter can be set to a positive number, causing the ekin value(s) to be reused that many times and numpy arrays with results returned. WARNING: Deprecated method. Please use the sampleScatter method instead. Randomly generate scatterings. Assuming a scattering took place, generate energy transfer (delta_ekin) and new neutron direction based on current kinetic energy and direction and return tuple(new_direction,delta_ekin). The repeat parameter can be set to a positive number, causing the scattering to be sampled that many times and numpy arrays with results returned. WARNING: Deprecated method. Please use the sampleScatterIsotropic method instead. Randomly generate scatterings (should not be called for oriented processes). Assuming a scattering took place, generate energy transfer (delta_ekin) and scatter angle in radians and return tuple(scatter_angle,delta_ekin) (this method should not be invoked on oriented processes). For efficiency it is possible to provide the ekin parameter as a numpy array of numbers and get corresponding arrays of angles and energy transfers back. Likewise, the repeat parameter can be set to a positive number, causing the ekin value(s) to be reused that many times and numpy arrays with results returned. Convenience function which redirects calls to either sampleScatterIsotropic or sampleScatter depending on whether or not a direction is given. It can also accept wavelengths instead of kinetic energies via the wl parameter. WARNING: Deprecated method. Please use the "scatter" method instead. Convenience function which redirects calls to either generateScatteringNonOriented or generateScattering depending on whether or not a direction is given. It can also accept wavelengths instead of kinetic energies via the wl parameter. Query whether associated RNG stream supports state manipulation Get current RNG state (as printable hex-string with RNG type info embedded). This function returns None if RNG stream does not support state manipulation Set current RNG state. Note that setting the rng state will affect all objects sharing the RNG stream with the given scatter object (and those subsequently cloned from any of those). Note that if the provided state originates in (the current version of) NCrystal's builtin RNG algorithm, it can always be used here, even if the current RNG uses a different algorithm (it will simply be replaced). Otherwise, a mismatch of RNG stream algorithms will result in an error. Construct Info object based on provided configuration (using available factories) Construct Scatter object based on provided configuration (using available factories) Construct Scatter object based on provided configuration (using available factories) and with its own independent RNG stream (using the builtin RNG generator and the provided seed) Construct Absorption object based on provided configuration (using available factories) Convenience function which creates Info, Scatter, and Absorption objects directly from a data string rather than an on-disk or in-memory file. Such usage obviously precludes proper caching behind the scenes, and is intended for scenarios where the same data should not be used repeatedly. Info object (None if not present). Scatter object (None if not present). Absorption object (None if not present). Register in-memory file data. 
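A short sampling sketch following the descriptions above (the RNG-state accessor names are assumptions; only the behaviour is taken from the text):

import NCrystal as NC

sc = NC.createScatter('stdlib::Al_sg225.ncmat')
ekin = 0.0253                                      # thermal neutron kinetic energy in eV
ekin_final, mu = sc.sampleScatterIsotropic(ekin)   # mu = cosine of the scattering angle
print(ekin_final, mu)

state = sc.getRNGState()       # assumed accessor for the printable hex state
if state is not None:          # None if the stream does not support state manipulation
    sc.setRNGState(state)      # assumed setter; replays the same random stream later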
This needs a "filename" and the content of this virtual file. After registering such in-memory "files", they can be used as file names in cfg strings or MatCfg objects. Registering the same filename more than once, will simply override the content. As a special case data can specified as "ondisk://<path>", which will instead create a virtual alias for an on-disk file. #numpy compatible wl2ekin and ekin2wl Convert neutron wavelength in Angstrom to kinetic energy in electronvolt #reciprocals without zero division: #fallback 1.0 wont be used Convert neutron kinetic energy in electronvolt to wavelength in Angstrom #reciprocals without zero division: #fallback 1.0 wont be used Clear various caches Deprecated. Does the same as clearCaches() Disable caching of Info objects in factory infrastructure Enable caching of Info objects in factory infrastructure Check if a factory of a given name exists #Helper function, for scripts creating ncmat files: Utility function for help in python scripts composing .ncmat files, transforming an array of of values into a properly formatted text string, with word-wrapping, usage of <val>r<n> syntax, etc. Returns list of lines (strings) for .ncmat files. #check if is repeated: #Write: #advance #Flush line #Accept custom random generator: Set the default random generator for CalcBase classes. Note that this can only changes the random generator for those CalcBase instances that did not already use random numbers). Default generator when using the NCrystal python interface is the scientifically sound random.random stream from the python standard library (a Mersenne Twister). To ensure Python does not clean up the passed function object prematurely, the NCrystal python module will keep a reference to it eternally. To avoid this, call with keepalive=False. But in that case the caller is responsible for keeping a reference to the object for as long as NCrystal might use it to generate random numbers. Access internal database with data for isotopes and natural elements. If A is provided, both A and Z must be integers, thus defining a specific isotope. If Z is an integer and A is 0 or None, the corresponding natural element is provided. Finally, the function can be called with a string identifying either natural elements or isotopes: atomDB("Al"), atomDB("He3"), ... In all cases, in case of errors or missing entries in the database, either an NCBadInput exception is thrown (throwOnErrors==True) or None is returned (when throwOnErrors==False). #guaranteed to give just symbol for natelem/singleisotope! #Should always be true unless we forgot some keys above #Should also be true, given guarantees for AtomData::description(false) Iterate over all entries in the internal database with data for isotopes and natural elements. If objects=True, AtomData objects are returned. If objects=False, (Z,A) values are returned (A=0 indicates a natural element). Entry in list returned by browseFiles. The (possibly virtual) filename needed to select this entry Description (such as the parent directory in case of on-disk files) Name of the factory delivering entry. The priority value of the entry (important in case multiple factories delivers content with the the same name). Can be 'Unable', 'OnlyOnExplicitRequest' or an integer priority value (entries with higher values will be preferred). The string '%s::%s'%(self.factName,self.name), which can be used to explicitly request this entry without interference from similarly named entries in other factories. 
Browse list of available input files (virtual or on-disk). The list is not guaranteed to be exhaustive, but will usually include all files in supported formats in the most obvious locations (the NCrystal data directory and other directories of the standard search path, the current working directory, virtual files embedded in the NCrystal library or registered dynamically). Returns a list of FileListEntry objects. If the dump flag is set to True, the list will also be printed to stdout in a human readable form. Setting factory parameter will only return / print entries from the factory of that name. Text data accessible line by line, with associated meta-data. This always includes a UID (useful for comparison and downstream caching purposes) and the data type (e.g. "ncmat"). Optionally available is the last known on-disk path to a file with the same content, which might be useful in case the data needs to be passed to 3rd party software which can only work with physical files. Text data objects are easily line-iterable, easily providing lines (without newline characters): for( auto& line : mytextdata ) {...}. Of course, the raw underlying data buffer can also be accessed if needed. The raw data must be ASCII or UTF-8 text, with line endings \n=LF=0x0A (Unix) or \r\n=CR+LF=0x0D0A (Windows/dos). Other encodings might work only if 0x00, 0x0A, 0x0D bytes do not occur in them outside of line endings. Notice that ancient pre-OSX Mac line-endings \r=CR=0x0D are not supported, and iterators will actually throw an error upon encountering them. This is done on purpose, since files with \r on unix might hide content when inspected in a terminal, which can be either confusing, a potential security issue, or both. create TextData object based on string (same as using createTextData(name)) Unique identifier. Objects only have identical UID if all contents and metadata are identical. Data type ("ncmat", "nxs", ...). Short description. This might for instance be a filename. Raw access to underlying data. Last known on-disk location (returns None if unavailable). Note that there is no guarantee against the file having been removed or modified since the TextData object was created. Line-iteration, yielding lines without terminating newline characters creates TextData objects based on requested name OBSOLETE FUNCTION: Use createTextData(..).rawData instead. Register custom directories to be monitored for data files. Remove all search directories added with addCustomSearchDirectory. Disable all standard data sources, remove all TextData factories as well, clear all registered virtual files and custom search directories. Finish by calling global clearCaches function ("Ripley: I say we take off and nuke the entire site from orbit. It's the only way to be sure."). Whether or not absolute file paths are allowed. Whether or not paths relative to current working directory are allowed. Whether or not the standard search path should be searched. This standard search path is by default searched *after* the standard data library, and is built by concatenating entries in the NCRYSTAL_DATA_PATH environment variable with entries in the compile time definition of the same name (in that order). Note that by default the standard search path is searched *after* the standard data library. Whether or not the standard data library shipped with NCrystal should be searched. 
Unless NCrystal is configured to have the standard data library embedded into the binary at compilation time, the location (directory path) of the standard data library is taken from the NCRYSTAL_DATADIR environment variable. If the environment variable is not set, the location is taken from the compile time definition of the same name. If neither is set, and data was not embedded at compilation time, the standard data library will be disabled by default and the location must be provided before it can be enabled. In all cases, the location can be overridden if explicitly provided by the user as the second parameter to this function. Return list of plugins [(pluginname,filename,plugintype),...]. If the dump flag is set to True, the list will not be returned. Instead it will be printed to stdout. Estimate (isotropic, harmonic) atomic mean-squared-displacement using the Debye Model (eq. 11+12 in <NAME>, Phys. Rev. Vol98 num 6, 1955). Unit of returned MSD value is Aa^2. Input temperatures should be in Kelvin, and input atomic mass should be in amu. The inverse of debyeIsotropicMSD (implemented via root-finding), allowing to get the Debye temperature which will give rise to a given mean-squared-displacement. Quick test that NCrystal works as expected in the current installation. #We do all createScatter... here with independent RNG, for reproducibility #and to avoid consuming random numbers from other streams. #print(alpc.xsect(wl=4.0)) #Pick Nickel at 1.2 angstrom, to also both vdos + incoherent-elastic + coherent-elastic: #print(nipc.xsect(wl=nipc_testwl),nipc.xsect(wl=5.0)) #print ( f' ( {ekin[i]}, {mu[i]} ),');continue #print ( f' ( {out_ekin}, {outdir} ),');continue stdlib::Ge_sg227.ncmat;dcutoff=0.5;mos=40.0arcsec ;dir1=@crys_hkl:5,1,1@lab:0,0,1 ;dir2=@crys_hkl:0,-1,1@lab:0,1,0 | 2.627468 | 3 |
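The oriented cfg-string quoted at the end of this row can be used directly; a hedged sketch (keyword names for debyeIsotropicMSD are assumptions):

import NCrystal as NC

cfg = ('stdlib::Ge_sg227.ncmat;dcutoff=0.5;mos=40.0arcsec'
       ';dir1=@crys_hkl:5,1,1@lab:0,0,1'
       ';dir2=@crys_hkl:0,-1,1@lab:0,1,0')
sc = NC.createScatter(cfg)
print(sc.isOriented())                                     # True: results depend on neutron direction
print(sc.crossSection(NC.wl2ekin(1.54), (0.0, 0.0, 1.0)))  # incident direction given as a 3-tuple

# Debye-model mean-squared-displacement (Aa^2) and the available-file listing:
print(NC.debyeIsotropicMSD(debye_temperature=400.0, temperature=293.15, mass=26.98))
NC.browseFiles(dump=True)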
SubtitleSearcher/data/handle_zip.py | razorblade23/SubtitleSearcher | 3 | 6632761 |
# Importing modules
import zipfile
import requests
import os
from zipfile import ZipFile
import shutil
# Move subtitles (OpenSubtitles.com)
def move_subtitle(mode, source_path, dst_path, append_lang_code=None):
org_string = dst_path
size = len(org_string)
mod_string = org_string[:size - 4]
if append_lang_code != None:
mod_string = f'{mod_string}-{append_lang_code}'
if mode == 'zip':
final_path = os.path.join(dst_path, f'{mod_string}.zip')
if mode == 'srt':
final_path = os.path.join(dst_path, f'{mod_string}.srt')
shutil.move(source_path, final_path)
# Move subtitles (Titlovi.com)
class SubFileHandler:
'''
Base class of File Handler. Not to be called directly. Instantiate from child classes.
Used to handle all common operations with subtitle files like downloading and unzipping.
    Child classes handle different approaches by different sources
'''
def __init__(self):
self.download_folder = 'downloaded'
self.extracted_folder = 'extracted'
self.zip_name = 'sub.zip'
self.extracted_zip = os.path.join(self.extracted_folder, self.zip_name)
@staticmethod
def check_for_folders():
if not os.path.isdir('downloaded'):
os.makedirs('downloaded')
if not os.path.isdir('extracted'):
os.makedirs('extracted')
def download_zip(self, url):
self.check_for_folders()
success = False
try:
r = requests.get(url, allow_redirects=True, timeout=20)
open('downloaded/sub.zip', 'wb').write(r.content)
except:
success = False
else:
success = True
return success
def list_all_extracted(self):
extracted_files = os.listdir(self.extracted_folder)
return extracted_files
def get_zip(self):
if os.path.isdir('downloaded'):
try:
path = os.path.join(self.download_folder, self.zip_name)
except:
pass
else:
return path
def delete_remains(self):
with os.scandir(self.download_folder) as entries:
for entry in entries:
if entry.is_dir() and not entry.is_symlink():
shutil.rmtree(entry.path)
else:
os.remove(entry.path)
with os.scandir(self.extracted_folder) as entries:
for entry in entries:
if entry.is_dir() and not entry.is_symlink():
shutil.rmtree(entry.path)
else:
os.remove(entry.path)
class OpenSubtitlesHandler(SubFileHandler):
def __init__(self):
super().__init__()
def download_zip(self, url):
return super().download_zip(url)
def extract_zip(self):
# specifying the zip file name
file_name = self.get_zip()
# opening the zip file in READ mode
try:
with ZipFile(file_name, 'r') as zip:
zip.extractall(path='extracted')
except zipfile.BadZipFile:
            print('Bad zip, cannot continue')
return
def move_files(self, dst_path, filename, append_lang_code=None):
src_path = os.path.join(self.extracted_folder, filename)
org_string = dst_path
size = len(org_string)
mod_string = org_string[:size - 4]
if append_lang_code != None:
mod_string = f'{mod_string}-{append_lang_code}'
final_path = os.path.join(dst_path, f'{mod_string}.srt')
shutil.move(src_path, final_path)
class TitloviFileHandler(SubFileHandler):
def __init__(self):
super().__init__()
def download(self, url):
self.download_zip(url)
def move_file(self, dst_folder, append_lang_code=None):
src_path = os.path.join(self.download_folder, self.zip_name)
org_string = dst_folder
size = len(org_string)
mod_string = org_string[:size - 4]
if append_lang_code != None:
mod_string = f'{mod_string}-{append_lang_code}'
final_path = os.path.join(dst_folder, f'{mod_string}.zip')
shutil.move(src_path, final_path)
| # Importing modules
import zipfile
import requests
import os
from zipfile import ZipFile
import shutil
# Move subtitles (OpenSubtitles.com)
def move_subtitle(mode, source_path, dst_path, append_lang_code=None):
org_string = dst_path
size = len(org_string)
mod_string = org_string[:size - 4]
if append_lang_code != None:
mod_string = f'{mod_string}-{append_lang_code}'
if mode == 'zip':
final_path = os.path.join(dst_path, f'{mod_string}.zip')
if mode == 'srt':
final_path = os.path.join(dst_path, f'{mod_string}.srt')
shutil.move(source_path, final_path)
# Move subtitles (Titlovi.com)
class SubFileHandler:
'''
Base class of File Handler. Not to be called directly. Instantiate from child classes.
Used to handle all common operations with subtitle files like downloading and unzipping.
    Child classes handle different approaches by different sources
'''
def __init__(self):
self.download_folder = 'downloaded'
self.extracted_folder = 'extracted'
self.zip_name = 'sub.zip'
self.extracted_zip = os.path.join(self.extracted_folder, self.zip_name)
@staticmethod
def check_for_folders():
if not os.path.isdir('downloaded'):
os.makedirs('downloaded')
if not os.path.isdir('extracted'):
os.makedirs('extracted')
def download_zip(self, url):
self.check_for_folders()
success = False
try:
r = requests.get(url, allow_redirects=True, timeout=20)
open('downloaded/sub.zip', 'wb').write(r.content)
except:
success = False
else:
success = True
return success
def list_all_extracted(self):
extracted_files = os.listdir(self.extracted_folder)
return extracted_files
def get_zip(self):
if os.path.isdir('downloaded'):
try:
path = os.path.join(self.download_folder, self.zip_name)
except:
pass
else:
return path
def delete_remains(self):
with os.scandir(self.download_folder) as entries:
for entry in entries:
if entry.is_dir() and not entry.is_symlink():
shutil.rmtree(entry.path)
else:
os.remove(entry.path)
with os.scandir(self.extracted_folder) as entries:
for entry in entries:
if entry.is_dir() and not entry.is_symlink():
shutil.rmtree(entry.path)
else:
os.remove(entry.path)
class OpenSubtitlesHandler(SubFileHandler):
def __init__(self):
super().__init__()
def download_zip(self, url):
return super().download_zip(url)
def extract_zip(self):
# specifying the zip file name
file_name = self.get_zip()
# opening the zip file in READ mode
try:
with ZipFile(file_name, 'r') as zip:
zip.extractall(path='extracted')
except zipfile.BadZipFile:
            print('Bad zip, cannot continue')
return
def move_files(self, dst_path, filename, append_lang_code=None):
src_path = os.path.join(self.extracted_folder, filename)
org_string = dst_path
size = len(org_string)
mod_string = org_string[:size - 4]
if append_lang_code != None:
mod_string = f'{mod_string}-{append_lang_code}'
final_path = os.path.join(dst_path, f'{mod_string}.srt')
shutil.move(src_path, final_path)
class TitloviFileHandler(SubFileHandler):
def __init__(self):
super().__init__()
def download(self, url):
self.download_zip(url)
def move_file(self, dst_folder, append_lang_code=None):
src_path = os.path.join(self.download_folder, self.zip_name)
org_string = dst_folder
size = len(org_string)
mod_string = org_string[:size - 4]
if append_lang_code != None:
mod_string = f'{mod_string}-{append_lang_code}'
final_path = os.path.join(dst_folder, f'{mod_string}.zip')
        shutil.move(src_path, final_path) | en | 0.786954 | # Importing modules # Move subtitles (OpenSubtitles.com) # Move subtitles (Titlovi.com) Base class of File Handler. Not to be called directly. Instantiate from child classes. Used to handle all common operations with subtitle files like downloading and unzipping. Child classes handle different approaches by different sources # specifying the zip file name # opening the zip file in READ mode | 2.886353 | 3
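A short usage sketch of the handler classes above (URL and movie path are placeholders; the import path follows the repository layout given in this row):

from SubtitleSearcher.data.handle_zip import OpenSubtitlesHandler

handler = OpenSubtitlesHandler()
if handler.download_zip('https://example.org/some-subtitle.zip'):
    handler.extract_zip()
    for name in handler.list_all_extracted():
        # dst_path is the video file path; its trailing extension is stripped internally
        handler.move_files('/movies/MyMovie.mkv', name, append_lang_code='en')
handler.delete_remains()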
ckanext/datapusher/tests/test.py | Pardhu448/ckan | 0 | 6632762 |
# encoding: utf-8
import datetime
import json
import pytest
import responses
import sqlalchemy.orm as orm
import ckan.lib.create_test_data as ctd
import ckan.model as model
import ckan.plugins as p
import ckan.tests.legacy as tests
import ckanext.datastore.backend.postgres as db
from ckan.common import config
from ckanext.datastore.tests.helpers import set_url_type
class TestDatastoreCreate(object):
sysadmin_user = None
normal_user = None
@pytest.fixture(autouse=True)
def initial_data(self, clean_db, clean_index, test_request_context):
if not tests.is_datastore_supported():
pytest.skip("Datastore not supported")
ctd.CreateTestData.create()
self.sysadmin_user = model.User.get("testsysadmin")
self.normal_user = model.User.get("annafan")
engine = db.get_write_engine()
self.Session = orm.scoped_session(orm.sessionmaker(bind=engine))
with test_request_context():
set_url_type(
model.Package.get("annakarenina").resources, self.sysadmin_user
)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_create_ckan_resource_in_package(self, app):
package = model.Package.get("annakarenina")
data = {"resource": {"package_id": package.id}}
auth = {"Authorization": str(self.sysadmin_user.apikey)}
res = app.post(
"/api/action/datastore_create",
json=data,
extra_environ=auth,
status=200,
)
res_dict = json.loads(res.body)
assert "resource_id" in res_dict["result"]
assert len(model.Package.get("annakarenina").resources) == 3
res = tests.call_action_api(
app, "resource_show", id=res_dict["result"]["resource_id"]
)
assert res["url"].endswith("/datastore/dump/" + res["id"]), res
@responses.activate
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_providing_res_with_url_calls_datapusher_correctly(self, app):
config["datapusher.url"] = "http://datapusher.ckan.org"
responses.add(
responses.POST,
"http://datapusher.ckan.org/job",
content_type="application/json",
body=json.dumps({"job_id": "foo", "job_key": "bar"}),
)
responses.add_passthru(config["solr_url"])
package = model.Package.get("annakarenina")
tests.call_action_api(
app,
"datastore_create",
apikey=self.sysadmin_user.apikey,
resource=dict(package_id=package.id, url="demo.ckan.org"),
)
assert len(package.resources) == 3, len(package.resources)
resource = package.resources[2]
data = json.loads(responses.calls[-1].request.body)
assert data["metadata"]["resource_id"] == resource.id, data
assert not data["metadata"].get("ignore_hash"), data
assert data["result_url"].endswith("/action/datapusher_hook"), data
assert data["result_url"].startswith("http://"), data
@responses.activate
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_pass_the_received_ignore_hash_param_to_the_datapusher(self, app):
config["datapusher.url"] = "http://datapusher.ckan.org"
responses.add(
responses.POST,
"http://datapusher.ckan.org/job",
content_type="application/json",
body=json.dumps({"job_id": "foo", "job_key": "bar"}),
)
package = model.Package.get("annakarenina")
resource = package.resources[0]
tests.call_action_api(
app,
"datapusher_submit",
apikey=self.sysadmin_user.apikey,
resource_id=resource.id,
ignore_hash=True,
)
data = json.loads(responses.calls[-1].request.body)
assert data["metadata"]["ignore_hash"], data
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_cant_provide_resource_and_resource_id(self, app):
package = model.Package.get("annakarenina")
resource = package.resources[0]
data = {
"resource_id": resource.id,
"resource": {"package_id": package.id},
}
auth = {"Authorization": str(self.sysadmin_user.apikey)}
res = app.post(
"/api/action/datastore_create",
json=data,
extra_environ=auth,
status=409,
)
res_dict = json.loads(res.body)
assert res_dict["error"]["__type"] == "Validation Error"
@responses.activate
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_send_datapusher_creates_task(self, test_request_context):
responses.add(
responses.POST,
"http://datapusher.ckan.org/job",
content_type="application/json",
body=json.dumps({"job_id": "foo", "job_key": "bar"}),
)
package = model.Package.get("annakarenina")
resource = package.resources[0]
context = {"ignore_auth": True, "user": self.sysadmin_user.name}
with test_request_context():
p.toolkit.get_action("datapusher_submit")(
context, {"resource_id": resource.id}
)
context.pop("task_status", None)
task = p.toolkit.get_action("task_status_show")(
context,
{
"entity_id": resource.id,
"task_type": "datapusher",
"key": "datapusher",
},
)
assert task["state"] == "pending", task
def _call_datapusher_hook(self, user, app):
package = model.Package.get("annakarenina")
resource = package.resources[0]
context = {"user": self.sysadmin_user.name}
p.toolkit.get_action("task_status_update")(
context,
{
"entity_id": resource.id,
"entity_type": "resource",
"task_type": "datapusher",
"key": "datapusher",
"value": '{"job_id": "my_id", "job_key":"my_key"}',
"last_updated": str(datetime.datetime.now()),
"state": "pending",
},
)
data = {"status": "success", "metadata": {"resource_id": resource.id}}
auth = {"Authorization": str(user.apikey)}
res = app.post(
"/api/action/datapusher_hook",
json=data,
extra_environ=auth,
status=200,
)
res_dict = json.loads(res.body)
assert res_dict["success"] is True
task = tests.call_action_api(
app,
"task_status_show",
entity_id=resource.id,
task_type="datapusher",
key="datapusher",
)
assert task["state"] == "success", task
task = tests.call_action_api(
app,
"task_status_show",
entity_id=resource.id,
task_type="datapusher",
key="datapusher",
)
assert task["state"] == "success", task
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_sysadmin(self, app, test_request_context):
with test_request_context():
self._call_datapusher_hook(self.sysadmin_user, app)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_normal_user(self, app, test_request_context):
with test_request_context():
self._call_datapusher_hook(self.normal_user, app)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_no_metadata(self, app):
data = {"status": "success"}
app.post("/api/action/datapusher_hook", json=data, status=409)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_no_status(self, app):
data = {"metadata": {"resource_id": "res_id"}}
app.post("/api/action/datapusher_hook", json=data, status=409)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_no_resource_id_in_metadata(self, app):
data = {"status": "success", "metadata": {}}
app.post("/api/action/datapusher_hook", json=data, status=409)
@responses.activate
@pytest.mark.ckan_config(
"ckan.datapusher.callback_url_base", "https://ckan.example.com"
)
@pytest.mark.ckan_config(
"ckan.datapusher.url", "http://datapusher.ckan.org"
)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_custom_callback_url_base(self, app):
package = model.Package.get("annakarenina")
resource = package.resources[0]
responses.add(
responses.POST,
"http://datapusher.ckan.org/job",
content_type="application/json",
body=json.dumps({"job_id": "foo", "job_key": "barloco"}),
)
responses.add_passthru(config["solr_url"])
tests.call_action_api(
app,
"datapusher_submit",
apikey=self.sysadmin_user.apikey,
resource_id=resource.id,
ignore_hash=True,
)
data = json.loads(responses.calls[-1].request.body)
assert (
data["result_url"]
== "https://ckan.example.com/api/3/action/datapusher_hook"
)
| # encoding: utf-8
import datetime
import json
import pytest
import responses
import sqlalchemy.orm as orm
import ckan.lib.create_test_data as ctd
import ckan.model as model
import ckan.plugins as p
import ckan.tests.legacy as tests
import ckanext.datastore.backend.postgres as db
from ckan.common import config
from ckanext.datastore.tests.helpers import set_url_type
class TestDatastoreCreate(object):
sysadmin_user = None
normal_user = None
@pytest.fixture(autouse=True)
def initial_data(self, clean_db, clean_index, test_request_context):
if not tests.is_datastore_supported():
pytest.skip("Datastore not supported")
ctd.CreateTestData.create()
self.sysadmin_user = model.User.get("testsysadmin")
self.normal_user = model.User.get("annafan")
engine = db.get_write_engine()
self.Session = orm.scoped_session(orm.sessionmaker(bind=engine))
with test_request_context():
set_url_type(
model.Package.get("annakarenina").resources, self.sysadmin_user
)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_create_ckan_resource_in_package(self, app):
package = model.Package.get("annakarenina")
data = {"resource": {"package_id": package.id}}
auth = {"Authorization": str(self.sysadmin_user.apikey)}
res = app.post(
"/api/action/datastore_create",
json=data,
extra_environ=auth,
status=200,
)
res_dict = json.loads(res.body)
assert "resource_id" in res_dict["result"]
assert len(model.Package.get("annakarenina").resources) == 3
res = tests.call_action_api(
app, "resource_show", id=res_dict["result"]["resource_id"]
)
assert res["url"].endswith("/datastore/dump/" + res["id"]), res
@responses.activate
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_providing_res_with_url_calls_datapusher_correctly(self, app):
config["datapusher.url"] = "http://datapusher.ckan.org"
responses.add(
responses.POST,
"http://datapusher.ckan.org/job",
content_type="application/json",
body=json.dumps({"job_id": "foo", "job_key": "bar"}),
)
responses.add_passthru(config["solr_url"])
package = model.Package.get("annakarenina")
tests.call_action_api(
app,
"datastore_create",
apikey=self.sysadmin_user.apikey,
resource=dict(package_id=package.id, url="demo.ckan.org"),
)
assert len(package.resources) == 3, len(package.resources)
resource = package.resources[2]
data = json.loads(responses.calls[-1].request.body)
assert data["metadata"]["resource_id"] == resource.id, data
assert not data["metadata"].get("ignore_hash"), data
assert data["result_url"].endswith("/action/datapusher_hook"), data
assert data["result_url"].startswith("http://"), data
@responses.activate
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_pass_the_received_ignore_hash_param_to_the_datapusher(self, app):
config["datapusher.url"] = "http://datapusher.ckan.org"
responses.add(
responses.POST,
"http://datapusher.ckan.org/job",
content_type="application/json",
body=json.dumps({"job_id": "foo", "job_key": "bar"}),
)
package = model.Package.get("annakarenina")
resource = package.resources[0]
tests.call_action_api(
app,
"datapusher_submit",
apikey=self.sysadmin_user.apikey,
resource_id=resource.id,
ignore_hash=True,
)
data = json.loads(responses.calls[-1].request.body)
assert data["metadata"]["ignore_hash"], data
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_cant_provide_resource_and_resource_id(self, app):
package = model.Package.get("annakarenina")
resource = package.resources[0]
data = {
"resource_id": resource.id,
"resource": {"package_id": package.id},
}
auth = {"Authorization": str(self.sysadmin_user.apikey)}
res = app.post(
"/api/action/datastore_create",
json=data,
extra_environ=auth,
status=409,
)
res_dict = json.loads(res.body)
assert res_dict["error"]["__type"] == "Validation Error"
@responses.activate
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_send_datapusher_creates_task(self, test_request_context):
responses.add(
responses.POST,
"http://datapusher.ckan.org/job",
content_type="application/json",
body=json.dumps({"job_id": "foo", "job_key": "bar"}),
)
package = model.Package.get("annakarenina")
resource = package.resources[0]
context = {"ignore_auth": True, "user": self.sysadmin_user.name}
with test_request_context():
p.toolkit.get_action("datapusher_submit")(
context, {"resource_id": resource.id}
)
context.pop("task_status", None)
task = p.toolkit.get_action("task_status_show")(
context,
{
"entity_id": resource.id,
"task_type": "datapusher",
"key": "datapusher",
},
)
assert task["state"] == "pending", task
def _call_datapusher_hook(self, user, app):
package = model.Package.get("annakarenina")
resource = package.resources[0]
context = {"user": self.sysadmin_user.name}
p.toolkit.get_action("task_status_update")(
context,
{
"entity_id": resource.id,
"entity_type": "resource",
"task_type": "datapusher",
"key": "datapusher",
"value": '{"job_id": "my_id", "job_key":"my_key"}',
"last_updated": str(datetime.datetime.now()),
"state": "pending",
},
)
data = {"status": "success", "metadata": {"resource_id": resource.id}}
auth = {"Authorization": str(user.apikey)}
res = app.post(
"/api/action/datapusher_hook",
json=data,
extra_environ=auth,
status=200,
)
res_dict = json.loads(res.body)
assert res_dict["success"] is True
task = tests.call_action_api(
app,
"task_status_show",
entity_id=resource.id,
task_type="datapusher",
key="datapusher",
)
assert task["state"] == "success", task
task = tests.call_action_api(
app,
"task_status_show",
entity_id=resource.id,
task_type="datapusher",
key="datapusher",
)
assert task["state"] == "success", task
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_sysadmin(self, app, test_request_context):
with test_request_context():
self._call_datapusher_hook(self.sysadmin_user, app)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_normal_user(self, app, test_request_context):
with test_request_context():
self._call_datapusher_hook(self.normal_user, app)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_no_metadata(self, app):
data = {"status": "success"}
app.post("/api/action/datapusher_hook", json=data, status=409)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_no_status(self, app):
data = {"metadata": {"resource_id": "res_id"}}
app.post("/api/action/datapusher_hook", json=data, status=409)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_datapusher_hook_no_resource_id_in_metadata(self, app):
data = {"status": "success", "metadata": {}}
app.post("/api/action/datapusher_hook", json=data, status=409)
@responses.activate
@pytest.mark.ckan_config(
"ckan.datapusher.callback_url_base", "https://ckan.example.com"
)
@pytest.mark.ckan_config(
"ckan.datapusher.url", "http://datapusher.ckan.org"
)
@pytest.mark.ckan_config("ckan.plugins", "datastore datapusher")
@pytest.mark.usefixtures("with_plugins")
def test_custom_callback_url_base(self, app):
package = model.Package.get("annakarenina")
resource = package.resources[0]
responses.add(
responses.POST,
"http://datapusher.ckan.org/job",
content_type="application/json",
body=json.dumps({"job_id": "foo", "job_key": "barloco"}),
)
responses.add_passthru(config["solr_url"])
tests.call_action_api(
app,
"datapusher_submit",
apikey=self.sysadmin_user.apikey,
resource_id=resource.id,
ignore_hash=True,
)
data = json.loads(responses.calls[-1].request.body)
assert (
data["result_url"]
== "https://ckan.example.com/api/3/action/datapusher_hook"
) | en | 0.83829 | # encoding: utf-8 | 1.884079 | 2 |
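The tests above rely on the responses library to stub the DataPusher endpoint; the same pattern in isolation (standalone, outside CKAN) looks like this:

import json
import requests
import responses

@responses.activate
def check_job_submission():
    # Register a fake DataPusher job endpoint, then exercise it and inspect the recorded call.
    responses.add(
        responses.POST,
        'http://datapusher.ckan.org/job',
        content_type='application/json',
        body=json.dumps({'job_id': 'foo', 'job_key': 'bar'}),
    )
    r = requests.post('http://datapusher.ckan.org/job',
                      json={'metadata': {'resource_id': 'abc123'}})
    assert r.json()['job_id'] == 'foo'
    sent = json.loads(responses.calls[-1].request.body)
    assert sent['metadata']['resource_id'] == 'abc123'

check_job_submission()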
api/sources/coingecko.py | aalavanthan18/kylin-api | 0 | 6632763 | from api.sources import source_config
from api.sources.generic_source import GenericSource
import requests
from datetime import datetime
class CoinGecko(GenericSource):
def __init__(self):
self.url = source_config.sources["coingecko"]['url']
self.source_name = source_config.sources["coingecko"]['source_name']
super().__init__(self.url,self.source_name)
def get_prices(self,currency_pairs):
full_response = {}
full_response[self.source_name] = {}
symbol_lookup_url = self.url.replace("simple/price?ids=FROM_CURRENCY&vs_currencies=TO_CURRENCY","coins/list/")
all_coins = requests.get(symbol_lookup_url).json()
for currency_pair in currency_pairs.split(","):
from_currency_symbol = currency_pair.split("_")[0]
to_currency_symbol = currency_pair.split("_")[1]
from_currency_id = [coin for coin in all_coins if coin['symbol'] == from_currency_symbol][0]['id']
response = requests.get(self.url.replace("FROM_CURRENCY",from_currency_id).replace("TO_CURRENCY",to_currency_symbol)).json()
current_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
full_response[self.source_name][currency_pair] = {"processed_at":current_timestamp,"source":self.source_name, "payload":response}
return full_response
| from api.sources import source_config
from api.sources.generic_source import GenericSource
import requests
from datetime import datetime
class CoinGecko(GenericSource):
def __init__(self):
self.url = source_config.sources["coingecko"]['url']
self.source_name = source_config.sources["coingecko"]['source_name']
super().__init__(self.url,self.source_name)
def get_prices(self,currency_pairs):
full_response = {}
full_response[self.source_name] = {}
symbol_lookup_url = self.url.replace("simple/price?ids=FROM_CURRENCY&vs_currencies=TO_CURRENCY","coins/list/")
all_coins = requests.get(symbol_lookup_url).json()
for currency_pair in currency_pairs.split(","):
from_currency_symbol = currency_pair.split("_")[0]
to_currency_symbol = currency_pair.split("_")[1]
from_currency_id = [coin for coin in all_coins if coin['symbol'] == from_currency_symbol][0]['id']
response = requests.get(self.url.replace("FROM_CURRENCY",from_currency_id).replace("TO_CURRENCY",to_currency_symbol)).json()
current_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
full_response[self.source_name][currency_pair] = {"processed_at":current_timestamp,"source":self.source_name, "payload":response}
return full_response
| none | 1 | 2.729901 | 3 |
|
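The symbol-to-id lookup inside get_prices can be reproduced directly against the public CoinGecko endpoints (the base URL is an assumption here, since source_config is not shown in this row):

import requests

base = 'https://api.coingecko.com/api/v3/'   # assumed prefix configured in source_config
all_coins = requests.get(base + 'coins/list/').json()
coin_id = [c for c in all_coins if c['symbol'] == 'btc'][0]['id']   # -> 'bitcoin'
price = requests.get(base + 'simple/price?ids=%s&vs_currencies=usd' % coin_id).json()
print(price)   # e.g. {'bitcoin': {'usd': ...}}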
tensorflow/python/autograph/pyct/error_utils_test.py | EricRemmerswaal/tensorflow | 190,993 | 6632764 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for error_utils module."""
import re
from tensorflow.python.autograph.pyct import error_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.platform import test
class ErrorMetadataBaseTest(test.TestCase):
def test_create_exception_default_constructor(self):
class CustomError(Exception):
pass
em = error_utils.ErrorMetadataBase(
callsite_tb=(),
cause_metadata=None,
cause_message='test message',
source_map={},
converter_filename=None)
exc = em.create_exception(CustomError())
self.assertIsInstance(exc, CustomError)
self.assertIn('test message', str(exc))
def test_create_exception_custom_constructor(self):
class CustomError(Exception):
def __init__(self):
super(CustomError, self).__init__('test_message')
em = error_utils.ErrorMetadataBase(
callsite_tb=(),
cause_metadata=None,
cause_message='test message',
source_map={},
converter_filename=None)
exc = em.create_exception(CustomError())
self.assertIsNone(exc)
def test_get_message_no_code(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', None),
('/path/two.py', 171, 'test_fn_2', 'test code'),
]
cause_message = 'Test message'
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={},
converter_filename=None)
self.assertRegex(
em.get_message(),
re.compile(('"/path/one.py", line 11, in test_fn_1.*'
'"/path/two.py", line 171, in test_fn_2.*'
'Test message'), re.DOTALL))
def test_get_message_converted_code(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', 'test code 1'),
('/path/two.py', 171, 'test_fn_2', 'test code 2'),
('/path/three.py', 171, 'test_fn_3', 'test code 3'),
]
cause_message = 'Test message'
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={
origin_info.LineLocation(filename='/path/two.py', lineno=171):
origin_info.OriginInfo(
loc=origin_info.LineLocation(
filename='/path/other_two.py', lineno=13),
function_name='converted_fn',
source_code_line='converted test code',
comment=None)
},
converter_filename=None)
result = em.get_message()
self.assertRegex(
result,
re.compile((r'converted_fn \*.*'
r'"/path/three.py", line 171, in test_fn_3.*'
r'Test message'), re.DOTALL))
self.assertNotRegex(result, re.compile('test_fn_1'))
def test_get_message_call_overload(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', 'test code 1'),
('/path/two.py', 0, 'test_fn_2', 'test code 2'),
('/path/three.py', 171, 'test_fn_3', 'test code 3'),
]
cause_message = 'Test message'
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={},
converter_filename='/path/two.py')
self.assertRegex(
em.get_message(),
re.compile((r'"/path/one.py", line 11, in test_fn_1.*'
r'"/path/three.py", line 171, in test_fn_3 \*\*.*'
r'Test message'), re.DOTALL))
if __name__ == '__main__':
test.main()
| # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for error_utils module."""
import re
from tensorflow.python.autograph.pyct import error_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.platform import test
class ErrorMetadataBaseTest(test.TestCase):
def test_create_exception_default_constructor(self):
class CustomError(Exception):
pass
em = error_utils.ErrorMetadataBase(
callsite_tb=(),
cause_metadata=None,
cause_message='test message',
source_map={},
converter_filename=None)
exc = em.create_exception(CustomError())
self.assertIsInstance(exc, CustomError)
self.assertIn('test message', str(exc))
def test_create_exception_custom_constructor(self):
class CustomError(Exception):
def __init__(self):
super(CustomError, self).__init__('test_message')
em = error_utils.ErrorMetadataBase(
callsite_tb=(),
cause_metadata=None,
cause_message='test message',
source_map={},
converter_filename=None)
exc = em.create_exception(CustomError())
self.assertIsNone(exc)
def test_get_message_no_code(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', None),
('/path/two.py', 171, 'test_fn_2', 'test code'),
]
cause_message = 'Test message'
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={},
converter_filename=None)
self.assertRegex(
em.get_message(),
re.compile(('"/path/one.py", line 11, in test_fn_1.*'
'"/path/two.py", line 171, in test_fn_2.*'
'Test message'), re.DOTALL))
def test_get_message_converted_code(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', 'test code 1'),
('/path/two.py', 171, 'test_fn_2', 'test code 2'),
('/path/three.py', 171, 'test_fn_3', 'test code 3'),
]
cause_message = 'Test message'
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={
origin_info.LineLocation(filename='/path/two.py', lineno=171):
origin_info.OriginInfo(
loc=origin_info.LineLocation(
filename='/path/other_two.py', lineno=13),
function_name='converted_fn',
source_code_line='converted test code',
comment=None)
},
converter_filename=None)
result = em.get_message()
self.assertRegex(
result,
re.compile((r'converted_fn \*.*'
r'"/path/three.py", line 171, in test_fn_3.*'
r'Test message'), re.DOTALL))
self.assertNotRegex(result, re.compile('test_fn_1'))
def test_get_message_call_overload(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', 'test code 1'),
('/path/two.py', 0, 'test_fn_2', 'test code 2'),
('/path/three.py', 171, 'test_fn_3', 'test code 3'),
]
cause_message = 'Test message'
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={},
converter_filename='/path/two.py')
self.assertRegex(
em.get_message(),
re.compile((r'"/path/one.py", line 11, in test_fn_1.*'
r'"/path/three.py", line 171, in test_fn_3 \*\*.*'
r'Test message'), re.DOTALL))
if __name__ == '__main__':
test.main() | en | 0.794467 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests for error_utils module. | 2.085296 | 2 |
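Outside the test harness, the class renders a traceback-style message from plain (filename, lineno, function, code) tuples; a minimal call mirroring what the tests assert:

from tensorflow.python.autograph.pyct import error_utils

em = error_utils.ErrorMetadataBase(
    callsite_tb=[('/path/one.py', 11, 'test_fn_1', 'x = f(y)')],
    cause_metadata=None,
    cause_message='Test message',
    source_map={},
    converter_filename=None)
print(em.get_message())   # includes '"/path/one.py", line 11, in test_fn_1' followed by 'Test message'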
gestaoVendas/cadastros/urls.py | evton/Gestao-de-vendas | 1 | 6632765 |
from django.urls import path
from . import views
urlpatterns = [
path('cadastro/', views.novocliente, name='cadastrocliente'),
] | from django.urls import path
from . import views
urlpatterns = [
path('cadastro/', views.novocliente, name='cadastrocliente'),
] | none | 1 | 1.452414 | 1 |
|
embedding_propagation/__init__.py | dvd42/embedding-propagation | 0 | 6632766 | from .embedding_propagation import * | from .embedding_propagation import * | none | 1 | 1.100213 | 1 |
|
Support/Fuego/Pythia/pythia-0.4/packages/fuego/fuego/serialization/chemkin/unpickle/parsers/Thermo.py | marient/PelePhysics | 1 | 6632767 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from BaseParser import BaseParser
class Thermo(BaseParser):
# the interesting tokens
def aTemperatureRange(self, token):
if not self._thermoAll:
msg = "Unexpected temperature range definitions without THERMO ALL"
self.onWarning(msg, self.locator())
self._range = token.range
self._mechanism.thermoRange(self._range)
return 0
def aThermoLine(self, token):
if not self._range and self._thermoAll:
if not self._thermoAllWarned:
msg = "THERMO ALL: Expected temperature range definition, not species info"
self.onWarning(msg, self.locator())
self._thermoAllWarned = 1
# is this the next valid thermo line in the sequence?
id = token.id - 1
if id != self._nextId:
msg = "Unexpected thermo line: found %d while expecting %d" % \
(token.id, self._nextId + 1)
self.onError(msg, self.locator())
# dispatch to appropriate line info parser
self._lineParsers[id](token)
# next valid thermo line is...
self._nextId = (self._nextId + 1) % 4
return 0
# transitions
def aThermoSection(self, token):
self._info.log("thermo parser: section start")
self._thermoAll = token._all
self._mechanism.thermoAll(self._thermoAll)
self._range = self._mechanism.thermoRange()
self._parse(self._scanner, self._tokenizer)
return 0
def anEndSection(self, token):
BaseParser.anEndSection(self, token)
if self._thermoAll:
species = self._mechanism.species()
candidates = []
for s in species:
if not s.thermo:
candidates.append(s.symbol)
if candidates:
msg = "no species information for the folowing species: %s" % candidates
self.onWarning(msg, self.locator())
return 1
def onEndOfFile(self):
# if not self._thermoAll:
# msg = "this mechanism requires an external thermo database"
# self.onWarning(msg)
self._mechanism.thermoDone()
return 1
# other methods
def __init__(self, mechanism, tokenizer):
import pyre
import fuego
BaseParser.__init__(self, mechanism)
self._tokenizer = tokenizer
self._scanner = fuego.serialization.chemkin.unpickle.scanners.thermo()
# Private data
self._range = ()
self._thermoAll = 0
self._parameters = []
self._currentRange = None
import re
from fuego.serialization.chemkin.unpickle.tokens.RegularExpressions import species
self._speciesScanner = re.compile(species)
self._nextId = 0 #line ids are zero-based
self._currentSpecies = None
self._lineParsers = [
self._parseLine1, self._parseLine2, self._parseLine3, self._parseLine4
]
self._thermoAllWarned = 0
return
def _parseLine1(self, token):
text = token.text
match = self._speciesScanner.match(text[0:18])
if not match:
msg = "Could not match a valid species name in '%s'" % text[0:18]
self.onError(msg, self.locator())
speciesName = match.group()
species = self._mechanism.species(speciesName)
if not species:
msg = "thermo section: undeclared species '%s'" % speciesName
self.onWarning(msg, self.locator())
species = self._mechanism.newSpecies(speciesName)
species.locator(self.locator())
# Parse the element coefficient in columns 24-43 (zero-based)
for i in range(0, 4):
offset = 24+i*5
self._extractComposition(token, text, offset, species.composition)
# Get the phase
phase = text[44].lower()
if phase not in ["s", "l", "g"]:
msg = "Unkown phase code '%s'" % phase
locator = self.locator()
locator.column = 44
self.onError(msg, self.locator())
species.phase = phase
# Get the temperature intervals
lowT = self._extractFloat(token, text, 45, 10)
highT = self._extractFloat(token, text, 55, 10)
midT = self._extractFloat(token, text, 65, 10, optional=1)
if midT == None:
midT = self._range[1]
self._currentRange = (lowT, midT, highT)
# The extra possible element,coef pair
self._extractComposition(token, text, 73, species.composition)
# Save this information
self._currentSpecies = species
return
def _parseLine2(self, token):
if not self._currentSpecies: return
text = token.text
# extract the high T range parametrization
self._parameters = []
for i in range(0, 5):
number = self._extractFloat(token, text, i*15, 15)
self._parameters.append(number)
return
def _parseLine3(self, token):
if not self._currentSpecies: return
text = token.text
# finish extracting the high T range parametrization
for i in range(0, 2):
number = self._extractFloat(token, text, i*15, 15)
self._parameters.append(number)
# store in the species
self._currentSpecies.thermalParametrization(
"NASA", self._currentRange[1], self._currentRange[2], self.locator(),
self._parameters
)
# extract the first part of the low T parameters
self._parameters = []
for i in range(2, 5):
number = self._extractFloat(token, text, i*15, 15)
self._parameters.append(number)
return
def _parseLine4(self, token):
species = self._currentSpecies
if not species: return
text = token.text
# finish extracting the low T range parametrization
for i in range(0, 4):
number = self._extractFloat(token, text, i*15, 15)
self._parameters.append(number)
# store in the species
self._currentSpecies.thermalParametrization(
"NASA", self._currentRange[0], self._currentRange[1], self.locator(),
self._parameters
)
return
def _extractFloat(self, token, text, offset, width, optional=0):
str = text[offset:offset+width].strip()
if not str:
if optional: return None
msg = "Expected a required numeric field instead of '%s'" % text[offset:offset+width]
locator = self.locator()
locator.column = offset
self.onError(msg, locator)
try:
value = float(str)
except ValueError:
msg = "Could not convert '%s' into a number" % text[offset:offset+width]
locator = self.locator()
locator.column = offset
self.onError(msg, locator)
return value
def _extractComposition(self, token, text, offset, composition):
# Extract the coefficient first:
# some files have junk in the element slot when the coefficient is 0
coef = text[offset+2:offset+5].strip()
# The coefficient could be blank, which means 0
if not coef: return
coef = self._extractFloat(token, text, offset+2, 3)
# Extract the element name
name = text[offset:offset+2].strip()
if name and coef:
element = self._mechanism.element(name)
if not element:
msg = "Element '%s' not declared in an ELEMENT section" % name
locator = self.locator()
locator.column = offset
self.onWarning(msg, locator)
composition.append( (name, coef) )
return
# version
__id__ = "$Id$"
#
# End of file
| #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from BaseParser import BaseParser
class Thermo(BaseParser):
# the interesting tokens
def aTemperatureRange(self, token):
if not self._thermoAll:
msg = "Unexpected temperature range definitions without THERMO ALL"
self.onWarning(msg, self.locator())
self._range = token.range
self._mechanism.thermoRange(self._range)
return 0
def aThermoLine(self, token):
if not self._range and self._thermoAll:
if not self._thermoAllWarned:
msg = "THERMO ALL: Expected temperature range definition, not species info"
self.onWarning(msg, self.locator())
self._thermoAllWarned = 1
# is this the next valid thermo line in the sequence?
id = token.id - 1
if id != self._nextId:
msg = "Unexpected thermo line: found %d while expecting %d" % \
(token.id, self._nextId + 1)
self.onError(msg, self.locator())
# dispatch to appropriate line info parser
self._lineParsers[id](token)
# next valid thermo line is...
self._nextId = (self._nextId + 1) % 4
return 0
# transitions
def aThermoSection(self, token):
self._info.log("thermo parser: section start")
self._thermoAll = token._all
self._mechanism.thermoAll(self._thermoAll)
self._range = self._mechanism.thermoRange()
self._parse(self._scanner, self._tokenizer)
return 0
def anEndSection(self, token):
BaseParser.anEndSection(self, token)
if self._thermoAll:
species = self._mechanism.species()
candidates = []
for s in species:
if not s.thermo:
candidates.append(s.symbol)
if candidates:
msg = "no species information for the folowing species: %s" % candidates
self.onWarning(msg, self.locator())
return 1
def onEndOfFile(self):
# if not self._thermoAll:
# msg = "this mechanism requires an external thermo database"
# self.onWarning(msg)
self._mechanism.thermoDone()
return 1
# other methods
def __init__(self, mechanism, tokenizer):
import pyre
import fuego
BaseParser.__init__(self, mechanism)
self._tokenizer = tokenizer
self._scanner = fuego.serialization.chemkin.unpickle.scanners.thermo()
# Private data
self._range = ()
self._thermoAll = 0
self._parameters = []
self._currentRange = None
import re
from fuego.serialization.chemkin.unpickle.tokens.RegularExpressions import species
self._speciesScanner = re.compile(species)
self._nextId = 0 #line ids are zero-based
self._currentSpecies = None
self._lineParsers = [
self._parseLine1, self._parseLine2, self._parseLine3, self._parseLine4
]
self._thermoAllWarned = 0
return
def _parseLine1(self, token):
text = token.text
match = self._speciesScanner.match(text[0:18])
if not match:
msg = "Could not match a valid species name in '%s'" % text[0:18]
self.onError(msg, self.locator())
speciesName = match.group()
species = self._mechanism.species(speciesName)
if not species:
msg = "thermo section: undeclared species '%s'" % speciesName
self.onWarning(msg, self.locator())
species = self._mechanism.newSpecies(speciesName)
species.locator(self.locator())
# Parse the element coefficient in columns 24-43 (zero-based)
for i in range(0, 4):
offset = 24+i*5
self._extractComposition(token, text, offset, species.composition)
# Get the phase
phase = text[44].lower()
if phase not in ["s", "l", "g"]:
msg = "Unkown phase code '%s'" % phase
locator = self.locator()
locator.column = 44
self.onError(msg, self.locator())
species.phase = phase
# Get the temperature intervals
lowT = self._extractFloat(token, text, 45, 10)
highT = self._extractFloat(token, text, 55, 10)
midT = self._extractFloat(token, text, 65, 10, optional=1)
if midT == None:
midT = self._range[1]
self._currentRange = (lowT, midT, highT)
# The extra possible element,coef pair
self._extractComposition(token, text, 73, species.composition)
# Save this information
self._currentSpecies = species
return
def _parseLine2(self, token):
if not self._currentSpecies: return
text = token.text
# extract the high T range parametrization
self._parameters = []
for i in range(0, 5):
number = self._extractFloat(token, text, i*15, 15)
self._parameters.append(number)
return
def _parseLine3(self, token):
if not self._currentSpecies: return
text = token.text
# finish extracting the high T range parametrization
for i in range(0, 2):
number = self._extractFloat(token, text, i*15, 15)
self._parameters.append(number)
# store in the species
self._currentSpecies.thermalParametrization(
"NASA", self._currentRange[1], self._currentRange[2], self.locator(),
self._parameters
)
# extract the first part of the low T parameters
self._parameters = []
for i in range(2, 5):
number = self._extractFloat(token, text, i*15, 15)
self._parameters.append(number)
return
def _parseLine4(self, token):
species = self._currentSpecies
if not species: return
text = token.text
# finish extracting the low T range parametrization
for i in range(0, 4):
number = self._extractFloat(token, text, i*15, 15)
self._parameters.append(number)
# store in the species
self._currentSpecies.thermalParametrization(
"NASA", self._currentRange[0], self._currentRange[1], self.locator(),
self._parameters
)
return
def _extractFloat(self, token, text, offset, width, optional=0):
str = text[offset:offset+width].strip()
if not str:
if optional: return None
msg = "Expected a required numeric field instead of '%s'" % text[offset:offset+width]
locator = self.locator()
locator.column = offset
self.onError(msg, locator)
try:
value = float(str)
except ValueError:
msg = "Could not convert '%s' into a number" % text[offset:offset+width]
locator = self.locator()
locator.column = offset
self.onError(msg, locator)
return value
def _extractComposition(self, token, text, offset, composition):
# Extract the coefficient first:
# some files have junk in the element slot when the coefficient is 0
coef = text[offset+2:offset+5].strip()
# The coefficient could be blank, which means 0
if not coef: return
coef = self._extractFloat(token, text, offset+2, 3)
# Extract the element name
name = text[offset:offset+2].strip()
if name and coef:
element = self._mechanism.element(name)
if not element:
msg = "Element '%s' not declared in an ELEMENT section" % name
locator = self.locator()
locator.column = offset
self.onWarning(msg, locator)
composition.append( (name, coef) )
return
# version
__id__ = "$Id$"
#
# End of file
| en | 0.558607 | #!/usr/bin/env python # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # <NAME> # California Institute of Technology # (C) 1998-2003 All Rights Reserved # # <LicenseText> # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # the interesting tokens # is this the next valid thermo line in the sequence? # dispatch to appropriate line info parser # next valid thermo line is... # transitions # if not self._thermoAll: # msg = "this mechanism requires an external thermo database" # self.onWarning(msg) # other methods # Private data #line ids are zero-based # Parse the element coefficient in columns 24-43 (zero-based) # Get the phase # Get the temperature intervals # The extra possible element,coef pair # Save this information # extract the high T range parametrization # finish extracting the high T range parametrization # store in the species # extract the first part of the low T parameters # finish extracting the low T range parametrization # store in the species # Extract the coefficient first: # some files have junk in the element slot when the coefficient is 0 # The coefficient could be blank, which means 0 # Extract the element name # version # # End of file | 2.773103 | 3 |
game.py | cxong/Slappa | 7 | 6632768 | from game_object_factory import *
from game_time import *
from loader import *
from scale_manager import *
from state import *
from world import *
class Game(object):
def __init__(self, caption, width, height):
pygame.mixer.pre_init(frequency=44100, size=-16, channels=2, buffer=1024)
pygame.init()
pygame.display.set_caption(caption)
self.width = width
self.height = height
self.__paused = False
self.on_paused = None
self.on_resume = None
self.add = GameObjectFactory(self)
self.config = Config()
self.load = Loader()
self.scale = ScaleManager(self)
self.state = StateManager(self)
self.time = Time()
self.world = World()
if GCW_ZERO:
pygame.mouse.set_visible(False)
@property
def paused(self):
return self.__paused
@paused.setter
def paused(self, value):
if value == self.__paused:
return
self.__paused = value
if value:
if self.on_paused is not None:
self.on_paused()
# Do one last draw
self.state.active_state.draw_screen(self.state.screen)
else:
if self.on_resume is not None:
self.on_resume()
def __exit__(self, type, value, traceback):
pygame.mixer.quit()
pygame.quit()
| from game_object_factory import *
from game_time import *
from loader import *
from scale_manager import *
from state import *
from world import *
class Game(object):
def __init__(self, caption, width, height):
pygame.mixer.pre_init(frequency=44100, size=-16, channels=2, buffer=1024)
pygame.init()
pygame.display.set_caption(caption)
self.width = width
self.height = height
self.__paused = False
self.on_paused = None
self.on_resume = None
self.add = GameObjectFactory(self)
self.config = Config()
self.load = Loader()
self.scale = ScaleManager(self)
self.state = StateManager(self)
self.time = Time()
self.world = World()
if GCW_ZERO:
pygame.mouse.set_visible(False)
@property
def paused(self):
return self.__paused
@paused.setter
def paused(self, value):
if value == self.__paused:
return
self.__paused = value
if value:
if self.on_paused is not None:
self.on_paused()
# Do one last draw
self.state.active_state.draw_screen(self.state.screen)
else:
if self.on_resume is not None:
self.on_resume()
def __exit__(self, type, value, traceback):
pygame.mixer.quit()
pygame.quit()
| en | 0.791713 | # Do one last draw | 2.566073 | 3 |
slbooking/admin_actions.py | gocept/slbooking | 0 | 6632769 | <filename>slbooking/admin_actions.py<gh_stars>0
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for, session
)
from werkzeug.exceptions import abort
from werkzeug.security import check_password_hash, generate_password_hash
from slbooking.admin import admin_login_required
from slbooking.db import get_db
bp = Blueprint('admin_actions', __name__, url_prefix='/admin_actions')
@bp.route('/organize_rooms', methods=('GET', 'POST'))
@admin_login_required
def organize_rooms():
db = get_db()
rooms = db.execute('SELECT * FROM room').fetchall()
if request.method == 'POST':
name = request.form['name']
description = request.form['description']
picture = request.form['picture']
db.execute('INSERT INTO room (name, description, picture) VALUES \
(?, ?, ?)', (name, description, picture))
db.commit()
return redirect(url_for('admin_actions.organize_rooms'))
return render_template('admin/organize_rooms.html', rooms=rooms);
@bp.route('/edit_room', methods=('GET', 'POST'))
@admin_login_required
def edit_room():
room_id = request.args.get('room_id')
db = get_db()
room = db.execute('SELECT * FROM room WHERE id = (?)', (room_id,)).fetchone()
if request.method == 'POST':
name = request.form['room_name']
description = request.form['room_description']
picture = request.form['room_picture']
db.execute('UPDATE room SET name = (?), description = (?), picture = (?)\
WHERE id = (?)', (name, description, picture, room_id))
db.commit()
return redirect(url_for('admin_actions.organize_rooms'))
return render_template('admin/edit_room.html', room=room);
@bp.route('/delete_room', methods=('GET',))
@admin_login_required
def delete_room():
room_id = request.args.get('room_id')
db = get_db()
db.execute('DELETE FROM room WHERE id = (?)', (room_id,))
db.commit()
return redirect(url_for('admin_actions.organize_rooms'))
@bp.route('/organize_bookings', methods=('GET', 'POST'))
@admin_login_required
def organize_bookings():
db = get_db()
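    # join each booking with its room and user so the overview can show names instead of ids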
booked_rooms = db.execute('SELECT booking.id AS booking_id, room_id, \
user_id, b_checkin, b_checkout, room.name AS name,\
user.username AS username FROM booking\
INNER JOIN room ON booking.room_id = room.id\
INNER JOIN user on booking.user_id = user.id').fetchall()
return render_template('admin/organize_bookings.html',
booked_rooms=booked_rooms);
@bp.route('/delete_booking', methods=('GET',))
@admin_login_required
def delete_booking():
booking_id = request.args.get('booking_id')
db = get_db()
db.execute('DELETE FROM booking WHERE id = (?)', (booking_id,))
db.commit()
return redirect(url_for('admin_actions.organize_bookings'))
@bp.route('/organize_user', methods=('GET', 'POST'))
@admin_login_required
def organize_user():
db = get_db()
user = db.execute('SELECT * FROM user').fetchall()
if request.method == 'POST':
username = request.form['username']
mail = request.form['mail']
password = request.form['password']
db.execute('INSERT INTO user (username, mail, password) VALUES \
(?, ?, ?)', (username, mail, generate_password_hash(password)))
db.commit()
return redirect(url_for('admin_actions.organize_user'))
return render_template('admin/organize_user.html', user=user);
@bp.route('/edit_user', methods=('GET', 'POST'))
@admin_login_required
def edit_user():
user_id = request.args.get('user_id')
db = get_db()
user = db.execute('SELECT * FROM user WHERE id = (?)', (user_id,)).fetchone()
if request.method == 'POST':
username = request.form['username']
mail = request.form['mail']
password = request.form['password']
db.execute('UPDATE user SET username = (?), mail = (?), password = (?)\
WHERE id = (?)', (username, mail, generate_password_hash
(password), user_id))
db.commit()
return redirect(url_for('admin_actions.organize_user'))
return render_template('admin/edit_user.html', user=user);
@bp.route('/delete_user', methods=('GET',))
@admin_login_required
def delete_user():
user_id = request.args.get('user_id')
db = get_db()
db.execute('DELETE FROM user WHERE id = (?)', (user_id,))
db.commit()
return redirect(url_for('admin_actions.organize_user'))
@bp.route('/admin_index', methods=('GET', ))
@admin_login_required
def admin_index():
return render_template('admin/admin_index.html'); | <filename>slbooking/admin_actions.py<gh_stars>0
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for, session
)
from werkzeug.exceptions import abort
from werkzeug.security import check_password_hash, generate_password_hash
from slbooking.admin import admin_login_required
from slbooking.db import get_db
bp = Blueprint('admin_actions', __name__, url_prefix='/admin_actions')
@bp.route('/organize_rooms', methods=('GET', 'POST'))
@admin_login_required
def organize_rooms():
db = get_db()
rooms = db.execute('SELECT * FROM room').fetchall()
if request.method == 'POST':
name = request.form['name']
description = request.form['description']
picture = request.form['picture']
db.execute('INSERT INTO room (name, description, picture) VALUES \
(?, ?, ?)', (name, description, picture))
db.commit()
return redirect(url_for('admin_actions.organize_rooms'))
return render_template('admin/organize_rooms.html', rooms=rooms);
@bp.route('/edit_room', methods=('GET', 'POST'))
@admin_login_required
def edit_room():
room_id = request.args.get('room_id')
db = get_db()
room = db.execute('SELECT * FROM room WHERE id = (?)', (room_id,)).fetchone()
if request.method == 'POST':
name = request.form['room_name']
description = request.form['room_description']
picture = request.form['room_picture']
db.execute('UPDATE room SET name = (?), description = (?), picture = (?)\
WHERE id = (?)', (name, description, picture, room_id))
db.commit()
return redirect(url_for('admin_actions.organize_rooms'))
return render_template('admin/edit_room.html', room=room);
@bp.route('/delete_room', methods=('GET',))
@admin_login_required
def delete_room():
room_id = request.args.get('room_id')
db = get_db()
db.execute('DELETE FROM room WHERE id = (?)', (room_id,))
db.commit()
return redirect(url_for('admin_actions.organize_rooms'))
@bp.route('/organize_bookings', methods=('GET', 'POST'))
@admin_login_required
def organize_bookings():
db = get_db()
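    # join each booking with its room and user so the overview can show names instead of ids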
booked_rooms = db.execute('SELECT booking.id AS booking_id, room_id, \
user_id, b_checkin, b_checkout, room.name AS name,\
user.username AS username FROM booking\
INNER JOIN room ON booking.room_id = room.id\
INNER JOIN user on booking.user_id = user.id').fetchall()
return render_template('admin/organize_bookings.html',
booked_rooms=booked_rooms);
@bp.route('/delete_booking', methods=('GET',))
@admin_login_required
def delete_booking():
booking_id = request.args.get('booking_id')
db = get_db()
db.execute('DELETE FROM booking WHERE id = (?)', (booking_id,))
db.commit()
return redirect(url_for('admin_actions.organize_bookings'))
@bp.route('/organize_user', methods=('GET', 'POST'))
@admin_login_required
def organize_user():
db = get_db()
user = db.execute('SELECT * FROM user').fetchall()
if request.method == 'POST':
username = request.form['username']
mail = request.form['mail']
password = request.form['password']
db.execute('INSERT INTO user (username, mail, password) VALUES \
(?, ?, ?)', (username, mail, generate_password_hash(password)))
db.commit()
return redirect(url_for('admin_actions.organize_user'))
return render_template('admin/organize_user.html', user=user);
@bp.route('/edit_user', methods=('GET', 'POST'))
@admin_login_required
def edit_user():
user_id = request.args.get('user_id')
db = get_db()
user = db.execute('SELECT * FROM user WHERE id = (?)', (user_id,)).fetchone()
if request.method == 'POST':
username = request.form['username']
mail = request.form['mail']
password = request.form['password']
db.execute('UPDATE user SET username = (?), mail = (?), password = (?)\
WHERE id = (?)', (username, mail, generate_password_hash
(password), user_id))
db.commit()
return redirect(url_for('admin_actions.organize_user'))
return render_template('admin/edit_user.html', user=user);
@bp.route('/delete_user', methods=('GET',))
@admin_login_required
def delete_user():
user_id = request.args.get('user_id')
db = get_db()
db.execute('DELETE FROM user WHERE id = (?)', (user_id,))
db.commit()
return redirect(url_for('admin_actions.organize_user'))
@bp.route('/admin_index', methods=('GET', ))
@admin_login_required
def admin_index():
return render_template('admin/admin_index.html'); | none | 1 | 2.091374 | 2 |
|
enrich/followthemoney_enrich/cli.py | tendai-zw/followthemoney | 1 | 6632770 | import json
import click
import logging
from banal import is_mapping
from followthemoney.namespace import Namespace
from followthemoney.dedupe import Recon, EntityLinker
from followthemoney.cli.cli import cli
from followthemoney.cli.util import read_entity, write_object
from followthemoney_enrich import get_enricher, enricher_cache
from followthemoney_enrich.result import Result
log = logging.getLogger(__name__)
NS = Namespace(None)
ENRICHERS = {}
def load_enricher(name):
if name not in ENRICHERS:
clazz = get_enricher(name)
if clazz is None:
raise click.BadParameter("Unknown enricher: %s" % name)
enricher = clazz()
enricher.cache = enricher_cache()
ENRICHERS[name] = enricher
return ENRICHERS[name]
def read_result(stream):
line = stream.readline()
if not line:
return
data = json.loads(line)
if is_mapping(data) and 'enricher' in data:
enricher = load_enricher(data.get('enricher'))
return Result.from_dict(enricher, data)
return data
@cli.command('enrich', help="Find matching entities remotely")
@click.argument('enricher')
def enrich(enricher):
enricher = load_enricher(enricher)
try:
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
entity = read_entity(stdin)
if entity is None:
break
for result in enricher.enrich_entity(entity):
write_object(stdout, result)
except BrokenPipeError:
raise click.Abort()
finally:
enricher.close()
@cli.command('expand', help="Expand enriched entities")
@click.argument('enricher')
def expand(enricher):
enricher = load_enricher(enricher)
try:
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
entity = read_entity(stdin)
if entity is None:
break
result = enricher.expand_entity(entity)
write_object(stdout, result)
except BrokenPipeError:
raise click.Abort()
finally:
enricher.close()
@cli.command('auto-match', help="Generate result matches based purely on score") # noqa
@click.option('-t', '--threshold', type=float, default=0.8)
def auto_match(threshold):
try:
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
result = read_result(stdin)
if result is None:
break
if result.score > threshold:
recon = Recon(result.subject, result.candidate, Recon.MATCH)
write_object(stdout, recon)
except BrokenPipeError:
raise click.Abort()
@cli.command('apply-recon', help="Apply matches from a recon file") # noqa
@click.option('-r', '--recon', type=click.File('r'), required=True) # noqa
def apply_recon(recon):
try:
linker = EntityLinker()
for recon in Recon.from_file(recon):
if recon.judgement == Recon.MATCH:
linker.add(recon.subject, recon.canonical)
log.info("Linker: %s clusters.", len(linker.clusters))
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
entity = read_entity(stdin)
if entity is None:
break
entity = NS.apply(entity)
outgoing = linker.apply(entity)
if outgoing.id != entity.id:
outgoing.add('sameAs', entity.id, quiet=True)
write_object(stdout, outgoing)
except BrokenPipeError:
raise click.Abort()
@cli.command('filter-results', help="Filter results to those matching a recon") # noqa
@click.option('-r', '--recon', type=click.File('r'), required=True) # noqa
def filter_results(recon):
try:
matches = set()
for recon in Recon.from_file(recon):
if recon.judgement == Recon.MATCH:
matches.add(recon.subject)
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
result = read_result(stdin)
if result is None:
break
if result.candidate is None:
continue
candidate = NS.apply(result.candidate)
if candidate.id in matches:
write_object(stdout, result)
except BrokenPipeError:
raise click.Abort()
@cli.command('result-entities', help="Unnests results into entities")
def result_entities():
try:
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
result = read_result(stdin)
if result is None:
break
for entity in result.entities:
write_object(stdout, entity)
except BrokenPipeError:
raise click.Abort()
| import json
import click
import logging
from banal import is_mapping
from followthemoney.namespace import Namespace
from followthemoney.dedupe import Recon, EntityLinker
from followthemoney.cli.cli import cli
from followthemoney.cli.util import read_entity, write_object
from followthemoney_enrich import get_enricher, enricher_cache
from followthemoney_enrich.result import Result
log = logging.getLogger(__name__)
NS = Namespace(None)
ENRICHERS = {}
def load_enricher(name):
if name not in ENRICHERS:
clazz = get_enricher(name)
if clazz is None:
raise click.BadParameter("Unknown enricher: %s" % name)
enricher = clazz()
enricher.cache = enricher_cache()
ENRICHERS[name] = enricher
return ENRICHERS[name]
def read_result(stream):
line = stream.readline()
if not line:
return
data = json.loads(line)
if is_mapping(data) and 'enricher' in data:
enricher = load_enricher(data.get('enricher'))
return Result.from_dict(enricher, data)
return data
@cli.command('enrich', help="Find matching entities remotely")
@click.argument('enricher')
def enrich(enricher):
enricher = load_enricher(enricher)
try:
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
entity = read_entity(stdin)
if entity is None:
break
for result in enricher.enrich_entity(entity):
write_object(stdout, result)
except BrokenPipeError:
raise click.Abort()
finally:
enricher.close()
@cli.command('expand', help="Expand enriched entities")
@click.argument('enricher')
def expand(enricher):
enricher = load_enricher(enricher)
try:
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
entity = read_entity(stdin)
if entity is None:
break
result = enricher.expand_entity(entity)
write_object(stdout, result)
except BrokenPipeError:
raise click.Abort()
finally:
enricher.close()
@cli.command('auto-match', help="Generate result matches based purely on score") # noqa
@click.option('-t', '--threshold', type=float, default=0.8)
def auto_match(threshold):
try:
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
result = read_result(stdin)
if result is None:
break
if result.score > threshold:
recon = Recon(result.subject, result.candidate, Recon.MATCH)
write_object(stdout, recon)
except BrokenPipeError:
raise click.Abort()
@cli.command('apply-recon', help="Apply matches from a recon file") # noqa
@click.option('-r', '--recon', type=click.File('r'), required=True) # noqa
def apply_recon(recon):
try:
linker = EntityLinker()
for recon in Recon.from_file(recon):
if recon.judgement == Recon.MATCH:
linker.add(recon.subject, recon.canonical)
log.info("Linker: %s clusters.", len(linker.clusters))
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
entity = read_entity(stdin)
if entity is None:
break
entity = NS.apply(entity)
outgoing = linker.apply(entity)
if outgoing.id != entity.id:
outgoing.add('sameAs', entity.id, quiet=True)
write_object(stdout, outgoing)
except BrokenPipeError:
raise click.Abort()
@cli.command('filter-results', help="Filter results to those matching a recon") # noqa
@click.option('-r', '--recon', type=click.File('r'), required=True) # noqa
def filter_results(recon):
try:
matches = set()
for recon in Recon.from_file(recon):
if recon.judgement == Recon.MATCH:
matches.add(recon.subject)
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
result = read_result(stdin)
if result is None:
break
if result.candidate is None:
continue
candidate = NS.apply(result.candidate)
if candidate.id in matches:
write_object(stdout, result)
except BrokenPipeError:
raise click.Abort()
@cli.command('result-entities', help="Unnests results into entities")
def result_entities():
try:
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
while True:
result = read_result(stdin)
if result is None:
break
for entity in result.entities:
write_object(stdout, entity)
except BrokenPipeError:
raise click.Abort()
| uz | 0.44857 | # noqa # noqa # noqa # noqa # noqa | 2.055359 | 2 |
tabdoc/__init__.py | lpcheng1208/tabdoc | 1 | 6632771 | #!/usr/bin/env python3
# coding=utf-8
"""
@author: guoyanfeng
@software: PyCharm
@time: 19-3-20 下午6:29
"""
__version__ = "1.0.0b9"
from .tabexcel import *
from .tabpdf import *
from .tabword import *
| #!/usr/bin/env python3
# coding=utf-8
"""
@author: guoyanfeng
@software: PyCharm
@time: 19-3-20 下午6:29
"""
__version__ = "1.0.0b9"
from .tabexcel import *
from .tabpdf import *
from .tabword import *
| en | 0.147215 | #!/usr/bin/env python3 # coding=utf-8 @author: guoyanfeng @software: PyCharm @time: 19-3-20 下午6:29 | 1.048901 | 1 |
test/test_general.py | iansmcf/busybees | 0 | 6632772 | <gh_stars>0
import threading
import time
import sys
from busybees import worker
from busybees import hive
import pash
class ErrWorker(worker.Worker):
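    # minimal worker: runs the given shell command through pash and reports its exit code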
def work(self, command):
proc = pash.ShellProc()
proc.run(command)
return "Exit code: %s" % proc.get_val('exit_code')
def test_hive():
apiary = hive.Hive()
apiary.create_queen('A1')
apiary.create_queen('A2')
apiary.start_queen('A1')
apiary.start_queen('A2')
job1 = ["iscsiadm -m discovery -t st -p 192.168.88.110",
"iscsiadm -m discovery -t st -p 192.168.90.110",
"iscsiadm -m discovery -t st -p 192.168.88.110"]
apiary.instruct_queen('A1', job1, ErrWorker)
job2 = ["ls -l ~", "date", "cal"]
apiary.instruct_queen('A2', job2)
apiary.kill_queen('A1')
time.sleep(3)
results = apiary.die()
for key in results.keys():
for i in results[key]:
assert i != '' and i != None
| import threading
import time
import sys
from busybees import worker
from busybees import hive
import pash
class ErrWorker(worker.Worker):
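    # minimal worker: runs the given shell command through pash and reports its exit code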
def work(self, command):
proc = pash.ShellProc()
proc.run(command)
return "Exit code: %s" % proc.get_val('exit_code')
def test_hive():
apiary = hive.Hive()
apiary.create_queen('A1')
apiary.create_queen('A2')
apiary.start_queen('A1')
apiary.start_queen('A2')
job1 = ["iscsiadm -m discovery -t st -p 192.168.88.110",
"iscsiadm -m discovery -t st -p 192.168.90.110",
"iscsiadm -m discovery -t st -p 192.168.88.110"]
apiary.instruct_queen('A1', job1, ErrWorker)
job2 = ["ls -l ~", "date", "cal"]
apiary.instruct_queen('A2', job2)
apiary.kill_queen('A1')
time.sleep(3)
results = apiary.die()
for key in results.keys():
for i in results[key]:
assert i != '' and i != None | none | 1 | 2.234376 | 2 |
|
sold_anisotrop.py | shongi-yd/FEniCSopt | 0 | 6632773 | from dolfin import *
from scipy.optimize import minimize
import numpy as np
import time as pyt
import pprint
coth = lambda x: 1./np.tanh(x)
from fenicsopt.core.convdif import *
from fenicsopt.examples.sc_examples import sc_setup
import fenicsopt.exports.results as rs
################################################################################
SC_EXAMPLE = 1 # 8, 9, 20, 55
# Mesh
NUM_CELL = 33
#mesh = UnitSquareMesh(NUM_CELL,NUM_CELL)
mesh = Mesh('anisotrop.xml')
h = CellDiameter(mesh)
cell_volume = CellVolume(mesh)
DG0 = FunctionSpace(mesh, "DG", 0)
# Whole Boundary
def whole_boundary(x, on_boundary):
return on_boundary
cut_b_elem_dofs = get_boundary(mesh, DG0)
setup = { "V_TYPE": "CG", "V_DEGREE": 1, "W_TYPE": "DG", "W_DEGREE": 0 }
# Function Spaces on the mesh
V = FunctionSpace(mesh, setup["V_TYPE"], setup["V_DEGREE"])
v = TestFunction(V)
W = FunctionSpace(mesh, setup["W_TYPE"], setup["W_DEGREE"])
bc_V_zero = DirichletBC(V, 0., whole_boundary)
# Data
bcs, epsilon, c, b, f, u_exact = sc_setup(V, SC_EXAMPLE)
b_perp = as_vector([( b[1]/sqrt(b[0]**2+b[1]**2)),
(-b[0]/sqrt(b[0]**2+b[1]**2))]) # ! possible division by 0
# Basic Definitions
p = 1 # Constant(V.ufl_element().degree())
tau = compute_tau(W, h, p, epsilon, b)
uh = solve_supg(V, bcs, epsilon, b, c, f, tau)
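# SOLD step: derive a crosswind shock-capturing parameter tau2 from the SUPG solution, then re-solve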
tau2 = iterate_sold_cross(mesh, h, V, W, bcs, epsilon, b, b_perp, c, f, tau, uh, 0.9999)
uh = solve_sold_cross(V, bcs, epsilon, b, b_perp, c, f, tau, tau2)
one = project(1., V)
area = assemble(one*dx)
h_average = assemble(h*dx)/area
error_function = Function(V, assemble(abs(uh-u_exact)*v*dx))
l2_norm_of_error = norm(error_function, 'l2')
plot(uh)
results = []
rs.make_results_anisotrop('RESULTS/' + str(SC_EXAMPLE) + 'SOLD_ANISOTROP', mesh, V, W, uh, u_exact, tau2, 1., results)
| from dolfin import *
from scipy.optimize import minimize
import numpy as np
import time as pyt
import pprint
coth = lambda x: 1./np.tanh(x)
from fenicsopt.core.convdif import *
from fenicsopt.examples.sc_examples import sc_setup
import fenicsopt.exports.results as rs
################################################################################
SC_EXAMPLE = 1 # 8, 9, 20, 55
# Mesh
NUM_CELL = 33
#mesh = UnitSquareMesh(NUM_CELL,NUM_CELL)
mesh = Mesh('anisotrop.xml')
h = CellDiameter(mesh)
cell_volume = CellVolume(mesh)
DG0 = FunctionSpace(mesh, "DG", 0)
# Whole Boundary
def whole_boundary(x, on_boundary):
return on_boundary
cut_b_elem_dofs = get_boundary(mesh, DG0)
setup = { "V_TYPE": "CG", "V_DEGREE": 1, "W_TYPE": "DG", "W_DEGREE": 0 }
# Function Spaces on the mesh
V = FunctionSpace(mesh, setup["V_TYPE"], setup["V_DEGREE"])
v = TestFunction(V)
W = FunctionSpace(mesh, setup["W_TYPE"], setup["W_DEGREE"])
bc_V_zero = DirichletBC(V, 0., whole_boundary)
# Data
bcs, epsilon, c, b, f, u_exact = sc_setup(V, SC_EXAMPLE)
b_perp = as_vector([( b[1]/sqrt(b[0]**2+b[1]**2)),
(-b[0]/sqrt(b[0]**2+b[1]**2))]) # ! possible division by 0
# Basic Definitions
p = 1 # Constant(V.ufl_element().degree())
tau = compute_tau(W, h, p, epsilon, b)
uh = solve_supg(V, bcs, epsilon, b, c, f, tau)
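# SOLD step: derive a crosswind shock-capturing parameter tau2 from the SUPG solution, then re-solve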
tau2 = iterate_sold_cross(mesh, h, V, W, bcs, epsilon, b, b_perp, c, f, tau, uh, 0.9999)
uh = solve_sold_cross(V, bcs, epsilon, b, b_perp, c, f, tau, tau2)
one = project(1., V)
area = assemble(one*dx)
h_average = assemble(h*dx)/area
error_function = Function(V, assemble(abs(uh-u_exact)*v*dx))
l2_norm_of_error = norm(error_function, 'l2')
plot(uh)
results = []
rs.make_results_anisotrop('RESULTS/' + str(SC_EXAMPLE) + 'SOLD_ANISOTROP', mesh, V, W, uh, u_exact, tau2, 1., results)
| de | 0.293259 | ################################################################################ # 8, 9, 20, 55 # Mesh #mesh = UnitSquareMesh(NUM_CELL,NUM_CELL) # Whole Boundary # Function Spaces on the mesh # Data # ! possible division by 0 # Basic Definitions # Constant(V.ufl_element().degree()) | 2.202805 | 2 |
HackerRank/30 Days of Code/Day 20 - Sorting.py | anubhab-code/Competitive-Programming | 0 | 6632774 | import sys
n = int(input().strip())
a = list(map(int, input().strip().split(' ')))
numSwaps = 0
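# bubble sort with an early exit once a full pass makes no swaps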
for i in range(n):
currentSwaps = 0
for j in range(n-1):
if a[j] > a[j+1]:
tmp = a[j]
a[j] = a[j+1]
a[j+1] = tmp
numSwaps += 1
currentSwaps += 1
if currentSwaps == 0:
break
print('Array is sorted in ' + str(numSwaps) + ' swaps.')
print('First Element: ' + str(a[0]))
print('Last Element: ' + str(a[n-1]))
| import sys
n = int(input().strip())
a = list(map(int, input().strip().split(' ')))
numSwaps = 0
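# bubble sort with an early exit once a full pass makes no swaps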
for i in range(n):
currentSwaps = 0
for j in range(n-1):
if a[j] > a[j+1]:
tmp = a[j]
a[j] = a[j+1]
a[j+1] = tmp
numSwaps += 1
currentSwaps += 1
if currentSwaps == 0:
break
print('Array is sorted in ' + str(numSwaps) + ' swaps.')
print('First Element: ' + str(a[0]))
print('Last Element: ' + str(a[n-1]))
| none | 1 | 3.147838 | 3 |
|
test/mbedapi.py | yinglangli/ARMmbed_DAPLink | 0 | 6632775 | <reponame>yinglangli/ARMmbed_DAPLink
#
# DAPLink Interface Firmware
# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Usage example:
python mbedapi.py --repo http://developer.mbed.org/users/dan/code/pubtest/
--user dan --api http://developer.mbed.org --platform mbed-LPC1768
--destdir /tmp/ --debug 2
This will compile http://developer.mbed.org/users/dan/code/pubtest/
for the 1768 and download the result.
Examples of options:
--extra_symbols "foo=bar,x=y"
--replace_file "main.cpp:/tmp/replace_main.cpp"
(can be repeated)
"""
import os
import getpass
import json
import time
import requests
import logging
MBED_API_SERVER = 'https://os.mbed.com/'
def build_repo(user, password, repo, platform, destdir,
replace='', symbols='', clean=False, api=MBED_API_SERVER):
payload = {
'clean': clean,
'platform': platform,
'repo': repo,
'extra_symbols': symbols
}
if replace:
replace = []
for pair in replace:
dest = pair.split(':')[0]
src = pair.split(':')[1]
            print(dest)
cwd = os.getcwd()
srcfile = open(os.path.join(cwd, src), 'r')
replace.append({dest: srcfile.read()})
payload['replace'] = json.dumps(replace)
logging.debug("Payload is: %s" % payload)
auth = (user, password,)
# send task to api
logging.debug(api + "/api/v2/tasks/compiler/start/" + "| data: " +
str(payload))
r = requests.post(api + "/api/v2/tasks/compiler/start/",
data=payload, auth=auth)
logging.debug(r.content)
if r.status_code != 200:
raise Exception("Error while talking to the mbed API")
uuid = json.loads(r.content)['result']['data']['task_id']
logging.debug("Task accepted and given ID: %s" % uuid)
success = False
# poll for output
for check in range(0, 40):
logging.debug("Checking for output: cycle %s of %s" % (check, 10))
time.sleep(2)
r = requests.get(api + "/api/v2/tasks/compiler/output/%s" %
uuid, auth=auth)
logging.debug(r.content)
response = json.loads(r.content)
messages = response['result']['data']['new_messages']
percent = 0
for message in messages:
if message.get('message'):
if message.get('type') != 'debug':
logging.info("[%s] %s" % (message['type'],
message['message']))
if message.get('action'):
if message.get('percent'):
percent = message['percent']
logging.info("[%s%% - %s] %s " % (percent, message['action'],
message.get('file', '')))
if response['result']['data']['task_complete']:
logging.info("Task completed.")
success = response['result']['data']['compilation_success']
logging.info("Compile success: %s" % (success))
break
# now download
if success:
logging.info("Downloading your binary")
params = {
'repomode': True,
'program': response['result']['data']['program'],
'binary': response['result']['data']['binary'],
'task_id': uuid
}
r = requests.get(api + "/api/v2/tasks/compiler/bin/",
params=params, auth=auth)
destination = os.path.join(destdir,
response['result']['data']['binary'])
with open(destination, 'wb') as fd:
for chunk in r.iter_content(1024):
fd.write(chunk)
logging.info("Finished!")
else:
raise Exception("Failed to build platform %s" % platform)
return destination
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Build an mbed repository.')
parser.add_argument('--user', type=str, help='Your username on mbed.', required=True)
parser.add_argument('--password', type=str, help='Your password on mbed.', default=None, required=False)
parser.add_argument('--api', type=str, help='URL to API server', required=False, default=MBED_API_SERVER)
parser.add_argument('--repo', type=str, help='URL of repository to build.', required=True)
parser.add_argument('--platform', type=str, help='Platform name', required=True)
parser.add_argument('--destdir', type=str, help='Binary destination directory', required=True)
parser.add_argument('--replace_file', type=str, help='Replace file and build. Can be repeated. Syntax: remotepath:localpath', required=False, action='append')
parser.add_argument('--extra_symbols', type=str, help='Provide extra symbols to build system', required=False, action='append')
parser.add_argument('--clean', action='store_true', help='Force clean build')
parser.add_argument('--debug', help='Show debugging info', required=False)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if args.password is None:
        args.password = getpass.getpass('mbed password: ')
build_repo(args.user, args.password, args.repo, args.platform,
args.destdir, args.replace_file, args.extra_symbols,
args.clean, args.api)
| #
# DAPLink Interface Firmware
# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Usage example:
python mbedapi.py --repo http://developer.mbed.org/users/dan/code/pubtest/
--user dan --api http://developer.mbed.org --platform mbed-LPC1768
--destdir /tmp/ --debug 2
This will compile http://developer.mbed.org/users/dan/code/pubtest/
for the 1768 and download the result.
Examples of options:
--extra_symbols "foo=bar,x=y"
--replace_file "main.cpp:/tmp/replace_main.cpp"
(can be repeated)
"""
import os
import getpass
import json
import time
import requests
import logging
MBED_API_SERVER = 'https://os.mbed.com/'
def build_repo(user, password, repo, platform, destdir,
replace='', symbols='', clean=False, api=MBED_API_SERVER):
payload = {
'clean': clean,
'platform': platform,
'repo': repo,
'extra_symbols': symbols
}
if replace:
replace = []
for pair in replace:
dest = pair.split(':')[0]
src = pair.split(':')[1]
            print(dest)
cwd = os.getcwd()
srcfile = open(os.path.join(cwd, src), 'r')
replace.append({dest: srcfile.read()})
payload['replace'] = json.dumps(replace)
logging.debug("Payload is: %s" % payload)
auth = (user, password,)
# send task to api
logging.debug(api + "/api/v2/tasks/compiler/start/" + "| data: " +
str(payload))
r = requests.post(api + "/api/v2/tasks/compiler/start/",
data=payload, auth=auth)
logging.debug(r.content)
if r.status_code != 200:
raise Exception("Error while talking to the mbed API")
uuid = json.loads(r.content)['result']['data']['task_id']
logging.debug("Task accepted and given ID: %s" % uuid)
success = False
# poll for output
for check in range(0, 40):
logging.debug("Checking for output: cycle %s of %s" % (check, 10))
time.sleep(2)
r = requests.get(api + "/api/v2/tasks/compiler/output/%s" %
uuid, auth=auth)
logging.debug(r.content)
response = json.loads(r.content)
messages = response['result']['data']['new_messages']
percent = 0
for message in messages:
if message.get('message'):
if message.get('type') != 'debug':
logging.info("[%s] %s" % (message['type'],
message['message']))
if message.get('action'):
if message.get('percent'):
percent = message['percent']
logging.info("[%s%% - %s] %s " % (percent, message['action'],
message.get('file', '')))
if response['result']['data']['task_complete']:
logging.info("Task completed.")
success = response['result']['data']['compilation_success']
logging.info("Compile success: %s" % (success))
break
# now download
if success:
logging.info("Downloading your binary")
params = {
'repomode': True,
'program': response['result']['data']['program'],
'binary': response['result']['data']['binary'],
'task_id': uuid
}
r = requests.get(api + "/api/v2/tasks/compiler/bin/",
params=params, auth=auth)
destination = os.path.join(destdir,
response['result']['data']['binary'])
with open(destination, 'wb') as fd:
for chunk in r.iter_content(1024):
fd.write(chunk)
logging.info("Finished!")
else:
raise Exception("Failed to build platform %s" % platform)
return destination
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Build an mbed repository.')
parser.add_argument('--user', type=str, help='Your username on mbed.', required=True)
parser.add_argument('--password', type=str, help='Your password on mbed.', default=None, required=False)
parser.add_argument('--api', type=str, help='URL to API server', required=False, default=MBED_API_SERVER)
parser.add_argument('--repo', type=str, help='URL of repository to build.', required=True)
parser.add_argument('--platform', type=str, help='Platform name', required=True)
parser.add_argument('--destdir', type=str, help='Binary destination directory', required=True)
parser.add_argument('--replace_file', type=str, help='Replace file and build. Can be repeated. Syntax: remotepath:localpath', required=False, action='append')
parser.add_argument('--extra_symbols', type=str, help='Provide extra symbols to build system', required=False, action='append')
parser.add_argument('--clean', action='store_true', help='Force clean build')
parser.add_argument('--debug', help='Show debugging info', required=False)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if args.password is None:
        args.password = getpass.getpass('mbed password: ')
build_repo(args.user, args.password, args.repo, args.platform,
args.destdir, args.replace_file, args.extra_symbols,
args.clean, args.api) | en | 0.625233 | # # DAPLink Interface Firmware # Copyright (c) 2009-2016, ARM Limited, All Rights Reserved # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Usage example: python mbedapi.py --repo http://developer.mbed.org/users/dan/code/pubtest/ --user dan --api http://developer.mbed.org --platform mbed-LPC1768 --destdir /tmp/ --debug 2 This will compile http://developer.mbed.org/users/dan/code/pubtest/ for the 1768 and download the result. Examples of options: --extra_symbols "foo=bar,x=y" --replace_file "main.cpp:/tmp/replace_main.cpp" (can be repeated) # send task to api # poll for output # now download | 2.121347 | 2 |
ex0008.py | dev-cesaraugusto/Atividades-resolvidas-Python | 0 | 6632776 | <reponame>dev-cesaraugusto/Atividades-resolvidas-Python<filename>ex0008.py<gh_stars>0
'''Write a program that reads a value in meters and displays it converted to centimeters and millimeters'''
n1 = float(input('Escreva a medida: '))
c = n1 * 100
mi = n1 * 1000
print('{} metros é igual a {} centímetros e {} milímetros.'.format(n1,c,mi))
'''Write a program that reads a value in meters and displays it converted to centimeters and millimeters'''
n1 = float(input('Escreva a medida: '))
c = n1 * 100
mi = n1 * 1000
print('{} metros é igual a {} centímetros e {} milímetros.'.format(n1,c,mi)) | pt | 0.932663 | Write a program that reads a value in meters and displays it converted to centimeters and millimeters | 4.031972 | 4
FortressOfSolitude/_FortressOfSolitude/NeutrinoKey/forms.py | BDD16/FortressOfSolitude | 0 | 6632777 | """
DBA 1337_TECH, AUSTIN TEXAS © MAY 2021
Proof of Concept code, No liabilities or warranties expressed or implied.
"""
from django import forms
from django.core.exceptions import ValidationError
from .models import NewsLink, Startup, Tag, Tasking
from datetime import datetime
class DownloadFileForm(forms.Form):
url = forms.CharField(max_length=32)
| """
DBA 1337_TECH, AUSTIN TEXAS © MAY 2021
Proof of Concept code, No liabilities or warranties expressed or implied.
"""
from django import forms
from django.core.exceptions import ValidationError
from .models import NewsLink, Startup, Tag, Tasking
from datetime import datetime
class DownloadFileForm(forms.Form):
url = forms.CharField(max_length=32)
| en | 0.795638 | DBA 1337_TECH, AUSTIN TEXAS © MAY 2021 Proof of Concept code, No liabilities or warranties expressed or implied. | 1.782682 | 2 |
students/K33422/Izmaylova_Anna/web_lab2/2.2/django_project_Izmaylova/urls.py | Anna0102/ITMO_ICT_WebDevelopment_2021-2022 | 0 | 6632778 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
    path('', include('project_first_app.urls')), # this line pulls your application's separate urls file into the project (example6_app is your application's name, i.e. the folder name); urls points to the urls file inside the application folder (the empty file we created in step 9.a)
]
| from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
    path('', include('project_first_app.urls')), # this line pulls your application's separate urls file into the project (example6_app is your application's name, i.e. the folder name); urls points to the urls file inside the application folder (the empty file we created in step 9.a)
]
| ru | 0.99368 | # this line pulls your application's separate urls file into the project (example6_app is your application's name, i.e. the folder name); urls points to the urls file inside the application folder (the empty file we created in step 9.a) | 1.934251 | 2
config.py | RonquilloAeon/flask-starter | 0 | 6632779 | import os
DEBUG = os.environ.get('DEBUG', False)
| import os
DEBUG = os.environ.get('DEBUG', False)
| none | 1 | 1.219324 | 1 |
|
Python/code case/code case 164.py | amazing-2020/pdf | 3 | 6632780 | from random import randrange, shuffle
def bubble_sort():
array = []
while len(array) < 12:
array.append(randrange(-99, 101, 3))
shuffle(array)
print(array)
for i in range(12):
for j in range(11 - i):
if array[j] > array[j+1]:
array[j], array[j+1] = array[j+1], array[j]
print("After sort:", array)
bubble_sort()
| from random import randrange, shuffle
def bubble_sort():
array = []
while len(array) < 12:
array.append(randrange(-99, 101, 3))
shuffle(array)
print(array)
for i in range(12):
for j in range(11 - i):
if array[j] > array[j+1]:
array[j], array[j+1] = array[j+1], array[j]
print("After sort:", array)
bubble_sort()
| none | 1 | 4.205186 | 4 |
|
backend/stock_k_line.py | wuxianliang/StockBrain | 2 | 6632781 | from flask import jsonify
def get_s_k(k_data, stock_code):
if stock_code[0] == '6':
stock_code = stock_code + '.SH'
elif stock_code[0] == '0' or stock_code[0] == '3':
stock_code = stock_code + '.SZ'
stock = k_data.loc[k_data['S_INFO_WINDCODE'] == stock_code]
result = stock.sort_values(by="TRADE_DT").reset_index().T.to_json()
return result
| from flask import jsonify
def get_s_k(k_data, stock_code):
if stock_code[0] == '6':
stock_code = stock_code + '.SH'
elif stock_code[0] == '0' or stock_code[0] == '3':
stock_code = stock_code + '.SZ'
stock = k_data.loc[k_data['S_INFO_WINDCODE'] == stock_code]
result = stock.sort_values(by="TRADE_DT").reset_index().T.to_json()
return result
| none | 1 | 2.547966 | 3 |
|
boilerplate/app/controllers/default.py | davideasaf/effortless_rest_flask | 0 | 6632782 | <gh_stars>0
from flask import abort, jsonify, request
from flask_accepts import responds, accepts
from flask_praetorian import roles_required
from flask_restx import Namespace, Resource
from app import api, guard
from app.models import User
from app.schemas import UserSchema, UserLoginSchema
@api.route("/login")
class UserLoginResource(Resource):
@accepts(schema=UserLoginSchema, api=api)
@responds(dict(name="access_token", type=str), status_code=200, api=api)
def post(self):
# I can confidently access parsed_args based on @accepts criteria
# use request.parsed_obj for body
# use request.parsed_args for query params
username = request.parsed_obj["username"]
password = request.parsed_obj["password"]
user = guard.authenticate(username, password)
ret = {"access_token": guard.encode_jwt_token(user)}
return ret
| from flask import abort, jsonify, request
from flask_accepts import responds, accepts
from flask_praetorian import roles_required
from flask_restx import Namespace, Resource
from app import api, guard
from app.models import User
from app.schemas import UserSchema, UserLoginSchema
@api.route("/login")
class UserLoginResource(Resource):
@accepts(schema=UserLoginSchema, api=api)
@responds(dict(name="access_token", type=str), status_code=200, api=api)
def post(self):
# I can confidently access parsed_args based on @accepts criteria
# use request.parsed_obj for body
# use request.parsed_args for query params
username = request.parsed_obj["username"]
password = request.parsed_obj["password"]
user = guard.authenticate(username, password)
ret = {"access_token": guard.encode_jwt_token(user)}
return ret | en | 0.55521 | # I can confidently access parsed_args based on @accepts criteria # use request.parsed_obj for body # use request.parsed_args for query params | 2.430612 | 2 |
4. Organizational Widgets/paned_window.py | samujjwaal/Tkinter-GUI-Course | 0 | 6632783 | <reponame>samujjwaal/Tkinter-GUI-Course<gh_stars>0
from tkinter import *
from tkinter import ttk
root = Tk()
# paned window object,panes stacked next to each other
paned_window = ttk.PanedWindow(root, orient=HORIZONTAL)
# BOTH: to expand and fill entire space inside window
# expand: to allow panes to expand on resizing window
paned_window.pack(fill=BOTH, expand=True)
frame1 = ttk.Frame(paned_window, width=100, height=300, relief=SUNKEN)
frame2 = ttk.Frame(paned_window, width=400, height=300, relief=SUNKEN)
# add frames with scale weight
paned_window.add(frame1, weight=1)
paned_window.add(frame2, weight=4)
frame3 = ttk.Frame(paned_window, width=50, height=300, relief=SUNKEN)
# insert new pane between 0 and 1 panes
paned_window.insert(1, frame3)
# to no longer display pane at index 1
paned_window.forget(1)
root.mainloop()
| from tkinter import *
from tkinter import ttk
root = Tk()
# paned window object,panes stacked next to each other
paned_window = ttk.PanedWindow(root, orient=HORIZONTAL)
# BOTH: to expand and fill entire space inside window
# expand: to allow panes to expand on resizing window
paned_window.pack(fill=BOTH, expand=True)
frame1 = ttk.Frame(paned_window, width=100, height=300, relief=SUNKEN)
frame2 = ttk.Frame(paned_window, width=400, height=300, relief=SUNKEN)
# add frames with scale weight
paned_window.add(frame1, weight=1)
paned_window.add(frame2, weight=4)
frame3 = ttk.Frame(paned_window, width=50, height=300, relief=SUNKEN)
# insert new pane between 0 and 1 panes
paned_window.insert(1, frame3)
# to no longer display pane at index 1
paned_window.forget(1)
root.mainloop() | en | 0.862517 | # paned window object,panes stacked next to each other # BOTH: to expand and fill entire space inside window # expand: to allow panes to expand on resizing window # add frames with scale weight # insert new pane between 0 and 1 panes # to no longer display pane at index 1 | 4.237733 | 4 |
tests/test_0166-0167-0170-random-issues.py | colesbury/awkward-1.0 | 0 | 6632784 | <reponame>colesbury/awkward-1.0
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_0166_IndexedOptionArray():
array = ak.Array([[2, 3, 5], None, [], [7, 11], None, [13], None, [17, 19]])
assert ak.to_list(ak.prod(array, axis=-1)) == [30, None, 1, 77, None, 13, None, 323]
array = ak.Array(
[[[2, 3], [5]], None, [], [[7], [11]], None, [[13]], None, [[17, 19]]]
)
assert ak.to_list(ak.prod(array, axis=-1)) == [
[6, 5],
None,
[],
[7, 11],
None,
[13],
None,
[323],
]
array = ak.Array([[[2, 3], None, [5]], [], [[7], [11]], [[13]], [None, [17], [19]]])
assert ak.to_list(ak.prod(array, axis=-1)) == [
[6, None, 5],
[],
[7, 11],
[13],
[None, 17, 19],
]
array = ak.Array([[6, None, 5], [], [7, 11], [13], [None, 17, 19]])
assert ak.to_list(ak.prod(array, axis=-1)) == [30, 1, 77, 13, 323]
def test_0166_ByteMaskedArray():
content = ak.from_iter(
[[2, 3, 5], [999], [], [7, 11], [], [13], [123, 999], [17, 19]], highlevel=False
)
mask = ak.layout.Index8(np.array([0, 1, 0, 0, 1, 0, 1, 0], dtype=np.int8))
array = ak.Array(ak.layout.ByteMaskedArray(mask, content, valid_when=False))
assert ak.to_list(array) == [
[2, 3, 5],
None,
[],
[7, 11],
None,
[13],
None,
[17, 19],
]
assert ak.to_list(ak.prod(array, axis=-1)) == [30, None, 1, 77, None, 13, None, 323]
content = ak.from_iter(
[
[[2, 3], [5]],
[[999]],
[],
[[7], [11]],
[],
[[13]],
[[123], [999]],
[[17, 19]],
],
highlevel=False,
)
mask = ak.layout.Index8(np.array([0, 1, 0, 0, 1, 0, 1, 0], dtype=np.int8))
array = ak.Array(ak.layout.ByteMaskedArray(mask, content, valid_when=False))
assert ak.to_list(array) == [
[[2, 3], [5]],
None,
[],
[[7], [11]],
None,
[[13]],
None,
[[17, 19]],
]
assert ak.to_list(ak.prod(array, axis=-1)) == [
[6, 5],
None,
[],
[7, 11],
None,
[13],
None,
[323],
]
content = ak.from_iter(
[[2, 3], [999], [5], [7], [11], [13], [], [17], [19]], highlevel=False
)
mask = ak.layout.Index8(np.array([0, 1, 0, 0, 0, 0, 1, 0, 0], dtype=np.int8))
bytemasked = ak.layout.ByteMaskedArray(mask, content, valid_when=False)
offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9], dtype=np.int64))
array = ak.Array(ak.layout.ListOffsetArray64(offsets, bytemasked))
array = ak.Array([[[2, 3], None, [5]], [], [[7], [11]], [[13]], [None, [17], [19]]])
assert ak.to_list(ak.prod(array, axis=-1)) == [
[6, None, 5],
[],
[7, 11],
[13],
[None, 17, 19],
]
content = ak.from_iter([6, None, 5, 7, 11, 13, None, 17, 19], highlevel=False)
mask = ak.layout.Index8(np.array([0, 1, 0, 0, 0, 0, 1, 0, 0], dtype=np.int8))
bytemasked = ak.layout.ByteMaskedArray(mask, content, valid_when=False)
offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9], dtype=np.int64))
array = ak.Array(ak.layout.ListOffsetArray64(offsets, bytemasked))
assert ak.to_list(array) == [[6, None, 5], [], [7, 11], [13], [None, 17, 19]]
assert ak.to_list(ak.prod(array, axis=-1)) == [30, 1, 77, 13, 323]
def test_0167_strings():
array = ak.Array(["one", "two", "three", "two", "two", "one", "three"])
assert ak.to_list(array == "two") == [False, True, False, True, True, False, False]
assert ak.to_list("two" == array) == [False, True, False, True, True, False, False]
assert ak.to_list(array == ["two"]) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(["two"] == array) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(array == ak.Array(["two"])) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(ak.Array(["two"]) == array) == [
False,
True,
False,
True,
True,
False,
False,
]
array = ak.Array([["one", "two", "three"], [], ["two"], ["two", "one"], ["three"]])
assert ak.to_list(array == "two") == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list("two" == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(array == ["two"]) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(["two"] == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(array == ak.Array(["two"])) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(ak.Array(["two"]) == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
array = ak.Array([["one", "two", "three"], [], ["two"], ["two", "one"], ["three"]])
assert ak.to_list(array == ["three", "two", "one", "one", "three"]) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list(["three", "two", "one", "one", "three"] == array) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list(array == ak.Array(["three", "two", "one", "one", "three"])) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list(ak.Array(["three", "two", "one", "one", "three"]) == array) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
def test_0167_bytestrings():
array = ak.Array([b"one", b"two", b"three", b"two", b"two", b"one", b"three"])
assert ak.to_list(array == b"two") == [False, True, False, True, True, False, False]
assert ak.to_list(b"two" == array) == [False, True, False, True, True, False, False]
assert ak.to_list(array == [b"two"]) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list([b"two"] == array) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(array == ak.Array([b"two"])) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(ak.Array([b"two"]) == array) == [
False,
True,
False,
True,
True,
False,
False,
]
array = ak.Array(
[[b"one", b"two", b"three"], [], [b"two"], [b"two", b"one"], [b"three"]]
)
assert ak.to_list(array == b"two") == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(b"two" == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(array == [b"two"]) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list([b"two"] == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(array == ak.Array([b"two"])) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(ak.Array([b"two"]) == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
array = ak.Array(
[[b"one", b"two", b"three"], [], [b"two"], [b"two", b"one"], [b"three"]]
)
assert ak.to_list(array == [b"three", b"two", b"one", b"one", b"three"]) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list([b"three", b"two", b"one", b"one", b"three"] == array) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list(
array == ak.Array([b"three", b"two", b"one", b"one", b"three"])
) == [[False, False, True], [], [False], [False, True], [True]]
assert ak.to_list(
ak.Array([b"three", b"two", b"one", b"one", b"three"]) == array
) == [[False, False, True], [], [False], [False, True], [True]]
| # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_0166_IndexedOptionArray():
array = ak.Array([[2, 3, 5], None, [], [7, 11], None, [13], None, [17, 19]])
assert ak.to_list(ak.prod(array, axis=-1)) == [30, None, 1, 77, None, 13, None, 323]
array = ak.Array(
[[[2, 3], [5]], None, [], [[7], [11]], None, [[13]], None, [[17, 19]]]
)
assert ak.to_list(ak.prod(array, axis=-1)) == [
[6, 5],
None,
[],
[7, 11],
None,
[13],
None,
[323],
]
array = ak.Array([[[2, 3], None, [5]], [], [[7], [11]], [[13]], [None, [17], [19]]])
assert ak.to_list(ak.prod(array, axis=-1)) == [
[6, None, 5],
[],
[7, 11],
[13],
[None, 17, 19],
]
array = ak.Array([[6, None, 5], [], [7, 11], [13], [None, 17, 19]])
assert ak.to_list(ak.prod(array, axis=-1)) == [30, 1, 77, 13, 323]
def test_0166_ByteMaskedArray():
content = ak.from_iter(
[[2, 3, 5], [999], [], [7, 11], [], [13], [123, 999], [17, 19]], highlevel=False
)
mask = ak.layout.Index8(np.array([0, 1, 0, 0, 1, 0, 1, 0], dtype=np.int8))
array = ak.Array(ak.layout.ByteMaskedArray(mask, content, valid_when=False))
assert ak.to_list(array) == [
[2, 3, 5],
None,
[],
[7, 11],
None,
[13],
None,
[17, 19],
]
assert ak.to_list(ak.prod(array, axis=-1)) == [30, None, 1, 77, None, 13, None, 323]
content = ak.from_iter(
[
[[2, 3], [5]],
[[999]],
[],
[[7], [11]],
[],
[[13]],
[[123], [999]],
[[17, 19]],
],
highlevel=False,
)
mask = ak.layout.Index8(np.array([0, 1, 0, 0, 1, 0, 1, 0], dtype=np.int8))
array = ak.Array(ak.layout.ByteMaskedArray(mask, content, valid_when=False))
assert ak.to_list(array) == [
[[2, 3], [5]],
None,
[],
[[7], [11]],
None,
[[13]],
None,
[[17, 19]],
]
assert ak.to_list(ak.prod(array, axis=-1)) == [
[6, 5],
None,
[],
[7, 11],
None,
[13],
None,
[323],
]
content = ak.from_iter(
[[2, 3], [999], [5], [7], [11], [13], [], [17], [19]], highlevel=False
)
mask = ak.layout.Index8(np.array([0, 1, 0, 0, 0, 0, 1, 0, 0], dtype=np.int8))
bytemasked = ak.layout.ByteMaskedArray(mask, content, valid_when=False)
offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9], dtype=np.int64))
array = ak.Array(ak.layout.ListOffsetArray64(offsets, bytemasked))
array = ak.Array([[[2, 3], None, [5]], [], [[7], [11]], [[13]], [None, [17], [19]]])
assert ak.to_list(ak.prod(array, axis=-1)) == [
[6, None, 5],
[],
[7, 11],
[13],
[None, 17, 19],
]
content = ak.from_iter([6, None, 5, 7, 11, 13, None, 17, 19], highlevel=False)
mask = ak.layout.Index8(np.array([0, 1, 0, 0, 0, 0, 1, 0, 0], dtype=np.int8))
bytemasked = ak.layout.ByteMaskedArray(mask, content, valid_when=False)
offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9], dtype=np.int64))
array = ak.Array(ak.layout.ListOffsetArray64(offsets, bytemasked))
assert ak.to_list(array) == [[6, None, 5], [], [7, 11], [13], [None, 17, 19]]
assert ak.to_list(ak.prod(array, axis=-1)) == [30, 1, 77, 13, 323]
def test_0167_strings():
array = ak.Array(["one", "two", "three", "two", "two", "one", "three"])
assert ak.to_list(array == "two") == [False, True, False, True, True, False, False]
assert ak.to_list("two" == array) == [False, True, False, True, True, False, False]
assert ak.to_list(array == ["two"]) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(["two"] == array) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(array == ak.Array(["two"])) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(ak.Array(["two"]) == array) == [
False,
True,
False,
True,
True,
False,
False,
]
array = ak.Array([["one", "two", "three"], [], ["two"], ["two", "one"], ["three"]])
assert ak.to_list(array == "two") == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list("two" == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(array == ["two"]) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(["two"] == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(array == ak.Array(["two"])) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(ak.Array(["two"]) == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
array = ak.Array([["one", "two", "three"], [], ["two"], ["two", "one"], ["three"]])
assert ak.to_list(array == ["three", "two", "one", "one", "three"]) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list(["three", "two", "one", "one", "three"] == array) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list(array == ak.Array(["three", "two", "one", "one", "three"])) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list(ak.Array(["three", "two", "one", "one", "three"]) == array) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
def test_0167_bytestrings():
array = ak.Array([b"one", b"two", b"three", b"two", b"two", b"one", b"three"])
assert ak.to_list(array == b"two") == [False, True, False, True, True, False, False]
assert ak.to_list(b"two" == array) == [False, True, False, True, True, False, False]
assert ak.to_list(array == [b"two"]) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list([b"two"] == array) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(array == ak.Array([b"two"])) == [
False,
True,
False,
True,
True,
False,
False,
]
assert ak.to_list(ak.Array([b"two"]) == array) == [
False,
True,
False,
True,
True,
False,
False,
]
array = ak.Array(
[[b"one", b"two", b"three"], [], [b"two"], [b"two", b"one"], [b"three"]]
)
assert ak.to_list(array == b"two") == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(b"two" == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(array == [b"two"]) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list([b"two"] == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(array == ak.Array([b"two"])) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
assert ak.to_list(ak.Array([b"two"]) == array) == [
[False, True, False],
[],
[True],
[True, False],
[False],
]
array = ak.Array(
[[b"one", b"two", b"three"], [], [b"two"], [b"two", b"one"], [b"three"]]
)
assert ak.to_list(array == [b"three", b"two", b"one", b"one", b"three"]) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list([b"three", b"two", b"one", b"one", b"three"] == array) == [
[False, False, True],
[],
[False],
[False, True],
[True],
]
assert ak.to_list(
array == ak.Array([b"three", b"two", b"one", b"one", b"three"])
) == [[False, False, True], [], [False], [False, True], [True]]
assert ak.to_list(
ak.Array([b"three", b"two", b"one", b"one", b"three"]) == array
) == [[False, False, True], [], [False], [False, True], [True]] | en | 0.392246 | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE # noqa: F401 # noqa: F401 # noqa: F401 | 2.115391 | 2 |
src/api-engine/api/auth.py | tianxuanhong/cello | 0 | 6632785 | <filename>src/api-engine/api/auth.py
import logging
import os
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import authentication
from rest_framework.permissions import BasePermission
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_jwt.serializers import VerifyJSONWebTokenSerializer
from api.common.enums import UserRole
from api.models import UserProfile
LOG = logging.getLogger(__name__)
TOKEN_INFO_URL = getattr(settings, "TOKEN_INFO_URL", "")
SUPER_USER_TOKEN = os.environ.get("ADMIN_TOKEN", "")
ADMIN_NAME = os.getenv("ADMIN_USERNAME")
class CustomAuthenticate(authentication.BaseAuthentication):
def authenticate(self, request):
authorization = request.META.get("HTTP_AUTHORIZATION", None)
if not authorization or not authorization.startswith("JWT"):
return None
token = authorization.split(" ")[-1]
if token == SUPER_USER_TOKEN:
username = ADMIN_NAME
try:
user = UserProfile.objects.get(username=username)
except ObjectDoesNotExist:
return None
return user, None
else:
return None
class TokenAuth(authentication.BaseAuthentication):
def authenticate(self, request):
token = {"token": request.META.get('HTTP_AUTHORIZATION', None)}
valid_data = VerifyJSONWebTokenSerializer().validate(token)
user = valid_data['user']
organization = user.organization
#organization_id = user.organization.id
#organization_name = user.organization.name
#request.user.
        if user:
            return (user, None)
        else:
            raise AuthenticationFailed('Authentication failed')
class IsAdminAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.role == UserRole.Administrator.name.lower()
)
class IsOperatorAuthenticated(BasePermission):
"""
Allows access only to operators.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.role == UserRole.Operator.name.lower()
)
class IsSuperUserAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.is_authenticated
and request.user.is_super_user
)
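# Illustrative sketch (not part of the original module): the classes above follow
# Django REST Framework's pluggable authentication/permission interfaces, so a
# view would normally wire them together as below; the view itself is a made-up
# example, only TokenAuth and IsAdminAuthenticated come from this file.
from rest_framework.views import APIView
from rest_framework.response import Response
class ExampleAdminOnlyView(APIView):
    authentication_classes = (TokenAuth,)
    permission_classes = (IsAdminAuthenticated,)
    def get(self, request):
        # Reached only when the JWT validated and the user has the admin role.
        return Response({"username": request.user.username})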
| <filename>src/api-engine/api/auth.py
import logging
import os
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import authentication
from rest_framework.permissions import BasePermission
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_jwt.serializers import VerifyJSONWebTokenSerializer
from api.common.enums import UserRole
from api.models import UserProfile
LOG = logging.getLogger(__name__)
TOKEN_INFO_URL = getattr(settings, "TOKEN_INFO_URL", "")
SUPER_USER_TOKEN = os.environ.get("ADMIN_TOKEN", "")
ADMIN_NAME = os.getenv("ADMIN_USERNAME")
class CustomAuthenticate(authentication.BaseAuthentication):
def authenticate(self, request):
authorization = request.META.get("HTTP_AUTHORIZATION", None)
if not authorization or not authorization.startswith("JWT"):
return None
token = authorization.split(" ")[-1]
if token == SUPER_USER_TOKEN:
username = ADMIN_NAME
try:
user = UserProfile.objects.get(username=username)
except ObjectDoesNotExist:
return None
return user, None
else:
return None
class TokenAuth(authentication.BaseAuthentication):
def authenticate(self, request):
token = {"token": request.META.get('HTTP_AUTHORIZATION', None)}
valid_data = VerifyJSONWebTokenSerializer().validate(token)
user = valid_data['user']
organization = user.organization
#organization_id = user.organization.id
#organization_name = user.organization.name
#request.user.
        if user:
            return (user, None)
        else:
            raise AuthenticationFailed('Authentication failed')
class IsAdminAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.role == UserRole.Administrator.name.lower()
)
class IsOperatorAuthenticated(BasePermission):
"""
Allows access only to operators.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.role == UserRole.Operator.name.lower()
)
class IsSuperUserAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.is_authenticated
and request.user.is_super_user
)
| en | 0.770807 | #organization_id = user.organization.id #organization_name = user.organization.name #request.user. Allows access only to authenticated users. Allows access only to operators. Allows access only to authenticated users. | 2.219155 | 2 |
pyep11/ep11.py | quantumbitcoin/secure-bitcoin-wallet | 0 | 6632786 | <filename>pyep11/ep11.py
CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000
CKM_MD5_RSA_PKCS = 0x00000005
CKM_SHA256_RSA_PKCS = 0x00000040
CKM_ECDSA_SHA1 = 0x00001042
CKM_EC_KEY_PAIR_GEN = 0x00001040
CKM_AES_KEY_GEN = 0x00001080
CKM_AES_CBC = 0x00001082
CKM_AES_CBC_PAD = 0x00001085
CKA_CLASS = 0x00000000
CKA_TOKEN = 0x00000001
CKA_PRIVATE = 0x00000002
CKA_LABEL = 0x00000003
CKA_APPLICATION = 0x00000010
CKA_VALUE = 0x00000011
CKA_OBJECT_ID = 0x00000012
CKA_CERTIFICATE_TYPE = 0x00000080
CKA_ISSUER = 0x00000081
CKA_SERIAL_NUMBER = 0x00000082
CKA_AC_ISSUER = 0x00000083
CKA_OWNER = 0x00000084
CKA_ATTR_TYPES = 0x00000085
CKA_TRUSTED = 0x00000086
CKA_CERTIFICATE_CATEGORY = 0x00000087
CKA_JAVA_MIDP_SECURITY_DOMAIN = 0x00000088
CKA_URL = 0x00000089
CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x0000008A
CKA_HASH_OF_ISSUER_PUBLIC_KEY = 0x0000008B
CKA_NAME_HASH_ALGORITHM = 0x0000008C
CKA_CHECK_VALUE = 0x00000090
CKA_KEY_TYPE = 0x00000100
CKA_SUBJECT = 0x00000101
CKA_ID = 0x00000102
CKA_SENSITIVE = 0x00000103
CKA_ENCRYPT = 0x00000104
CKA_DECRYPT = 0x00000105
CKA_WRAP = 0x00000106
CKA_UNWRAP = 0x00000107
CKA_SIGN = 0x00000108
CKA_SIGN_RECOVER = 0x00000109
CKA_VERIFY = 0x0000010A
CKA_VERIFY_RECOVER = 0x0000010B
CKA_DERIVE = 0x0000010C
CKA_START_DATE = 0x00000110
CKA_END_DATE = 0x00000111
CKA_MODULUS = 0x00000120
CKA_MODULUS_BITS = 0x00000121
CKA_PUBLIC_EXPONENT = 0x00000122
CKA_PRIVATE_EXPONENT = 0x00000123
CKA_PRIME_1 = 0x00000124
CKA_PRIME_2 = 0x00000125
CKA_EXPONENT_1 = 0x00000126
CKA_EXPONENT_2 = 0x00000127
CKA_COEFFICIENT = 0x00000128
CKA_PUBLIC_KEY_INFO = 0x00000129
CKA_PRIME = 0x00000130
CKA_SUBPRIME = 0x00000131
CKA_BASE = 0x00000132
CKA_PRIME_BITS = 0x00000133
CKA_SUBPRIME_BITS = 0x00000134
CKA_SUB_PRIME_BITS = CKA_SUBPRIME_BITS
CKA_VALUE_BITS = 0x00000160
CKA_VALUE_LEN = 0x00000161
CKA_EXTRACTABLE = 0x00000162
CKA_LOCAL = 0x00000163
CKA_NEVER_EXTRACTABLE = 0x00000164
CKA_ALWAYS_SENSITIVE = 0x00000165
CKA_KEY_GEN_MECHANISM = 0x00000166
CKM_SHA_1 = 0x00000220
# CKA_VALUE_LEN
# CKA_WRAP
# CKA_UNWRAP
# CKA_ENCRYPT
# CKA_DECRYPT,
# CKA_EXTRACTABLE
# CKA_TOKEN
CKR_OK = 0x00000000
AES_BLOCK_SIZE = 16
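# Illustrative sketch (not part of the original file): attribute constants like
# the ones above are normally combined into a PKCS#11-style template that is
# passed alongside a mechanism such as CKM_AES_KEY_GEN; the template below is a
# made-up example for a 256-bit AES session key, not an API of this package.
EXAMPLE_AES_KEY_TEMPLATE = {
    CKA_VALUE_LEN: 32,       # 256-bit key
    CKA_ENCRYPT: True,
    CKA_DECRYPT: True,
    CKA_WRAP: True,
    CKA_UNWRAP: True,
    CKA_EXTRACTABLE: False,  # keep the key material inside the HSM
    CKA_TOKEN: False,        # session object, not a persistent token object
}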
| <filename>pyep11/ep11.py
CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000
CKM_MD5_RSA_PKCS = 0x00000005
CKM_SHA256_RSA_PKCS = 0x00000040
CKM_ECDSA_SHA1 = 0x00001042
CKM_EC_KEY_PAIR_GEN = 0x00001040
CKM_AES_KEY_GEN = 0x00001080
CKM_AES_CBC = 0x00001082
CKM_AES_CBC_PAD = 0x00001085
CKA_CLASS = 0x00000000
CKA_TOKEN = 0x00000001
CKA_PRIVATE = 0x00000002
CKA_LABEL = 0x00000003
CKA_APPLICATION = 0x00000010
CKA_VALUE = 0x00000011
CKA_OBJECT_ID = 0x00000012
CKA_CERTIFICATE_TYPE = 0x00000080
CKA_ISSUER = 0x00000081
CKA_SERIAL_NUMBER = 0x00000082
CKA_AC_ISSUER = 0x00000083
CKA_OWNER = 0x00000084
CKA_ATTR_TYPES = 0x00000085
CKA_TRUSTED = 0x00000086
CKA_CERTIFICATE_CATEGORY = 0x00000087
CKA_JAVA_MIDP_SECURITY_DOMAIN = 0x00000088
CKA_URL = 0x00000089
CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x0000008A
CKA_HASH_OF_ISSUER_PUBLIC_KEY = 0x0000008B
CKA_NAME_HASH_ALGORITHM = 0x0000008C
CKA_CHECK_VALUE = 0x00000090
CKA_KEY_TYPE = 0x00000100
CKA_SUBJECT = 0x00000101
CKA_ID = 0x00000102
CKA_SENSITIVE = 0x00000103
CKA_ENCRYPT = 0x00000104
CKA_DECRYPT = 0x00000105
CKA_WRAP = 0x00000106
CKA_UNWRAP = 0x00000107
CKA_SIGN = 0x00000108
CKA_SIGN_RECOVER = 0x00000109
CKA_VERIFY = 0x0000010A
CKA_VERIFY_RECOVER = 0x0000010B
CKA_DERIVE = 0x0000010C
CKA_START_DATE = 0x00000110
CKA_END_DATE = 0x00000111
CKA_MODULUS = 0x00000120
CKA_MODULUS_BITS = 0x00000121
CKA_PUBLIC_EXPONENT = 0x00000122
CKA_PRIVATE_EXPONENT = 0x00000123
CKA_PRIME_1 = 0x00000124
CKA_PRIME_2 = 0x00000125
CKA_EXPONENT_1 = 0x00000126
CKA_EXPONENT_2 = 0x00000127
CKA_COEFFICIENT = 0x00000128
CKA_PUBLIC_KEY_INFO = 0x00000129
CKA_PRIME = 0x00000130
CKA_SUBPRIME = 0x00000131
CKA_BASE = 0x00000132
CKA_PRIME_BITS = 0x00000133
CKA_SUBPRIME_BITS = 0x00000134
CKA_SUB_PRIME_BITS = CKA_SUBPRIME_BITS
CKA_VALUE_BITS = 0x00000160
CKA_VALUE_LEN = 0x00000161
CKA_EXTRACTABLE = 0x00000162
CKA_LOCAL = 0x00000163
CKA_NEVER_EXTRACTABLE = 0x00000164
CKA_ALWAYS_SENSITIVE = 0x00000165
CKA_KEY_GEN_MECHANISM = 0x00000166
CKM_SHA_1 = 0x00000220
# CKA_VALUE_LEN
# CKA_WRAP
# CKA_UNWRAP
# CKA_ENCRYPT
# CKA_DECRYPT,
# CKA_EXTRACTABLE
# CKA_TOKEN
CKR_OK = 0x00000000
AES_BLOCK_SIZE = 16
| en | 0.339591 | # CKA_VALUE_LEN # CKA_WRAP # CKA_UNWRAP # CKA_ENCRYPT # CKA_DECRYPT, # CKA_EXTRACTABLE # CKA_TOKEN | 1.422128 | 1 |
pyleecan/Methods/Machine/LamSlotWind/comp_mmf_unit.py | EmileDvs/pyleecan | 95 | 6632787 | # -*- coding: utf-8 -*-
from numpy import pi, linspace, zeros, ones, dot, squeeze
from SciDataTool import Data1D, DataTime, Norm_ref
from ....Functions.Electrical.coordinate_transformation import dq2n
from ....Functions.Winding.gen_phase_list import gen_name
from pyleecan.Classes.Winding import Winding
def comp_mmf_unit(self, Na=None, Nt=None, freq=1):
"""Compute the winding Unit magnetomotive force
Parameters
----------
self : LamSlotWind
an LamSlotWind object
Na : int
Space discretization for offline computation (otherwise use out.elec.angle)
Nt : int
Time discretization for offline computation (otherwise use out.elec.time)
freq : float
Stator current frequency to consider
Returns
-------
MMF_U : SciDataTool.Classes.DataND.DataND
Unit magnetomotive force (Na,Nt)
WF : SciDataTool.Classes.DataND.DataND
Winding functions (qs,Na)
"""
# Get stator winding number of phases
qs = self.winding.qs
# Get number of pole pairs
p = self.get_pole_pair_number()
# Get spatial symmetry
per_a, _, _, _ = self.comp_periodicity(p=p)
# Define the space dicretization
angle = linspace(0, 2 * pi / per_a, Na, endpoint=False)
# Define the time dicretization
time = linspace(0, 1 / freq, Nt, endpoint=False)
# Compute the winding function and mmf
if self.winding is None or self.winding.conductor is None:
wf = zeros((qs, Na))
else:
wf = self.comp_wind_function(angle=angle, per_a=per_a)
# Compute unit current function of time applying constant Id=1 Arms, Iq=0
Idq = zeros((Nt, 2))
Idq[:, 0] = ones(Nt)
I = dq2n(Idq, 2 * pi * freq * time, n=qs, is_n_rms=False)
# Compute unit mmf
mmf_u = squeeze(dot(I, wf))
# Create a Data object
Time = Data1D(name="time", unit="s", values=time)
Angle = Data1D(
name="angle",
unit="rad",
symmetries={"period": per_a},
values=angle,
normalizations={"space_order": Norm_ref(ref=self.get_pole_pair_number())},
)
Phase = Data1D(
name="phase",
unit="",
values=gen_name(qs),
is_components=True,
)
MMF_U = DataTime(
name="Unit MMF",
unit="A",
symbol="Magnitude",
axes=[Time, Angle],
values=mmf_u,
)
WF = DataTime(
name="Winding Functions",
unit="A",
symbol="Magnitude",
axes=[Phase, Angle],
values=wf,
)
return MMF_U, WF
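# Illustrative usage sketch (not part of the original file): comp_mmf_unit is an
# instance method, so it is normally reached through an existing lamination such
# as machine.stator from a loaded pyleecan machine; the Na/Nt/freq values below
# are arbitrary examples.
#
#     MMF_U, WF = machine.stator.comp_mmf_unit(Na=2048, Nt=64, freq=50)
#
# MMF_U then holds the unit magnetomotive force over (time, angle) and WF the
# per-phase winding functions, as described in the docstring above.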
| # -*- coding: utf-8 -*-
from numpy import pi, linspace, zeros, ones, dot, squeeze
from SciDataTool import Data1D, DataTime, Norm_ref
from ....Functions.Electrical.coordinate_transformation import dq2n
from ....Functions.Winding.gen_phase_list import gen_name
from pyleecan.Classes.Winding import Winding
def comp_mmf_unit(self, Na=None, Nt=None, freq=1):
"""Compute the winding Unit magnetomotive force
Parameters
----------
self : LamSlotWind
an LamSlotWind object
Na : int
Space discretization for offline computation (otherwise use out.elec.angle)
Nt : int
Time discretization for offline computation (otherwise use out.elec.time)
freq : float
Stator current frequency to consider
Returns
-------
MMF_U : SciDataTool.Classes.DataND.DataND
Unit magnetomotive force (Na,Nt)
WF : SciDataTool.Classes.DataND.DataND
Winding functions (qs,Na)
"""
# Get stator winding number of phases
qs = self.winding.qs
# Get number of pole pairs
p = self.get_pole_pair_number()
# Get spatial symmetry
per_a, _, _, _ = self.comp_periodicity(p=p)
# Define the space dicretization
angle = linspace(0, 2 * pi / per_a, Na, endpoint=False)
# Define the time dicretization
time = linspace(0, 1 / freq, Nt, endpoint=False)
# Compute the winding function and mmf
if self.winding is None or self.winding.conductor is None:
wf = zeros((qs, Na))
else:
wf = self.comp_wind_function(angle=angle, per_a=per_a)
# Compute unit current function of time applying constant Id=1 Arms, Iq=0
Idq = zeros((Nt, 2))
Idq[:, 0] = ones(Nt)
I = dq2n(Idq, 2 * pi * freq * time, n=qs, is_n_rms=False)
# Compute unit mmf
mmf_u = squeeze(dot(I, wf))
# Create a Data object
Time = Data1D(name="time", unit="s", values=time)
Angle = Data1D(
name="angle",
unit="rad",
symmetries={"period": per_a},
values=angle,
normalizations={"space_order": Norm_ref(ref=self.get_pole_pair_number())},
)
Phase = Data1D(
name="phase",
unit="",
values=gen_name(qs),
is_components=True,
)
MMF_U = DataTime(
name="Unit MMF",
unit="A",
symbol="Magnitude",
axes=[Time, Angle],
values=mmf_u,
)
WF = DataTime(
name="Winding Functions",
unit="A",
symbol="Magnitude",
axes=[Phase, Angle],
values=wf,
)
return MMF_U, WF
| en | 0.508713 | # -*- coding: utf-8 -*- Compute the winding Unit magnetomotive force Parameters ---------- self : LamSlotWind an LamSlotWind object Na : int Space discretization for offline computation (otherwise use out.elec.angle) Nt : int Time discretization for offline computation (otherwise use out.elec.time) freq : float Stator current frequency to consider Returns ------- MMF_U : SciDataTool.Classes.DataND.DataND Unit magnetomotive force (Na,Nt) WF : SciDataTool.Classes.DataND.DataND Winding functions (qs,Na) # Get stator winding number of phases # Get number of pole pairs # Get spatial symmetry # Define the space dicretization # Define the time dicretization # Compute the winding function and mmf # Compute unit current function of time applying constant Id=1 Arms, Iq=0 # Compute unit mmf # Create a Data object | 2.626157 | 3 |
solum-6.0.0/solum/tests/deployer/handlers/test_noop.py | scottwedge/OpenStack-Stein | 0 | 6632788 | <reponame>scottwedge/OpenStack-Stein
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.deployer.handlers import noop as noop_handler
from solum.i18n import _
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
class HandlerTest(base.BaseTestCase):
def setUp(self):
super(HandlerTest, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_echo(self, fake_LOG):
noop_handler.Handler().echo({}, 'foo')
fake_LOG.debug.assert_called_once_with(_('%s') % 'foo')
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_deploy(self, fake_LOG):
args = [77, 'created_image_id', [80]]
noop_handler.Handler().deploy(self.ctx, *args)
message = 'Deploy %s %s %s' % tuple(args)
fake_LOG.debug.assert_called_once_with(_("%s") % message)
@mock.patch('solum.objects.registry')
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_destroy(self, fake_LOG, fake_registry):
fake_assembly = fakes.FakeAssembly()
fake_registry.Assembly.get_by_id.return_value = fake_assembly
args = [fake_assembly.id]
noop_handler.Handler().destroy_assembly(self.ctx, *args)
fake_assembly.destroy.assert_called_once_with(self.ctx)
message = 'Destroy %s' % tuple(args)
fake_LOG.debug.assert_called_once_with(_("%s") % message)
| # Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.deployer.handlers import noop as noop_handler
from solum.i18n import _
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
class HandlerTest(base.BaseTestCase):
def setUp(self):
super(HandlerTest, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_echo(self, fake_LOG):
noop_handler.Handler().echo({}, 'foo')
fake_LOG.debug.assert_called_once_with(_('%s') % 'foo')
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_deploy(self, fake_LOG):
args = [77, 'created_image_id', [80]]
noop_handler.Handler().deploy(self.ctx, *args)
message = 'Deploy %s %s %s' % tuple(args)
fake_LOG.debug.assert_called_once_with(_("%s") % message)
@mock.patch('solum.objects.registry')
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_destroy(self, fake_LOG, fake_registry):
fake_assembly = fakes.FakeAssembly()
fake_registry.Assembly.get_by_id.return_value = fake_assembly
args = [fake_assembly.id]
noop_handler.Handler().destroy_assembly(self.ctx, *args)
fake_assembly.destroy.assert_called_once_with(self.ctx)
message = 'Destroy %s' % tuple(args)
fake_LOG.debug.assert_called_once_with(_("%s") % message) | en | 0.84661 | # Copyright 2014 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.856271 | 2 |
Andromeda/run.py | Kittex0/Andromeda | 0 | 6632789 | <filename>Andromeda/run.py
while True:
qs = input("> ")
with open("Responses.txt", "r") as file:
data = file.readlines() # reads it line by line
for i in data:
if qs == i.split(" = ")[0]:
answer = i.split(" = ")[1]
print(answer)
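# Illustrative note (not part of the original script): Responses.txt is expected
# to hold one "question = answer" pair per line, matching the split above, e.g.:
#   hello = Hi there!
#   how are you = I'm doing fine, thanks.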
| <filename>Andromeda/run.py
while True:
qs = input("> ")
with open("Responses.txt", "r") as file:
data = file.readlines() # reads it line by line
for i in data:
if qs == i.split(" = ")[0]:
answer = i.split(" = ")[1]
print(answer)
| en | 0.995266 | # reads it line by line | 3.113677 | 3 |
stock_forecast.py | whitecat-22/prophet | 0 | 6632790 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation
from fbprophet.diagnostics import performance_metrics
from fbprophet.plot import plot_cross_validation_metric
data = pd.DataFrame()
args = sys.argv
file_name = args[1]  # read the data file given on the command line
data2 = pd.read_csv(file_name, skiprows=1, header=None, names=['ds','Open','High','Low','Close','Adj_Close','Volume'])
data3 = data2.dropna(how='any')
data = data.append(data3)
plt.style.use('ggplot')
fig = plt.figure(figsize=(18,8))
ax_Nikkei = fig.add_subplot(224)
ax_Nikkei.plot(data.loc["1965-04-21": ,['Adj_Close']], label='Nikkei', color='r')
ax_Nikkei.set_title('NIKKEI')
data['Adj_Close_log'] = np.log(data.Adj_Close).diff()
data.head()
data.tail()
fig = plt.figure(figsize=(18,8))
ax_Nikkei_log = fig.add_subplot(224)
ax_Nikkei_log.plot(data.loc["1965-04-21": ,['Adj_Close_log']], label='log diff', color='b')
ax_Nikkei_log.set_title('log earning rate')
model = Prophet()
model.fit(data.rename(columns={'Adj_Close':'y'}))
future_data = model.make_future_dataframe(periods=365, freq= 'd')
forecast_data = model.predict(future_data)
fig = model.plot(forecast_data)
df_cv = cross_validation(model, initial='730 days', period='180 days', horizon = '365 days')
df_cv.head()
df_p = performance_metrics(df_cv)
df_p.head()
# Redefine the MSE plotting function for debugging purposes
def plot_cross_validation_metric(
df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)
):
"""Plot a performance metric vs. forecast horizon from cross validation.
Cross validation produces a collection of out-of-sample model predictions
that can be compared to actual values, at a range of different horizons
(distance from the cutoff). This computes a specified performance metric
for each prediction, and aggregated over a rolling window with horizon.
This uses fbprophet.diagnostics.performance_metrics to compute the metrics.
Valid values of metric are 'mse', 'rmse', 'mae', 'mape', and 'coverage'.
rolling_window is the proportion of data included in the rolling window of
aggregation. The default value of 0.1 means 10% of data are included in the
aggregation for computing the metric.
As a concrete example, if metric='mse', then this plot will show the
squared error for each cross validation prediction, along with the MSE
averaged over rolling windows of 10% of the data.
Parameters
----------
df_cv: The output from fbprophet.diagnostics.cross_validation.
metric: Metric name, one of ['mse', 'rmse', 'mae', 'mape', 'coverage'].
rolling_window: Proportion of data to use for rolling average of metric.
In [0, 1]. Defaults to 0.1.
ax: Optional matplotlib axis on which to plot. If not given, a new figure
will be created.
Returns
-------
a matplotlib figure.
"""
if ax is None:
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
# Get the metric at the level of individual predictions, and with the rolling window.
df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=0)
df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window)
# Some work because matplotlib does not handle timedelta
# Target ~10 ticks.
tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10.
# Find the largest time resolution that has <1 unit per bin.
dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns']
dt_names = [
'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds',
'nanoseconds'
]
dt_conversions = [
24 * 60 * 60 * 10 ** 9,
60 * 60 * 10 ** 9,
60 * 10 ** 9,
10 ** 9,
10 ** 6,
10 ** 3,
1.,
]
for i, dt in enumerate(dts):
if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'):
break
x_plt = df_none['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])
x_plt_h = df_h['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])
ax.plot(x_plt, df_none[metric], '.', alpha=0.5, c='gray')
ax.plot(x_plt_h, df_h[metric], '-', c='b')
ax.grid(True)
ax.set_xlabel('Horizon ({})'.format(dt_names[i]))
ax.set_ylabel(metric)
return fig
# Modeling the Nikkei average price as-is (MSE is high, so the fit is poor)
fig = plot_cross_validation_metric(df_cv, metric='mse')  # plot the MSE
model2 = Prophet()
model2.fit(data.rename(columns={'Adj_Close_log':'y'}))
future_data2 = model2.make_future_dataframe(periods=365, freq= 'd')
forecast_data2 = model2.predict(future_data2)
fig2 = model2.plot(forecast_data2)
df_cv2 = cross_validation(model2, initial='730 days', period='180 days', horizon = '365 days')
df_cv2.head()
df_p2 = performance_metrics(df_cv2)
df_p2.head()
# Modeling the log rate of return instead of the price (the fit is good)
fig = plot_cross_validation_metric(df_cv2, metric='mse')  # plot the MSE
plt.show()
| import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation
from fbprophet.diagnostics import performance_metrics
from fbprophet.plot import plot_cross_validation_metric
data = pd.DataFrame()
args = sys.argv
file_name = args[1]  # read the data file given on the command line
data2 = pd.read_csv(file_name, skiprows=1, header=None, names=['ds','Open','High','Low','Close','Adj_Close','Volume'])
data3 = data2.dropna(how='any')
data = data.append(data3)
plt.style.use('ggplot')
fig = plt.figure(figsize=(18,8))
ax_Nikkei = fig.add_subplot(224)
ax_Nikkei.plot(data.loc["1965-04-21": ,['Adj_Close']], label='Nikkei', color='r')
ax_Nikkei.set_title('NIKKEI')
data['Adj_Close_log'] = np.log(data.Adj_Close).diff()
data.head()
data.tail()
fig = plt.figure(figsize=(18,8))
ax_Nikkei_log = fig.add_subplot(224)
ax_Nikkei_log.plot(data.loc["1965-04-21": ,['Adj_Close_log']], label='log diff', color='b')
ax_Nikkei_log.set_title('log earning rate')
model = Prophet()
model.fit(data.rename(columns={'Adj_Close':'y'}))
future_data = model.make_future_dataframe(periods=365, freq= 'd')
forecast_data = model.predict(future_data)
fig = model.plot(forecast_data)
df_cv = cross_validation(model, initial='730 days', period='180 days', horizon = '365 days')
df_cv.head()
df_p = performance_metrics(df_cv)
df_p.head()
# Redefine the MSE plotting function for debugging purposes
def plot_cross_validation_metric(
df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)
):
"""Plot a performance metric vs. forecast horizon from cross validation.
Cross validation produces a collection of out-of-sample model predictions
that can be compared to actual values, at a range of different horizons
(distance from the cutoff). This computes a specified performance metric
for each prediction, and aggregated over a rolling window with horizon.
This uses fbprophet.diagnostics.performance_metrics to compute the metrics.
Valid values of metric are 'mse', 'rmse', 'mae', 'mape', and 'coverage'.
rolling_window is the proportion of data included in the rolling window of
aggregation. The default value of 0.1 means 10% of data are included in the
aggregation for computing the metric.
As a concrete example, if metric='mse', then this plot will show the
squared error for each cross validation prediction, along with the MSE
averaged over rolling windows of 10% of the data.
Parameters
----------
df_cv: The output from fbprophet.diagnostics.cross_validation.
metric: Metric name, one of ['mse', 'rmse', 'mae', 'mape', 'coverage'].
rolling_window: Proportion of data to use for rolling average of metric.
In [0, 1]. Defaults to 0.1.
ax: Optional matplotlib axis on which to plot. If not given, a new figure
will be created.
Returns
-------
a matplotlib figure.
"""
if ax is None:
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
# Get the metric at the level of individual predictions, and with the rolling window.
df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=0)
df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window)
# Some work because matplotlib does not handle timedelta
# Target ~10 ticks.
tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10.
# Find the largest time resolution that has <1 unit per bin.
dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns']
dt_names = [
'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds',
'nanoseconds'
]
dt_conversions = [
24 * 60 * 60 * 10 ** 9,
60 * 60 * 10 ** 9,
60 * 10 ** 9,
10 ** 9,
10 ** 6,
10 ** 3,
1.,
]
for i, dt in enumerate(dts):
if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'):
break
x_plt = df_none['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])
x_plt_h = df_h['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])
ax.plot(x_plt, df_none[metric], '.', alpha=0.5, c='gray')
ax.plot(x_plt_h, df_h[metric], '-', c='b')
ax.grid(True)
ax.set_xlabel('Horizon ({})'.format(dt_names[i]))
ax.set_ylabel(metric)
return fig
# Modeling the Nikkei average price as-is (MSE is high, so the fit is poor)
fig = plot_cross_validation_metric(df_cv, metric='mse')  # plot the MSE
model2 = Prophet()
model2.fit(data.rename(columns={'Adj_Close_log':'y'}))
future_data2 = model2.make_future_dataframe(periods=365, freq= 'd')
forecast_data2 = model2.predict(future_data2)
fig2 = model2.plot(forecast_data2)
df_cv2 = cross_validation(model2, initial='730 days', period='180 days', horizon = '365 days')
df_cv2.head()
df_p2 = performance_metrics(df_cv2)
df_p2.head()
# Modeling the log rate of return instead of the price (the fit is good)
fig = plot_cross_validation_metric(df_cv2, metric='mse')  # plot the MSE
plt.show()
| en | 0.626908 | #ここでデータファイルを読み込む #MSEをプロットする関数をデバッグのために再定義 Plot a performance metric vs. forecast horizon from cross validation. Cross validation produces a collection of out-of-sample model predictions that can be compared to actual values, at a range of different horizons (distance from the cutoff). This computes a specified performance metric for each prediction, and aggregated over a rolling window with horizon. This uses fbprophet.diagnostics.performance_metrics to compute the metrics. Valid values of metric are 'mse', 'rmse', 'mae', 'mape', and 'coverage'. rolling_window is the proportion of data included in the rolling window of aggregation. The default value of 0.1 means 10% of data are included in the aggregation for computing the metric. As a concrete example, if metric='mse', then this plot will show the squared error for each cross validation prediction, along with the MSE averaged over rolling windows of 10% of the data. Parameters ---------- df_cv: The output from fbprophet.diagnostics.cross_validation. metric: Metric name, one of ['mse', 'rmse', 'mae', 'mape', 'coverage']. rolling_window: Proportion of data to use for rolling average of metric. In [0, 1]. Defaults to 0.1. ax: Optional matplotlib axis on which to plot. If not given, a new figure will be created. Returns ------- a matplotlib figure. # Get the metric at the level of individual predictions, and with the rolling window. # Some work because matplotlib does not handle timedelta # Target ~10 ticks. # Find the largest time resolution that has <1 unit per bin. # 日経平均株価をそのままモデリング(MSEが高くなり当てはまりが悪い) #MSEをプロット # 株価ではなく対数収益率をモデリング(当てはまりが高い) #MSEをプロット | 2.439542 | 2 |
kick/device2/ssp/actions/statements.py | CiscoDevNet/firepower-kickstart | 2 | 6632791 | <reponame>CiscoDevNet/firepower-kickstart
from unicon.eal.dialogs import Statement
def login_handler(spawn, patterns):
spawn.sendline(patterns.login_username)
spawn.expect('Password: ')
spawn.sendline(patterns.login_password)
spawn.expect('Cisco Firepower')
spawn.sendline()
class SspStatements:
def __init__(self, patterns):
self.login_password = Statement(pattern=patterns.prompt.prelogin_prompt, action=login_handler,
args={'patterns': patterns}, loop_continue=True, continue_timer=True)
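# Illustrative sketch (not part of the original file): a Statement like the one
# above is normally bundled into a unicon Dialog and processed against a spawned
# console session; "ssp_patterns" and "console" are made-up stand-ins here.
#
#     from unicon.eal.dialogs import Dialog
#     d = Dialog([SspStatements(ssp_patterns).login_password])
#     d.process(console)  # answers the pre-login prompt and logs in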
| from unicon.eal.dialogs import Statement
def login_handler(spawn, patterns):
spawn.sendline(patterns.login_username)
spawn.expect('Password: ')
spawn.sendline(patterns.login_password)
spawn.expect('Cisco Firepower')
spawn.sendline()
class SspStatements:
def __init__(self, patterns):
self.login_password = Statement(pattern=patterns.prompt.prelogin_prompt, action=login_handler,
args={'patterns': patterns}, loop_continue=True, continue_timer=True) | none | 1 | 2.103792 | 2 |
|
card.py | HousedHorse/study-buddy | 1 | 6632792 | from fpdf import FPDF
class Cards(FPDF):
def __init__(self, orientation = 'P', unit = 'mm', format='A4'):
super().__init__(orientation, unit, format)
self.cards = []
self.curr_card = 0
# we do not want to auto page break
self.set_auto_page_break(False)
def add_card(self, card):
self.cards.append(card)
def header(self):
self.set_font("Arial")
try:
self.cards[self.curr_card].title.to_pdf(self)
except IndexError:
return
def export(self,filename):
# draw each card
for card in self.cards:
# draw card
card.to_pdf(self)
# check to see if we went over the page; if so, print a warning
page_height = self.fw_pt if self.def_orientation == "L" else self.fh_pt
if self.get_y() > page_height:
print(f"WARNING: Card \"{card.title.text}\" is too long. Output truncated.")
# increment card number
self.curr_card += 1
# write card to file
self.output(filename)
class Card:
def __init__(self, title_str = "Untitled"):
self.title = Title(title_str)
self.contents = []
self.printed = []
def add_content(self, content):
self.contents.append(content)
def soft_page_break(self, pdf):
pdf.add_page()
for printed in self.printed:
printed.to_pdf(pdf)
def to_pdf(self, pdf):
# blank page with just title
pdf.add_page()
# page with information
pdf.add_page()
# card contents
for content in self.contents:
# insert an extra page break before printing subtitles
# but only if they are not the first subtitles
if type(content) is Subtitle and not content.first:
self.soft_page_break(pdf)
self.printed.append(content)
content.to_pdf(pdf)
# insert an extra page break after printing subtitles
if type(content) is Subtitle:
self.soft_page_break(pdf)
class CardContents:
def __init__(self, text = "NULL"):
self.text = text
def __str__(self):
return self.text
def to_pdf(self, pdf):
raise NotImplementedError("This is an abstract method and has no business being called.")
# a card title
class Title(CardContents):
def to_pdf(self, pdf):
pdf.set_font("Arial","B",20)
pdf.multi_cell(0, 20, txt=self.text, align="C", border=0)
pdf.set_font("Arial","",12)
pdf.ln(12)
# a subtitle within a card
class Subtitle(CardContents):
def __init__(self, text = "NULL", first=False):
super().__init__(text)
self.first = first
def to_pdf(self, pdf):
pdf.set_font("Arial","B",16)
# add a blank space if necessary
if not self.first:
pdf.ln(12)
pdf.multi_cell(0, 16, txt=self.text, align="L", border=0)
pdf.set_font("Arial","",12)
# a subsubtitle within a card
class Subsubtitle(CardContents):
def to_pdf(self, pdf):
pdf.set_font("Arial","B",14)
pdf.multi_cell(0, 14, txt=self.text, align="L", border=0)
pdf.set_font("Arial","",12)
# a subsubsubtitle within a card
class Subsubsubtitle(CardContents):
def to_pdf(self, pdf):
pdf.set_font("Arial","B",12)
pdf.multi_cell(0, 12, txt=self.text, align="L", border=0)
pdf.set_font("Arial","",12)
# a bulleted point
class BulletedPoint(CardContents):
def __init__(self, text = "NULL", level = 0):
super().__init__(text)
self.spacing = " " * level
self.number = 0
def to_pdf(self, pdf):
# save old font and change family to Courier
old_font = pdf.font_family
pdf.set_font("Courier")
# add spacing
pdf.cell(pdf.get_string_width(self.spacing) + pdf.c_margin * 2, 14, txt=self.spacing, align="L", border=0)
# draw bullet point
self.draw_point(pdf, self.number)
# return old font
pdf.set_font(old_font)
# draw text
pdf.multi_cell(0, 12, txt=self.text, align="L", border=0)
def draw_point(self, pdf, number=1):
# set bullet character
bullet = "".join([" ",chr(149)])
# we want this to be wide enough to match NumberedPoint
pdf.cell(pdf.get_string_width("99.") + 2 + pdf.c_margin * 2, 14, txt=bullet, align="L", border=0)
# a numbered point
class NumberedPoint(BulletedPoint):
def __init__(self, text="NULL", level=0, number=1):
super().__init__(text, level)
self.number = number
def draw_point(self, pdf, number=1):
# set number string
numstr = f"{number:2}. "
# we want this to be wide enough to fit up to 99 numbers
pdf.cell(pdf.get_string_width("99.") + 2 + pdf.c_margin * 2, 14, txt=numstr, align="L", border=0)
# a plaintext paragraph
class Text(CardContents):
def to_pdf(self, pdf):
pdf.set_font_size(12)
pdf.multi_cell(0, 12, txt=self.text, align="L", border=0)
pdf.set_font_size(12)
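# Illustrative usage sketch (not part of the original module): builds a single
# deck with the classes defined above and writes it to disk; the card text and
# output file name are made up.
if __name__ == "__main__":
    deck = Cards()
    card = Card("Cell Biology")
    card.add_content(Subtitle("Organelles", first=True))
    card.add_content(BulletedPoint("Mitochondria produce ATP", level=1))
    card.add_content(NumberedPoint("Nucleus stores DNA", level=1, number=1))
    card.add_content(Text("Review these points before the quiz."))
    deck.add_card(card)
    deck.export("example_cards.pdf")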
| from fpdf import FPDF
class Cards(FPDF):
def __init__(self, orientation = 'P', unit = 'mm', format='A4'):
super().__init__(orientation, unit, format)
self.cards = []
self.curr_card = 0
# we do not want to auto page break
self.set_auto_page_break(False)
def add_card(self, card):
self.cards.append(card)
def header(self):
self.set_font("Arial")
try:
self.cards[self.curr_card].title.to_pdf(self)
except IndexError:
return
def export(self,filename):
# draw each card
for card in self.cards:
# draw card
card.to_pdf(self)
# check to see if we went over the page; if so, print a warning
page_height = self.fw_pt if self.def_orientation == "L" else self.fh_pt
if self.get_y() > page_height:
print(f"WARNING: Card \"{card.title.text}\" is too long. Output truncated.")
# increment card number
self.curr_card += 1
# write card to file
self.output(filename)
class Card:
def __init__(self, title_str = "Untitled"):
self.title = Title(title_str)
self.contents = []
self.printed = []
def add_content(self, content):
self.contents.append(content)
def soft_page_break(self, pdf):
pdf.add_page()
for printed in self.printed:
printed.to_pdf(pdf)
def to_pdf(self, pdf):
# blank page with just title
pdf.add_page()
# page with information
pdf.add_page()
# card contents
for content in self.contents:
# insert an extra page break before printing subtitles
# but only if they are not the first subtitles
if type(content) is Subtitle and not content.first:
self.soft_page_break(pdf)
self.printed.append(content)
content.to_pdf(pdf)
# insert an extra page break after printing subtitles
if type(content) is Subtitle:
self.soft_page_break(pdf)
class CardContents:
def __init__(self, text = "NULL"):
self.text = text
def __str__(self):
return self.text
def to_pdf(self, pdf):
raise NotImplementedError("This is an abstract method and has no business being called.")
# a card title
class Title(CardContents):
def to_pdf(self, pdf):
pdf.set_font("Arial","B",20)
pdf.multi_cell(0, 20, txt=self.text, align="C", border=0)
pdf.set_font("Arial","",12)
pdf.ln(12)
# a subtitle within a card
class Subtitle(CardContents):
def __init__(self, text = "NULL", first=False):
super().__init__(text)
self.first = first
def to_pdf(self, pdf):
pdf.set_font("Arial","B",16)
# add a blank space if necessary
if not self.first:
pdf.ln(12)
pdf.multi_cell(0, 16, txt=self.text, align="L", border=0)
pdf.set_font("Arial","",12)
# a subsubtitle within a card
class Subsubtitle(CardContents):
def to_pdf(self, pdf):
pdf.set_font("Arial","B",14)
pdf.multi_cell(0, 14, txt=self.text, align="L", border=0)
pdf.set_font("Arial","",12)
# a subsubsubtitle within a card
class Subsubsubtitle(CardContents):
def to_pdf(self, pdf):
pdf.set_font("Arial","B",12)
pdf.multi_cell(0, 12, txt=self.text, align="L", border=0)
pdf.set_font("Arial","",12)
# a bulleted point
class BulletedPoint(CardContents):
def __init__(self, text = "NULL", level = 0):
super().__init__(text)
self.spacing = " " * level
self.number = 0
def to_pdf(self, pdf):
# save old font and change family to Courier
old_font = pdf.font_family
pdf.set_font("Courier")
# add spacing
pdf.cell(pdf.get_string_width(self.spacing) + pdf.c_margin * 2, 14, txt=self.spacing, align="L", border=0)
# draw bullet point
self.draw_point(pdf, self.number)
# return old font
pdf.set_font(old_font)
# draw text
pdf.multi_cell(0, 12, txt=self.text, align="L", border=0)
def draw_point(self, pdf, number=1):
# set bullet character
bullet = "".join([" ",chr(149)])
# we want this to be wide enough to match NumberedPoint
pdf.cell(pdf.get_string_width("99.") + 2 + pdf.c_margin * 2, 14, txt=bullet, align="L", border=0)
# a numbered point
class NumberedPoint(BulletedPoint):
def __init__(self, text="NULL", level=0, number=1):
super().__init__(text, level)
self.number = number
def draw_point(self, pdf, number=1):
# set number string
numstr = f"{number:2}. "
# we want this to be wide enough to fit up to 99 numbers
pdf.cell(pdf.get_string_width("99.") + 2 + pdf.c_margin * 2, 14, txt=numstr, align="L", border=0)
# a plaintext paragraph
class Text(CardContents):
def to_pdf(self, pdf):
pdf.set_font_size(12)
pdf.multi_cell(0, 12, txt=self.text, align="L", border=0)
pdf.set_font_size(12)
| en | 0.803421 | # we do not want to auto page break # draw each card # draw card # check to see if we went over the page; if so, print a warning # increment card number # write card to file # blank page with just title # page with information # card contents # insert an extra page break before printing subtitles # but only if they are not the first subtitles # insert an extra page break after printing subtitles # a card title # a subtitle within a card # add a blank space if necessary # a subsubtitle within a card # a subsubsubtitle within a card # a bulleted point # save old font and change family to Courier # add spacing # draw bullet point # return old font # draw text # set bullet character # we want this to be wide enough to match NumberedPoint # a numbered point # set number string # we want this to be wide enough to fit up to 99 numbers # a plaintext paragraph | 3.1966 | 3 |
mabooia/accounting/__init__.py | Mabooia/mabooia-py | 0 | 6632793 |
from .enums import *
from .timeline import *
|
from .enums import *
from .timeline import *
| none | 1 | 1.08423 | 1 |
|
ml-agents/mlagents/trainers/sac/trainer.py | XiaoDiDiDa/ml-agents | 0 | 6632794 | # # Unity ML-Agents Toolkit
# ## ML-Agent Learning (SAC)
# Contains an implementation of SAC as described in https://arxiv.org/abs/1801.01290
# and implemented in https://github.com/hill-a/stable-baselines
import logging
from collections import defaultdict
from typing import Dict
import os
import numpy as np
from mlagents.trainers.brain import BrainInfo
from mlagents.trainers.action_info import ActionInfoOutputs
from mlagents_envs.timers import timed
from mlagents.trainers.sac.policy import SACPolicy
from mlagents.trainers.rl_trainer import RLTrainer, AllRewardsOutput
LOGGER = logging.getLogger("mlagents.trainers")
BUFFER_TRUNCATE_PERCENT = 0.8
class SACTrainer(RLTrainer):
"""
The SACTrainer is an implementation of the SAC algorithm, with support
for discrete actions and recurrent networks.
"""
def __init__(
self, brain, reward_buff_cap, trainer_parameters, training, load, seed, run_id
):
"""
Responsible for collecting experiences and training SAC model.
:param trainer_parameters: The parameters for the trainer (dictionary).
:param training: Whether the trainer is set for training.
:param load: Whether the model should be loaded.
:param seed: The seed the model will be initialized with
:param run_id: The The identifier of the current run
"""
super().__init__(brain, trainer_parameters, training, run_id, reward_buff_cap)
self.param_keys = [
"batch_size",
"buffer_size",
"buffer_init_steps",
"hidden_units",
"learning_rate",
"init_entcoef",
"max_steps",
"normalize",
"num_update",
"num_layers",
"time_horizon",
"sequence_length",
"summary_freq",
"tau",
"use_recurrent",
"summary_path",
"memory_size",
"model_path",
"reward_signals",
"vis_encode_type",
]
self.check_param_keys()
self.step = 0
self.train_interval = (
trainer_parameters["train_interval"]
if "train_interval" in trainer_parameters
else 1
)
self.reward_signal_updates_per_train = (
trainer_parameters["reward_signals"]["reward_signal_num_update"]
if "reward_signal_num_update" in trainer_parameters["reward_signals"]
else trainer_parameters["num_update"]
)
self.checkpoint_replay_buffer = (
trainer_parameters["save_replay_buffer"]
if "save_replay_buffer" in trainer_parameters
else False
)
self.sac_policy = SACPolicy(
seed, brain, trainer_parameters, self.is_training, load
)
self.policy = self.sac_policy
# Load the replay buffer if load
if load and self.checkpoint_replay_buffer:
try:
self.load_replay_buffer()
except (AttributeError, FileNotFoundError):
LOGGER.warning(
"Replay buffer was unable to load, starting from scratch."
)
LOGGER.debug(
"Loaded update buffer with {} sequences".format(
self.update_buffer.num_experiences
)
)
for _reward_signal in self.policy.reward_signals.keys():
self.collected_rewards[_reward_signal] = {}
self.episode_steps = {}
def save_model(self) -> None:
"""
Saves the model. Overrides the default save_model since we want to save
the replay buffer as well.
"""
self.policy.save_model(self.get_step)
if self.checkpoint_replay_buffer:
self.save_replay_buffer()
def save_replay_buffer(self) -> None:
"""
Save the training buffer's update buffer to a pickle file.
"""
filename = os.path.join(self.policy.model_path, "last_replay_buffer.hdf5")
LOGGER.info("Saving Experience Replay Buffer to {}".format(filename))
with open(filename, "wb") as file_object:
self.update_buffer.save_to_file(file_object)
def load_replay_buffer(self) -> None:
"""
Loads the last saved replay buffer from a file.
"""
filename = os.path.join(self.policy.model_path, "last_replay_buffer.hdf5")
LOGGER.info("Loading Experience Replay Buffer from {}".format(filename))
with open(filename, "rb+") as file_object:
self.update_buffer.load_from_file(file_object)
LOGGER.info(
"Experience replay buffer has {} experiences.".format(
self.update_buffer.num_experiences
)
)
def add_policy_outputs(
self, take_action_outputs: ActionInfoOutputs, agent_id: str, agent_idx: int
) -> None:
"""
Takes the output of the last action and store it into the training buffer.
"""
actions = take_action_outputs["action"]
self.processing_buffer[agent_id]["actions"].append(actions[agent_idx])
def add_rewards_outputs(
self,
rewards_out: AllRewardsOutput,
values: Dict[str, np.ndarray],
agent_id: str,
agent_idx: int,
agent_next_idx: int,
) -> None:
"""
Takes the value output of the last action and store it into the training buffer.
"""
self.processing_buffer[agent_id]["environment_rewards"].append(
rewards_out.environment[agent_next_idx]
)
def process_experiences(
self, current_info: BrainInfo, next_info: BrainInfo
) -> None:
"""
Checks agent histories for processing condition, and processes them as necessary.
:param current_info: current BrainInfo.
:param next_info: next BrainInfo.
"""
if self.is_training:
self.policy.update_normalization(next_info.vector_observations)
for l in range(len(next_info.agents)):
agent_actions = self.processing_buffer[next_info.agents[l]]["actions"]
if (
next_info.local_done[l]
or len(agent_actions) >= self.trainer_parameters["time_horizon"]
) and len(agent_actions) > 0:
agent_id = next_info.agents[l]
# Bootstrap using last brain info. Set last element to duplicate obs and remove dones.
if next_info.max_reached[l]:
bootstrapping_info = self.processing_buffer[
agent_id
].last_brain_info
idx = bootstrapping_info.agents.index(agent_id)
for i, obs in enumerate(bootstrapping_info.visual_observations):
self.processing_buffer[agent_id]["next_visual_obs%d" % i][
-1
] = obs[idx]
if self.policy.use_vec_obs:
self.processing_buffer[agent_id]["next_vector_in"][
-1
] = bootstrapping_info.vector_observations[idx]
self.processing_buffer[agent_id]["done"][-1] = False
self.processing_buffer.append_to_update_buffer(
self.update_buffer,
agent_id,
batch_size=None,
training_length=self.policy.sequence_length,
)
self.processing_buffer[agent_id].reset_agent()
if next_info.local_done[l]:
self.stats["Environment/Episode Length"].append(
self.episode_steps.get(agent_id, 0)
)
self.episode_steps[agent_id] = 0
for name, rewards in self.collected_rewards.items():
if name == "environment":
self.cumulative_returns_since_policy_update.append(
rewards.get(agent_id, 0)
)
self.stats["Environment/Cumulative Reward"].append(
rewards.get(agent_id, 0)
)
self.reward_buffer.appendleft(rewards.get(agent_id, 0))
rewards[agent_id] = 0
else:
self.stats[
self.policy.reward_signals[name].stat_name
].append(rewards.get(agent_id, 0))
rewards[agent_id] = 0
def is_ready_update(self) -> bool:
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
return (
self.update_buffer.num_experiences >= self.trainer_parameters["batch_size"]
and self.step >= self.trainer_parameters["buffer_init_steps"]
)
@timed
def update_policy(self) -> None:
"""
If train_interval is met, update the SAC policy given the current reward signals.
If reward_signal_train_interval is met, update the reward signals from the buffer.
"""
if self.step % self.train_interval == 0:
self.trainer_metrics.start_policy_update_timer(
number_experiences=self.update_buffer.num_experiences,
mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),
)
self.update_sac_policy()
self.update_reward_signals()
self.trainer_metrics.end_policy_update()
def update_sac_policy(self) -> None:
"""
Uses demonstration_buffer to update the policy.
The reward signal generators are updated using different mini batches.
If we want to imitate http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated
N times, then the reward signals are updated N times, then reward_signal_updates_per_train
is greater than 1 and the reward signals are not updated in parallel.
"""
self.cumulative_returns_since_policy_update.clear()
n_sequences = max(
int(self.trainer_parameters["batch_size"] / self.policy.sequence_length), 1
)
num_updates = self.trainer_parameters["num_update"]
batch_update_stats: Dict[str, list] = defaultdict(list)
for _ in range(num_updates):
LOGGER.debug("Updating SAC policy at step {}".format(self.step))
buffer = self.update_buffer
if (
self.update_buffer.num_experiences
>= self.trainer_parameters["batch_size"]
):
sampled_minibatch = buffer.sample_mini_batch(
self.trainer_parameters["batch_size"],
sequence_length=self.policy.sequence_length,
)
# Get rewards for each reward
for name, signal in self.policy.reward_signals.items():
sampled_minibatch[
"{}_rewards".format(name)
] = signal.evaluate_batch(sampled_minibatch).scaled_reward
update_stats = self.policy.update(sampled_minibatch, n_sequences)
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
# Truncate update buffer if neccessary. Truncate more than we need to to avoid truncating
# a large buffer at each update.
if self.update_buffer.num_experiences > self.trainer_parameters["buffer_size"]:
self.update_buffer.truncate(
int(self.trainer_parameters["buffer_size"] * BUFFER_TRUNCATE_PERCENT)
)
for stat, stat_list in batch_update_stats.items():
self.stats[stat].append(np.mean(stat_list))
bc_module = self.sac_policy.bc_module
if bc_module:
update_stats = bc_module.update()
for stat, val in update_stats.items():
self.stats[stat].append(val)
def update_reward_signals(self) -> None:
"""
Iterate through the reward signals and update them. Unlike in PPO,
do it separate from the policy so that it can be done at a different
interval.
This function should only be used to simulate
http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated
N times, then the reward signals are updated N times. Normally, the reward signal
and policy are updated in parallel.
"""
buffer = self.update_buffer
num_updates = self.reward_signal_updates_per_train
n_sequences = max(
int(self.trainer_parameters["batch_size"] / self.policy.sequence_length), 1
)
batch_update_stats: Dict[str, list] = defaultdict(list)
for _ in range(num_updates):
# Get minibatches for reward signal update if needed
reward_signal_minibatches = {}
for name, signal in self.policy.reward_signals.items():
LOGGER.debug("Updating {} at step {}".format(name, self.step))
# Some signals don't need a minibatch to be sampled - so we don't!
if signal.update_dict:
reward_signal_minibatches[name] = buffer.sample_mini_batch(
self.trainer_parameters["batch_size"],
sequence_length=self.policy.sequence_length,
)
update_stats = self.sac_policy.update_reward_signals(
reward_signal_minibatches, n_sequences
)
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
for stat, stat_list in batch_update_stats.items():
self.stats[stat].append(np.mean(stat_list))
| # # Unity ML-Agents Toolkit
# ## ML-Agent Learning (SAC)
# Contains an implementation of SAC as described in https://arxiv.org/abs/1801.01290
# and implemented in https://github.com/hill-a/stable-baselines
import logging
from collections import defaultdict
from typing import Dict
import os
import numpy as np
from mlagents.trainers.brain import BrainInfo
from mlagents.trainers.action_info import ActionInfoOutputs
from mlagents_envs.timers import timed
from mlagents.trainers.sac.policy import SACPolicy
from mlagents.trainers.rl_trainer import RLTrainer, AllRewardsOutput
LOGGER = logging.getLogger("mlagents.trainers")
BUFFER_TRUNCATE_PERCENT = 0.8
class SACTrainer(RLTrainer):
"""
The SACTrainer is an implementation of the SAC algorithm, with support
for discrete actions and recurrent networks.
"""
def __init__(
self, brain, reward_buff_cap, trainer_parameters, training, load, seed, run_id
):
"""
Responsible for collecting experiences and training SAC model.
:param trainer_parameters: The parameters for the trainer (dictionary).
:param training: Whether the trainer is set for training.
:param load: Whether the model should be loaded.
:param seed: The seed the model will be initialized with
        :param run_id: The identifier of the current run
"""
super().__init__(brain, trainer_parameters, training, run_id, reward_buff_cap)
self.param_keys = [
"batch_size",
"buffer_size",
"buffer_init_steps",
"hidden_units",
"learning_rate",
"init_entcoef",
"max_steps",
"normalize",
"num_update",
"num_layers",
"time_horizon",
"sequence_length",
"summary_freq",
"tau",
"use_recurrent",
"summary_path",
"memory_size",
"model_path",
"reward_signals",
"vis_encode_type",
]
self.check_param_keys()
self.step = 0
self.train_interval = (
trainer_parameters["train_interval"]
if "train_interval" in trainer_parameters
else 1
)
self.reward_signal_updates_per_train = (
trainer_parameters["reward_signals"]["reward_signal_num_update"]
if "reward_signal_num_update" in trainer_parameters["reward_signals"]
else trainer_parameters["num_update"]
)
self.checkpoint_replay_buffer = (
trainer_parameters["save_replay_buffer"]
if "save_replay_buffer" in trainer_parameters
else False
)
self.sac_policy = SACPolicy(
seed, brain, trainer_parameters, self.is_training, load
)
self.policy = self.sac_policy
# Load the replay buffer if load
if load and self.checkpoint_replay_buffer:
try:
self.load_replay_buffer()
except (AttributeError, FileNotFoundError):
LOGGER.warning(
"Replay buffer was unable to load, starting from scratch."
)
LOGGER.debug(
"Loaded update buffer with {} sequences".format(
self.update_buffer.num_experiences
)
)
for _reward_signal in self.policy.reward_signals.keys():
self.collected_rewards[_reward_signal] = {}
self.episode_steps = {}
def save_model(self) -> None:
"""
Saves the model. Overrides the default save_model since we want to save
the replay buffer as well.
"""
self.policy.save_model(self.get_step)
if self.checkpoint_replay_buffer:
self.save_replay_buffer()
def save_replay_buffer(self) -> None:
"""
Save the training buffer's update buffer to a pickle file.
"""
filename = os.path.join(self.policy.model_path, "last_replay_buffer.hdf5")
LOGGER.info("Saving Experience Replay Buffer to {}".format(filename))
with open(filename, "wb") as file_object:
self.update_buffer.save_to_file(file_object)
def load_replay_buffer(self) -> None:
"""
Loads the last saved replay buffer from a file.
"""
filename = os.path.join(self.policy.model_path, "last_replay_buffer.hdf5")
LOGGER.info("Loading Experience Replay Buffer from {}".format(filename))
with open(filename, "rb+") as file_object:
self.update_buffer.load_from_file(file_object)
LOGGER.info(
"Experience replay buffer has {} experiences.".format(
self.update_buffer.num_experiences
)
)
def add_policy_outputs(
self, take_action_outputs: ActionInfoOutputs, agent_id: str, agent_idx: int
) -> None:
"""
Takes the output of the last action and store it into the training buffer.
"""
actions = take_action_outputs["action"]
self.processing_buffer[agent_id]["actions"].append(actions[agent_idx])
def add_rewards_outputs(
self,
rewards_out: AllRewardsOutput,
values: Dict[str, np.ndarray],
agent_id: str,
agent_idx: int,
agent_next_idx: int,
) -> None:
"""
Takes the value output of the last action and store it into the training buffer.
"""
self.processing_buffer[agent_id]["environment_rewards"].append(
rewards_out.environment[agent_next_idx]
)
def process_experiences(
self, current_info: BrainInfo, next_info: BrainInfo
) -> None:
"""
Checks agent histories for processing condition, and processes them as necessary.
:param current_info: current BrainInfo.
:param next_info: next BrainInfo.
"""
if self.is_training:
self.policy.update_normalization(next_info.vector_observations)
for l in range(len(next_info.agents)):
agent_actions = self.processing_buffer[next_info.agents[l]]["actions"]
if (
next_info.local_done[l]
or len(agent_actions) >= self.trainer_parameters["time_horizon"]
) and len(agent_actions) > 0:
agent_id = next_info.agents[l]
# Bootstrap using last brain info. Set last element to duplicate obs and remove dones.
if next_info.max_reached[l]:
bootstrapping_info = self.processing_buffer[
agent_id
].last_brain_info
idx = bootstrapping_info.agents.index(agent_id)
for i, obs in enumerate(bootstrapping_info.visual_observations):
self.processing_buffer[agent_id]["next_visual_obs%d" % i][
-1
] = obs[idx]
if self.policy.use_vec_obs:
self.processing_buffer[agent_id]["next_vector_in"][
-1
] = bootstrapping_info.vector_observations[idx]
self.processing_buffer[agent_id]["done"][-1] = False
self.processing_buffer.append_to_update_buffer(
self.update_buffer,
agent_id,
batch_size=None,
training_length=self.policy.sequence_length,
)
self.processing_buffer[agent_id].reset_agent()
if next_info.local_done[l]:
self.stats["Environment/Episode Length"].append(
self.episode_steps.get(agent_id, 0)
)
self.episode_steps[agent_id] = 0
for name, rewards in self.collected_rewards.items():
if name == "environment":
self.cumulative_returns_since_policy_update.append(
rewards.get(agent_id, 0)
)
self.stats["Environment/Cumulative Reward"].append(
rewards.get(agent_id, 0)
)
self.reward_buffer.appendleft(rewards.get(agent_id, 0))
rewards[agent_id] = 0
else:
self.stats[
self.policy.reward_signals[name].stat_name
].append(rewards.get(agent_id, 0))
rewards[agent_id] = 0
def is_ready_update(self) -> bool:
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
return (
self.update_buffer.num_experiences >= self.trainer_parameters["batch_size"]
and self.step >= self.trainer_parameters["buffer_init_steps"]
)
@timed
def update_policy(self) -> None:
"""
If train_interval is met, update the SAC policy given the current reward signals.
If reward_signal_train_interval is met, update the reward signals from the buffer.
"""
if self.step % self.train_interval == 0:
self.trainer_metrics.start_policy_update_timer(
number_experiences=self.update_buffer.num_experiences,
mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),
)
self.update_sac_policy()
self.update_reward_signals()
self.trainer_metrics.end_policy_update()
def update_sac_policy(self) -> None:
"""
Uses demonstration_buffer to update the policy.
The reward signal generators are updated using different mini batches.
If we want to imitate http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated
N times, then the reward signals are updated N times, then reward_signal_updates_per_train
is greater than 1 and the reward signals are not updated in parallel.
"""
self.cumulative_returns_since_policy_update.clear()
n_sequences = max(
int(self.trainer_parameters["batch_size"] / self.policy.sequence_length), 1
)
num_updates = self.trainer_parameters["num_update"]
batch_update_stats: Dict[str, list] = defaultdict(list)
for _ in range(num_updates):
LOGGER.debug("Updating SAC policy at step {}".format(self.step))
buffer = self.update_buffer
if (
self.update_buffer.num_experiences
>= self.trainer_parameters["batch_size"]
):
sampled_minibatch = buffer.sample_mini_batch(
self.trainer_parameters["batch_size"],
sequence_length=self.policy.sequence_length,
)
# Get rewards for each reward
for name, signal in self.policy.reward_signals.items():
sampled_minibatch[
"{}_rewards".format(name)
] = signal.evaluate_batch(sampled_minibatch).scaled_reward
update_stats = self.policy.update(sampled_minibatch, n_sequences)
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
            # Truncate the update buffer if necessary. Truncate more than strictly needed, to avoid
            # truncating a large buffer at every update.
if self.update_buffer.num_experiences > self.trainer_parameters["buffer_size"]:
self.update_buffer.truncate(
int(self.trainer_parameters["buffer_size"] * BUFFER_TRUNCATE_PERCENT)
)
for stat, stat_list in batch_update_stats.items():
self.stats[stat].append(np.mean(stat_list))
bc_module = self.sac_policy.bc_module
if bc_module:
update_stats = bc_module.update()
for stat, val in update_stats.items():
self.stats[stat].append(val)
def update_reward_signals(self) -> None:
"""
Iterate through the reward signals and update them. Unlike in PPO,
do it separate from the policy so that it can be done at a different
interval.
This function should only be used to simulate
http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated
N times, then the reward signals are updated N times. Normally, the reward signal
and policy are updated in parallel.
"""
buffer = self.update_buffer
num_updates = self.reward_signal_updates_per_train
n_sequences = max(
int(self.trainer_parameters["batch_size"] / self.policy.sequence_length), 1
)
batch_update_stats: Dict[str, list] = defaultdict(list)
for _ in range(num_updates):
# Get minibatches for reward signal update if needed
reward_signal_minibatches = {}
for name, signal in self.policy.reward_signals.items():
LOGGER.debug("Updating {} at step {}".format(name, self.step))
# Some signals don't need a minibatch to be sampled - so we don't!
if signal.update_dict:
reward_signal_minibatches[name] = buffer.sample_mini_batch(
self.trainer_parameters["batch_size"],
sequence_length=self.policy.sequence_length,
)
update_stats = self.sac_policy.update_reward_signals(
reward_signal_minibatches, n_sequences
)
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
for stat, stat_list in batch_update_stats.items():
self.stats[stat].append(np.mean(stat_list))
| en | 0.826342 | # # Unity ML-Agents Toolkit # ## ML-Agent Learning (SAC) # Contains an implementation of SAC as described in https://arxiv.org/abs/1801.01290 # and implemented in https://github.com/hill-a/stable-baselines The SACTrainer is an implementation of the SAC algorithm, with support for discrete actions and recurrent networks. Responsible for collecting experiences and training SAC model. :param trainer_parameters: The parameters for the trainer (dictionary). :param training: Whether the trainer is set for training. :param load: Whether the model should be loaded. :param seed: The seed the model will be initialized with :param run_id: The The identifier of the current run # Load the replay buffer if load Saves the model. Overrides the default save_model since we want to save the replay buffer as well. Save the training buffer's update buffer to a pickle file. Loads the last saved replay buffer from a file. Takes the output of the last action and store it into the training buffer. Takes the value output of the last action and store it into the training buffer. Checks agent histories for processing condition, and processes them as necessary. :param current_info: current BrainInfo. :param next_info: next BrainInfo. # Bootstrap using last brain info. Set last element to duplicate obs and remove dones. Returns whether or not the trainer has enough elements to run update model :return: A boolean corresponding to whether or not update_model() can be run If train_interval is met, update the SAC policy given the current reward signals. If reward_signal_train_interval is met, update the reward signals from the buffer. Uses demonstration_buffer to update the policy. The reward signal generators are updated using different mini batches. If we want to imitate http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated N times, then the reward signals are updated N times, then reward_signal_updates_per_train is greater than 1 and the reward signals are not updated in parallel. # Get rewards for each reward # Truncate update buffer if neccessary. Truncate more than we need to to avoid truncating # a large buffer at each update. Iterate through the reward signals and update them. Unlike in PPO, do it separate from the policy so that it can be done at a different interval. This function should only be used to simulate http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated N times, then the reward signals are updated N times. Normally, the reward signal and policy are updated in parallel. # Get minibatches for reward signal update if needed # Some signals don't need a minibatch to be sampled - so we don't! | 2.470122 | 2 |
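The SAC update path above derives the number of recurrent sequences per mini-batch from batch_size and sequence_length, and trims the replay buffer back to 80% of buffer_size once it overflows (BUFFER_TRUNCATE_PERCENT). A minimal sketch of that arithmetic, using hypothetical parameter values rather than any real configuration:

# Hypothetical values, for illustration only.
trainer_parameters = {"batch_size": 256, "buffer_size": 50000}
sequence_length = 64
BUFFER_TRUNCATE_PERCENT = 0.8

n_sequences = max(int(trainer_parameters["batch_size"] / sequence_length), 1)  # -> 4
num_experiences = 60000  # pretend the replay buffer has grown past the cap
if num_experiences > trainer_parameters["buffer_size"]:
    # Keep only 80% of the cap so truncation is not triggered again on the very next update.
    num_experiences = int(trainer_parameters["buffer_size"] * BUFFER_TRUNCATE_PERCENT)  # -> 40000
print(n_sequences, num_experiences)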
egs/datasets/audio/lj/preprocess.py | leminhnguyen/NATSpeech | 561 | 6632795 | <reponame>leminhnguyen/NATSpeech
from data_gen.tts.base_preprocess import BasePreprocessor
class LJPreprocess(BasePreprocessor):
def meta_data(self):
for l in open(f'{self.raw_data_dir}/metadata.csv').readlines():
item_name, _, txt = l.strip().split("|")
wav_fn = f"{self.raw_data_dir}/wavs/{item_name}.wav"
yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt}
| from data_gen.tts.base_preprocess import BasePreprocessor
class LJPreprocess(BasePreprocessor):
def meta_data(self):
for l in open(f'{self.raw_data_dir}/metadata.csv').readlines():
item_name, _, txt = l.strip().split("|")
wav_fn = f"{self.raw_data_dir}/wavs/{item_name}.wav"
yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt} | none | 1 | 2.538705 | 3 |
|
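The meta_data generator in the row above expects LJSpeech-style metadata.csv rows of the form item_name|raw_text|normalized_text and yields one record per line. A standalone sketch of the same parsing, with a made-up directory and line rather than a real corpus file:

raw_data_dir = "/data/LJSpeech-1.1"  # hypothetical corpus location
line = "LJ001-0001|Printing, in the only sense|Printing, in the only sense"
item_name, _, txt = line.strip().split("|")
record = {"item_name": item_name,
          "wav_fn": f"{raw_data_dir}/wavs/{item_name}.wav",
          "txt": txt}
print(record["wav_fn"])  # /data/LJSpeech-1.1/wavs/LJ001-0001.wav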
TPS_dice_roller_bot/core/spongebob_feature.py | JacopoDeAngelis/TPS-dice-roller-bot | 0 | 6632796 | ### spongebob_sentence(message)
# take the message text and returns a sentence with alternating upper and lowercase chars
def spongebob_sentence(message_text):
message_split = message_text.split()
if '/spongebob' in message_split:
message_split = message_split[message_split.index('/spongebob') + 1:len(message_split)]
elif '/sp' in message_split:
message_split = message_split[message_split.index('/sp') + 1:len(message_split)]
else:
message_split = message_split[0:len(message_split)]
new_message = ""
# takes every string in the list
for string in message_split:
i = 0
# takes every char in string and iterate counting the element index
for char in string:
if i % 2 == 0:
new_message += char.upper()
else:
new_message += char.lower()
i += 1
new_message += ' '
return new_message | ### spongebob_sentence(message)
# take the message text and returns a sentence with alternating upper and lowercase chars
def spongebob_sentence(message_text):
message_split = message_text.split()
if '/spongebob' in message_split:
message_split = message_split[message_split.index('/spongebob') + 1:len(message_split)]
elif '/sp' in message_split:
message_split = message_split[message_split.index('/sp') + 1:len(message_split)]
else:
message_split = message_split[0:len(message_split)]
new_message = ""
# takes every string in the list
for string in message_split:
i = 0
# takes every char in string and iterate counting the element index
for char in string:
if i % 2 == 0:
new_message += char.upper()
else:
new_message += char.lower()
i += 1
new_message += ' '
return new_message | en | 0.638156 | ### spongebob_sentence(message) # take the message text and returns a sentence with alternating upper and lowercase chars # takes every string in the list # takes every char in string and iterate counting the element index | 4.078435 | 4 |
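A quick check of spongebob_sentence above, with the expected output worked out by hand from the alternating-case rule (the pattern restarts in every word, and the loop leaves a trailing space):

print(spongebob_sentence("/sp hello world"))  # prints "HeLlO WoRlD " (note the trailing space)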
lib/common/error.py | smallstrong0/easy_python | 2 | 6632797 | <filename>lib/common/error.py
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
from enum import Enum
from tornado.web import HTTPError
status_0 = dict(status_code=405, reason='Method not allowed.')
status_1 = dict(status_code=404, reason='API not found.')
class BMCError(HTTPError):
def __init__(self, error):
if isinstance(error, tuple) and len(error) == 2:
pass
else:
raise Exception('error must tuple and length is two')
super(BMCError, self).__init__(
200
)
self.bmc_error = error
class CommonErrorType(Enum):
UNEXCEPT_ERROR = (-1, '未知异常,请联系客服')
INVALID_SIG = (-3, '登陆超时,请重新登陆')
TOKEN_NULL = (-2, 'cannot get user\'s token')
INVALID_ROLE = (-5, '无效角色身份')
INVALID_REGION = (-6, '无效的地区名')
CREATE_ID_ERROR = (-7, '生成id失败')
NO_RIGHT_ACCESS = (-8, '无操作权限')
PARAMS_ERROR = (-9, '参数错误')
WX_ACCESS_TOKEN_ERROR = (-10, '获取微信access_token失败')
DATA_ERROR = (-11, '数据提交失败')
# Demo-related errors
class DemoErrorType(Enum):
DEMO_ADD_ERROR = (34300, "示例添加失败")
DEMO_BULK_ADD_ERROR = (34301, "示例批量添加失败")
DEMO_UPDATE_ERROR = (34302, "示例修改失败")
DEMO_BULK_UPDATE_ERROR = (34303, "示例批量修改失败")
DEMO_DELETE_ERROR = (34304, "示例删除失败")
# Test-related errors
class TestErrorType(Enum):
TEST_ADD_ERROR = (100, "测试添加失败")
TEST_BULK_ADD_ERROR = (101, "测试批量添加失败")
TEST_UPDATE_ERROR = (102, "测试修改失败")
TEST_BULK_UPDATE_ERROR = (103, "测试批量修改失败")
TEST_DELETE_ERROR = (104, "测试删除失败")
| <filename>lib/common/error.py
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
from enum import Enum
from tornado.web import HTTPError
status_0 = dict(status_code=405, reason='Method not allowed.')
status_1 = dict(status_code=404, reason='API not found.')
class BMCError(HTTPError):
def __init__(self, error):
if isinstance(error, tuple) and len(error) == 2:
pass
else:
raise Exception('error must tuple and length is two')
super(BMCError, self).__init__(
200
)
self.bmc_error = error
class CommonErrorType(Enum):
UNEXCEPT_ERROR = (-1, '未知异常,请联系客服')
INVALID_SIG = (-3, '登陆超时,请重新登陆')
TOKEN_NULL = (-2, 'cannot get user\'s token')
INVALID_ROLE = (-5, '无效角色身份')
INVALID_REGION = (-6, '无效的地区名')
CREATE_ID_ERROR = (-7, '生成id失败')
NO_RIGHT_ACCESS = (-8, '无操作权限')
PARAMS_ERROR = (-9, '参数错误')
WX_ACCESS_TOKEN_ERROR = (-10, '获取微信access_token失败')
DATA_ERROR = (-11, '数据提交失败')
# Demo-related errors
class DemoErrorType(Enum):
DEMO_ADD_ERROR = (34300, "示例添加失败")
DEMO_BULK_ADD_ERROR = (34301, "示例批量添加失败")
DEMO_UPDATE_ERROR = (34302, "示例修改失败")
DEMO_BULK_UPDATE_ERROR = (34303, "示例批量修改失败")
DEMO_DELETE_ERROR = (34304, "示例删除失败")
# Test-related errors
class TestErrorType(Enum):
TEST_ADD_ERROR = (100, "测试添加失败")
TEST_BULK_ADD_ERROR = (101, "测试批量添加失败")
TEST_UPDATE_ERROR = (102, "测试修改失败")
TEST_BULK_UPDATE_ERROR = (103, "测试批量修改失败")
TEST_DELETE_ERROR = (104, "测试删除失败")
| en | 0.289356 | # ! /usr/bin/env python # -*- coding: utf-8 -*- # 示例相关 # 测试相关 | 2.283439 | 2 |
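BMCError above only accepts a two-element tuple, which is exactly what each enum member's .value holds, so handler code would typically raise it as in this sketch (the guard function itself is hypothetical):

def check_params(params):
    # .value passes the (code, message) tuple that BMCError's constructor expects
    if not params:
        raise BMCError(CommonErrorType.PARAMS_ERROR.value)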
test_mathlib.py | locspoc/python-test-driven-development | 0 | 6632798 | <filename>test_mathlib.py
import mathlib
def test_calc_total():
total = mathlib.calc_total(4,5)
assert total == 9
def test_calc_multiply():
result = mathlib.calc_multiply(10,3)
assert result == 30 | <filename>test_mathlib.py
import mathlib
def test_calc_total():
total = mathlib.calc_total(4,5)
assert total == 9
def test_calc_multiply():
result = mathlib.calc_multiply(10,3)
assert result == 30 | none | 1 | 2.870033 | 3 |
|
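The two tests above only run against a mathlib module exposing calc_total and calc_multiply, which is not part of this row; the following is simply the minimal implementation the assertions imply (an assumption, not the project's actual file):

# mathlib.py -- minimal module implied by the tests above
def calc_total(a, b):
    return a + b

def calc_multiply(a, b):
    return a * b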
solutions/Count Submatrices With All Ones/solution.py | nilax97/leetcode-solutions | 3 | 6632799 | class Solution:
def numSubmat(self, mat: List[List[int]]) -> int:
m, n = len(mat), len(mat[0])
#precipitate mat to histogram
for i in range(m):
for j in range(n):
if mat[i][j] and i > 0:
mat[i][j] += mat[i-1][j] #histogram
ans = 0
for i in range(m):
stack = [] #mono-stack of indices of non-decreasing height
cnt = 0
for j in range(n):
while stack and mat[i][stack[-1]] > mat[i][j]:
jj = stack.pop() #start
kk = stack[-1] if stack else -1 #end
cnt -= (mat[i][jj] - mat[i][j])*(jj - kk) #adjust to reflect lower height
cnt += mat[i][j] #count submatrices bottom-right at (i, j)
ans += cnt
stack.append(j)
return ans
| class Solution:
def numSubmat(self, mat: List[List[int]]) -> int:
m, n = len(mat), len(mat[0])
#precipitate mat to histogram
for i in range(m):
for j in range(n):
if mat[i][j] and i > 0:
mat[i][j] += mat[i-1][j] #histogram
ans = 0
for i in range(m):
stack = [] #mono-stack of indices of non-decreasing height
cnt = 0
for j in range(n):
while stack and mat[i][stack[-1]] > mat[i][j]:
jj = stack.pop() #start
kk = stack[-1] if stack else -1 #end
cnt -= (mat[i][jj] - mat[i][j])*(jj - kk) #adjust to reflect lower height
cnt += mat[i][j] #count submatrices bottom-right at (i, j)
ans += cnt
stack.append(j)
return ans
| en | 0.534185 | #precipitate mat to histogram #histogram #mono-stack of indices of non-decreasing height #start #end #adjust to reflect lower height #count submatrices bottom-right at (i, j) | 3.204834 | 3 |
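As a sanity check on the histogram plus monotonic-stack counting above (and assuming from typing import List is in scope, as it is in a LeetCode-style harness), the small grid below contains 13 all-ones submatrices, counted by hand:

grid = [[1, 0, 1],
        [1, 1, 0],
        [1, 1, 0]]
print(Solution().numSubmat(grid))  # -> 13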
lang/py/cookbook/v2/source/cb2_6_21_exm_2.py | ch1huizong/learning | 0 | 6632800 | class Foo(object):
def meth1(self, arg):
print arg
def meth2(self, arg):
print -arg
| class Foo(object):
def meth1(self, arg):
print arg
def meth2(self, arg):
print -arg
| none | 1 | 2.612411 | 3 |
|
ParlAI/tests/test_zootasks.py | UmaTaru/run | 163 | 6632801 | <reponame>UmaTaru/run
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test common developer mistakes in the model zoo and task list.
Mostly just ensures the docs will output nicely.
"""
import os
import unittest
import parlai.core.testing_utils as testing_utils
from parlai.zoo.model_list import model_list
from parlai.tasks.task_list import task_list
ZOO_EXCEPTIONS = {"fasttext_cc_vectors", "fasttext_vectors", "glove_vectors", "bert"}
class TestZooAndTasks(unittest.TestCase):
"""Make sure the package is alive."""
def _assertZooString(self, member, container, animal_name=None):
msg = f'Missing or empty {member} in parlai.zoo.model_list'
if animal_name:
msg += f' [{animal_name}]'
self.assertIn(member, container, msg=msg)
self.assertTrue(container[member], msg=msg)
def test_zoolist_fields(self):
"""Ensure zoo entries conform to style standards."""
for animal in model_list:
self._assertZooString('title', animal)
name = animal['title']
# every task must at least contain these
for key in ['id', 'task', 'description', 'example', 'result']:
self._assertZooString(key, animal, name)
# if there's a second example there should be a second result
if 'example2' in animal:
self._assertZooString('result2', animal, name)
# every entry needs a project page or a website
self.assertTrue(
("project" in animal) or ("external_website" in animal),
f"Zoo entry ({name}) should contain either project or external_website",
)
def test_zoolist_types(self):
"""Ensure no type errors in the model zoo."""
self._check_types(model_list, 'Zoo')
def test_tasklist_types(self):
"""Ensure no type errors in the task list."""
self._check_types(task_list, 'Task')
def test_tasklist(self):
"""Check the task list for issues."""
self._check_directory(
"task_list",
task_list,
"parlai/tasks",
"task",
ignore=['fromfile', 'interactive'],
)
def test_zoolist(self):
"""Check the zoo list for issues."""
self._check_directory(
"model_list", model_list, "parlai/zoo", "id", ignore=ZOO_EXCEPTIONS
)
def _check_directory(self, listname, thing_list, thing_dir, thing_key, ignore=None):
if ignore is None:
ignore = []
dirs = testing_utils.git_ls_dirs()
# get only directories directly in the thing directory
dirs = [d for d in dirs if os.path.dirname(d) == thing_dir]
# just the folder names
dirs = [os.path.basename(d) for d in dirs]
# skip the whitelist
dirs = [d for d in dirs if d not in ignore]
# make it a set
dirs = set(dirs)
# and the list of thing names
thing_names = {thing[thing_key].split(':')[0] for thing in thing_list}
errors = []
# items with a directory but not a listing
for name in dirs - thing_names:
errors.append(
"Directory {}/{} exists, but isn't in {}".format(
thing_dir, name, listname
)
)
for name in thing_names - dirs:
errors.append(
"{} exists in {}, but {}/{} isn't a directory".format(
name, listname, thing_dir, name
)
)
if errors:
self.assertTrue(False, "\n".join(errors))
def _check_types(self, thing_list, listname):
for thing in thing_list:
name = thing['id']
for key, value in thing.items():
if key == 'tags':
self.assertIsInstance(
value, list, "{} {} tags is not a list".format(listname, name)
)
self.assertIsNot(
value, [], "{} {} must have some tags".format(listname, name)
)
else:
self.assertIsInstance(
value,
str,
"{} {}:{} must be string".format(listname, name, key),
)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test common developer mistakes in the model zoo and task list.
Mostly just ensures the docs will output nicely.
"""
import os
import unittest
import parlai.core.testing_utils as testing_utils
from parlai.zoo.model_list import model_list
from parlai.tasks.task_list import task_list
ZOO_EXCEPTIONS = {"fasttext_cc_vectors", "fasttext_vectors", "glove_vectors", "bert"}
class TestZooAndTasks(unittest.TestCase):
"""Make sure the package is alive."""
def _assertZooString(self, member, container, animal_name=None):
msg = f'Missing or empty {member} in parlai.zoo.model_list'
if animal_name:
msg += f' [{animal_name}]'
self.assertIn(member, container, msg=msg)
self.assertTrue(container[member], msg=msg)
def test_zoolist_fields(self):
"""Ensure zoo entries conform to style standards."""
for animal in model_list:
self._assertZooString('title', animal)
name = animal['title']
# every task must at least contain these
for key in ['id', 'task', 'description', 'example', 'result']:
self._assertZooString(key, animal, name)
# if there's a second example there should be a second result
if 'example2' in animal:
self._assertZooString('result2', animal, name)
# every entry needs a project page or a website
self.assertTrue(
("project" in animal) or ("external_website" in animal),
f"Zoo entry ({name}) should contain either project or external_website",
)
def test_zoolist_types(self):
"""Ensure no type errors in the model zoo."""
self._check_types(model_list, 'Zoo')
def test_tasklist_types(self):
"""Ensure no type errors in the task list."""
self._check_types(task_list, 'Task')
def test_tasklist(self):
"""Check the task list for issues."""
self._check_directory(
"task_list",
task_list,
"parlai/tasks",
"task",
ignore=['fromfile', 'interactive'],
)
def test_zoolist(self):
"""Check the zoo list for issues."""
self._check_directory(
"model_list", model_list, "parlai/zoo", "id", ignore=ZOO_EXCEPTIONS
)
def _check_directory(self, listname, thing_list, thing_dir, thing_key, ignore=None):
if ignore is None:
ignore = []
dirs = testing_utils.git_ls_dirs()
# get only directories directly in the thing directory
dirs = [d for d in dirs if os.path.dirname(d) == thing_dir]
# just the folder names
dirs = [os.path.basename(d) for d in dirs]
# skip the whitelist
dirs = [d for d in dirs if d not in ignore]
# make it a set
dirs = set(dirs)
# and the list of thing names
thing_names = {thing[thing_key].split(':')[0] for thing in thing_list}
errors = []
# items with a directory but not a listing
for name in dirs - thing_names:
errors.append(
"Directory {}/{} exists, but isn't in {}".format(
thing_dir, name, listname
)
)
for name in thing_names - dirs:
errors.append(
"{} exists in {}, but {}/{} isn't a directory".format(
name, listname, thing_dir, name
)
)
if errors:
self.assertTrue(False, "\n".join(errors))
def _check_types(self, thing_list, listname):
for thing in thing_list:
name = thing['id']
for key, value in thing.items():
if key == 'tags':
self.assertIsInstance(
value, list, "{} {} tags is not a list".format(listname, name)
)
self.assertIsNot(
value, [], "{} {} must have some tags".format(listname, name)
)
else:
self.assertIsInstance(
value,
str,
"{} {}:{} must be string".format(listname, name, key),
)
if __name__ == '__main__':
unittest.main() | en | 0.865132 | #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Test common developer mistakes in the model zoo and task list. Mostly just ensures the docs will output nicely. Make sure the package is alive. Ensure zoo entries conform to style standards. # every task must at least contain these # if there's a second example there should be a second result # every entry needs a project page or a website Ensure no type errors in the model zoo. Ensure no type errors in the task list. Check the task list for issues. Check the zoo list for issues. # get only directories directly in the thing directory # just the folder names # skip the whitelist # make it a set # and the list of thing names # items with a directory but not a listing | 2.539374 | 3 |
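The _check_directory helper above reduces to comparing two sets of names and reporting both directions of mismatch. Stripped of the ParlAI specifics, the pattern looks like this (the names below are illustrative, not taken from the repo):

listed = {"squad", "convai2", "wizard_of_wikipedia"}   # names declared in the task/model list
on_disk = {"squad", "convai2", "dailydialog"}          # directories actually present
errors = ["{} is on disk but not listed".format(d) for d in sorted(on_disk - listed)]
errors += ["{} is listed but has no directory".format(d) for d in sorted(listed - on_disk)]
print(errors)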
heroes/heroes/serializers.py | devrishik/Heroes | 0 | 6632802 | <gh_stars>0
from rest_framework import serializers
from .models import *
class PowerSerializer(serializers.ModelSerializer):
class Meta:
model = Power
fields = ('name',)
class WeaknessSerializer(serializers.ModelSerializer):
class Meta:
model = Weakness
fields = ('name',)
class AttributesSerializer(serializers.ModelSerializer):
class Meta:
model = Attributes
fields = ('intelligence', 'strength', 'speed', 'durability', 'power', 'combat',)
class HeroSerializer(serializers.ModelSerializer):
attributes = AttributesSerializer()
powers = serializers.CharField(source='power_list')
weaknesses = serializers.CharField(source='weakness_list')
class Meta:
model = Heroes
fields = ('hero_name', 'real_name', 'gender', 'attributes', 'powers', 'weaknesses',)
| from rest_framework import serializers
from .models import *
class PowerSerializer(serializers.ModelSerializer):
class Meta:
model = Power
fields = ('name',)
class WeaknessSerializer(serializers.ModelSerializer):
class Meta:
model = Weakness
fields = ('name',)
class AttributesSerializer(serializers.ModelSerializer):
class Meta:
model = Attributes
fields = ('intelligence', 'strength', 'speed', 'durability', 'power', 'combat',)
class HeroSerializer(serializers.ModelSerializer):
attributes = AttributesSerializer()
powers = serializers.CharField(source='power_list')
weaknesses = serializers.CharField(source='weakness_list')
class Meta:
model = Heroes
fields = ('hero_name', 'real_name', 'gender', 'attributes', 'powers', 'weaknesses',) | none | 1 | 2.374032 | 2 |
|
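Given the nested AttributesSerializer and the two CharFields sourced from power_list / weakness_list, HeroSerializer(instance).data comes out as a flat dict with one nested block. The values below are invented purely to show the shape:

example_payload = {  # hypothetical serializer output, shape only
    "hero_name": "Nightwatch",
    "real_name": "A. Example",
    "gender": "F",
    "attributes": {"intelligence": 80, "strength": 60, "speed": 70,
                   "durability": 65, "power": 75, "combat": 85},
    "powers": "flight, telekinesis",
    "weaknesses": "cold",
}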
kinoko/misc/web.py | koyo922/kinoko | 13 | 6632803 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 expandtab number
"""
utils for web development
- a simple RESTful decorator for testing purposes
Authors: qianweishuo<<EMAIL>>
Date: 2019/7/11 下午3:59
"""
from __future__ import unicode_literals
import socket
import warnings
import six
import tornado.ioloop
import tornado.web
from typing import Text, Callable, Any
from ..func import try_flatten
from ..misc.log_writer import init_log
from ..text.io import ensure_text, dump_utf8
logger = init_log(__name__)
if six.PY3: # pragma: no cover
import asyncio
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
class RESTful(object):
"""
Decorator class; simply wrapping a function into a RESTful GET service
Usage:
#----- Server side
@RESTful(port=8104, route='/')
def introduce(name, friends):
return '{} has friends: {}'.format(name.upper(), ', '.join(friends))
introduce.serve()
#----- Client side
$ curl 'http://localhost:8104?name=koyo' -d 'friends=tsuga' -d 'friends=Uncle.Li' -d 'friends=Robin'
    KOYO has friends: tsuga, Uncle.Li, Robin%
"""
def __init__(self, port=8104, route='/'):
# type: (RESTful, int, Text) -> None
"""
Setting the default params of the RESTful service object
:param port: which port to listen, default to 8104; if <=1024, need sudo
:param route: which route to listen
"""
self.port = port
self.path = route
self.handler_class = None
self.fn_name = None # the wrapped function name, used as service logger name
def __call__(self, fn):
# type: (RESTful, Callable[[Any], Text]) -> RESTful
"""
wraps the function into a tornado.web.RequestHandler object, for later serving
:param fn: the function to be wrapped
:return:
"""
assert self.fn_name is None, 'The wrapper should not be called more than once'
self.fn_name = fn.__name__ # for later use in serve()
warnings.warn('DO NOT USE FOR PRODUCTION ENVIRONMENT, '
'this decorator was simply designed for toy-demo purpose', UserWarning)
# noinspection PyAbstractClass
class InnerHandler(tornado.web.RequestHandler):
""" the inner class for wrapping the logic of ``fn`` """
def post(this):
""" post """
request_kwargs = this.request.arguments
# if param 'name' was only defined once,
# then flatten the corresponding value(a size==1 list) into a scalar
request_kwargs = {ensure_text(k): try_flatten([ensure_text(arg) for arg in v])
for k, v in six.iteritems(request_kwargs)}
logger.info('----- got request_param: %s', dump_utf8(request_kwargs))
this.write(fn(**request_kwargs))
def get(this):
""" get """
this.post()
self.handler_class = InnerHandler
return self
def serve(self):
""" start serving """
app = tornado.web.Application([
(self.path, self.handler_class)
])
app.listen(self.port)
logger.info('---------- serving `{}` at http://{}:{}'.format(self.fn_name, socket.gethostname(), self.port))
tornado.ioloop.IOLoop.current().start()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 expandtab number
"""
utils for web development
- a simple RESTful decorator for testing purposes
Authors: qianweishuo<<EMAIL>>
Date: 2019/7/11 下午3:59
"""
from __future__ import unicode_literals
import socket
import warnings
import six
import tornado.ioloop
import tornado.web
from typing import Text, Callable, Any
from ..func import try_flatten
from ..misc.log_writer import init_log
from ..text.io import ensure_text, dump_utf8
logger = init_log(__name__)
if six.PY3: # pragma: no cover
import asyncio
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
class RESTful(object):
"""
Decorator class; simply wrapping a function into a RESTful GET service
Usage:
#----- Server side
@RESTful(port=8104, route='/')
def introduce(name, friends):
return '{} has friends: {}'.format(name.upper(), ', '.join(friends))
introduce.serve()
#----- Client side
$ curl 'http://localhost:8104?name=koyo' -d 'friends=tsuga' -d 'friends=Uncle.Li' -d 'friends=Robin'
    KOYO has friends: tsuga, Uncle.Li, Robin%
"""
def __init__(self, port=8104, route='/'):
# type: (RESTful, int, Text) -> None
"""
Setting the default params of the RESTful service object
:param port: which port to listen, default to 8104; if <=1024, need sudo
:param route: which route to listen
"""
self.port = port
self.path = route
self.handler_class = None
self.fn_name = None # the wrapped function name, used as service logger name
def __call__(self, fn):
# type: (RESTful, Callable[[Any], Text]) -> RESTful
"""
wraps the function into a tornado.web.RequestHandler object, for later serving
:param fn: the function to be wrapped
:return:
"""
assert self.fn_name is None, 'The wrapper should not be called more than once'
self.fn_name = fn.__name__ # for later use in serve()
warnings.warn('DO NOT USE FOR PRODUCTION ENVIRONMENT, '
'this decorator was simply designed for toy-demo purpose', UserWarning)
# noinspection PyAbstractClass
class InnerHandler(tornado.web.RequestHandler):
""" the inner class for wrapping the logic of ``fn`` """
def post(this):
""" post """
request_kwargs = this.request.arguments
# if param 'name' was only defined once,
# then flatten the corresponding value(a size==1 list) into a scalar
request_kwargs = {ensure_text(k): try_flatten([ensure_text(arg) for arg in v])
for k, v in six.iteritems(request_kwargs)}
logger.info('----- got request_param: %s', dump_utf8(request_kwargs))
this.write(fn(**request_kwargs))
def get(this):
""" get """
this.post()
self.handler_class = InnerHandler
return self
def serve(self):
""" start serving """
app = tornado.web.Application([
(self.path, self.handler_class)
])
app.listen(self.port)
logger.info('---------- serving `{}` at http://{}:{}'.format(self.fn_name, socket.gethostname(), self.port))
tornado.ioloop.IOLoop.current().start() | en | 0.560637 | #!/usr/bin/env python # -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 expandtab number utils for web developing - a simple RESTful decorater for testing purpose Authors: qianweishuo<<EMAIL>> Date: 2019/7/11 下午3:59 # pragma: no cover Decorator class; simply wrapping a function into a RESTful GET service Usage: #----- Server side @RESTful(port=8104, route='/') def introduce(name, friends): return '{} has friends: {}'.format(name.upper(), ', '.join(friends)) introduce.serve() #----- Client side $ curl 'http://localhost:8104?name=koyo' -d 'friends=tsuga' -d 'friends=Uncle.Li' -d 'friends=Robin' KOYO has friends: tsuga, yamamura, Robin% # type: (RESTful, int, Text) -> None Setting the default params of the RESTful service object :param port: which port to listen, default to 8104; if <=1024, need sudo :param route: which route to listen # the wrapped function name, used as service logger name # type: (RESTful, Callable[[Any], Text]) -> RESTful wraps the function into a tornado.web.RequestHandler object, for later serving :param fn: the function to be wrapped :return: # for later use in serve() # noinspection PyAbstractClass the inner class for wrapping the logic of ``fn`` post # if param 'name' was only defined once, # then flatten the corresponding value(a size==1 list) into a scalar get start serving | 2.459531 | 2 |
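The class docstring shows the curl side; the same call can be made from Python, where passing a list for friends reproduces the repeated -d friends=... form (host, port and route are assumed to be the defaults used above, with the service already running):

import requests

resp = requests.post("http://localhost:8104/",
                     data={"name": "koyo", "friends": ["tsuga", "Uncle.Li", "Robin"]})
print(resp.text)  # e.g. KOYO has friends: tsuga, Uncle.Li, Robin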
mb/ed/definition.py | ajah/represent-canada-data | 0 | 6632804 | <reponame>ajah/represent-canada-data
from datetime import date
import boundaries
boundaries.register('Manitoba electoral districts',
domain='Manitoba',
last_updated=date(2011, 11, 1),
name_func=boundaries.dashed_attr('ED'),
authority='Her Majesty the Queen in Right of Manitoba',
source_url='https://mli2.gov.mb.ca/adminbnd/index.html',
licence_url='https://mli2.gov.mb.ca/app/register/app/index.php',
data_url='https://mli2.gov.mb.ca/adminbnd/shp_zip_files/bdy_mb_electoral_divisions_shp.zip',
encoding='iso-8859-1',
)
| from datetime import date
import boundaries
boundaries.register('Manitoba electoral districts',
domain='Manitoba',
last_updated=date(2011, 11, 1),
name_func=boundaries.dashed_attr('ED'),
authority='Her Majesty the Queen in Right of Manitoba',
source_url='https://mli2.gov.mb.ca/adminbnd/index.html',
licence_url='https://mli2.gov.mb.ca/app/register/app/index.php',
data_url='https://mli2.gov.mb.ca/adminbnd/shp_zip_files/bdy_mb_electoral_divisions_shp.zip',
encoding='iso-8859-1',
) | none | 1 | 1.684003 | 2 |
|
configurations.py | davidpierre21/doe-sangue-scrapy | 1 | 6632805 | from decouple import config
class Config(object):
MONGODB_SERVER = config('MONGODB_SERVER')
MONGODB_PORT = config('MONGODB_PORT')
MONGODB_DB = config('MONGODB_DB')
ENV = config('FLASK_ENV')
MONGO_URI = f"mongodb://{MONGODB_SERVER}:{MONGODB_PORT}/{MONGODB_DB}"
class ProductionConfig(Config):
DEBUG = False
TESTING = False
class DevelopmentConfig(Config):
ENV = 'development'
DEBUG = True
class TestingConfig(Config):
TESTING = True
| from decouple import config
class Config(object):
MONGODB_SERVER = config('MONGODB_SERVER')
MONGODB_PORT = config('MONGODB_PORT')
MONGODB_DB = config('MONGODB_DB')
ENV = config('FLASK_ENV')
MONGO_URI = f"mongodb://{MONGODB_SERVER}:{MONGODB_PORT}/{MONGODB_DB}"
class ProductionConfig(Config):
DEBUG = False
TESTING = False
class DevelopmentConfig(Config):
ENV = 'development'
DEBUG = True
class TestingConfig(Config):
TESTING = True
| none | 1 | 2.281067 | 2 |
|
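These classes follow the usual Flask pattern of class-based settings selected at startup. A typical way to consume them, not shown in this file and assuming the decouple-managed environment variables are set, is:

from flask import Flask
from configurations import DevelopmentConfig  # module name inferred from the file path

app = Flask(__name__)
app.config.from_object(DevelopmentConfig)
print(app.config["MONGO_URI"])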
game/__init__.py | marblexu/gearhead-caramel | 0 | 6632806 | <reponame>marblexu/gearhead-caramel
import exploration
import combat
import teams
import content
import ghdialogue
import configedit
import invoker
import cosplay
import chargen
import services
import fieldhq
from game.fieldhq import backpack
def start_campaign(pc_egg,adv_type="SCENARIO_DEADZONEDRIFTER"):
camp = content.narrative_convenience_function(pc_egg,adv_type=adv_type)
if camp:
camp.place_party()
camp.save()
camp.play()
| import exploration
import combat
import teams
import content
import ghdialogue
import configedit
import invoker
import cosplay
import chargen
import services
import fieldhq
from game.fieldhq import backpack
def start_campaign(pc_egg,adv_type="SCENARIO_DEADZONEDRIFTER"):
camp = content.narrative_convenience_function(pc_egg,adv_type=adv_type)
if camp:
camp.place_party()
camp.save()
camp.play() | none | 1 | 1.71511 | 2 |
|
modules/pmg_tk/TextEditor.py | NIRAJbme/pymol | 0 | 6632807 | '''
Simple Text Editor
'''
import os
import sys
if sys.version_info[0] == 2:
import Tkinter
import tkFileDialog
import tkMessageBox
else:
import tkinter as Tkinter
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
class TextEditor:
def _write(self, handle):
content = self._get()
handle.write(content)
self._savedcontent = content
def _open(self, filename):
self.filename = filename or ''
if filename and os.path.exists(filename):
with open(filename, 'rU') as handle:
content = handle.read()
else:
content = ''
self._set(content)
def _get(self):
return self.text.get(0.0, Tkinter.END)
def _set(self, content):
self.text.delete(0.0, Tkinter.END)
self.text.insert(0.0, content)
self._savedcontent = self._get()
def doSaveAs(self, *args):
handle = tkFileDialog.asksaveasfile(mode='w',
initialfile=os.path.basename(self.filename),
initialdir=os.path.dirname(self.filename),
parent=self.root)
if handle:
with handle:
self._write(handle)
self.filename = handle.name
def doSave(self, *args):
if not self.filename:
return self.doSaveAs()
with open(self.filename, 'w') as handle:
self._write(handle)
def doOpen(self, *args):
filename = tkFileDialog.askopenfilename(parent=self.root)
if filename:
self._open(filename)
def onClose(self):
if self._get() != self._savedcontent:
ok = tkMessageBox.askyesnocancel("Save?", "Save before quit?",
parent=self.root)
if ok:
self.doSave()
elif ok is None:
return
self.root.destroy()
def __init__(self, parent=None, filename='', title='Text Editor'):
self.root = Tkinter.Toplevel(parent) if parent else Tkinter.Tk()
self.root.title(title)
self.root.minsize(width=500, height=400)
self.root.protocol("WM_DELETE_WINDOW", self.onClose)
menubar = Tkinter.Menu(self.root)
filemenu = Tkinter.Menu(menubar)
filemenu.add_command(label="Open", command=self.doOpen, accelerator="Ctrl+O")
filemenu.add_command(label="Save", command=self.doSave, accelerator="Ctrl+S")
filemenu.add_command(label="Save as ...", command=self.doSaveAs, accelerator="Ctrl+Shift+S")
menubar.add_cascade(label="File", menu=filemenu)
self.root.config(menu=menubar)
self.text = Tkinter.Text(self.root, background='white', foreground='black')
self.text.pack(expand=Tkinter.YES, fill=Tkinter.BOTH)
self._open(filename)
self.text.bind("<Control-o>", self.doOpen)
self.text.bind("<Control-s>", self.doSave)
self.text.bind("<Control-S>", self.doSaveAs)
if __name__ == '__main__':
try:
filename = sys.argv[1]
except:
filename = ''
app = TextEditor(None, filename)
app.root.mainloop()
| '''
Simple Text Editor
'''
import os
import sys
if sys.version_info[0] == 2:
import Tkinter
import tkFileDialog
import tkMessageBox
else:
import tkinter as Tkinter
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
class TextEditor:
def _write(self, handle):
content = self._get()
handle.write(content)
self._savedcontent = content
def _open(self, filename):
self.filename = filename or ''
if filename and os.path.exists(filename):
with open(filename, 'rU') as handle:
content = handle.read()
else:
content = ''
self._set(content)
def _get(self):
return self.text.get(0.0, Tkinter.END)
def _set(self, content):
self.text.delete(0.0, Tkinter.END)
self.text.insert(0.0, content)
self._savedcontent = self._get()
def doSaveAs(self, *args):
handle = tkFileDialog.asksaveasfile(mode='w',
initialfile=os.path.basename(self.filename),
initialdir=os.path.dirname(self.filename),
parent=self.root)
if handle:
with handle:
self._write(handle)
self.filename = handle.name
def doSave(self, *args):
if not self.filename:
return self.doSaveAs()
with open(self.filename, 'w') as handle:
self._write(handle)
def doOpen(self, *args):
filename = tkFileDialog.askopenfilename(parent=self.root)
if filename:
self._open(filename)
def onClose(self):
if self._get() != self._savedcontent:
ok = tkMessageBox.askyesnocancel("Save?", "Save before quit?",
parent=self.root)
if ok:
self.doSave()
elif ok is None:
return
self.root.destroy()
def __init__(self, parent=None, filename='', title='Text Editor'):
self.root = Tkinter.Toplevel(parent) if parent else Tkinter.Tk()
self.root.title(title)
self.root.minsize(width=500, height=400)
self.root.protocol("WM_DELETE_WINDOW", self.onClose)
menubar = Tkinter.Menu(self.root)
filemenu = Tkinter.Menu(menubar)
filemenu.add_command(label="Open", command=self.doOpen, accelerator="Ctrl+O")
filemenu.add_command(label="Save", command=self.doSave, accelerator="Ctrl+S")
filemenu.add_command(label="Save as ...", command=self.doSaveAs, accelerator="Ctrl+Shift+S")
menubar.add_cascade(label="File", menu=filemenu)
self.root.config(menu=menubar)
self.text = Tkinter.Text(self.root, background='white', foreground='black')
self.text.pack(expand=Tkinter.YES, fill=Tkinter.BOTH)
self._open(filename)
self.text.bind("<Control-o>", self.doOpen)
self.text.bind("<Control-s>", self.doSave)
self.text.bind("<Control-S>", self.doSaveAs)
if __name__ == '__main__':
try:
filename = sys.argv[1]
except:
filename = ''
app = TextEditor(None, filename)
app.root.mainloop()
| en | 0.287367 | Simple Text Editor | 3.387452 | 3 |
generate_free.py | lkugler/DART-WRF | 1 | 6632808 | <reponame>lkugler/DART-WRF
#!/usr/bin/python3
"""
running the forecast model without assimilation
"""
import os, sys, shutil
import datetime as dt
import pandas as pd
from slurmpy import Slurm
from config.cfg import exp, cluster
from dartwrf.utils import script_to_str, symlink
log_dir = cluster.archivedir+'/logs/'
slurm_scripts_dir = cluster.archivedir+'/slurm-scripts/'
print('logging to', log_dir)
print('scripts, which are submitted to SLURM:', slurm_scripts_dir)
from scheduler import *
################################
print('starting osse')
backup_scripts()
id = None
is_nature = False
begin = dt.datetime(2008, 7, 30, 7)
id = prepare_WRFrundir(begin) # create initial conditions
id = run_ideal(depends_on=id)
if is_nature:
id = wrfinput_insert_wbubble(perturb=False, depends_on=id)
end = dt.datetime(2008, 7, 30, 18)
id = run_ENS(begin=begin, end=end,
first_minute=False,
input_is_restart=False,
output_restart_interval=(end-begin).total_seconds()/60,
depends_on=id)
id = create_satimages(begin, depends_on=id)
else:
#id = wrfinput_insert_wbubble(perturb=True, depends_on=id)
restarts = pd.date_range(start=dt.datetime(2008, 7, 30, 10),
end=dt.datetime(2008, 7, 30, 12),
freq=dt.timedelta(minutes=60))
#restarts = [dt.datetime(2008, 7, 30, 11)]
input_is_restart = False
time = begin
last_init = dt.datetime(2008, 7, 30, 9) # dummy value
for next_restart in restarts:
print('run_WRF from', time, 'to', next_restart)
id = run_ENS(begin=time, end=next_restart,
first_minute=False,
input_is_restart=input_is_restart,
restart_path=cluster.archivedir+last_init.strftime('/%Y-%m-%d_%H:%M/'),
output_restart_interval=(next_restart-time).total_seconds()/60,
#output_restart_interval=720,
depends_on=id)
last_init = time
time = next_restart
input_is_restart = True
create_satimages(last_init, depends_on=id)
#sys.exit()
# free run, no restart files anymore
print('run WRF from', time, 'until', end)
end = dt.datetime(2008, 7, 30, 18)
id = run_ENS(begin=time, end=end,
first_minute=False,
input_is_restart=input_is_restart,
restart_path=cluster.archivedir+time.strftime('/%Y-%m-%d_%H:%M/'),
#output_restart_interval=(next_restart-time).total_seconds()/60,
output_restart_interval=9999,
depends_on=id)
id = create_satimages(time, depends_on=id)
verify(depends_on=id)
| #!/usr/bin/python3
"""
running the forecast model without assimilation
"""
import os, sys, shutil
import datetime as dt
import pandas as pd
from slurmpy import Slurm
from config.cfg import exp, cluster
from dartwrf.utils import script_to_str, symlink
log_dir = cluster.archivedir+'/logs/'
slurm_scripts_dir = cluster.archivedir+'/slurm-scripts/'
print('logging to', log_dir)
print('scripts, which are submitted to SLURM:', slurm_scripts_dir)
from scheduler import *
################################
print('starting osse')
backup_scripts()
id = None
is_nature = False
begin = dt.datetime(2008, 7, 30, 7)
id = prepare_WRFrundir(begin) # create initial conditions
id = run_ideal(depends_on=id)
if is_nature:
id = wrfinput_insert_wbubble(perturb=False, depends_on=id)
end = dt.datetime(2008, 7, 30, 18)
id = run_ENS(begin=begin, end=end,
first_minute=False,
input_is_restart=False,
output_restart_interval=(end-begin).total_seconds()/60,
depends_on=id)
id = create_satimages(begin, depends_on=id)
else:
#id = wrfinput_insert_wbubble(perturb=True, depends_on=id)
restarts = pd.date_range(start=dt.datetime(2008, 7, 30, 10),
end=dt.datetime(2008, 7, 30, 12),
freq=dt.timedelta(minutes=60))
#restarts = [dt.datetime(2008, 7, 30, 11)]
input_is_restart = False
time = begin
last_init = dt.datetime(2008, 7, 30, 9) # dummy value
for next_restart in restarts:
print('run_WRF from', time, 'to', next_restart)
id = run_ENS(begin=time, end=next_restart,
first_minute=False,
input_is_restart=input_is_restart,
restart_path=cluster.archivedir+last_init.strftime('/%Y-%m-%d_%H:%M/'),
output_restart_interval=(next_restart-time).total_seconds()/60,
#output_restart_interval=720,
depends_on=id)
last_init = time
time = next_restart
input_is_restart = True
create_satimages(last_init, depends_on=id)
#sys.exit()
# free run, no restart files anymore
print('run WRF from', time, 'until', end)
end = dt.datetime(2008, 7, 30, 18)
id = run_ENS(begin=time, end=end,
first_minute=False,
input_is_restart=input_is_restart,
restart_path=cluster.archivedir+time.strftime('/%Y-%m-%d_%H:%M/'),
#output_restart_interval=(next_restart-time).total_seconds()/60,
output_restart_interval=9999,
depends_on=id)
id = create_satimages(time, depends_on=id)
verify(depends_on=id) | en | 0.241405 | #!/usr/bin/python3 running the forecast model without assimilation ################################ # create initial conditions #id = wrfinput_insert_wbubble(perturb=True, depends_on=id) #restarts = [dt.datetime(2008, 7, 30, 11)] # dummy value #output_restart_interval=720, #sys.exit() # free run, no restart files anymore #output_restart_interval=(next_restart-time).total_seconds()/60, | 1.949933 | 2 |
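The restart schedule in the free-run branch above is just a pandas date_range with both endpoints included, so the loop runs one forecast leg per hour. A quick check of what that range contains:

import datetime as dt
import pandas as pd

restarts = pd.date_range(start=dt.datetime(2008, 7, 30, 10),
                         end=dt.datetime(2008, 7, 30, 12),
                         freq=dt.timedelta(minutes=60))
print(list(restarts))  # three hourly timestamps on 2008-07-30: 10:00, 11:00 and 12:00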
setup.py | vedavamadathil/smake | 23 | 6632809 | import setuptools
# Create the package
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name = 'smake',
version = '1.2.3',
scripts = ['smake'],
author = "<NAME>",
author_email = "<EMAIL>",
description = "A simple and convenient build-and-run system for C and C++.",
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/vedavamadathil/smake",
packages = setuptools.find_packages(),
    install_requires = ['pyyaml'],
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| import setuptools
# Create the package
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name = 'smake',
version = '1.2.3',
scripts = ['smake'],
author = "<NAME>",
author_email = "<EMAIL>",
description = "A simple and convenient build-and-run system for C and C++.",
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/vedavamadathil/smake",
packages = setuptools.find_packages(),
    install_requires = ['pyyaml'],
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| en | 0.657942 | # Create the package | 1.59847 | 2 |
pytests/security/x509main.py | bkumaran/testrunner | 0 | 6632810 | import logger
from lib.Cb_constants.CBServer import CbServer
log = logger.Logger.get_logger()
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection
import httplib2
import base64
import requests
import urllib.request, urllib.parse, urllib.error
import random
import os
import copy
import subprocess
import json
import socket
from pytests.security.ntonencryptionBase import ntonencryptionBase
class ServerInfo():
def __init__(self,
ip,
port,
ssh_username,
ssh_password,
ssh_key=''):
self.ip = ip
self.ssh_username = ssh_username
self.ssh_password = <PASSWORD>
self.port = port
self.ssh_key = ssh_key
class x509main:
CHAINCERTFILE = 'chain.pem'
NODECAKEYFILE = 'pkey.key'
CACERTFILE = "root.crt"
CAKEYFILE = "root.key"
WININSTALLPATH = "C:/Program Files/Couchbase/Server/var/lib/couchbase/"
LININSTALLPATH = "/opt/couchbase/var/lib/couchbase/"
MACINSTALLPATH = "/Users/couchbase/Library/Application Support/Couchbase/var/lib/couchbase/"
DOWNLOADPATH = "/tmp/"
CACERTFILEPATH = "/tmp/newcerts" + str(random.randint(1, 100)) + "/"
CHAINFILEPATH = "inbox"
GOCERTGENFILE = "gencert.go"
INCORRECT_ROOT_CERT = "incorrect_root_cert.crt"
SLAVE_HOST = ServerInfo('127.0.0.1', 22, 'root', 'couchbase')
CLIENT_CERT_AUTH_JSON = 'client_cert_auth1.json'
CLIENT_CERT_AUTH_TEMPLATE = 'client_cert_config_template.txt'
IP_ADDRESS = '172.16.1.174'
KEY_FILE = CACERTFILEPATH + "/" + CAKEYFILE
CERT_FILE = CACERTFILEPATH + "/" + CACERTFILE
CLIENT_CERT_KEY = CACERTFILEPATH + IP_ADDRESS + ".key"
CLIENT_CERT_PEM = CACERTFILEPATH + IP_ADDRESS + ".pem"
SRC_CHAIN_FILE = CACERTFILEPATH + "/long_chain" + IP_ADDRESS + ".pem"
def __init__(self,
host=None,
method='REST'):
if host is not None:
self.host = host
self.install_path = self._get_install_path(self.host)
self.disable_ssl_certificate_validation = False
if CbServer.use_https:
self.disable_ssl_certificate_validation = True
self.slave_host = x509main.SLAVE_HOST
def getLocalIPAddress(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('couchbase.com', 0))
return s.getsockname()[0]
'''
status, ipAddress = commands.getstatusoutput("ifconfig en0 | grep 'inet addr:' | cut -d: -f2 |awk '{print $1}'")
if '1' not in ipAddress:
status, ipAddress = commands.getstatusoutput("ifconfig eth0 | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | awk '{print $2}'")
return ipAddress
'''
def get_data_path(self,node):
"""Gets couchbase log directory, even for cluster_run
"""
_, dir = RestConnection(node).diag_eval(
'filename:absname(element(2, application:get_env(ns_server,path_config_datadir))).')
dir = dir.strip('"')
return str(dir)
def _generate_cert(self, servers, root_cn='Root\ Authority', type='go', encryption="", key_length=1024, client_ip=None, alt_names='default', dns=None, uri=None,wildcard_dns=None):
shell = RemoteMachineShellConnection(self.slave_host)
shell.execute_command("rm -rf " + x509main.CACERTFILEPATH)
shell.execute_command("mkdir " + x509main.CACERTFILEPATH)
if type == 'go':
files = []
cert_file = "./pytests/security/" + x509main.GOCERTGENFILE
output, error = shell.execute_command("go run " + cert_file + " -store-to=" + x509main.CACERTFILEPATH + "root -common-name=" + root_cn)
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("go run " + cert_file + " -store-to=" + x509main.CACERTFILEPATH + "interm -sign-with=" + x509main.CACERTFILEPATH + "root -common-name=Intemediate\ Authority")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
for server in servers:
if "[" in server.ip:
server.ip = server.ip.replace("[", "").replace("]", "")
output, error = shell.execute_command("go run " + cert_file + " -store-to=" + x509main.CACERTFILEPATH + server.ip + " -sign-with=" + x509main.CACERTFILEPATH + "interm -common-name=" + server.ip + " -final=true")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("cat " + x509main.CACERTFILEPATH + server.ip + ".crt " + x509main.CACERTFILEPATH + "interm.crt > " + " " + x509main.CACERTFILEPATH + "long_chain" + server.ip + ".pem")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
shell.execute_command("go run " + cert_file + " -store-to=" + x509main.CACERTFILEPATH + "incorrect_root_cert -common-name=Incorrect\ Authority")
elif type == 'openssl':
files = []
v3_ca = "./pytests/security/v3_ca.crt"
output, error = shell.execute_command("openssl genrsa " + encryption + " -out " + x509main.CACERTFILEPATH + "ca.key " + str(key_length))
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl req -new -x509 -days 3650 -sha256 -key " + x509main.CACERTFILEPATH + "ca.key -out " + x509main.CACERTFILEPATH + "ca.pem -subj '/C=UA/O=My Company/CN=My Company Root CA'")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl genrsa " + encryption + " -out " + x509main.CACERTFILEPATH + "int.key " + str(key_length))
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl req -new -key " + x509main.CACERTFILEPATH + "int.key -out " + x509main.CACERTFILEPATH + "int.csr -subj '/C=UA/O=My Company/CN=My Company Intermediate CA'")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl x509 -req -in " + x509main.CACERTFILEPATH + "int.csr -CA " + x509main.CACERTFILEPATH + "ca.pem -CAkey " + x509main.CACERTFILEPATH + "ca.key -CAcreateserial -CAserial " \
+ x509main.CACERTFILEPATH + "rootCA.srl -extfile ./pytests/security/v3_ca.ext -out " + x509main.CACERTFILEPATH + "int.pem -days 365 -sha256")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
for server in servers:
# check if the ip address is ipv6 raw ip address, remove [] brackets
if "[" in server.ip:
server.ip = server.ip.replace("[", "").replace("]", "")
from shutil import copyfile
copyfile("./pytests/security/clientconf.conf", "./pytests/security/clientconf3.conf")
fin = open("./pytests/security/clientconf3.conf", "a+")
if ".com" in server.ip and wildcard_dns is None:
fin.write("\nDNS.0 = {0}".format(server.ip))
elif wildcard_dns:
fin.write("\nDNS.0 = {0}".format(wildcard_dns))
else:
fin.write("\nIP.0 = {0}".format(server.ip.replace('[', '').replace(']', '')))
fin.close()
import fileinput
import sys
for line in fileinput.input("./pytests/security/clientconf3.conf", inplace=1):
if "ip_address" in line:
line = line.replace("ip_address", server.ip)
sys.stdout.write(line)
# print file contents for easy debugging
fout = open("./pytests/security/clientconf3.conf", "r")
print((fout.read()))
fout.close()
output, error = shell.execute_command("openssl genrsa " + encryption + " -out " + x509main.CACERTFILEPATH + server.ip + ".key " + str(key_length))
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl req -new -key " + x509main.CACERTFILEPATH + server.ip + ".key -out " + x509main.CACERTFILEPATH + server.ip + ".csr -config ./pytests/security/clientconf3.conf")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl x509 -req -in " + x509main.CACERTFILEPATH + server.ip + ".csr -CA " + x509main.CACERTFILEPATH + "int.pem -CAkey " + \
x509main.CACERTFILEPATH + "int.key -CAcreateserial -CAserial " + x509main.CACERTFILEPATH + "intermediateCA.srl -out " + x509main.CACERTFILEPATH + server.ip + ".pem -days 365 -sha256 -extfile ./pytests/security/clientconf3.conf -extensions req_ext")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("cat " + x509main.CACERTFILEPATH + server.ip + ".pem " + x509main.CACERTFILEPATH + "int.pem " + x509main.CACERTFILEPATH + "ca.pem > " + x509main.CACERTFILEPATH + "long_chain" + server.ip + ".pem")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("cp " + x509main.CACERTFILEPATH + "ca.pem " + x509main.CACERTFILEPATH + "root.crt")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
os.remove("./pytests/security/clientconf3.conf")
# Check if client_ip is ipv6, remove []
if "[" in client_ip:
client_ip = client_ip.replace("[", "").replace("]", "")
from shutil import copyfile
copyfile("./pytests/security/clientconf.conf", "./pytests/security/clientconf2.conf")
fin = open("./pytests/security/clientconf2.conf", "a+")
if alt_names == 'default':
fin.write("\nDNS.1 = us.cbadminbucket.com")
fin.write("\nURI.1 = www.cbadminbucket.com")
elif alt_names == 'non_default':
if dns is not None:
dns = "\nDNS.1 = " + dns
fin.write(dns)
if uri is not None:
                    uri = "\nURI.1 = " + uri
fin.write(uri)
if ".com" in server.ip:
fin.write("\nDNS.0 = {0}".format(server.ip))
else:
fin.write("\nIP.0 = {0}".format(server.ip.replace('[', '').replace(']', '')))
fin.close()
# print file contents for easy debugging
fout = open("./pytests/security/clientconf2.conf", "r")
print((fout.read()))
fout.close()
# Generate Certificate for the client
output, error = shell.execute_command("openssl genrsa " + encryption + " -out " + x509main.CACERTFILEPATH + client_ip + ".key " + str(key_length))
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl req -new -key " + x509main.CACERTFILEPATH + client_ip + ".key -out " + x509main.CACERTFILEPATH + client_ip + ".csr -config ./pytests/security/clientconf2.conf")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl x509 -req -in " + x509main.CACERTFILEPATH + client_ip + ".csr -CA " + x509main.CACERTFILEPATH + "int.pem -CAkey " + \
x509main.CACERTFILEPATH + "int.key -CAcreateserial -CAserial " + x509main.CACERTFILEPATH + "intermediateCA.srl -out " + x509main.CACERTFILEPATH + client_ip + ".pem -days 365 -sha256 -extfile ./pytests/security/clientconf2.conf -extensions req_ext")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("cat " + x509main.CACERTFILEPATH + client_ip + ".pem " + x509main.CACERTFILEPATH + "int.pem " + x509main.CACERTFILEPATH + "ca.pem > " + x509main.CACERTFILEPATH + "long_chain" + client_ip + ".pem")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
os.remove("./pytests/security/clientconf2.conf")
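    # Illustrative call for the generator above (a sketch, not part of the original
    # harness; the server list and client IP are assumptions):
    #   x509main(master)._generate_cert(servers, type='openssl', client_ip='172.16.1.174')
    # This writes root/intermediate/node keys and certs plus long_chain<ip>.pem files
    # under x509main.CACERTFILEPATH for later upload to the cluster.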
# Top level method for setup of nodes in the cluster
def setup_cluster_nodes_ssl(self, servers=[], reload_cert=False):
# Make a copy of the servers not to change self.servers
copy_servers = copy.deepcopy(servers)
# For each server in cluster, setup a node certificates, create inbox folders and copy + chain cert
for server in copy_servers:
x509main(server)._setup_node_certificates(reload_cert=reload_cert, host=server)
# Create inbox folder and copy node cert and chain cert
def _setup_node_certificates(self, chain_cert=True, node_key=True, reload_cert=True, host=None):
        if host is None:
host = self.host
self._create_inbox_folder(host)
if host.ip.count(':') > 0 and host.ip.count(']') > 0:
            # raw ipv6 address: strip the enclosing square brackets
host.ip = host.ip.replace('[', '').replace(']', '')
src_chain_file = x509main.CACERTFILEPATH + "/long_chain" + host.ip + ".pem"
dest_chain_file = self.install_path + x509main.CHAINFILEPATH + "/" + x509main.CHAINCERTFILE
src_node_key = x509main.CACERTFILEPATH + "/" + host.ip + ".key"
dest_node_key = self.install_path + x509main.CHAINFILEPATH + "/" + x509main.NODECAKEYFILE
if chain_cert:
self._copy_node_key_chain_cert(host, src_chain_file, dest_chain_file)
if node_key:
self._copy_node_key_chain_cert(host, src_node_key, dest_node_key)
if reload_cert:
status, content = self._reload_node_certificate(host)
return status, content
# Reload cert for self signed certificate
def _reload_node_certificate(self, host):
rest = RestConnection(host)
api = rest.baseUrl + "node/controller/reloadCertificate"
http = httplib2.Http(disable_ssl_certificate_validation=self.disable_ssl_certificate_validation)
status, content, header = rest._http_request(api, 'POST', headers=self._create_rest_headers('Administrator', 'password'))
return status, content
# Get the install path for different operating system
def _get_install_path(self, host):
shell = RemoteMachineShellConnection(host)
os_type = shell.extract_remote_info().distribution_type
log.info ("OS type is {0}".format(os_type))
if os_type == 'windows':
install_path = x509main.WININSTALLPATH
elif os_type == 'Mac':
install_path = x509main.MACINSTALLPATH
else:
#install_path = x509main.LININSTALLPATH
install_path = str(self.get_data_path(host)) + "/"
return install_path
# create inbox folder for host
def _create_inbox_folder(self, host):
shell = RemoteMachineShellConnection(self.host)
final_path = self.install_path + x509main.CHAINFILEPATH
shell.create_directory(final_path)
# delete all file inbox folder and remove inbox folder
def _delete_inbox_folder(self):
shell = RemoteMachineShellConnection(self.host)
final_path = self.install_path + x509main.CHAINFILEPATH
shell = RemoteMachineShellConnection(self.host)
os_type = shell.extract_remote_info().distribution_type
log.info ("OS type is {0}".format(os_type))
shell.delete_file(final_path, "root.crt")
shell.delete_file(final_path, "chain.pem")
shell.delete_file(final_path, "pkey.key")
if os_type == 'windows':
final_path = '/cygdrive/c/Program\ Files/Couchbase/Server/var/lib/couchbase/inbox'
shell.execute_command('rm -rf ' + final_path)
else:
shell.execute_command('rm -rf ' + final_path)
# Function to simply copy from source to destination
def _copy_node_key_chain_cert(self, host, src_path, dest_path):
shell = RemoteMachineShellConnection(host)
shell.copy_file_local_to_remote(src_path, dest_path)
def _create_rest_headers(self, username="Administrator", password="password"):
authorization = base64.encodebytes(('%s:%s' % (username, password)).encode()).decode()
return {'Content-Type': 'application/octet-stream',
'Authorization': 'Basic %s' % authorization,
'Accept': '*/*'}
# Function that will upload file via rest
def _rest_upload_file(self, URL, file_path_name, username=None, password=None):
data = open(file_path_name, 'rb').read()
http = httplib2.Http(disable_ssl_certificate_validation=self.disable_ssl_certificate_validation)
status, content = http.request(URL, 'POST', headers=self._create_rest_headers(username, password), body=data)
log.info (" Status from rest file upload command is {0}".format(status))
log.info (" Content from rest file upload command is {0}".format(content))
return status, content
# Upload Cluster or root cert
def _upload_cluster_ca_certificate(self, username, password):
rest = RestConnection(self.host)
url = "controller/uploadClusterCA"
api = rest.baseUrl + url
self._rest_upload_file(api, x509main.CACERTFILEPATH + "/" + x509main.CACERTFILE, "Administrator", 'password')
# Upload security setting for client cert
def _upload_cluster_ca_settings(self, username, password):
temp = self.host
rest = RestConnection(temp)
url = "settings/clientCertAuth"
api = rest.baseUrl + url
status, content = self._rest_upload_file(api, x509main.CACERTFILEPATH + x509main.CLIENT_CERT_AUTH_JSON, "Administrator", 'password')
log.info (" --- Status from upload of client cert settings is {0} and Content is {1}".format(status, content))
return status, content
    '''
    Use the requests module to execute REST APIs.
    Steps:
    1. Check whether client_cert is set; this determines the client-certificate parameters used for the connection.
    2. Check which verb the REST API requires: GET, POST, PUT or DELETE.
    3. Issue the request with the client certs, the data passed for the request, and the request headers.
    4. Return the status code and response text to the calling function.
    Any exception raised along the way is captured and an error is returned.
    '''
def _validate_ssl_login(self, final_url=None, header=None, client_cert=False, verb='GET', data='', plain_curl=False, username='Administrator', password='password', host=None):
if verb == 'GET' and plain_curl:
r = requests.get(final_url, data=data)
return r.status_code, r.text
elif client_cert:
try:
if verb == 'GET':
r = requests.get(final_url, verify=x509main.CERT_FILE, cert=(x509main.CLIENT_CERT_PEM, x509main.CLIENT_CERT_KEY), data=data)
elif verb == 'POST':
r = requests.post(final_url, verify=x509main.CERT_FILE, cert=(x509main.CLIENT_CERT_PEM, x509main.CLIENT_CERT_KEY), data=data)
elif verb == 'PUT':
                    header = {'Content-Type': 'application/json'}
r = requests.put(final_url, verify=x509main.CERT_FILE, cert=(x509main.CLIENT_CERT_PEM, x509main.CLIENT_CERT_KEY), data=data, headers=header)
elif verb == 'DELETE':
                    header = {'Content-Type': 'application/json'}
r = requests.delete(final_url, verify=x509main.CERT_FILE, cert=(x509main.CLIENT_CERT_PEM, x509main.CLIENT_CERT_KEY), headers=header)
return r.status_code, r.text
except Exception as ex:
log.info ("into exception form validate_ssl_login with client cert")
log.info (" Exception is {0}".format(ex))
return 'error','error'
else:
try:
r = requests.get("https://" + str(self.host.ip) + ":18091", verify=x509main.CERT_FILE)
if r.status_code == 200:
header = {'Content-type': 'application/x-www-form-urlencoded'}
params = urllib.parse.urlencode({'user':'{0}'.format(username), 'password':'{<PASSWORD>(password)})
r = requests.post("https://" + str(self.host.ip) + ":18091/uilogin", data=params, headers=header, verify=x509main.CERT_FILE)
return r.status_code
except Exception as ex:
log.info ("into exception form validate_ssl_login")
log.info (" Exception is {0}".format(ex))
return 'error','error'
    '''
    Use curl to execute REST APIs.
    1. Check whether the verb is GET, POST or DELETE and pick the matching curl invocation.
    2. If the request is a plain curl call, use no cert and no client auth.
    3. If a client cert is to be used, pass in the root cert, the client key and the client cert.
    4. If the request does not use a client cert, pass in just the root cert.
    5. Form the command from the curl invocation, the headers and the url.
    6. Append any data, if present.
    7. Execute the curl command.
    Return the output of the curl command.
    '''
def _validate_curl(self, final_url=None, headers=None, client_cert=False, verb='GET', data='', plain_curl=False, username='Administrator', password='password'):
if verb == 'GET':
final_verb = 'curl -v'
elif verb == 'POST':
final_verb = 'curl -v -X POST'
elif verb == 'DELETE':
final_verb = 'curl -v -X DELETE'
if plain_curl:
main_url = final_verb
elif client_cert:
main_url = final_verb + " --cacert " + x509main.SRC_CHAIN_FILE + " --cert-type PEM --cert " + x509main.CLIENT_CERT_PEM + " --key-type PEM --key " + x509main.CLIENT_CERT_KEY
else:
main_url = final_verb + " --cacert " + x509main.CERT_FILE
cmd = str(main_url) + " " + str(headers) + " " + str(final_url)
if data is not None:
cmd = cmd + " -d " + data
log.info("Running command : {0}".format(cmd))
output = subprocess.check_output(cmd, shell=True)
return output
    '''
    Decide what needs to be called for the authentication.
    1. If host is None, take it from the object.
    2. If the call has to be plain (no certs at all), use http; otherwise use https.
    3. Execute either via curl or via python requests, depending on the curl flag.
    Return the result.
    '''
def _execute_command_clientcert(self, host=None, url=None, port=18091, headers=None, client_cert=False, curl=False, verb='GET', data=None,
plain_curl=False, username='Administrator', password='password'):
if host is None:
host = self.host.ip
if plain_curl:
final_url = "http://" + str(host) + ":" + str(port) + str(url)
else:
final_url = "https://" + str(host) + ":" + str(port) + str(url)
if curl:
result = self._validate_curl(final_url, headers, client_cert, verb, data, plain_curl, username, password)
return result
else:
status, result = self._validate_ssl_login(final_url, headers, client_cert, verb, data, plain_curl, username, password)
return status, result
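    # Usage sketch (assumed, not taken from the original tests): issue a GET over
    # https with client-cert auth and collect the response:
    #   status, text = x509main(server)._execute_command_clientcert(
    #       url='/pools/default', port=18091, client_cert=True, verb='GET')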
# Get current root cert from the cluster
def _get_cluster_ca_cert(self):
rest = RestConnection(self.host)
api = rest.baseUrl + "pools/default/certificate?extended=true"
status, content, header = rest._http_request(api, 'GET')
return status, content, header
# Setup master node
    # 1. Upload the cluster cert, i.e. the root certificate
# 2. Setup other nodes for certificates
    # 3. Create the cert.json file which contains state, path, prefixes and delimiters
# 4. Upload the cert.json file
def setup_master(self, state=None, paths=None, prefixs=None, delimeters=None, mode='rest', user='Administrator', password='password'):
level = ntonencryptionBase().get_encryption_level_cli(self.host)
if level:
ntonencryptionBase().disable_nton_cluster([self.host])
copy_host = copy.deepcopy(self.host)
x509main(copy_host)._upload_cluster_ca_certificate(user, password)
x509main(copy_host)._setup_node_certificates()
if state is not None:
self.write_client_cert_json_new(state, paths, prefixs, delimeters)
if mode == 'rest':
x509main(copy_host)._upload_cluster_ca_settings(user, password)
elif mode == 'cli':
x509main(copy_host)._upload_cert_file_via_cli(user, password)
if level:
ntonencryptionBase().setup_nton_cluster([self.host], clusterEncryptionLevel=level)
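    # Typical flow in a test (a hedged sketch; the call order is inferred from this
    # class and is not documented in the original file):
    #   x509main(master)._generate_cert(servers)
    #   x509main(master).setup_master(state='enable', paths=['subject.cn'], prefixs=[''], delimeters=[''])
    #   x509main(master).setup_cluster_nodes_ssl(servers, reload_cert=True)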
    # write a new config json file based on state, paths, prefixes and delimiters
def write_client_cert_json_new(self, state, paths, prefixs, delimeters):
template_path = './pytests/security/' + x509main.CLIENT_CERT_AUTH_TEMPLATE
config_json = x509main.CACERTFILEPATH + x509main.CLIENT_CERT_AUTH_JSON
target_file = open(config_json, 'w')
source_file = open(template_path, 'r')
client_cert = '{"state" : ' + "'" + state + "'" + ", 'prefixes' : [ "
for line in source_file:
for path, prefix, delimeter in zip(paths, prefixs, delimeters):
line1 = line.replace("@2", "'" + path + "'")
line2 = line1.replace("@3", "'" + prefix + "'")
line3 = line2.replace("@4", "'" + delimeter + "'")
temp_client_cert = "{ " + line3 + " },"
client_cert = client_cert + temp_client_cert
client_cert = client_cert.replace("'", '"')
client_cert = client_cert[:-1]
client_cert = client_cert + " ]}"
log.info ("-- Log current config json file ---{0}".format(client_cert))
target_file.write(client_cert)
#upload new config file via commandline.
def _upload_cert_file_via_cli(self, user='Administrator', password='password'):
src_cert_file = x509main.CACERTFILEPATH + x509main.CLIENT_CERT_AUTH_JSON
dest_cert_file = self.install_path + x509main.CHAINFILEPATH + "/" + x509main.CLIENT_CERT_AUTH_JSON
self._copy_node_key_chain_cert(self.host, src_cert_file, dest_cert_file)
cli_command = 'ssl-manage'
options = "--set-client-auth " + dest_cert_file
remote_client = RemoteMachineShellConnection(self.host)
output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
options=options, cluster_host="localhost", user=user, password=password)
log.info (" -- Output of command ssl-manage with --set-client-auth is {0} and erorr is {1}".format(output, error))
| import logger
from lib.Cb_constants.CBServer import CbServer
log = logger.Logger.get_logger()
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection
import httplib2
import base64
import requests
import urllib.request, urllib.parse, urllib.error
import random
import os
import copy
import subprocess
import json
import socket  # used by getLocalIPAddress() below
from pytests.security.ntonencryptionBase import ntonencryptionBase
class ServerInfo():
def __init__(self,
ip,
port,
ssh_username,
ssh_password,
ssh_key=''):
self.ip = ip
self.ssh_username = ssh_username
self.ssh_password = <PASSWORD>
self.port = port
self.ssh_key = ssh_key
class x509main:
CHAINCERTFILE = 'chain.pem'
NODECAKEYFILE = 'pkey.key'
CACERTFILE = "root.crt"
CAKEYFILE = "root.key"
WININSTALLPATH = "C:/Program Files/Couchbase/Server/var/lib/couchbase/"
LININSTALLPATH = "/opt/couchbase/var/lib/couchbase/"
MACINSTALLPATH = "/Users/couchbase/Library/Application Support/Couchbase/var/lib/couchbase/"
DOWNLOADPATH = "/tmp/"
CACERTFILEPATH = "/tmp/newcerts" + str(random.randint(1, 100)) + "/"
CHAINFILEPATH = "inbox"
GOCERTGENFILE = "gencert.go"
INCORRECT_ROOT_CERT = "incorrect_root_cert.crt"
SLAVE_HOST = ServerInfo('127.0.0.1', 22, 'root', 'couchbase')
CLIENT_CERT_AUTH_JSON = 'client_cert_auth1.json'
CLIENT_CERT_AUTH_TEMPLATE = 'client_cert_config_template.txt'
IP_ADDRESS = '172.16.1.174'
KEY_FILE = CACERTFILEPATH + "/" + CAKEYFILE
CERT_FILE = CACERTFILEPATH + "/" + CACERTFILE
CLIENT_CERT_KEY = CACERTFILEPATH + IP_ADDRESS + ".key"
CLIENT_CERT_PEM = CACERTFILEPATH + IP_ADDRESS + ".pem"
SRC_CHAIN_FILE = CACERTFILEPATH + "/long_chain" + IP_ADDRESS + ".pem"
def __init__(self,
host=None,
method='REST'):
if host is not None:
self.host = host
self.install_path = self._get_install_path(self.host)
self.disable_ssl_certificate_validation = False
if CbServer.use_https:
self.disable_ssl_certificate_validation = True
self.slave_host = x509main.SLAVE_HOST
def getLocalIPAddress(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('couchbase.com', 0))
return s.getsockname()[0]
'''
status, ipAddress = commands.getstatusoutput("ifconfig en0 | grep 'inet addr:' | cut -d: -f2 |awk '{print $1}'")
if '1' not in ipAddress:
status, ipAddress = commands.getstatusoutput("ifconfig eth0 | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | awk '{print $2}'")
return ipAddress
'''
def get_data_path(self,node):
"""Gets couchbase log directory, even for cluster_run
"""
_, dir = RestConnection(node).diag_eval(
'filename:absname(element(2, application:get_env(ns_server,path_config_datadir))).')
dir = dir.strip('"')
return str(dir)
def _generate_cert(self, servers, root_cn='Root\ Authority', type='go', encryption="", key_length=1024, client_ip=None, alt_names='default', dns=None, uri=None,wildcard_dns=None):
shell = RemoteMachineShellConnection(self.slave_host)
shell.execute_command("rm -rf " + x509main.CACERTFILEPATH)
shell.execute_command("mkdir " + x509main.CACERTFILEPATH)
if type == 'go':
files = []
cert_file = "./pytests/security/" + x509main.GOCERTGENFILE
output, error = shell.execute_command("go run " + cert_file + " -store-to=" + x509main.CACERTFILEPATH + "root -common-name=" + root_cn)
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("go run " + cert_file + " -store-to=" + x509main.CACERTFILEPATH + "interm -sign-with=" + x509main.CACERTFILEPATH + "root -common-name=Intemediate\ Authority")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
for server in servers:
if "[" in server.ip:
server.ip = server.ip.replace("[", "").replace("]", "")
output, error = shell.execute_command("go run " + cert_file + " -store-to=" + x509main.CACERTFILEPATH + server.ip + " -sign-with=" + x509main.CACERTFILEPATH + "interm -common-name=" + server.ip + " -final=true")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("cat " + x509main.CACERTFILEPATH + server.ip + ".crt " + x509main.CACERTFILEPATH + "interm.crt > " + " " + x509main.CACERTFILEPATH + "long_chain" + server.ip + ".pem")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
shell.execute_command("go run " + cert_file + " -store-to=" + x509main.CACERTFILEPATH + "incorrect_root_cert -common-name=Incorrect\ Authority")
elif type == 'openssl':
files = []
v3_ca = "./pytests/security/v3_ca.crt"
output, error = shell.execute_command("openssl genrsa " + encryption + " -out " + x509main.CACERTFILEPATH + "ca.key " + str(key_length))
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl req -new -x509 -days 3650 -sha256 -key " + x509main.CACERTFILEPATH + "ca.key -out " + x509main.CACERTFILEPATH + "ca.pem -subj '/C=UA/O=My Company/CN=My Company Root CA'")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl genrsa " + encryption + " -out " + x509main.CACERTFILEPATH + "int.key " + str(key_length))
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl req -new -key " + x509main.CACERTFILEPATH + "int.key -out " + x509main.CACERTFILEPATH + "int.csr -subj '/C=UA/O=My Company/CN=My Company Intermediate CA'")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl x509 -req -in " + x509main.CACERTFILEPATH + "int.csr -CA " + x509main.CACERTFILEPATH + "ca.pem -CAkey " + x509main.CACERTFILEPATH + "ca.key -CAcreateserial -CAserial " \
+ x509main.CACERTFILEPATH + "rootCA.srl -extfile ./pytests/security/v3_ca.ext -out " + x509main.CACERTFILEPATH + "int.pem -days 365 -sha256")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
for server in servers:
# check if the ip address is ipv6 raw ip address, remove [] brackets
if "[" in server.ip:
server.ip = server.ip.replace("[", "").replace("]", "")
from shutil import copyfile
copyfile("./pytests/security/clientconf.conf", "./pytests/security/clientconf3.conf")
fin = open("./pytests/security/clientconf3.conf", "a+")
if ".com" in server.ip and wildcard_dns is None:
fin.write("\nDNS.0 = {0}".format(server.ip))
elif wildcard_dns:
fin.write("\nDNS.0 = {0}".format(wildcard_dns))
else:
fin.write("\nIP.0 = {0}".format(server.ip.replace('[', '').replace(']', '')))
fin.close()
import fileinput
import sys
for line in fileinput.input("./pytests/security/clientconf3.conf", inplace=1):
if "ip_address" in line:
line = line.replace("ip_address", server.ip)
sys.stdout.write(line)
# print file contents for easy debugging
fout = open("./pytests/security/clientconf3.conf", "r")
print((fout.read()))
fout.close()
output, error = shell.execute_command("openssl genrsa " + encryption + " -out " + x509main.CACERTFILEPATH + server.ip + ".key " + str(key_length))
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl req -new -key " + x509main.CACERTFILEPATH + server.ip + ".key -out " + x509main.CACERTFILEPATH + server.ip + ".csr -config ./pytests/security/clientconf3.conf")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl x509 -req -in " + x509main.CACERTFILEPATH + server.ip + ".csr -CA " + x509main.CACERTFILEPATH + "int.pem -CAkey " + \
x509main.CACERTFILEPATH + "int.key -CAcreateserial -CAserial " + x509main.CACERTFILEPATH + "intermediateCA.srl -out " + x509main.CACERTFILEPATH + server.ip + ".pem -days 365 -sha256 -extfile ./pytests/security/clientconf3.conf -extensions req_ext")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("cat " + x509main.CACERTFILEPATH + server.ip + ".pem " + x509main.CACERTFILEPATH + "int.pem " + x509main.CACERTFILEPATH + "ca.pem > " + x509main.CACERTFILEPATH + "long_chain" + server.ip + ".pem")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("cp " + x509main.CACERTFILEPATH + "ca.pem " + x509main.CACERTFILEPATH + "root.crt")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
os.remove("./pytests/security/clientconf3.conf")
# Check if client_ip is ipv6, remove []
if "[" in client_ip:
client_ip = client_ip.replace("[", "").replace("]", "")
from shutil import copyfile
copyfile("./pytests/security/clientconf.conf", "./pytests/security/clientconf2.conf")
fin = open("./pytests/security/clientconf2.conf", "a+")
if alt_names == 'default':
fin.write("\nDNS.1 = us.cbadminbucket.com")
fin.write("\nURI.1 = www.cbadminbucket.com")
elif alt_names == 'non_default':
if dns is not None:
dns = "\nDNS.1 = " + dns
fin.write(dns)
if uri is not None:
                    uri = "\nURI.1 = " + uri
fin.write(uri)
if ".com" in server.ip:
fin.write("\nDNS.0 = {0}".format(server.ip))
else:
fin.write("\nIP.0 = {0}".format(server.ip.replace('[', '').replace(']', '')))
fin.close()
# print file contents for easy debugging
fout = open("./pytests/security/clientconf2.conf", "r")
print((fout.read()))
fout.close()
# Generate Certificate for the client
output, error = shell.execute_command("openssl genrsa " + encryption + " -out " + x509main.CACERTFILEPATH + client_ip + ".key " + str(key_length))
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl req -new -key " + x509main.CACERTFILEPATH + client_ip + ".key -out " + x509main.CACERTFILEPATH + client_ip + ".csr -config ./pytests/security/clientconf2.conf")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("openssl x509 -req -in " + x509main.CACERTFILEPATH + client_ip + ".csr -CA " + x509main.CACERTFILEPATH + "int.pem -CAkey " + \
x509main.CACERTFILEPATH + "int.key -CAcreateserial -CAserial " + x509main.CACERTFILEPATH + "intermediateCA.srl -out " + x509main.CACERTFILEPATH + client_ip + ".pem -days 365 -sha256 -extfile ./pytests/security/clientconf2.conf -extensions req_ext")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
output, error = shell.execute_command("cat " + x509main.CACERTFILEPATH + client_ip + ".pem " + x509main.CACERTFILEPATH + "int.pem " + x509main.CACERTFILEPATH + "ca.pem > " + x509main.CACERTFILEPATH + "long_chain" + client_ip + ".pem")
log.info ('Output message is {0} and error message is {1}'.format(output, error))
os.remove("./pytests/security/clientconf2.conf")
# Top level method for setup of nodes in the cluster
def setup_cluster_nodes_ssl(self, servers=[], reload_cert=False):
# Make a copy of the servers not to change self.servers
copy_servers = copy.deepcopy(servers)
# For each server in cluster, setup a node certificates, create inbox folders and copy + chain cert
for server in copy_servers:
x509main(server)._setup_node_certificates(reload_cert=reload_cert, host=server)
# Create inbox folder and copy node cert and chain cert
def _setup_node_certificates(self, chain_cert=True, node_key=True, reload_cert=True, host=None):
        if host is None:
host = self.host
self._create_inbox_folder(host)
if host.ip.count(':') > 0 and host.ip.count(']') > 0:
            # raw ipv6 address: strip the enclosing square brackets
host.ip = host.ip.replace('[', '').replace(']', '')
src_chain_file = x509main.CACERTFILEPATH + "/long_chain" + host.ip + ".pem"
dest_chain_file = self.install_path + x509main.CHAINFILEPATH + "/" + x509main.CHAINCERTFILE
src_node_key = x509main.CACERTFILEPATH + "/" + host.ip + ".key"
dest_node_key = self.install_path + x509main.CHAINFILEPATH + "/" + x509main.NODECAKEYFILE
if chain_cert:
self._copy_node_key_chain_cert(host, src_chain_file, dest_chain_file)
if node_key:
self._copy_node_key_chain_cert(host, src_node_key, dest_node_key)
if reload_cert:
status, content = self._reload_node_certificate(host)
return status, content
# Reload cert for self signed certificate
def _reload_node_certificate(self, host):
rest = RestConnection(host)
api = rest.baseUrl + "node/controller/reloadCertificate"
http = httplib2.Http(disable_ssl_certificate_validation=self.disable_ssl_certificate_validation)
status, content, header = rest._http_request(api, 'POST', headers=self._create_rest_headers('Administrator', 'password'))
return status, content
# Get the install path for different operating system
def _get_install_path(self, host):
shell = RemoteMachineShellConnection(host)
os_type = shell.extract_remote_info().distribution_type
log.info ("OS type is {0}".format(os_type))
if os_type == 'windows':
install_path = x509main.WININSTALLPATH
elif os_type == 'Mac':
install_path = x509main.MACINSTALLPATH
else:
#install_path = x509main.LININSTALLPATH
install_path = str(self.get_data_path(host)) + "/"
return install_path
# create inbox folder for host
def _create_inbox_folder(self, host):
shell = RemoteMachineShellConnection(self.host)
final_path = self.install_path + x509main.CHAINFILEPATH
shell.create_directory(final_path)
# delete all file inbox folder and remove inbox folder
def _delete_inbox_folder(self):
shell = RemoteMachineShellConnection(self.host)
final_path = self.install_path + x509main.CHAINFILEPATH
shell = RemoteMachineShellConnection(self.host)
os_type = shell.extract_remote_info().distribution_type
log.info ("OS type is {0}".format(os_type))
shell.delete_file(final_path, "root.crt")
shell.delete_file(final_path, "chain.pem")
shell.delete_file(final_path, "pkey.key")
if os_type == 'windows':
final_path = '/cygdrive/c/Program\ Files/Couchbase/Server/var/lib/couchbase/inbox'
shell.execute_command('rm -rf ' + final_path)
else:
shell.execute_command('rm -rf ' + final_path)
# Function to simply copy from source to destination
def _copy_node_key_chain_cert(self, host, src_path, dest_path):
shell = RemoteMachineShellConnection(host)
shell.copy_file_local_to_remote(src_path, dest_path)
def _create_rest_headers(self, username="Administrator", password="password"):
authorization = base64.encodebytes(('%s:%s' % (username, password)).encode()).decode()
return {'Content-Type': 'application/octet-stream',
'Authorization': 'Basic %s' % authorization,
'Accept': '*/*'}
# Function that will upload file via rest
def _rest_upload_file(self, URL, file_path_name, username=None, password=None):
data = open(file_path_name, 'rb').read()
http = httplib2.Http(disable_ssl_certificate_validation=self.disable_ssl_certificate_validation)
status, content = http.request(URL, 'POST', headers=self._create_rest_headers(username, password), body=data)
log.info (" Status from rest file upload command is {0}".format(status))
log.info (" Content from rest file upload command is {0}".format(content))
return status, content
# Upload Cluster or root cert
def _upload_cluster_ca_certificate(self, username, password):
rest = RestConnection(self.host)
url = "controller/uploadClusterCA"
api = rest.baseUrl + url
self._rest_upload_file(api, x509main.CACERTFILEPATH + "/" + x509main.CACERTFILE, "Administrator", 'password')
# Upload security setting for client cert
def _upload_cluster_ca_settings(self, username, password):
temp = self.host
rest = RestConnection(temp)
url = "settings/clientCertAuth"
api = rest.baseUrl + url
status, content = self._rest_upload_file(api, x509main.CACERTFILEPATH + x509main.CLIENT_CERT_AUTH_JSON, "Administrator", 'password')
log.info (" --- Status from upload of client cert settings is {0} and Content is {1}".format(status, content))
return status, content
    '''
    Use the requests module to execute REST APIs.
    Steps:
    1. Check whether client_cert is set; this determines the client-certificate parameters used for the connection.
    2. Check which verb the REST API requires: GET, POST, PUT or DELETE.
    3. Issue the request with the client certs, the data passed for the request, and the request headers.
    4. Return the status code and response text to the calling function.
    Any exception raised along the way is captured and an error is returned.
    '''
def _validate_ssl_login(self, final_url=None, header=None, client_cert=False, verb='GET', data='', plain_curl=False, username='Administrator', password='password', host=None):
if verb == 'GET' and plain_curl:
r = requests.get(final_url, data=data)
return r.status_code, r.text
elif client_cert:
try:
if verb == 'GET':
r = requests.get(final_url, verify=x509main.CERT_FILE, cert=(x509main.CLIENT_CERT_PEM, x509main.CLIENT_CERT_KEY), data=data)
elif verb == 'POST':
r = requests.post(final_url, verify=x509main.CERT_FILE, cert=(x509main.CLIENT_CERT_PEM, x509main.CLIENT_CERT_KEY), data=data)
elif verb == 'PUT':
                    header = {'Content-Type': 'application/json'}
r = requests.put(final_url, verify=x509main.CERT_FILE, cert=(x509main.CLIENT_CERT_PEM, x509main.CLIENT_CERT_KEY), data=data, headers=header)
elif verb == 'DELETE':
                    header = {'Content-Type': 'application/json'}
r = requests.delete(final_url, verify=x509main.CERT_FILE, cert=(x509main.CLIENT_CERT_PEM, x509main.CLIENT_CERT_KEY), headers=header)
return r.status_code, r.text
except Exception as ex:
log.info ("into exception form validate_ssl_login with client cert")
log.info (" Exception is {0}".format(ex))
return 'error','error'
else:
try:
r = requests.get("https://" + str(self.host.ip) + ":18091", verify=x509main.CERT_FILE)
if r.status_code == 200:
header = {'Content-type': 'application/x-www-form-urlencoded'}
params = urllib.parse.urlencode({'user':'{0}'.format(username), 'password':'{<PASSWORD>(password)})
r = requests.post("https://" + str(self.host.ip) + ":18091/uilogin", data=params, headers=header, verify=x509main.CERT_FILE)
return r.status_code
except Exception as ex:
log.info ("into exception form validate_ssl_login")
log.info (" Exception is {0}".format(ex))
return 'error','error'
    '''
    Use curl to execute REST APIs.
    1. Check whether the verb is GET, POST or DELETE and pick the matching curl invocation.
    2. If the request is a plain curl call, use no cert and no client auth.
    3. If a client cert is to be used, pass in the root cert, the client key and the client cert.
    4. If the request does not use a client cert, pass in just the root cert.
    5. Form the command from the curl invocation, the headers and the url.
    6. Append any data, if present.
    7. Execute the curl command.
    Return the output of the curl command.
    '''
def _validate_curl(self, final_url=None, headers=None, client_cert=False, verb='GET', data='', plain_curl=False, username='Administrator', password='password'):
if verb == 'GET':
final_verb = 'curl -v'
elif verb == 'POST':
final_verb = 'curl -v -X POST'
elif verb == 'DELETE':
final_verb = 'curl -v -X DELETE'
if plain_curl:
main_url = final_verb
elif client_cert:
main_url = final_verb + " --cacert " + x509main.SRC_CHAIN_FILE + " --cert-type PEM --cert " + x509main.CLIENT_CERT_PEM + " --key-type PEM --key " + x509main.CLIENT_CERT_KEY
else:
main_url = final_verb + " --cacert " + x509main.CERT_FILE
cmd = str(main_url) + " " + str(headers) + " " + str(final_url)
if data is not None:
cmd = cmd + " -d " + data
log.info("Running command : {0}".format(cmd))
output = subprocess.check_output(cmd, shell=True)
return output
    '''
    Decide what needs to be called for the authentication.
    1. If host is None, take it from the object.
    2. If the call has to be plain (no certs at all), use http; otherwise use https.
    3. Execute either via curl or via python requests, depending on the curl flag.
    Return the result.
    '''
def _execute_command_clientcert(self, host=None, url=None, port=18091, headers=None, client_cert=False, curl=False, verb='GET', data=None,
plain_curl=False, username='Administrator', password='password'):
if host is None:
host = self.host.ip
if plain_curl:
final_url = "http://" + str(host) + ":" + str(port) + str(url)
else:
final_url = "https://" + str(host) + ":" + str(port) + str(url)
if curl:
result = self._validate_curl(final_url, headers, client_cert, verb, data, plain_curl, username, password)
return result
else:
status, result = self._validate_ssl_login(final_url, headers, client_cert, verb, data, plain_curl, username, password)
return status, result
# Get current root cert from the cluster
def _get_cluster_ca_cert(self):
rest = RestConnection(self.host)
api = rest.baseUrl + "pools/default/certificate?extended=true"
status, content, header = rest._http_request(api, 'GET')
return status, content, header
# Setup master node
    # 1. Upload the cluster cert, i.e. the root certificate
# 2. Setup other nodes for certificates
    # 3. Create the cert.json file which contains state, path, prefixes and delimiters
# 4. Upload the cert.json file
def setup_master(self, state=None, paths=None, prefixs=None, delimeters=None, mode='rest', user='Administrator', password='password'):
level = ntonencryptionBase().get_encryption_level_cli(self.host)
if level:
ntonencryptionBase().disable_nton_cluster([self.host])
copy_host = copy.deepcopy(self.host)
x509main(copy_host)._upload_cluster_ca_certificate(user, password)
x509main(copy_host)._setup_node_certificates()
if state is not None:
self.write_client_cert_json_new(state, paths, prefixs, delimeters)
if mode == 'rest':
x509main(copy_host)._upload_cluster_ca_settings(user, password)
elif mode == 'cli':
x509main(copy_host)._upload_cert_file_via_cli(user, password)
if level:
ntonencryptionBase().setup_nton_cluster([self.host], clusterEncryptionLevel=level)
    # write a new config json file based on state, paths, prefixes and delimiters
def write_client_cert_json_new(self, state, paths, prefixs, delimeters):
template_path = './pytests/security/' + x509main.CLIENT_CERT_AUTH_TEMPLATE
config_json = x509main.CACERTFILEPATH + x509main.CLIENT_CERT_AUTH_JSON
target_file = open(config_json, 'w')
source_file = open(template_path, 'r')
client_cert = '{"state" : ' + "'" + state + "'" + ", 'prefixes' : [ "
for line in source_file:
for path, prefix, delimeter in zip(paths, prefixs, delimeters):
line1 = line.replace("@2", "'" + path + "'")
line2 = line1.replace("@3", "'" + prefix + "'")
line3 = line2.replace("@4", "'" + delimeter + "'")
temp_client_cert = "{ " + line3 + " },"
client_cert = client_cert + temp_client_cert
client_cert = client_cert.replace("'", '"')
client_cert = client_cert[:-1]
client_cert = client_cert + " ]}"
log.info ("-- Log current config json file ---{0}".format(client_cert))
target_file.write(client_cert)
#upload new config file via commandline.
def _upload_cert_file_via_cli(self, user='Administrator', password='password'):
src_cert_file = x509main.CACERTFILEPATH + x509main.CLIENT_CERT_AUTH_JSON
dest_cert_file = self.install_path + x509main.CHAINFILEPATH + "/" + x509main.CLIENT_CERT_AUTH_JSON
self._copy_node_key_chain_cert(self.host, src_cert_file, dest_cert_file)
cli_command = 'ssl-manage'
options = "--set-client-auth " + dest_cert_file
remote_client = RemoteMachineShellConnection(self.host)
output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
options=options, cluster_host="localhost", user=user, password=password)
log.info (" -- Output of command ssl-manage with --set-client-auth is {0} and erorr is {1}".format(output, error))
| en | 0.802973 | status, ipAddress = commands.getstatusoutput("ifconfig en0 | grep 'inet addr:' | cut -d: -f2 |awk '{print $1}'") if '1' not in ipAddress: status, ipAddress = commands.getstatusoutput("ifconfig eth0 | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | awk '{print $2}'") return ipAddress Gets couchbase log directory, even for cluster_run # check if the ip address is ipv6 raw ip address, remove [] brackets # print file contents for easy debugging # Check if client_ip is ipv6, remove [] # print file contents for easy debugging # Generate Certificate for the client # Top level method for setup of nodes in the cluster # Make a copy of the servers not to change self.servers # For each server in cluster, setup a node certificates, create inbox folders and copy + chain cert # Create inbox folder and copy node cert and chain cert # raw ipv6? enclose in square brackets # Reload cert for self signed certificate # Get the install path for different operating system #install_path = x509main.LININSTALLPATH # create inbox folder for host # delete all file inbox folder and remove inbox folder # Function to simply copy from source to destination # Function that will upload file via rest # Upload Cluster or root cert # Upload security setting for client cert Use requests module to execute rest api's Steps: 1. Check if client_cert is set or not. This will define rest of the parameters for client certificates to set for connections 2. check what is the verb required for rest api, get, post, put and delete for each rest api 3. Call request with client certs, data that is passed for each request and headers for each request 4. Return text of the response to the calling function Capture any exception in the code and return error Call in curl requests to execute rest api's 1. check for the verb that is GET or post or delete and decide which header to use 2. check if the request is a simple curl to execute a rest api, no cert and no client auth 3. Check if client cert is going to be used, in that case pass in the root cert, client key and cert 4. Check the request is not for client cert, then pass in just the root cert 5. Form the url, add url, header and ulr 6. Add any data is there 7. Execute the curl command retun the output of the curl command Define what needs to be called for the authentication 1. check if host is none, get it from the object 2. check if the curl has to be plain execution, no cert at all else make it a https 3. if the execution needs to be done via curl or via python requests Return the value of result. # Get current root cert from the cluster # Setup master node # 1. Upload Cluster cert i.e # 2. Setup other nodes for certificates # 3. Create the cert.json file which contains state, path, prefixes and delimeters # 4. Upload the cert.json file # write a new config json file based on state, paths, perfixes and delimeters #upload new config file via commandline. | 1.792002 | 2 |
test/wsgi/invocation/rewrite/test_path_rewriting_route_handler_decorator.py | keotl/jivago | 12 | 6632811 | <gh_stars>10-100
import unittest
from unittest import mock
from jivago.wsgi.invocation.rewrite.path_rewriting_route_handler_decorator import PathRewritingRouteHandlerDecorator
from jivago.wsgi.invocation.route_handler import RouteHandler
from test_utils.request_builder import RequestBuilder
NEW_PATH = "/new-path"
class PathRewritingRouteHandlerDecoratorTest(unittest.TestCase):
def setUp(self):
self.decorated_route_handler: RouteHandler = mock.create_autospec(RouteHandler)
self.route_handler = PathRewritingRouteHandlerDecorator(self.decorated_route_handler, NEW_PATH)
def test_whenInvoking_thenPassModifiedRequestWithModifiedPath(self):
request = RequestBuilder().path("/old-path").build()
self.route_handler.invoke(request)
_, args, _ = self.decorated_route_handler.invoke.mock_calls[0]
gotten_request = args[0]
self.assertEqual(NEW_PATH, gotten_request.path)
| import unittest
from unittest import mock
from jivago.wsgi.invocation.rewrite.path_rewriting_route_handler_decorator import PathRewritingRouteHandlerDecorator
from jivago.wsgi.invocation.route_handler import RouteHandler
from test_utils.request_builder import RequestBuilder
NEW_PATH = "/new-path"
class PathRewritingRouteHandlerDecoratorTest(unittest.TestCase):
def setUp(self):
self.decorated_route_handler: RouteHandler = mock.create_autospec(RouteHandler)
self.route_handler = PathRewritingRouteHandlerDecorator(self.decorated_route_handler, NEW_PATH)
def test_whenInvoking_thenPassModifiedRequestWithModifiedPath(self):
request = RequestBuilder().path("/old-path").build()
self.route_handler.invoke(request)
_, args, _ = self.decorated_route_handler.invoke.mock_calls[0]
gotten_request = args[0]
self.assertEqual(NEW_PATH, gotten_request.path) | none | 1 | 2.821739 | 3 |
|
untwisted/splits.py | iogf/untwisted | 33 | 6632812 | from untwisted.event import LOAD, Event
import sys
class Fixed:
"""
"""
class FOUND(Event):
pass
def __init__(self, ssock, size=4):
ssock.add_map(LOAD, self.update)
self.arr = bytearray()
self.size = size
def update(self, ssock, data):
self.arr.extend(data)
mem = memoryview(self.arr)
        ind = 0  # keep 'ind' defined when fewer than 'size' bytes are buffered
        for ind in range(self.size, len(self.arr) + 1, self.size):
ssock.drive(Fixed.FOUND, mem[ind - self.size:ind].tobytes())
else:
del mem
del self.arr[:ind]
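# Sketch of wiring Fixed onto a socket wrapper (names are assumed, not from this module):
#   Fixed(ssock, size=4)
#   ssock.add_map(Fixed.FOUND, lambda ssock, chunk: print(chunk))
# Every complete 4-byte chunk read on LOAD is delivered as a Fixed.FOUND event.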
class Breaker:
"""
"""
def __init__(self, device, delim=b' '):
self.delim = delim
device.add_map(Terminator.FOUND, self.handle_found)
def handle_found(self, device, data):
lst = data.split(self.delim)
device.drive(lst.pop(0), *lst)
class Terminator:
"""
Used to tokenize messages, it works on LOAD event and spawns
Terminator.FOUND when it finds a given delimiter.
"""
class FOUND(Event):
pass
def __init__(self, ssock, delim=b'\r\n'):
self.delim = delim
self.arr = bytearray()
ssock.add_map(LOAD, self.update)
self.ssock = ssock
def update(self, ssock, data):
self.arr.extend(data)
chunks = self.arr.split(self.delim)
if chunks:
self.raise_events(chunks)
def raise_events(self, chunks):
self.arr.extend(chunks.pop(-1))
for ind in chunks:
self.ssock.drive(Terminator.FOUND, bytes(ind))
self.arr.clear()
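# Sketch of line-based tokenizing with Terminator (usage assumed from the class above):
#   Terminator(ssock, delim=b'\r\n')
#   ssock.add_map(Terminator.FOUND, lambda ssock, line: print(line))
# Each b'\r\n'-terminated token arrives as a separate Terminator.FOUND event.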
class Accumulator:
"""
Just an accumulator on LOAD.
"""
def __init__(self, ssock):
ssock.add_map(LOAD, self.update)
ssock.accumulator = self
self.data = bytearray()
def update(self, ssock, data):
self.data.extend(data)
class AccUntil:
"""
An accumulator that splits a message into two chunks based
on a delimiter. It spawns AccUntil.DONE when such a delimiter is found.
"""
class DONE(Event):
pass
def __init__(self, ssock, delim=b'\r\n\r\n'):
self.delim = delim
self.arr = bytearray()
self.ssock = ssock
def start(self, data=b''):
self.ssock.add_map(LOAD, self.update)
self.update(self.ssock, data)
def update(self, ssock, data):
self.arr.extend(data)
if self.delim in self.arr:
self.stop()
def stop(self):
self.ssock.del_map(LOAD, self.update)
data = bytes(self.arr)
a, b = data.split(self.delim, 1)
self.ssock.drive(AccUntil.DONE, a, b)
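# Sketch of an HTTP-style header/body split with AccUntil (intended use is assumed):
#   acc = AccUntil(ssock)                  # default delimiter b'\r\n\r\n'
#   acc.start()
#   ssock.add_map(AccUntil.DONE, lambda ssock, head, rest: handle(head, rest))
# 'handle' is a placeholder for whatever consumes the header and the remaining bytes.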
class TmpFile:
class DONE(Event):
pass
def __init__(self, ssock):
self.ssock = ssock
self.fd = None
self.size = None
def start(self, fd, size=0, init_data=b''):
self.fd = fd
self.size = size
self.ssock.add_map(LOAD, self.update)
self.update(self.ssock, init_data)
def stop(self, data):
lsize = self.size - self.fd.tell()
self.fd.write(data[:lsize])
self.ssock.del_map(LOAD, self.update)
self.ssock.drive(TmpFile.DONE, self.fd, data[lsize:])
def update(self, ssock, data):
lsize = self.size - self.fd.tell()
if len(data) >= lsize:
self.stop(data)
else:
self.fd.write(data)
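# Sketch for TmpFile (assumed): stream 'size' bytes from LOAD into an open file object,
# after which TmpFile.DONE fires with the file and any surplus bytes:
#   tmp = TmpFile(ssock)
#   tmp.start(open('/tmp/upload.bin', 'wb'), size=1024, init_data=b'')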
def logcon(ssock, fd=sys.stdout):
def log(ssock, data):
fd.write('%s\n' % data)
ssock.add_map(Terminator.FOUND, log)
| from untwisted.event import LOAD, Event
import sys
class Fixed:
"""
"""
class FOUND(Event):
pass
def __init__(self, ssock, size=4):
ssock.add_map(LOAD, self.update)
self.arr = bytearray()
self.size = size
def update(self, ssock, data):
self.arr.extend(data)
mem = memoryview(self.arr)
        ind = 0  # keep 'ind' defined when fewer than 'size' bytes are buffered
        for ind in range(self.size, len(self.arr) + 1, self.size):
ssock.drive(Fixed.FOUND, mem[ind - self.size:ind].tobytes())
else:
del mem
del self.arr[:ind]
class Breaker:
"""
"""
def __init__(self, device, delim=b' '):
self.delim = delim
device.add_map(Terminator.FOUND, self.handle_found)
def handle_found(self, device, data):
lst = data.split(self.delim)
device.drive(lst.pop(0), *lst)
class Terminator:
"""
Used to tokenize messages, it works on LOAD event and spawns
Terminator.FOUND when it finds a given delimiter.
"""
class FOUND(Event):
pass
def __init__(self, ssock, delim=b'\r\n'):
self.delim = delim
self.arr = bytearray()
ssock.add_map(LOAD, self.update)
self.ssock = ssock
def update(self, ssock, data):
self.arr.extend(data)
chunks = self.arr.split(self.delim)
if chunks:
self.raise_events(chunks)
def raise_events(self, chunks):
self.arr.extend(chunks.pop(-1))
for ind in chunks:
self.ssock.drive(Terminator.FOUND, bytes(ind))
self.arr.clear()
class Accumulator:
"""
Just an accumulator on LOAD.
"""
def __init__(self, ssock):
ssock.add_map(LOAD, self.update)
ssock.accumulator = self
self.data = bytearray()
def update(self, ssock, data):
self.data.extend(data)
class AccUntil:
"""
An accumulator that splits a message into two chunks based
on a delimiter. It spawns AccUntil.DONE when such a delimiter is found.
"""
class DONE(Event):
pass
def __init__(self, ssock, delim=b'\r\n\r\n'):
self.delim = delim
self.arr = bytearray()
self.ssock = ssock
def start(self, data=b''):
self.ssock.add_map(LOAD, self.update)
self.update(self.ssock, data)
def update(self, ssock, data):
self.arr.extend(data)
if self.delim in self.arr:
self.stop()
def stop(self):
self.ssock.del_map(LOAD, self.update)
data = bytes(self.arr)
a, b = data.split(self.delim, 1)
self.ssock.drive(AccUntil.DONE, a, b)
class TmpFile:
class DONE(Event):
pass
def __init__(self, ssock):
self.ssock = ssock
self.fd = None
self.size = None
def start(self, fd, size=0, init_data=b''):
self.fd = fd
self.size = size
self.ssock.add_map(LOAD, self.update)
self.update(self.ssock, init_data)
def stop(self, data):
lsize = self.size - self.fd.tell()
self.fd.write(data[:lsize])
self.ssock.del_map(LOAD, self.update)
self.ssock.drive(TmpFile.DONE, self.fd, data[lsize:])
def update(self, ssock, data):
lsize = self.size - self.fd.tell()
if len(data) >= lsize:
self.stop(data)
else:
self.fd.write(data)
def logcon(ssock, fd=sys.stdout):
def log(ssock, data):
fd.write('%s\n' % data)
ssock.add_map(Terminator.FOUND, log)
| en | 0.840916 | Used to tokenize messages, it works on LOAD event and spawns Terminator.FOUND when it finds a given delimiter. Just an accumulator on LOAD. An accumulator that splits a message into two chunks based on a delimiter. It spawns AccUntil.DONE when such a delimiter is found. | 2.569561 | 3 |
src/test.py | joagonzalez/tateti | 1 | 6632813 | <filename>src/test.py
from game import Game, Player, Board
import sys
def test_player():
print("Player() class tests")
dimension = 3
board_player = Board(dimension)
print("Imprimimos tablero vacio: ")
print(board_player)
board_player.update_board([0, 2], 'X')
board_player.update_board([0, 0], 'O')
board_player.update_board([1, 2], 'X')
board_player.update_board([2, 2], 'X')
board_player.update_board([1, 0], 'X')
board_player.update_board([2, 0], 'O')
board_player.update_board([0, 1], 'O')
board_player.update_board([1, 1], 'X')
#board_player.update_board([2, 1], 'X')
print(board_player)
player_1 = Player('Joaquin', 0, 0, 0)
player_2 = Player('Xano', 1, 1, 1)
print(player_1)
print(player_2)
player_1.movement(board_player)
print(board_player)
print(board_player.is_tateti())
def test_board():
print("Board() class tests")
dimension = int(sys.argv[1])
board = Board(dimension)
board_2 = Board(dimension)
print("Imprimimos tablero vacio: ")
print(board)
board.update_board([0, 2], 'X')
board.update_board([0, 0], 'O')
board.update_board([1, 2], 'X')
board.update_board([2, 2], 'X')
board.update_board([1, 0], 'X')
board.update_board([2, 0], 'O')
board.update_board([0, 1], 'O')
board.update_board([1, 1], 'X')
board.update_board([2, 1], 'X')
if dimension == 4:
board.update_board([3, 3], 'X')
print("Imprimimos tablero con contenido: ")
print(board)
print(board.is_tateti())
print(board.get_board())
print(board.get_id())
print(board.get_dimension())
# board_2
print(board_2)
print(board_2.is_tateti())
board_2.update_board([0, 0], 'X')
print(board_2)
print(board_2.is_tateti())
def test_game():
print("Game() class tests")
game_1 = Game('Joaquin', 'Morita', 0, 1, 0, 1, 0, 1)
game_2 = Game('Julia', 'Ramiro', 0, 1, 0, 1, 0, 1)
    print(game_1)
    print()
    print(game_2)
    print()
    print(game_1.get_player('Morita'))
    print()
    print(game_1.get_board())
game_1.movement('Joaquin')
print(game_1.get_board())
board = game_1.get_board()
print(board.get_id())
print(game_1.is_winner())
if __name__ == "__main__":
test_board()
test_player()
test_game() | <filename>src/test.py
from game import Game, Player, Board
import sys
def test_player():
print("Player() class tests")
dimension = 3
board_player = Board(dimension)
print("Imprimimos tablero vacio: ")
print(board_player)
board_player.update_board([0, 2], 'X')
board_player.update_board([0, 0], 'O')
board_player.update_board([1, 2], 'X')
board_player.update_board([2, 2], 'X')
board_player.update_board([1, 0], 'X')
board_player.update_board([2, 0], 'O')
board_player.update_board([0, 1], 'O')
board_player.update_board([1, 1], 'X')
#board_player.update_board([2, 1], 'X')
print(board_player)
player_1 = Player('Joaquin', 0, 0, 0)
player_2 = Player('Xano', 1, 1, 1)
print(player_1)
print(player_2)
player_1.movement(board_player)
print(board_player)
print(board_player.is_tateti())
def test_board():
print("Board() class tests")
dimension = int(sys.argv[1])
board = Board(dimension)
board_2 = Board(dimension)
print("Imprimimos tablero vacio: ")
print(board)
board.update_board([0, 2], 'X')
board.update_board([0, 0], 'O')
board.update_board([1, 2], 'X')
board.update_board([2, 2], 'X')
board.update_board([1, 0], 'X')
board.update_board([2, 0], 'O')
board.update_board([0, 1], 'O')
board.update_board([1, 1], 'X')
board.update_board([2, 1], 'X')
if dimension == 4:
board.update_board([3, 3], 'X')
print("Imprimimos tablero con contenido: ")
print(board)
print(board.is_tateti())
print(board.get_board())
print(board.get_id())
print(board.get_dimension())
# board_2
print(board_2)
print(board_2.is_tateti())
board_2.update_board([0, 0], 'X')
print(board_2)
print(board_2.is_tateti())
def test_game():
print("Game() class tests")
game_1 = Game('Joaquin', 'Morita', 0, 1, 0, 1, 0, 1)
game_2 = Game('Julia', 'Ramiro', 0, 1, 0, 1, 0, 1)
print(game_1)
print
print(game_2)
print
print(game_1.get_player('Morita'))
print
print(game_1.get_board())
game_1.movement('Joaquin')
print(game_1.get_board())
board = game_1.get_board()
print(board.get_id())
print(game_1.is_winner())
if __name__ == "__main__":
test_board()
test_player()
test_game() | en | 0.265241 | #board_player.update_board([2, 1], 'X') # board_2 | 3.301651 | 3 |
tests/validators/test_shared_potential_id_validator.py | NOWUM/EnSysMod | 1 | 6632814 | <gh_stars>1-10
from typing import Type, List, Tuple, Dict, Any
import pytest
from pydantic import BaseModel, ValidationError
from ensysmod.model import EnergyComponentType
from ensysmod.schemas import EnergyComponentUpdate, EnergyComponentCreate
schemas_with_shared_potential_id_required: List[Tuple[Type[BaseModel], Dict[str, Any]]] = []
schemas_with_shared_potential_id_optional: List[Tuple[Type[BaseModel], Dict[str, Any]]] = [
(EnergyComponentUpdate, {}),
(EnergyComponentCreate,
{"name": "test", "description": "foo", "ref_dataset": 42, "type": EnergyComponentType.SOURCE})
]
schemas_with_shared_potential_id = schemas_with_shared_potential_id_required + schemas_with_shared_potential_id_optional
@pytest.mark.parametrize("schema,data", schemas_with_shared_potential_id_optional)
def test_ok_missing_shared_potential_id(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a shared potential id is optional for a schema
"""
schema(**data)
@pytest.mark.parametrize("schema,data", schemas_with_shared_potential_id_optional)
def test_ok_none_shared_potential_id(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a shared potential id is optional for a schema
"""
schema(shared_potential_id=None, **data)
@pytest.mark.parametrize("schema,data", schemas_with_shared_potential_id)
def test_error_long_shared_potential_id(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a shared potential id is not longer than 100 characters
"""
with pytest.raises(ValidationError) as exc_info:
schema(shared_potential_id="a" * 101, **data)
assert len(exc_info.value.errors()) == 1
assert exc_info.value.errors()[0]["loc"] == ("shared_potential_id",)
assert exc_info.value.errors()[0]["msg"] == "Shared potential id must not be longer than 100 characters."
assert exc_info.value.errors()[0]["type"] == "value_error"
@pytest.mark.parametrize("schema,data", schemas_with_shared_potential_id)
def test_ok_shared_potential_ids(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a shared potential id with everything between 1 and 100 characters is valid
"""
schema(shared_potential_id="a", **data)
schema(shared_potential_id="a" * 100, **data)
| from typing import Type, List, Tuple, Dict, Any
import pytest
from pydantic import BaseModel, ValidationError
from ensysmod.model import EnergyComponentType
from ensysmod.schemas import EnergyComponentUpdate, EnergyComponentCreate
schemas_with_shared_potential_id_required: List[Tuple[Type[BaseModel], Dict[str, Any]]] = []
schemas_with_shared_potential_id_optional: List[Tuple[Type[BaseModel], Dict[str, Any]]] = [
(EnergyComponentUpdate, {}),
(EnergyComponentCreate,
{"name": "test", "description": "foo", "ref_dataset": 42, "type": EnergyComponentType.SOURCE})
]
schemas_with_shared_potential_id = schemas_with_shared_potential_id_required + schemas_with_shared_potential_id_optional
@pytest.mark.parametrize("schema,data", schemas_with_shared_potential_id_optional)
def test_ok_missing_shared_potential_id(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a shared potential id is optional for a schema
"""
schema(**data)
@pytest.mark.parametrize("schema,data", schemas_with_shared_potential_id_optional)
def test_ok_none_shared_potential_id(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a shared potential id is optional for a schema
"""
schema(shared_potential_id=None, **data)
@pytest.mark.parametrize("schema,data", schemas_with_shared_potential_id)
def test_error_long_shared_potential_id(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a shared potential id is not longer than 100 characters
"""
with pytest.raises(ValidationError) as exc_info:
schema(shared_potential_id="a" * 101, **data)
assert len(exc_info.value.errors()) == 1
assert exc_info.value.errors()[0]["loc"] == ("shared_potential_id",)
assert exc_info.value.errors()[0]["msg"] == "Shared potential id must not be longer than 100 characters."
assert exc_info.value.errors()[0]["type"] == "value_error"
@pytest.mark.parametrize("schema,data", schemas_with_shared_potential_id)
def test_ok_shared_potential_ids(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a shared potential id with everything between 1 and 100 characters is valid
"""
schema(shared_potential_id="a", **data)
schema(shared_potential_id="a" * 100, **data) | en | 0.926534 | Test that a shared potential id is optional for a schema Test that a shared potential id is optional for a schema Test that a shared potential id is not longer than 100 characters Test that a shared potential id with everything between 1 and 100 characters is valid | 2.384945 | 2 |
mri_tools/shell_utils.py | movermeyer/mri-tools | 0 | 6632815 | <gh_stars>0
import os
import subprocess
import six
__author__ = '<NAME>'
__date__ = "2015-05-07"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def binary_in_path(command_name):
"""Check if the given command name for a binary exists in the users path.
Args:
command_name (str): the name of the command to check for existence and executability
Returns:
bool: true if the command can be found and is executable, false otherwise.
"""
for path_dir in os.environ["PATH"].split(os.pathsep):
full_path = os.path.join(path_dir.strip('"'), command_name)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return True
return False
def get_fsl_path():
"""Get the path to the FSL dir
Returns:
str: The path to the FSL directory.
Raises:
EnvironmentError: If the path could not be found.
"""
fsl_path = os.environ.get('FSLDIR', '')
if not fsl_path:
raise EnvironmentError('The Environment variable FSLDIR is not set')
return fsl_path
def get_fsl_command(application_name):
"""Get the correct command name for the given FSL application.
Args:
application_name (str): The name of the application we want the correct command of.
Returns:
str: Either the given application name or the name with fsl-5.0 prepended to it.
Raises:
EnvironmentError: if the fsl program could not be found
"""
if binary_in_path(application_name):
return application_name
prefixed_name = 'fsl-5.0' + application_name
if binary_in_path(prefixed_name):
return prefixed_name
raise EnvironmentError('Could not find FSL program {}'.format(application_name))
def run_command(command, shell=False):
"""Run a shell command.
Args:
command (str or list): the shell command to run
shell (bool): the subprocess flag for shell
Raises:
RuntimeError: if the command returned with exit code -1
Returns:
str: the stdout of the command
"""
if isinstance(command, six.string_types):
command = command.split(' ')
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
stdout, stderr = process.communicate()
rc = process.returncode
if rc == 1:
raise RuntimeError('Error in command. Error message: ' + str(stderr))
return stdout
def bash_function_exists(function_name):
"""Check if the bash function with the given name exists.
Runs the command 'which <function_name>' to check if the function exists.
Args:
function_name (str): the function name to check for existence
Returns:
boolean: if the command exists
"""
try:
run_command('which {}'.format(function_name))
return True
except RuntimeError:
return False
| import os
import subprocess
import six
__author__ = '<NAME>'
__date__ = "2015-05-07"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def binary_in_path(command_name):
"""Check if the given command name for a binary exists in the users path.
Args:
command_name (str): the name of the command to check for existence and executability
Returns:
bool: true if the command can be found and is executable, false otherwise.
"""
for path_dir in os.environ["PATH"].split(os.pathsep):
full_path = os.path.join(path_dir.strip('"'), command_name)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return True
return False
def get_fsl_path():
"""Get the path to the FSL dir
Returns:
str: The path to the FSL directory.
Raises:
EnvironmentError: If the path could not be found.
"""
fsl_path = os.environ.get('FSLDIR', '')
if not fsl_path:
raise EnvironmentError('The Environment variable FSLDIR is not set')
return fsl_path
def get_fsl_command(application_name):
"""Get the correct command name for the given FSL application.
Args:
application_name (str): The name of the application we want the correct command of.
Returns:
str: Either the given application name or the name with fsl-5.0 prepended to it.
Raises:
EnvironmentError: if the fsl program could not be found
"""
if binary_in_path(application_name):
return application_name
prefixed_name = 'fsl-5.0' + application_name
if binary_in_path(prefixed_name):
return prefixed_name
raise EnvironmentError('Could not find FSL program {}'.format(application_name))
def run_command(command, shell=False):
"""Run a shell command.
Args:
command (str or list): the shell command to run
shell (bool): the subprocess flag for shell
Raises:
RuntimeError: if the command returned with exit code -1
Returns:
str: the stdout of the command
"""
if isinstance(command, six.string_types):
command = command.split(' ')
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
stdout, stderr = process.communicate()
rc = process.returncode
if rc == 1:
raise RuntimeError('Error in command. Error message: ' + str(stderr))
return stdout
def bash_function_exists(function_name):
"""Check if the bash function with the given name exists.
Runs the command 'which <function_name>' to check if the function exists.
Args:
function_name (str): the function name to check for existence
Returns:
boolean: if the command exists
"""
try:
run_command('which {}'.format(function_name))
return True
except RuntimeError:
return False | en | 0.720749 | Check if the given command name for a binary exists in the users path. Args: command_name (str): the name of the command to check for existence and executability Returns: bool: true if the command can be found and is executable, false otherwise. Get the path to the FSL dir Returns: str: The path to the FSL directory. Raises: EnvironmentError: If the path could not be found. Get the correct command name for the given FSL application. Args: application_name (str): The name of the application we want the correct command of. Returns: str: Either the given application name or the name with fsl-5.0 prepended to it. Raises: EnvironmentError: if the fsl program could not be found Run a shell command. Args: command (str or list): the shell command to run shell (bool): the subprocess flag for shell Raises: RuntimeError: if the command returned with exit code -1 Returns: str: the stdout of the command Check if the bash function with the given name exists. Runs the command 'which <function_name>' to check if the function exists. Args: function_name (str): the function name to check for existence Returns: boolean: if the command exists | 2.983705 | 3 |
userbot/modules/sqlsender.py | TAMILVIP007/javes-3.0 | 1 | 6632816 | from userbot import client, CMD_HELP, CMD_LIST
from telethon import events
from userbot.events import javes05, rekcah05, zzaacckkyy, remove_plugin, load_module
from telethon import functions, types
from telethon.tl.types import InputMessagesFilterDocument
from userbot.utils import command, remove_plugin, load_module
from pathlib import Path
from userbot import LOAD_PLUG
from datetime import datetime
DELETE_TIMEOUT = 5
import sys, asyncio, traceback, os, importlib
import userbot.utils
from userbot import CMD_HELP, ALIVE_NAME, PM_MESSAGE, JAVES_NAME, JAVES_MSG, ORI_MSG
JAVES_NNAME = str(JAVES_NAME) if JAVES_NAME else str(JAVES_MSG)
@zzaacckkyy(pattern="^!sqlsend (?P<shortname>\w+)$", outgoing=True)
async def send(event):
if event.fwd_from:
return
message_id = event.message.id
input_str = event.pattern_match["shortname"]
the_plugin_file = "./userbot/modules/sql_helper/{}.py".format(input_str)
start = datetime.now()
await event.client.send_file( # pylint:disable=E0602
event.chat_id,
the_plugin_file,
force_document=True,
allow_cache=False,
reply_to=message_id
)
end = datetime.now()
time_taken_in_ms = (end - start).seconds
await event.edit("Uploaded {} in {} seconds".format(input_str, time_taken_in_ms))
await asyncio.sleep(DELETE_TIMEOUT)
await event.delete()
CMD_HELP.update({
"sqlsender":
"`!sqlsend <sql_helpername>`\
\n**Usage:** send the sql helper\
\n\n``\
\n****\
"
})
| from userbot import client, CMD_HELP, CMD_LIST
from telethon import events
from userbot.events import javes05, rekcah05, zzaacckkyy, remove_plugin, load_module
from telethon import functions, types
from telethon.tl.types import InputMessagesFilterDocument
from userbot.utils import command, remove_plugin, load_module
from pathlib import Path
from userbot import LOAD_PLUG
from datetime import datetime
DELETE_TIMEOUT = 5
import sys, asyncio, traceback, os, importlib
import userbot.utils
from userbot import CMD_HELP, ALIVE_NAME, PM_MESSAGE, JAVES_NAME, JAVES_MSG, ORI_MSG
JAVES_NNAME = str(JAVES_NAME) if JAVES_NAME else str(JAVES_MSG)
@zzaacckkyy(pattern="^!sqlsend (?P<shortname>\w+)$", outgoing=True)
async def send(event):
if event.fwd_from:
return
message_id = event.message.id
input_str = event.pattern_match["shortname"]
the_plugin_file = "./userbot/modules/sql_helper/{}.py".format(input_str)
start = datetime.now()
await event.client.send_file( # pylint:disable=E0602
event.chat_id,
the_plugin_file,
force_document=True,
allow_cache=False,
reply_to=message_id
)
end = datetime.now()
time_taken_in_ms = (end - start).seconds
await event.edit("Uploaded {} in {} seconds".format(input_str, time_taken_in_ms))
await asyncio.sleep(DELETE_TIMEOUT)
await event.delete()
CMD_HELP.update({
"sqlsender":
"`!sqlsend <sql_helpername>`\
\n**Usage:** send the sql helper\
\n\n``\
\n****\
"
})
| de | 0.372393 | # pylint:disable=E0602 | 1.944765 | 2 |
agent/data/CIFAR10/cifar10.py | kashu98/Deep-Agent | 0 | 6632817 | import pickle
import numpy as np
import os.path
class CIFAR10:
def __init__(self):
self.dataset = {}
self.path = os.path.dirname(os.path.abspath(__file__))
def _load_data(self, filename):
with open(filename, 'rb') as file:
dataset = pickle.load(file, encoding='bytes')
return (dataset[b'data'], dataset[b'labels'])
def _creat_pickle(self):
img, label = self._load_data(self.path + '/data_batch_1')
for i in range(4):
np.r_[img, self._load_data(self.path + '/data_batch_' + str(i+2))[0]]
np.r_[label, self._load_data(self.path + '/data_batch_' + str(i+2))[1]]
self.dataset['train_img'] = img
self.dataset['train_label'] = label
self.dataset['test_img'] = self._load_data(self.path + '/test_batch')[0]
self.dataset['test_label'] = self._load_data(self.path + '/test_batch')[1]
with open(self.path + '/cifar10.pkl', 'wb') as file:
pickle.dump(self.dataset, file, -1)
def _one_hot_label(self, X):
T = np.zeros((X.size, 10))
for idx, row in enumerate(T):
row[X[idx]] = 1
return T
def load_data(self, normalize=True, flatten=False, one_hot_label=False, option='train', **kwargs):
'''
## Arguments
normalize : if true, normalize the input pixel
one_hot_label : if true, creat one hot label
flatten : if true, load the image as a line
option: select option
train: return train data only\n
test: return test data only\n
both: return both train and test data
'''
if not os.path.exists(self.path + '/cifar10.pkl'):
self._creat_pickle()
with open(self.path + '/cifar10.pkl', 'rb') as file:
dataset = pickle.load(file)
if normalize:
for i in ('train_img', 'test_img'):
dataset[i] = dataset[i].astype(np.float32)
dataset[i] /= 255.0
dataset[i] += 0.01
if one_hot_label:
dataset['train_label'] = self._one_hot_label(dataset['train_label'])
dataset['test_label'] = self._one_hot_label(dataset['test_label'])
if not flatten:
for i in ('train_img', 'test_img'):
dataset[i] = dataset[i].reshape(-1, 3, 32, 32)
if option == 'train':
return (dataset['train_img'], dataset['train_label'])
elif option == 'test':
return (dataset['test_img'], dataset['test_label'])
elif option == 'both':
return (dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label'])
if __name__ == '__main__':
cifar = CIFAR10()
data = cifar.load_data(True, False, False, 'both')
print(data)
| import pickle
import numpy as np
import os.path
class CIFAR10:
def __init__(self):
self.dataset = {}
self.path = os.path.dirname(os.path.abspath(__file__))
def _load_data(self, filename):
with open(filename, 'rb') as file:
dataset = pickle.load(file, encoding='bytes')
return (dataset[b'data'], dataset[b'labels'])
def _creat_pickle(self):
img, label = self._load_data(self.path + '/data_batch_1')
for i in range(4):
np.r_[img, self._load_data(self.path + '/data_batch_' + str(i+2))[0]]
np.r_[label, self._load_data(self.path + '/data_batch_' + str(i+2))[1]]
self.dataset['train_img'] = img
self.dataset['train_label'] = label
self.dataset['test_img'] = self._load_data(self.path + '/test_batch')[0]
self.dataset['test_label'] = self._load_data(self.path + '/test_batch')[1]
with open(self.path + '/cifar10.pkl', 'wb') as file:
pickle.dump(self.dataset, file, -1)
def _one_hot_label(self, X):
T = np.zeros((X.size, 10))
for idx, row in enumerate(T):
row[X[idx]] = 1
return T
def load_data(self, normalize=True, flatten=False, one_hot_label=False, option='train', **kwargs):
'''
## Arguments
normalize : if true, normalize the input pixel
one_hot_label : if true, creat one hot label
flatten : if true, load the image as a line
option: select option
train: return train data only\n
test: return test data only\n
both: return both train and test data
'''
if not os.path.exists(self.path + '/cifar10.pkl'):
self._creat_pickle()
with open(self.path + '/cifar10.pkl', 'rb') as file:
dataset = pickle.load(file)
if normalize:
for i in ('train_img', 'test_img'):
dataset[i] = dataset[i].astype(np.float32)
dataset[i] /= 255.0
dataset[i] += 0.01
if one_hot_label:
dataset['train_label'] = self._one_hot_label(dataset['train_label'])
dataset['test_label'] = self._one_hot_label(dataset['test_label'])
if not flatten:
for i in ('train_img', 'test_img'):
dataset[i] = dataset[i].reshape(-1, 3, 32, 32)
if option == 'train':
return (dataset['train_img'], dataset['train_label'])
elif option == 'test':
return (dataset['test_img'], dataset['test_label'])
elif option == 'both':
return (dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label'])
if __name__ == '__main__':
cifar = CIFAR10()
data = cifar.load_data(True, False, False, 'both')
print(data)
| en | 0.366333 | ## Arguments normalize : if true, normalize the input pixel one_hot_label : if true, creat one hot label flatten : if true, load the image as a line option: select option train: return train data only\n test: return test data only\n both: return both train and test data | 2.618009 | 3 |
apps/API/serializers/category_serializers.py | ExpoAshique/ProveBanking__s | 0 | 6632818 | <filename>apps/API/serializers/category_serializers.py<gh_stars>0
from rest_framework import serializers
from categories.models import Category
class CategorySerializer(serializers.ModelSerializer):
label = serializers.CharField(source='name')
value = serializers.CharField(source='id')
class Meta:
model = Category
fields = ('id', 'name', 'kind', 'label', 'value')
| <filename>apps/API/serializers/category_serializers.py<gh_stars>0
from rest_framework import serializers
from categories.models import Category
class CategorySerializer(serializers.ModelSerializer):
label = serializers.CharField(source='name')
value = serializers.CharField(source='id')
class Meta:
model = Category
fields = ('id', 'name', 'kind', 'label', 'value')
| none | 1 | 1.999258 | 2 |
|
python/ray/serve/tests/test_util.py | 77loopin/ray | 39 | 6632819 | import json
import numpy as np
import pytest
import ray
from ray.serve.utils import ServeEncoder
from ray._private.utils import import_attr
def test_bytes_encoder():
data_before = {"inp": {"nest": b"bytes"}}
data_after = {"inp": {"nest": "bytes"}}
assert json.loads(json.dumps(data_before, cls=ServeEncoder)) == data_after
def test_numpy_encoding():
data = [1, 2]
floats = np.array(data).astype(np.float32)
ints = floats.astype(np.int32)
uints = floats.astype(np.uint32)
assert json.loads(json.dumps(floats, cls=ServeEncoder)) == data
assert json.loads(json.dumps(ints, cls=ServeEncoder)) == data
assert json.loads(json.dumps(uints, cls=ServeEncoder)) == data
def test_import_attr():
assert (import_attr("ray.serve.BackendConfig") ==
ray.serve.config.BackendConfig)
assert (import_attr("ray.serve.config.BackendConfig") ==
ray.serve.config.BackendConfig)
policy_cls = import_attr("ray.serve.controller.TrafficPolicy")
assert policy_cls == ray.serve.controller.TrafficPolicy
policy = policy_cls({"endpoint1": 0.5, "endpoint2": 0.5})
with pytest.raises(ValueError):
policy.set_traffic_dict({"endpoint1": 0.5, "endpoint2": 0.6})
policy.set_traffic_dict({"endpoint1": 0.4, "endpoint2": 0.6})
print(repr(policy))
# Very meta...
import_attr_2 = import_attr("ray._private.utils.import_attr")
assert import_attr_2 == import_attr
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| import json
import numpy as np
import pytest
import ray
from ray.serve.utils import ServeEncoder
from ray._private.utils import import_attr
def test_bytes_encoder():
data_before = {"inp": {"nest": b"bytes"}}
data_after = {"inp": {"nest": "bytes"}}
assert json.loads(json.dumps(data_before, cls=ServeEncoder)) == data_after
def test_numpy_encoding():
data = [1, 2]
floats = np.array(data).astype(np.float32)
ints = floats.astype(np.int32)
uints = floats.astype(np.uint32)
assert json.loads(json.dumps(floats, cls=ServeEncoder)) == data
assert json.loads(json.dumps(ints, cls=ServeEncoder)) == data
assert json.loads(json.dumps(uints, cls=ServeEncoder)) == data
def test_import_attr():
assert (import_attr("ray.serve.BackendConfig") ==
ray.serve.config.BackendConfig)
assert (import_attr("ray.serve.config.BackendConfig") ==
ray.serve.config.BackendConfig)
policy_cls = import_attr("ray.serve.controller.TrafficPolicy")
assert policy_cls == ray.serve.controller.TrafficPolicy
policy = policy_cls({"endpoint1": 0.5, "endpoint2": 0.5})
with pytest.raises(ValueError):
policy.set_traffic_dict({"endpoint1": 0.5, "endpoint2": 0.6})
policy.set_traffic_dict({"endpoint1": 0.4, "endpoint2": 0.6})
print(repr(policy))
# Very meta...
import_attr_2 = import_attr("ray._private.utils.import_attr")
assert import_attr_2 == import_attr
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| gl | 0.179081 | # Very meta... | 2.082432 | 2 |
portfolio/Python/scrapy/dyersonline/googe_shopping_api.py | 0--key/lib | 0 | 6632820 | import csv
import codecs
import cStringIO
import os
import copy
import json
from decimal import Decimal
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from product_spiders.items import Product, ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
KEYS = ['AIzaSyDmC2E8OgTrtikhGt5OlVaY8GqqSu696KE', '<KEY>',
'<KEY>',]
class GoogleSpider(BaseSpider):
name = 'google.com_DO'
allowed_domains = ['googleapis.com']
def start_requests(self):
csv_file = UnicodeReader(open(os.path.join(HERE, 'skus.csv')))
for i, row in enumerate(csv_file):
sku = row[0]
query = (row[4]).replace(' ', '+')
url = 'https://www.googleapis.com/shopping/search/v1/public/products' + \
'?key=%s&country=US&' + \
'q=%s&rankBy=price%%3Aascending'
yield Request(url % (KEYS[i % len(KEYS)], query), meta={'sku': sku})
def parse(self, response):
data = json.loads(response.body)
if not data['totalItems']:
return
item = data['items'][0]
pr = Product()
pr['name'] = (item['product']['title'] + ' ' + item.get('product', {}).get('author', {}).get('name', '')).strip()
pr['url'] = item['product']['link']
pr['price'] = Decimal(str(data['items'][0]['product']['inventories'][0]['price']))
pr['sku'] = response.meta['sku']
yield pr
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
| import csv
import codecs
import cStringIO
import os
import copy
import json
from decimal import Decimal
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from product_spiders.items import Product, ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
KEYS = ['AIzaSyDmC2E8OgTrtikhGt5OlVaY8GqqSu696KE', '<KEY>',
'<KEY>',]
class GoogleSpider(BaseSpider):
name = 'google.com_DO'
allowed_domains = ['googleapis.com']
def start_requests(self):
csv_file = UnicodeReader(open(os.path.join(HERE, 'skus.csv')))
for i, row in enumerate(csv_file):
sku = row[0]
query = (row[4]).replace(' ', '+')
url = 'https://www.googleapis.com/shopping/search/v1/public/products' + \
'?key=%s&country=US&' + \
'q=%s&rankBy=price%%3Aascending'
yield Request(url % (KEYS[i % len(KEYS)], query), meta={'sku': sku})
def parse(self, response):
data = json.loads(response.body)
if not data['totalItems']:
return
item = data['items'][0]
pr = Product()
pr['name'] = (item['product']['title'] + ' ' + item.get('product', {}).get('author', {}).get('name', '')).strip()
pr['url'] = item['product']['link']
pr['price'] = Decimal(str(data['items'][0]['product']['inventories'][0]['price']))
pr['sku'] = response.meta['sku']
yield pr
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
| en | 0.914293 | Iterator that reads an encoded stream and reencodes the input to UTF-8 A CSV reader which will iterate over lines in the CSV file "f", which is encoded in the given encoding. A CSV writer which will write rows to CSV file "f", which is encoded in the given encoding. # Redirect output to a queue # Fetch UTF-8 output from the queue ... # ... and reencode it into the target encoding # write to the target stream # empty queue | 2.681036 | 3 |
api/api/getNewCSV.py | Servatom/30DaysOfCloudLeaderBoard | 10 | 6632821 | <filename>api/api/getNewCSV.py
import imaplib
import base64
import email
import os
import pandas as pd
import schedule
import time
import os
sender_email = os.environ.get('SENDER_EMAIL')
if sender_email == None:
sender_email = "<EMAIL>"
last_read_email = ""
def generateRandomString():
return base64.b64encode(os.urandom(5)).decode('utf-8')
def convertToCSV(filePath):
if "xlsx" in filePath:
read_file = pd.read_excel (filePath)
fileName = filePath+".csv"
read_file.to_csv (fileName, index = None, header=True)
os.system('rm "'+filePath + '"')
# call function to fill db
os.system('python3 fillDB.py "' + fileName + '"&')
else:
print("File not in xlsx format")
os.system("rm "+filePath)
def checkEmail():
global last_read_email
with open("database/loop.txt", "a+") as myfile:
myfile.write("\n")
myfile.write(str(time.time()))
email_user = os.environ.get('EMAIL_USER')
email_password = <PASSWORD>('EMAIL_PASS')
mail = imaplib.IMAP4_SSL('imap.gmail.com', 993)
mail.login(email_user, email_password)
mail.select('inbox')
type, data = mail.search(None, 'FROM ' + sender_email)
mail_ids = data[0]
id_list = mail_ids.split()
# check attachment for latest email
latest_email_id = id_list[-1]
typ, data = mail.fetch(latest_email_id, '(RFC822)')
raw_email = data[0][1]
raw_email_string = raw_email.decode('utf-8')
email_message = email.message_from_string(raw_email_string)
for part in email_message.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
# get date of email
date = email_message['Date']
if date == last_read_email:
return
last_read_email = date
fileName = part.get_filename()
if bool(fileName):
filePath = os.path.join(r"./", fileName)
if not os.path.isfile(filePath) and "xlsx" in fileName:
print("File not found")
print("Downloading File")
fp = open(filePath, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
print("File Downloaded")
convertToCSV(filePath)
else:
print("File already exists")
# before this loop first check email and then run web scraper in background
isScraper = os.environ.get('SCRAPER_SWITCH')
if isScraper == "true":
checkEmail()
os.system("python refreshDB.py &") # web scraper turned on
time.sleep(60*60)
while True:
checkEmail()
# append loop in txt file
with open("database/loop.txt", "a+") as myfile:
myfile.write("\n")
myfile.write(str(time.time()))
time.sleep(60*60)
else:
print("Competition has ended") | <filename>api/api/getNewCSV.py
import imaplib
import base64
import email
import os
import pandas as pd
import schedule
import time
import os
sender_email = os.environ.get('SENDER_EMAIL')
if sender_email == None:
sender_email = "<EMAIL>"
last_read_email = ""
def generateRandomString():
return base64.b64encode(os.urandom(5)).decode('utf-8')
def convertToCSV(filePath):
if "xlsx" in filePath:
read_file = pd.read_excel (filePath)
fileName = filePath+".csv"
read_file.to_csv (fileName, index = None, header=True)
os.system('rm "'+filePath + '"')
# call function to fill db
os.system('python3 fillDB.py "' + fileName + '"&')
else:
print("File not in xlsx format")
os.system("rm "+filePath)
def checkEmail():
global last_read_email
with open("database/loop.txt", "a+") as myfile:
myfile.write("\n")
myfile.write(str(time.time()))
email_user = os.environ.get('EMAIL_USER')
email_password = <PASSWORD>('EMAIL_PASS')
mail = imaplib.IMAP4_SSL('imap.gmail.com', 993)
mail.login(email_user, email_password)
mail.select('inbox')
type, data = mail.search(None, 'FROM ' + sender_email)
mail_ids = data[0]
id_list = mail_ids.split()
# check attachment for latest email
latest_email_id = id_list[-1]
typ, data = mail.fetch(latest_email_id, '(RFC822)')
raw_email = data[0][1]
raw_email_string = raw_email.decode('utf-8')
email_message = email.message_from_string(raw_email_string)
for part in email_message.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
# get date of email
date = email_message['Date']
if date == last_read_email:
return
last_read_email = date
fileName = part.get_filename()
if bool(fileName):
filePath = os.path.join(r"./", fileName)
if not os.path.isfile(filePath) and "xlsx" in fileName:
print("File not found")
print("Downloading File")
fp = open(filePath, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
print("File Downloaded")
convertToCSV(filePath)
else:
print("File already exists")
# before this loop first check email and then run web scraper in background
isScraper = os.environ.get('SCRAPER_SWITCH')
if isScraper == "true":
checkEmail()
os.system("python refreshDB.py &") # web scraper turned on
time.sleep(60*60)
while True:
checkEmail()
# append loop in txt file
with open("database/loop.txt", "a+") as myfile:
myfile.write("\n")
myfile.write(str(time.time()))
time.sleep(60*60)
else:
print("Competition has ended") | en | 0.773106 | # call function to fill db # check attachment for latest email # get date of email # before this loop first check email and then run web scraper in background # web scraper turned on # append loop in txt file | 2.926975 | 3 |
tests/test_timer.py | wkschwartz/shutdown | 0 | 6632822 | <reponame>wkschwartz/shutdown
# © 2018, <NAME>. All rights reserved. See the LICENSE file.
import os
import signal
import sys
import time
import unittest
from wrapitup import request, reset, Timer
class TestTimer(unittest.TestCase):
# Python makes few guarantees about the precision of its various clocks.
# https://stackoverflow.com/a/43773780
# Pypy seems to have occasional time-out problems on a first run but runs
# okay on the second run. Slowing down time seems to help on the first run.
if os.name == 'nt' or sys.implementation.name == 'pypy':
time_limit = 0.1
decimal_places = 1
elif os.name == 'posix':
time_limit = 0.001
decimal_places = 3
def test_wrapitup_timer(self):
"Calling request causes Timer.expired to return True."
request()
self.assertTrue(Timer().expired())
def test_bad_time_limit(self):
self.assertRaises(TypeError, Timer, type)
self.assertRaises(TypeError, Timer, 1j)
self.assertRaises(TypeError, Timer, '1')
self.assertRaises(TypeError, Timer, None)
self.assertRaises(ValueError, Timer, float('nan'))
def test_default_no_time_limit(self):
"Test that the default time limit is None."
s = Timer()
t1 = s.remaining()
u1 = s.expired()
time.sleep(self.time_limit)
t2 = s.remaining()
u2 = s.expired()
self.assertEqual(t1, float('inf'))
self.assertEqual(t2, float('inf'))
self.assertFalse(u1)
self.assertFalse(u2)
self.assertFalse(s.expired())
def test_time_limit(self):
s = Timer(self.time_limit)
t1 = s.remaining()
u1 = s.expired()
time.sleep(self.time_limit / 2)
t2 = s.remaining()
u2 = s.expired()
time.sleep(self.time_limit / 2)
t3 = s.remaining()
u3 = s.expired()
self.assertAlmostEqual(t1, self.time_limit, places=self.decimal_places)
self.assertGreater(t1, self.time_limit / 2)
self.assertFalse(u1)
self.assertGreater(t1 - t2, self.time_limit / 2, {"t1": t1, "t2": t2})
self.assertFalse(u2)
self.assertLess(t3, 0)
self.assertTrue(u3)
s.stop()
self.assertTrue(s.expired())
s = Timer(self.time_limit)
s.stop()
self.assertFalse(s.expired())
time.sleep(self.time_limit)
self.assertFalse(s.expired()) # The return value should not change
def test_stop(self):
s = Timer()
time.sleep(self.time_limit) # Needed on Windows
self.assertGreater(s.stop(), 0)
self.assertAlmostEqual(s.stop(), self.time_limit, places=self.decimal_places)
s = Timer(self.time_limit)
time.sleep(s.remaining())
self.assertGreater(s.stop(), self.time_limit)
self.assertAlmostEqual(
s.stop(), self.time_limit, places=self.decimal_places - 1)
def test_remaining(self):
# Zero when shutdown requested
s = Timer(self.time_limit)
request()
self.assertEqual(s.remaining(), 0)
reset()
# Greater than zero before timing out, less after
s.start(self.time_limit)
self.assertGreater(s.remaining(), 0)
time.sleep(self.time_limit)
self.assertLess(s.remaining(), 0)
# Always zero after stopping
s.start(self.time_limit)
self.assertGreater(self.time_limit, s.stop())
self.assertEqual(s.remaining(), 0)
@unittest.skipIf(
not hasattr(signal, 'setitimer'),
"Requires signal.setitimer (Unix only)"
)
def test_alarm(self):
called = False
def handler(signum, stack_frame):
nonlocal called
called = True
prev_handler = signal.signal(signal.SIGALRM, handler)
prev_delay, prev_interval = signal.setitimer(signal.ITIMER_REAL, 10, 5)
if prev_delay:
outer = Timer(prev_delay) # pragma: no cover
try:
s = Timer(self.time_limit)
delay, interval = s.alarm()
self.assertAlmostEqual(delay, 10, places=3)
self.assertAlmostEqual(interval, 5, places=3)
time.sleep(self.time_limit)
self.assertTrue(called)
self.assertLess(s.remaining(), 0)
self.assertRaisesRegex(ValueError, r'expired.*-\d\.\d', s.alarm)
finally:
if prev_delay:
signal.setitimer( # pragma: no cover
signal.ITIMER_REAL, outer.remaining(), prev_interval)
else:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
signal.signal(signal.SIGALRM, prev_handler)
| # © 2018, <NAME>. All rights reserved. See the LICENSE file.
import os
import signal
import sys
import time
import unittest
from wrapitup import request, reset, Timer
class TestTimer(unittest.TestCase):
# Python makes few guarantees about the precision of its various clocks.
# https://stackoverflow.com/a/43773780
# Pypy seems to have occasional time-out problems on a first run but runs
# okay on the second run. Slowing down time seems to help on the first run.
if os.name == 'nt' or sys.implementation.name == 'pypy':
time_limit = 0.1
decimal_places = 1
elif os.name == 'posix':
time_limit = 0.001
decimal_places = 3
def test_wrapitup_timer(self):
"Calling request causes Timer.expired to return True."
request()
self.assertTrue(Timer().expired())
def test_bad_time_limit(self):
self.assertRaises(TypeError, Timer, type)
self.assertRaises(TypeError, Timer, 1j)
self.assertRaises(TypeError, Timer, '1')
self.assertRaises(TypeError, Timer, None)
self.assertRaises(ValueError, Timer, float('nan'))
def test_default_no_time_limit(self):
"Test that the default time limit is None."
s = Timer()
t1 = s.remaining()
u1 = s.expired()
time.sleep(self.time_limit)
t2 = s.remaining()
u2 = s.expired()
self.assertEqual(t1, float('inf'))
self.assertEqual(t2, float('inf'))
self.assertFalse(u1)
self.assertFalse(u2)
self.assertFalse(s.expired())
def test_time_limit(self):
s = Timer(self.time_limit)
t1 = s.remaining()
u1 = s.expired()
time.sleep(self.time_limit / 2)
t2 = s.remaining()
u2 = s.expired()
time.sleep(self.time_limit / 2)
t3 = s.remaining()
u3 = s.expired()
self.assertAlmostEqual(t1, self.time_limit, places=self.decimal_places)
self.assertGreater(t1, self.time_limit / 2)
self.assertFalse(u1)
self.assertGreater(t1 - t2, self.time_limit / 2, {"t1": t1, "t2": t2})
self.assertFalse(u2)
self.assertLess(t3, 0)
self.assertTrue(u3)
s.stop()
self.assertTrue(s.expired())
s = Timer(self.time_limit)
s.stop()
self.assertFalse(s.expired())
time.sleep(self.time_limit)
self.assertFalse(s.expired()) # The return value should not change
def test_stop(self):
s = Timer()
time.sleep(self.time_limit) # Needed on Windows
self.assertGreater(s.stop(), 0)
self.assertAlmostEqual(s.stop(), self.time_limit, places=self.decimal_places)
s = Timer(self.time_limit)
time.sleep(s.remaining())
self.assertGreater(s.stop(), self.time_limit)
self.assertAlmostEqual(
s.stop(), self.time_limit, places=self.decimal_places - 1)
def test_remaining(self):
# Zero when shutdown requested
s = Timer(self.time_limit)
request()
self.assertEqual(s.remaining(), 0)
reset()
# Greater than zero before timing out, less after
s.start(self.time_limit)
self.assertGreater(s.remaining(), 0)
time.sleep(self.time_limit)
self.assertLess(s.remaining(), 0)
# Always zero after stopping
s.start(self.time_limit)
self.assertGreater(self.time_limit, s.stop())
self.assertEqual(s.remaining(), 0)
@unittest.skipIf(
not hasattr(signal, 'setitimer'),
"Requires signal.setitimer (Unix only)"
)
def test_alarm(self):
called = False
def handler(signum, stack_frame):
nonlocal called
called = True
prev_handler = signal.signal(signal.SIGALRM, handler)
prev_delay, prev_interval = signal.setitimer(signal.ITIMER_REAL, 10, 5)
if prev_delay:
outer = Timer(prev_delay) # pragma: no cover
try:
s = Timer(self.time_limit)
delay, interval = s.alarm()
self.assertAlmostEqual(delay, 10, places=3)
self.assertAlmostEqual(interval, 5, places=3)
time.sleep(self.time_limit)
self.assertTrue(called)
self.assertLess(s.remaining(), 0)
self.assertRaisesRegex(ValueError, r'expired.*-\d\.\d', s.alarm)
finally:
if prev_delay:
signal.setitimer( # pragma: no cover
signal.ITIMER_REAL, outer.remaining(), prev_interval)
else:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
signal.signal(signal.SIGALRM, prev_handler) | en | 0.927071 | # © 2018, <NAME>. All rights reserved. See the LICENSE file. # Python makes few guarantees about the precision of its various clocks. # https://stackoverflow.com/a/43773780 # Pypy seems to have occasional time-out problems on a first run but runs # okay on the second run. Slowing down time seems to help on the first run. # The return value should not change # Needed on Windows # Zero when shutdown requested # Greater than zero before timing out, less after # Always zero after stopping # pragma: no cover # pragma: no cover | 2.552399 | 3 |
gallon.py | chlbnd/eureciclo | 0 | 6632823 | from bottle import Bottle
from itertools import combinations
class Gallon:
def __init__(self, gallons):
self.__gallons = gallons
self.__liters = gallons
if isinstance(gallons, list):
self.__liters = sum(gallons)
@property
def gallons(self):
return self.__gallons
@property
def liters(self):
return self.__liters
@liters.setter
def liters(self, liters):
self.__liters -= liters
def fill(self, bottles: Bottle):
comb_list = list()
for idx in range(len(bottles.bottles)):
comb_list.extend([comb for comb in combinations(bottles.bottles, idx + 1)])
sum_list = list(map(sum, comb_list))
liters = self.liters
if liters in sum_list:
idx = sum_list.index(liters)
return sorted(comb_list[idx])
delta = [(value - liters) for idx, value in enumerate(sum_list)]
try:
surplus = min([value for value in delta if value > 0])
except:
surplus = liters
try:
lack = min([abs(value) for value in delta if value < 0])
except:
lack = liters
if surplus <= lack:
idx = delta.index(surplus)
return comb_list[idx]
idx = delta.index(lack * -1)
return comb_list[idx]
| from bottle import Bottle
from itertools import combinations
class Gallon:
def __init__(self, gallons):
self.__gallons = gallons
self.__liters = gallons
if isinstance(gallons, list):
self.__liters = sum(gallons)
@property
def gallons(self):
return self.__gallons
@property
def liters(self):
return self.__liters
@liters.setter
def liters(self, liters):
self.__liters -= liters
def fill(self, bottles: Bottle):
comb_list = list()
for idx in range(len(bottles.bottles)):
comb_list.extend([comb for comb in combinations(bottles.bottles, idx + 1)])
sum_list = list(map(sum, comb_list))
liters = self.liters
if liters in sum_list:
idx = sum_list.index(liters)
return sorted(comb_list[idx])
delta = [(value - liters) for idx, value in enumerate(sum_list)]
try:
surplus = min([value for value in delta if value > 0])
except:
surplus = liters
try:
lack = min([abs(value) for value in delta if value < 0])
except:
lack = liters
if surplus <= lack:
idx = delta.index(surplus)
return comb_list[idx]
idx = delta.index(lack * -1)
return comb_list[idx]
| none | 1 | 3.083138 | 3 |
|
aries_cloudagent/wallet/tests/test_indy_wallet.py | msembinelli/aries-cloudagent-python | 1 | 6632824 | import base64
import json
import os
import pytest
from asynctest import mock as async_mock
from asynctest import TestCase as AsyncTestCase
import indy.anoncreds
import indy.crypto
import indy.did
import indy.wallet
from aries_cloudagent.wallet.basic import BasicWallet
from aries_cloudagent.wallet.indy import IndyWallet
from .. import indy as test_module
from . import test_basic_wallet
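# BasicWallet fixture: the non-indy reference implementation used by the pack/unpack compatibility tests below.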
@pytest.fixture()
async def basic_wallet():
wallet = BasicWallet()
await wallet.open()
yield wallet
await wallet.close()
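# Indy-SDK backed wallet fixture: auto-creates and auto-removes the test wallet around each test; RAW key derivation avoids the slower argon-hashed keys.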
@pytest.fixture()
async def wallet():
key = await IndyWallet.generate_wallet_key()
wallet = IndyWallet(
{
"auto_create": True,
"auto_remove": True,
"name": "test-wallet",
"key": key,
"key_derivation_method": "RAW", # much slower tests with argon-hashed keys
}
)
await wallet.open()
yield wallet
await wallet.close()
@pytest.mark.indy
class TestIndyWallet(test_basic_wallet.TestBasicWallet):
"""Apply all BasicWallet tests against IndyWallet"""
@pytest.mark.asyncio
async def test_properties(self, wallet):
assert wallet.name
assert wallet.type == "indy"
assert wallet.handle
none_wallet = IndyWallet()
assert none_wallet.name == IndyWallet.DEFAULT_NAME
assert "IndyWallet" in str(wallet)
assert wallet.created
assert wallet.master_secret_id == wallet.name
assert wallet._wallet_config
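    # The triple-quoted block below is a disabled tag-policy test; as a bare string literal it is never executed.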
"""
@pytest.mark.asyncio
async def test_catpol(self, wallet):
with pytest.raises(test_module.WalletError):
await wallet.get_credential_definition_tag_policy("cred-def-id") # invalid
CD_ID = f"{self.test_did}:3:CL:1234:tag"
catpol = await wallet.get_credential_definition_tag_policy(CD_ID)
assert catpol is None
with async_mock.patch.object(
indy.anoncreds,
"prover_set_credential_attr_tag_policy",
async_mock.CoroutineMock(),
) as mock_catpol:
mock_catpol.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.set_credential_definition_tag_policy(CD_ID)
assert "outlier" in str(excinfo.value)
"""
@pytest.mark.asyncio
async def test_rotate_did_keypair_x(self, wallet):
info = await wallet.create_local_did(self.test_seed, self.test_did)
with async_mock.patch.object(
indy.did, "replace_keys_start", async_mock.CoroutineMock()
) as mock_repl_start:
mock_repl_start.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.rotate_did_keypair_start(self.test_did)
assert "outlier" in str(excinfo.value)
with async_mock.patch.object(
indy.did, "replace_keys_apply", async_mock.CoroutineMock()
) as mock_repl_apply:
mock_repl_apply.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.rotate_did_keypair_apply(self.test_did)
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_create_signing_key_x(self, wallet):
with async_mock.patch.object(
indy.crypto, "create_key", async_mock.CoroutineMock()
) as mock_create_key:
mock_create_key.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.create_signing_key()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_create_local_did_x(self, wallet):
with async_mock.patch.object(
indy.did, "create_and_store_my_did", async_mock.CoroutineMock()
) as mock_create:
mock_create.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.create_local_did()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_get_signing_key_x(self, wallet):
with async_mock.patch.object(
indy.crypto, "get_key_metadata", async_mock.CoroutineMock()
) as mock_signing:
mock_signing.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.get_signing_key(None)
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_get_local_did_x(self, wallet):
with async_mock.patch.object(
indy.did, "get_my_did_with_meta", async_mock.CoroutineMock()
) as mock_my:
mock_my.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.get_local_did(None)
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_verify_message_x(self, wallet):
with async_mock.patch.object(
indy.crypto, "crypto_verify", async_mock.CoroutineMock()
) as mock_verify:
mock_verify.side_effect = test_module.IndyError( # outlier
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.verify_message(
b"hello world", b"signature", self.test_verkey
)
assert "outlier" in str(excinfo.value)
mock_verify.side_effect = test_module.IndyError( # plain wrong
test_module.ErrorCode.CommonInvalidStructure
)
assert not await wallet.verify_message(
b"hello world", b"signature", self.test_verkey
)
@pytest.mark.asyncio
async def test_pack_message_x(self, wallet):
with async_mock.patch.object(
indy.crypto, "pack_message", async_mock.CoroutineMock()
) as mock_pack:
mock_pack.side_effect = test_module.IndyError( # outlier
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.pack_message(
b"hello world", [self.test_verkey,],
)
assert "outlier" in str(excinfo.value)
@pytest.mark.indy
class TestWalletCompat:
""" Tests for wallet compatibility."""
test_seed = "testseed000000000000000000000001"
test_did = "55GkHamhTU1ZbTbV2ab9DE"
test_verkey = "<KEY>"
test_message = "test message"
@pytest.mark.asyncio
async def test_compare_pack_unpack(self, basic_wallet, wallet):
"""
        Ensure that the Python-based pack/unpack is compatible with the indy-sdk implementation.
"""
await basic_wallet.create_local_did(self.test_seed)
py_packed = await basic_wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
await wallet.create_local_did(self.test_seed)
packed = await wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
py_unpacked, from_vk, to_vk = await basic_wallet.unpack_message(packed)
assert self.test_message == py_unpacked
unpacked, from_vk, to_vk = await wallet.unpack_message(py_packed)
assert self.test_message == unpacked
@pytest.mark.asyncio
async def test_mock_coverage(self):
"""
Coverage through mock framework.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
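        # Patch the postgres plugin loader and every indy-sdk wallet/anoncreds call so the full create/open/close/remove cycle runs without a real wallet or database.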
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
await fake_wallet.open()
assert fake_wallet._wallet_access
await fake_wallet.close()
await fake_wallet.remove()
@pytest.mark.asyncio
async def test_mock_coverage_wallet_exists_x(self):
"""
Coverage through mock framework: raise on creation of existing wallet
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
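            # Creation reports WalletAlreadyExistsError; with auto_remove=True this should surface as a WalletError.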
mock_create.side_effect = test_module.IndyError(
test_module.ErrorCode.WalletAlreadyExistsError
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.create()
assert "Wallet was not removed by SDK" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_wallet_create_x(self):
"""
        Coverage through mock framework: raise on an unexpected ("outlier") error during creation.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
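            # A generic indy error (CommonIOError) during creation should be re-raised as a WalletError with the original message.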
mock_create.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.create()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_remove_x(self):
"""
Coverage through mock framework: exception on removal.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_delete.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
await fake_wallet.open()
assert fake_wallet._wallet_access
await fake_wallet.close()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.remove()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_double_open(self):
"""
Coverage through mock framework: double-open (no-op).
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
await fake_wallet.open()
fake_wallet._handle = 1234
await fake_wallet.open() # open an open wallet: should be OK
assert fake_wallet._wallet_access
await fake_wallet.close()
await fake_wallet.remove()
@pytest.mark.asyncio
async def test_mock_coverage_not_found_after_creation(self):
"""
Coverage through mock framework: missing created wallet.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_open.side_effect = test_module.IndyError(
test_module.ErrorCode.WalletNotFoundError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": True,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.open()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_not_found(self):
"""
Coverage through mock framework: missing wallet on open.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_open.side_effect = test_module.IndyError(
test_module.ErrorCode.WalletNotFoundError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletNotFoundError) as excinfo:
await fake_wallet.open()
assert "Wallet test_pg_wallet not found" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_indy_already_open_x(self):
"""
Coverage through mock framework: indy thinks wallet is open, aca-py does not.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_open.side_effect = test_module.IndyError(
test_module.ErrorCode.WalletAlreadyOpenedError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.open()
assert "Wallet test_pg_wallet is already open" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_x(self):
"""
Coverage through mock framework: outlier on wallet open.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_open.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.open()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_master_secret_x(self):
"""
Coverage through mock framework: outlier on master secret creation
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_master.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.open()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_master_secret_exists(self):
"""
Coverage through mock framework: open, master secret exists (OK).
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_master.side_effect = test_module.IndyError(
test_module.ErrorCode.AnoncredsMasterSecretDuplicateNameError
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
await fake_wallet.open()
assert fake_wallet._master_secret_id == fake_wallet.name
fake_wallet._handle = 1234
await fake_wallet.open() # open an open wallet: should be OK
assert fake_wallet._wallet_access
await fake_wallet.close()
await fake_wallet.remove()
# TODO get these to run in docker ci/cd
@pytest.mark.asyncio
@pytest.mark.postgres
async def test_postgres_wallet_works(self):
"""
Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet)
"""
postgres_url = os.environ.get("POSTGRES_URL")
if not postgres_url:
pytest.fail("POSTGRES_URL not configured")
wallet_key = await IndyWallet.generate_wallet_key()
postgres_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": '{"url":"' + postgres_url + '"}',
"storage_creds": '{"account":"postgres","password":"<PASSWORD>","admin_account":"postgres","admin_password":"<PASSWORD>"}',
}
)
await postgres_wallet.create()
await postgres_wallet.open()
assert postgres_wallet._wallet_access
await postgres_wallet.create_local_did(self.test_seed)
py_packed = await postgres_wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
await postgres_wallet.close()
await postgres_wallet.remove()
# TODO get these to run in docker ci/cd
@pytest.mark.asyncio
@pytest.mark.postgres
async def test_postgres_wallet_scheme_works(self):
"""
Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet)
"""
postgres_url = os.environ.get("POSTGRES_URL")
if not postgres_url:
pytest.fail("POSTGRES_URL not configured")
wallet_key = await IndyWallet.generate_wallet_key()
postgres_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": '{"url":"'
+ postgres_url
+ '", "wallet_scheme":"MultiWalletSingleTable"}',
"storage_creds": '{"account":"postgres","password":"<PASSWORD>","admin_account":"postgres","admin_password":"<PASSWORD>"}',
}
)
await postgres_wallet.create()
await postgres_wallet.open()
        with pytest.raises(test_module.WalletError) as excinfo:
            await postgres_wallet.create()
assert "Wallet was not removed" in str(excinfo.value)
await postgres_wallet.create_local_did(self.test_seed)
py_packed = await postgres_wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
await postgres_wallet.close()
await postgres_wallet.remove()
# TODO get these to run in docker ci/cd
@pytest.mark.asyncio
@pytest.mark.postgres
async def test_postgres_wallet_scheme2_works(self):
"""
Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet)
"""
postgres_url = os.environ.get("POSTGRES_URL")
if not postgres_url:
pytest.fail("POSTGRES_URL not configured")
wallet_key = await IndyWallet.generate_wallet_key()
postgres_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": '{"url":"'
+ postgres_url
+ '", "wallet_scheme":"MultiWalletSingleTableSharedPool"}',
"storage_creds": '{"account":"postgres","password":"<PASSWORD>","admin_account":"postgres","admin_password":"<PASSWORD>"}',
}
)
await postgres_wallet.create()
await postgres_wallet.open()
await postgres_wallet.create_local_did(self.test_seed)
py_packed = await postgres_wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
await postgres_wallet.close()
await postgres_wallet.remove()
| import base64
import json
import os
import pytest
from asynctest import mock as async_mock
from asynctest import TestCase as AsyncTestCase
import indy.anoncreds
import indy.crypto
import indy.did
import indy.wallet
from aries_cloudagent.wallet.basic import BasicWallet
from aries_cloudagent.wallet.indy import IndyWallet
from .. import indy as test_module
from . import test_basic_wallet
@pytest.fixture()
async def basic_wallet():
wallet = BasicWallet()
await wallet.open()
yield wallet
await wallet.close()
@pytest.fixture()
async def wallet():
key = await IndyWallet.generate_wallet_key()
wallet = IndyWallet(
{
"auto_create": True,
"auto_remove": True,
"name": "test-wallet",
"key": key,
"key_derivation_method": "RAW", # much slower tests with argon-hashed keys
}
)
await wallet.open()
yield wallet
await wallet.close()
@pytest.mark.indy
class TestIndyWallet(test_basic_wallet.TestBasicWallet):
"""Apply all BasicWallet tests against IndyWallet"""
@pytest.mark.asyncio
async def test_properties(self, wallet):
assert wallet.name
assert wallet.type == "indy"
assert wallet.handle
none_wallet = IndyWallet()
assert none_wallet.name == IndyWallet.DEFAULT_NAME
assert "IndyWallet" in str(wallet)
assert wallet.created
assert wallet.master_secret_id == wallet.name
assert wallet._wallet_config
"""
@pytest.mark.asyncio
async def test_catpol(self, wallet):
with pytest.raises(test_module.WalletError):
await wallet.get_credential_definition_tag_policy("cred-def-id") # invalid
CD_ID = f"{self.test_did}:3:CL:1234:tag"
catpol = await wallet.get_credential_definition_tag_policy(CD_ID)
assert catpol is None
with async_mock.patch.object(
indy.anoncreds,
"prover_set_credential_attr_tag_policy",
async_mock.CoroutineMock(),
) as mock_catpol:
mock_catpol.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.set_credential_definition_tag_policy(CD_ID)
assert "outlier" in str(excinfo.value)
"""
@pytest.mark.asyncio
async def test_rotate_did_keypair_x(self, wallet):
info = await wallet.create_local_did(self.test_seed, self.test_did)
with async_mock.patch.object(
indy.did, "replace_keys_start", async_mock.CoroutineMock()
) as mock_repl_start:
mock_repl_start.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.rotate_did_keypair_start(self.test_did)
assert "outlier" in str(excinfo.value)
with async_mock.patch.object(
indy.did, "replace_keys_apply", async_mock.CoroutineMock()
) as mock_repl_apply:
mock_repl_apply.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.rotate_did_keypair_apply(self.test_did)
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_create_signing_key_x(self, wallet):
with async_mock.patch.object(
indy.crypto, "create_key", async_mock.CoroutineMock()
) as mock_create_key:
mock_create_key.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.create_signing_key()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_create_local_did_x(self, wallet):
with async_mock.patch.object(
indy.did, "create_and_store_my_did", async_mock.CoroutineMock()
) as mock_create:
mock_create.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.create_local_did()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_get_signing_key_x(self, wallet):
with async_mock.patch.object(
indy.crypto, "get_key_metadata", async_mock.CoroutineMock()
) as mock_signing:
mock_signing.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.get_signing_key(None)
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_get_local_did_x(self, wallet):
with async_mock.patch.object(
indy.did, "get_my_did_with_meta", async_mock.CoroutineMock()
) as mock_my:
mock_my.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.get_local_did(None)
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_verify_message_x(self, wallet):
with async_mock.patch.object(
indy.crypto, "crypto_verify", async_mock.CoroutineMock()
) as mock_verify:
mock_verify.side_effect = test_module.IndyError( # outlier
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.verify_message(
b"hello world", b"signature", self.test_verkey
)
assert "outlier" in str(excinfo.value)
mock_verify.side_effect = test_module.IndyError( # plain wrong
test_module.ErrorCode.CommonInvalidStructure
)
assert not await wallet.verify_message(
b"hello world", b"signature", self.test_verkey
)
@pytest.mark.asyncio
async def test_pack_message_x(self, wallet):
with async_mock.patch.object(
indy.crypto, "pack_message", async_mock.CoroutineMock()
) as mock_pack:
mock_pack.side_effect = test_module.IndyError( # outlier
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
with pytest.raises(test_module.WalletError) as excinfo:
await wallet.pack_message(
b"hello world", [self.test_verkey,],
)
assert "outlier" in str(excinfo.value)
@pytest.mark.indy
class TestWalletCompat:
""" Tests for wallet compatibility."""
test_seed = "testseed000000000000000000000001"
test_did = "55GkHamhTU1ZbTbV2ab9DE"
test_verkey = "<KEY>"
test_message = "test message"
@pytest.mark.asyncio
async def test_compare_pack_unpack(self, basic_wallet, wallet):
"""
Ensure that python-based pack/unpack is compatible with indy-sdk implementation
"""
await basic_wallet.create_local_did(self.test_seed)
py_packed = await basic_wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
await wallet.create_local_did(self.test_seed)
packed = await wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
py_unpacked, from_vk, to_vk = await basic_wallet.unpack_message(packed)
assert self.test_message == py_unpacked
unpacked, from_vk, to_vk = await wallet.unpack_message(py_packed)
assert self.test_message == unpacked
@pytest.mark.asyncio
async def test_mock_coverage(self):
"""
Coverage through mock framework.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
await fake_wallet.open()
assert fake_wallet._wallet_access
await fake_wallet.close()
await fake_wallet.remove()
@pytest.mark.asyncio
async def test_mock_coverage_wallet_exists_x(self):
"""
Coverage through mock framework: raise on creation of existing wallet
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_create.side_effect = test_module.IndyError(
test_module.ErrorCode.WalletAlreadyExistsError
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.create()
assert "Wallet was not removed by SDK" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_wallet_create_x(self):
"""
Coverage through mock framework: raise on creation outlier
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_create.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.create()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_remove_x(self):
"""
Coverage through mock framework: exception on removal.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_delete.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
await fake_wallet.open()
assert fake_wallet._wallet_access
await fake_wallet.close()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.remove()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_double_open(self):
"""
Coverage through mock framework: double-open (no-op).
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
await fake_wallet.open()
fake_wallet._handle = 1234
await fake_wallet.open() # open an open wallet: should be OK
assert fake_wallet._wallet_access
await fake_wallet.close()
await fake_wallet.remove()
@pytest.mark.asyncio
async def test_mock_coverage_not_found_after_creation(self):
"""
Coverage through mock framework: missing created wallet.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_open.side_effect = test_module.IndyError(
test_module.ErrorCode.WalletNotFoundError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": True,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.open()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_not_found(self):
"""
Coverage through mock framework: missing wallet on open.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_open.side_effect = test_module.IndyError(
test_module.ErrorCode.WalletNotFoundError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletNotFoundError) as excinfo:
await fake_wallet.open()
assert "Wallet test_pg_wallet not found" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_indy_already_open_x(self):
"""
Coverage through mock framework: indy thinks wallet is open, aca-py does not.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_open.side_effect = test_module.IndyError(
test_module.ErrorCode.WalletAlreadyOpenedError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.open()
assert "Wallet test_pg_wallet is already open" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_x(self):
"""
Coverage through mock framework: outlier on wallet open.
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_open.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.open()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_master_secret_x(self):
"""
Coverage through mock framework: outlier on master secret creation
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_master.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonIOError, {"message": "outlier"}
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": True,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
with pytest.raises(test_module.WalletError) as excinfo:
await fake_wallet.open()
assert "outlier" in str(excinfo.value)
@pytest.mark.asyncio
async def test_mock_coverage_open_master_secret_exists(self):
"""
Coverage through mock framework: open, master secret exists (OK).
"""
wallet_key = await IndyWallet.generate_wallet_key()
storage_config_json = json.dumps({"url": "dummy"})
storage_creds_json = json.dumps(
{
"account": "postgres",
"password": "<PASSWORD>",
"admin_account": "postgres",
"admin_password": "<PASSWORD>",
},
)
with async_mock.patch.object(
test_module, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
mock_master.side_effect = test_module.IndyError(
test_module.ErrorCode.AnoncredsMasterSecretDuplicateNameError
)
fake_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": storage_config_json,
"storage_creds": storage_creds_json,
}
)
mock_load.assert_called_once_with(storage_config_json, storage_creds_json)
await fake_wallet.create()
await fake_wallet.open()
assert fake_wallet._master_secret_id == fake_wallet.name
fake_wallet._handle = 1234
await fake_wallet.open() # open an open wallet: should be OK
assert fake_wallet._wallet_access
await fake_wallet.close()
await fake_wallet.remove()
# TODO get these to run in docker ci/cd
@pytest.mark.asyncio
@pytest.mark.postgres
async def test_postgres_wallet_works(self):
"""
Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet)
"""
postgres_url = os.environ.get("POSTGRES_URL")
if not postgres_url:
pytest.fail("POSTGRES_URL not configured")
wallet_key = await IndyWallet.generate_wallet_key()
postgres_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": '{"url":"' + postgres_url + '"}',
"storage_creds": '{"account":"postgres","password":"<PASSWORD>","admin_account":"postgres","admin_password":"<PASSWORD>"}',
}
)
await postgres_wallet.create()
await postgres_wallet.open()
assert postgres_wallet._wallet_access
await postgres_wallet.create_local_did(self.test_seed)
py_packed = await postgres_wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
await postgres_wallet.close()
await postgres_wallet.remove()
# TODO get these to run in docker ci/cd
@pytest.mark.asyncio
@pytest.mark.postgres
async def test_postgres_wallet_scheme_works(self):
"""
Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet)
"""
postgres_url = os.environ.get("POSTGRES_URL")
if not postgres_url:
pytest.fail("POSTGRES_URL not configured")
wallet_key = await IndyWallet.generate_wallet_key()
postgres_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": '{"url":"'
+ postgres_url
+ '", "wallet_scheme":"MultiWalletSingleTable"}',
"storage_creds": '{"account":"postgres","password":"<PASSWORD>","admin_account":"postgres","admin_password":"<PASSWORD>"}',
}
)
await postgres_wallet.create()
await postgres_wallet.open()
        with pytest.raises(test_module.WalletError) as excinfo:
            await postgres_wallet.create()
assert "Wallet was not removed" in str(excinfo.value)
await postgres_wallet.create_local_did(self.test_seed)
py_packed = await postgres_wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
await postgres_wallet.close()
await postgres_wallet.remove()
# TODO get these to run in docker ci/cd
@pytest.mark.asyncio
@pytest.mark.postgres
async def test_postgres_wallet_scheme2_works(self):
"""
Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet)
"""
postgres_url = os.environ.get("POSTGRES_URL")
if not postgres_url:
pytest.fail("POSTGRES_URL not configured")
wallet_key = await IndyWallet.generate_wallet_key()
postgres_wallet = IndyWallet(
{
"auto_create": False,
"auto_remove": False,
"name": "test_pg_wallet",
"key": wallet_key,
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": '{"url":"'
+ postgres_url
+ '", "wallet_scheme":"MultiWalletSingleTableSharedPool"}',
"storage_creds": '{"account":"postgres","password":"<PASSWORD>","admin_account":"postgres","admin_password":"<PASSWORD>"}',
}
)
await postgres_wallet.create()
await postgres_wallet.open()
await postgres_wallet.create_local_did(self.test_seed)
py_packed = await postgres_wallet.pack_message(
self.test_message, [self.test_verkey], self.test_verkey
)
await postgres_wallet.close()
await postgres_wallet.remove()
| en | 0.771409 | # much slower tests with argon-hashed keys Apply all BasicWallet tests against IndyWallet @pytest.mark.asyncio async def test_catpol(self, wallet): with pytest.raises(test_module.WalletError): await wallet.get_credential_definition_tag_policy("cred-def-id") # invalid CD_ID = f"{self.test_did}:3:CL:1234:tag" catpol = await wallet.get_credential_definition_tag_policy(CD_ID) assert catpol is None with async_mock.patch.object( indy.anoncreds, "prover_set_credential_attr_tag_policy", async_mock.CoroutineMock(), ) as mock_catpol: mock_catpol.side_effect = test_module.IndyError( test_module.ErrorCode.CommonIOError, {"message": "outlier"} ) with pytest.raises(test_module.WalletError) as excinfo: await wallet.set_credential_definition_tag_policy(CD_ID) assert "outlier" in str(excinfo.value) # outlier # plain wrong # outlier Tests for wallet compatibility. Ensure that python-based pack/unpack is compatible with indy-sdk implementation Coverage through mock framework. Coverage through mock framework: raise on creation of existing wallet Coverage through mock framework: raise on creation outlier Coverage through mock framework: exception on removal. Coverage through mock framework: double-open (no-op). # open an open wallet: should be OK Coverage through mock framework: missing created wallet. Coverage through mock framework: missing wallet on open. Coverage through mock framework: indy thinks wallet is open, aca-py does not. Coverage through mock framework: outlier on wallet open. Coverage through mock framework: outlier on master secret creation Coverage through mock framework: open, master secret exists (OK). # open an open wallet: should be OK # TODO get these to run in docker ci/cd Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet) # TODO get these to run in docker ci/cd Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet) # TODO get these to run in docker ci/cd Ensure that postgres wallet operations work (create and open wallet, create did, drop wallet) | 2.022673 | 2 |
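# Illustrative note (not part of the dataset row above): the postgres-backed
# tests in this row configure IndyWallet through two JSON strings. A minimal
# sketch of the shapes they use, with hypothetical host/password values:
#
#   storage_config = json.dumps(
#       {"url": "db-host:5432", "wallet_scheme": "MultiWalletSingleTable"}
#   )
#   storage_creds = json.dumps(
#       {"account": "postgres", "password": "example-password",
#        "admin_account": "postgres", "admin_password": "example-password"}
#   )
#
# These are passed to IndyWallet({..., "storage_type": "postgres_storage",
# "storage_config": storage_config, "storage_creds": storage_creds}).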
Table4/streamsdemo/app.py | thomasmilner/ddbgsidemo | 1 | 6632825 | <gh_stars>1-10
import json
import boto3
import traceback
from botocore.exceptions import ClientError
def updateDDBTable(tableName,pkValue,team,counter):
dynamodb = boto3.resource('dynamodb')
#Get table name from stream. Updates will be written back to same table
dynamodb_table = dynamodb.Table(tableName)
#loop through collection
dynamodb_table.update_item(
Key={
'pk1': pkValue,
'sk1': '0'
},
UpdateExpression="set cnt = ((if_not_exists(cnt,:init)) + :num), gsi_pk1 = :zero, gsi_sk1 = :team, team = :team ", #if record doesn't exist, create it
ExpressionAttributeValues={
':init': 0, #new record will be created with 0 + num value
':num': counter,
':zero': '0',
':team': team
},
ReturnValues="NONE"
)
def updateCounter(tableName,pkValue,team,counter):
#persist changes to table
updateDDBTable(tableName,pkValue,team,counter)
def parseStreamArn(streamARN):
tableName = streamARN.split(':')[5].split('/')[1]
return(tableName)
def _lambda_handler(event, context):
records = event['Records']
record1 = records[0]
tableName = parseStreamArn(record1['eventSourceARN'])
for record in records:
event_name = record['eventName'].upper() # INSERT, MODIFY, REMOVE
pkValue = record['dynamodb']['Keys']['pk1']['S']
team = record['dynamodb']['NewImage']['team']['S']
#print(keyValue)
if (event_name == 'INSERT') and "cnt" not in record['dynamodb']["NewImage"]:
print(event_name)
updateCounter(tableName,pkValue,team,1)
#if (event_name == 'REMOVE') and "sales_cnt" not in record['dynamodb']["NewImage"]:
# updateCounter(tableName,pkValue,skValue,-1)
return 'Successfully processed {} records.'.format(len(event['Records']))
def lambda_handler(event, context):
try:
return _lambda_handler(event, context)
except Exception:
print (traceback.format_exc())
| import json
import boto3
import traceback
from botocore.exceptions import ClientError
def updateDDBTable(tableName,pkValue,team,counter):
dynamodb = boto3.resource('dynamodb')
#Get table name from stream. Updates will be written back to same table
dynamodb_table = dynamodb.Table(tableName)
#loop through collection
dynamodb_table.update_item(
Key={
'pk1': pkValue,
'sk1': '0'
},
UpdateExpression="set cnt = ((if_not_exists(cnt,:init)) + :num), gsi_pk1 = :zero, gsi_sk1 = :team, team = :team ", #if record doesn't exist, create it
ExpressionAttributeValues={
':init': 0, #new record will be created with 0 + num value
':num': counter,
':zero': '0',
':team': team
},
ReturnValues="NONE"
)
def updateCounter(tableName,pkValue,team,counter):
#persist changes to table
updateDDBTable(tableName,pkValue,team,counter)
def parseStreamArn(streamARN):
tableName = streamARN.split(':')[5].split('/')[1]
return(tableName)
def _lambda_handler(event, context):
records = event['Records']
record1 = records[0]
tableName = parseStreamArn(record1['eventSourceARN'])
for record in records:
event_name = record['eventName'].upper() # INSERT, MODIFY, REMOVE
pkValue = record['dynamodb']['Keys']['pk1']['S']
team = record['dynamodb']['NewImage']['team']['S']
#print(keyValue)
if (event_name == 'INSERT') and "cnt" not in record['dynamodb']["NewImage"]:
print(event_name)
updateCounter(tableName,pkValue,team,1)
#if (event_name == 'REMOVE') and "sales_cnt" not in record['dynamodb']["NewImage"]:
# updateCounter(tableName,pkValue,skValue,-1)
return 'Successfully processed {} records.'.format(len(event['Records']))
def lambda_handler(event, context):
try:
return _lambda_handler(event, context)
except Exception:
print (traceback.format_exc()) | en | 0.639995 | #Get table name from stream. Updates will be written back to same table #loop through collection #if record doesn't exist, create it #new record will be created with 0 + num value #persist changes to table # INSERT, MODIFY, REMOVE #print(keyValue) #if (event_name == 'REMOVE') and "sales_cnt" not in record['dynamodb']["NewImage"]: # updateCounter(tableName,pkValue,skValue,-1) | 2.363498 | 2 |
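# Illustrative sketch (hypothetical values, not part of the dataset row above):
# the handler reads the following fields from each DynamoDB Streams record:
#
#   {
#       "Records": [{
#           "eventName": "INSERT",
#           "eventSourceARN": "arn:aws:dynamodb:us-east-1:123456789012:"
#                             "table/demo/stream/2020-01-01T00:00:00.000",
#           "dynamodb": {
#               "Keys": {"pk1": {"S": "player#1"}},
#               "NewImage": {"team": {"S": "red"}}
#           }
#       }]
#   }
#
# parseStreamArn() takes the 6th ':'-separated ARN field ("table/demo/stream/...")
# and splits it on '/' to recover the table name ("demo"). Every INSERT whose
# NewImage lacks a "cnt" attribute then increments the per-pk counter item
# (pk1=<same pk>, sk1="0") via the if_not_exists update expression shown above.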
src/precise_nlp/const/path.py | kpwhri/precise_nlp | 0 | 6632826 | """
Variable overview:
If a total, then divided by <3, 3+
Otherwise, yes=1, no=0
"""
# adenoma detected
ADENOMA_STATUS = 'adenoma_status'
ADENOMA_STATUS_ADV = 'adenoma_status_adv'
ADENOMA_DISTAL = 'adenoma_distal'
ADENOMA_DISTAL_COUNT = 'adenoma_distal_count'
ADENOMA_PROXIMAL = 'adenoma_proximal'
ADENOMA_PROXIMAL_COUNT = 'adenoma_proximal_count'
ADENOMA_RECTAL = 'adenoma_rectal'
ADENOMA_RECTAL_COUNT = 'adenoma_rectal_count'
ADENOMA_UNKNOWN = 'adenoma_unknown'
ADENOMA_UNKNOWN_COUNT = 'adenoma_unknown_count'
JAR_ADENOMA_COUNT_ADV = 'jar_adenoma_count_adv'
JAR_ADENOMA_DISTAL_COUNT = 'jar_adenoma_distal_count'
JAR_ADENOMA_PROXIMAL_COUNT = 'jar_adenoma_proximal_count'
JAR_ADENOMA_RECTAL_COUNT = 'jar_adenoma_rectal_count'
JAR_ADENOMA_UNKNOWN_COUNT = 'jar_adenoma_unknown_count'
# tubulovillous adenoma detected
TUBULOVILLOUS = 'tubulovillous'
# tubular adenoma detected
TUBULAR = 'tubular'
# villous adenoma detected (not tubulovillous)
VILLOUS = 'villous'
# either villous or tubulovillous adenoma detected
ANY_VILLOUS = 'any_villous'
PROXIMAL_VILLOUS = 'proximal_villous'
DISTAL_VILLOUS = 'distal_villous'
RECTAL_VILLOUS = 'rectal_villous'
UNKNOWN_VILLOUS = 'unknown_villous'
# high-grade dysplasia detected
HIGHGRADE_DYSPLASIA = 'highgrade_dysplasia'
SIMPLE_HIGHGRADE_DYSPLASIA = 'simple_highgrade_dysplasia'
# number of adenomas found
ADENOMA_COUNT = 'adenoma_count'
ADENOMA_COUNT_ADV = 'adenoma_count_adv'
# has adenoma >= X size
LARGE_ADENOMA = 'large_adenoma'
# sessile serrated adenoma/polyp; SSA/SSP
JAR_SESSILE_SERRATED_ADENOMA_COUNT = 'jar_sessile_serrated_adenoma_cnt'
# number of carcinomas
CARCINOMA_COUNT = 'jar_carcinoma_count'
CARCINOMA_MAYBE_COUNT = 'jar_carcinoma_maybe_count'
CARCINOMA_POSSIBLE_COUNT = 'jar_carcinoma_possible_count'
CARCINOMA_IN_SITU_COUNT = 'jar_carcinoma_in_situ_count'
CARCINOMA_IN_SITU_POSSIBLE_COUNT = 'jar_carcinoma_in_situ_poss_cnt'
CARCINOMA_IN_SITU_MAYBE_COUNT = 'jar_carcinoma_in_situ_maybe_cnt'
| """
Variable overview:
If a total, then divided by <3, 3+
Otherwise, yes=1, no=0
"""
# adenoma detected
ADENOMA_STATUS = 'adenoma_status'
ADENOMA_STATUS_ADV = 'adenoma_status_adv'
ADENOMA_DISTAL = 'adenoma_distal'
ADENOMA_DISTAL_COUNT = 'adenoma_distal_count'
ADENOMA_PROXIMAL = 'adenoma_proximal'
ADENOMA_PROXIMAL_COUNT = 'adenoma_proximal_count'
ADENOMA_RECTAL = 'adenoma_rectal'
ADENOMA_RECTAL_COUNT = 'adenoma_rectal_count'
ADENOMA_UNKNOWN = 'adenoma_unknown'
ADENOMA_UNKNOWN_COUNT = 'adenoma_unknown_count'
JAR_ADENOMA_COUNT_ADV = 'jar_adenoma_count_adv'
JAR_ADENOMA_DISTAL_COUNT = 'jar_adenoma_distal_count'
JAR_ADENOMA_PROXIMAL_COUNT = 'jar_adenoma_proximal_count'
JAR_ADENOMA_RECTAL_COUNT = 'jar_adenoma_rectal_count'
JAR_ADENOMA_UNKNOWN_COUNT = 'jar_adenoma_unknown_count'
# tubulovillous adenoma detected
TUBULOVILLOUS = 'tubulovillous'
# tubular adenoma detected
TUBULAR = 'tubular'
# villous adenoma detected (not tubulovillous)
VILLOUS = 'villous'
# either villous or tubulovillous adenoma detected
ANY_VILLOUS = 'any_villous'
PROXIMAL_VILLOUS = 'proximal_villous'
DISTAL_VILLOUS = 'distal_villous'
RECTAL_VILLOUS = 'rectal_villous'
UNKNOWN_VILLOUS = 'unknown_villous'
# high-grade dysplasia detected
HIGHGRADE_DYSPLASIA = 'highgrade_dysplasia'
SIMPLE_HIGHGRADE_DYSPLASIA = 'simple_highgrade_dysplasia'
# number of adenomas found
ADENOMA_COUNT = 'adenoma_count'
ADENOMA_COUNT_ADV = 'adenoma_count_adv'
# has adenoma >= X size
LARGE_ADENOMA = 'large_adenoma'
# sessile serrated adenoma/polyp; SSA/SSP
JAR_SESSILE_SERRATED_ADENOMA_COUNT = 'jar_sessile_serrated_adenoma_cnt'
# number of carcinomas
CARCINOMA_COUNT = 'jar_carcinoma_count'
CARCINOMA_MAYBE_COUNT = 'jar_carcinoma_maybe_count'
CARCINOMA_POSSIBLE_COUNT = 'jar_carcinoma_possible_count'
CARCINOMA_IN_SITU_COUNT = 'jar_carcinoma_in_situ_count'
CARCINOMA_IN_SITU_POSSIBLE_COUNT = 'jar_carcinoma_in_situ_poss_cnt'
CARCINOMA_IN_SITU_MAYBE_COUNT = 'jar_carcinoma_in_situ_maybe_cnt'
| en | 0.648924 | Variable overview: If a total, then divided by <3, 3+ Otherwise, yes=1, no=0 # adenoma detected # tubulovillous adenoma detected # tubular adenoma detected # villous adenoma detected (not tubulovillous) # either villous or tubulovillous adenoma detected # high-grade dysplasia detected # number of adenomas found # has adenoma >= X size # sessile serrated adenoma/polyp; SSA/SSP # number of carcinomas | 1.675837 | 2 |
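# Illustrative helper (not part of precise_nlp): the module docstring above says
# count-style variables are reported as "<3" vs "3+" and flags as yes=1/no=0.
# A sketch of that binning convention:
def _bin_count_example(count):
    """Return the documented '<3' / '3+' bin for a raw count."""
    return '<3' if count < 3 else '3+'
# e.g. _bin_count_example(2) -> '<3', _bin_count_example(5) -> '3+'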
IRIS_data_download/IRIS_download_support/obspy/signal/_sosfilt.py | earthinversion/Fnet_IRIS_data_automated_download | 2 | 6632827 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Filename: _sosfilt.py
# Purpose: Backport of Second-Order Section Filtering from SciPy 0.16.0
# Author: <NAME> + SciPy authors
# ---------------------------------------------------------------------
"""
Backport of Second-Order Section Filtering from SciPy 0.16.0
:copyright:
The ObsPy Development Team (<EMAIL>)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import numpy as np
from scipy.signal import lfilter, zpk2tf
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc.real)
[ 1. 2. 2. 2.]
>>> print(zc.imag)
[ 1. 1. 1. 2.]
>>> print(zr)
[ 1. 3. 4.]
"""
z = np.atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return np.array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = np.diff(np.concatenate(([0], same_real, [0])))
run_starts = np.where(diffs > 0)[0]
run_stops = np.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
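# Illustrative sketch, not part of the original SciPy backport: a quick check
# of how _nearest_real_complex_idx behaves on a hypothetical candidate array.
# The candidate values below are made up for the example.
def _demo_nearest_real_complex_idx():
    candidates = np.array([0.9, 0.5 + 0.5j, -0.2, 0.8 - 0.1j])
    # Nearest purely real element to 0.85 is 0.9, i.e. index 0.
    real_idx = _nearest_real_complex_idx(candidates, 0.85, 'real')
    # Nearest complex element to 0.85 is 0.8 - 0.1j, i.e. index 3.
    complex_idx = _nearest_real_complex_idx(candidates, 0.85, 'complex')
    return real_idx, complex_idx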
def _zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. Although they can operate on analog filters, the results may
be sub-optimal.
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = _zpk2sos(z, p, k)
    The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> _zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> _zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return np.array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = np.zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
                # with, so that later we can set up a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
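# Illustrative sketch, not part of the original SciPy backport: a consistency
# check of _zpk2sos against plain transfer-function filtering for a low-order
# design, where both forms are numerically well behaved. The Butterworth
# design parameters are arbitrary example values.
def _demo_zpk2sos_consistency():
    from scipy import signal
    z, p, k = signal.butter(4, 0.2, output='zpk')
    b, a = signal.butter(4, 0.2, output='ba')
    sos = _zpk2sos(z, p, k)
    impulse = np.zeros(64)
    impulse[0] = 1.0
    y_sos = _sosfilt(sos, impulse)
    y_tf = lfilter(b, a, impulse)
    return np.allclose(y_sos, y_tf)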
def _sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> z, p, k = signal.ellip(13, 0.009, 80, 0.05, output='zpk')
>>> sos = _zpk2sos(z, p, k)
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = _sosfilt(sos, x)
>>> plt.figure() # doctest: +ELLIPSIS
<...Figure ...>
>>> plt.plot(y_tf, 'r', label='TF') # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at ...>]
>>> plt.plot(y_sos, 'k', label='SOS') # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at ...>]
>>> plt.legend(loc='best') # doctest: +ELLIPSIS
<matplotlib.legend.Legend object at ...>
>>> plt.show()
"""
x = np.asarray(x)
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = np.zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
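# Illustrative sketch, not part of the original SciPy backport: block-wise
# (streaming) filtering with the `zi` argument. Starting from zero initial
# conditions and carrying the returned state between chunks should match
# filtering the whole signal in one call. The signal length and the elliptic
# design parameters are arbitrary example values.
def _demo_sosfilt_streaming():
    from scipy import signal
    sos = _zpk2sos(*signal.ellip(6, 0.5, 60, 0.2, output='zpk'))
    x = np.random.RandomState(0).randn(128)
    zi = np.zeros((sos.shape[0], 2))  # zero initial conditions per section
    y_first, zi = _sosfilt(sos, x[:64], zi=zi)
    y_second, _ = _sosfilt(sos, x[64:], zi=zi)
    y_full = _sosfilt(sos, x)
    return np.allclose(np.concatenate([y_first, y_second]), y_full)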
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Filename: _sosfilt.py
# Purpose: Backport of Second-Order Section Filtering from SciPy 0.16.0
# Author: <NAME> + SciPy authors
# ---------------------------------------------------------------------
"""
Backport of Second-Order Section Filtering from SciPy 0.16.0
:copyright:
The ObsPy Development Team (<EMAIL>)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import numpy as np
from scipy.signal import lfilter, zpk2tf
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc.real)
[ 1. 2. 2. 2.]
>>> print(zc.imag)
[ 1. 1. 1. 2.]
>>> print(zr)
[ 1. 3. 4.]
"""
z = np.atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return np.array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = np.diff(np.concatenate(([0], same_real, [0])))
run_starts = np.where(diffs > 0)[0]
run_stops = np.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def _zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. Although they can operate on analog filters, the results may
be sub-optimal.
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = _zpk2sos(z, p, k)
    The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> _zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> _zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return np.array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = np.zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
                # with, so that later we can set up a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> z, p, k = signal.ellip(13, 0.009, 80, 0.05, output='zpk')
>>> sos = _zpk2sos(z, p, k)
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = _sosfilt(sos, x)
>>> plt.figure() # doctest: +ELLIPSIS
<...Figure ...>
>>> plt.plot(y_tf, 'r', label='TF') # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at ...>]
>>> plt.plot(y_sos, 'k', label='SOS') # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at ...>]
>>> plt.legend(loc='best') # doctest: +ELLIPSIS
<matplotlib.legend.Legend object at ...>
>>> plt.show()
"""
x = np.asarray(x)
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = np.zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True) | en | 0.783971 | #!/usr/bin/env python # -*- coding: utf-8 -*- # ---------------------------------------------------------------------- # Filename: _sosfilt.py # Purpose: Backport of Second-Order Section Filtering from SciPy 0.16.0 # Author: <NAME> + SciPy authors # --------------------------------------------------------------------- Backport of Second-Order Section Filtering from SciPy 0.16.0 :copyright: The ObsPy Development Team (<EMAIL>) :license: GNU Lesser General Public License, Version 3 (https://www.gnu.org/copyleft/lesser.html) # NOQA Split into complex and real parts, combining conjugate pairs. The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`) elements. Every complex element must be part of a complex-conjugate pair, which are combined into a single number (with positive imaginary part) in the output. Two complex numbers are considered a conjugate pair if their real and imaginary parts differ in magnitude by less than ``tol * abs(z)``. Parameters ---------- z : array_like Vector of complex numbers to be sorted and split tol : float, optional Relative tolerance for testing realness and conjugate equality. Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for float64) Returns ------- zc : ndarray Complex elements of `z`, with each pair represented by a single value having positive imaginary part, sorted first by real part, and then by magnitude of imaginary part. The pairs are averaged when combined to reduce error. zr : ndarray Real elements of `z` (those having imaginary part less than `tol` times their magnitude), sorted by value. Raises ------ ValueError If there are any complex numbers in `z` for which a conjugate cannot be found. See Also -------- _cplxpair Examples -------- >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] >>> zc, zr = _cplxreal(a) >>> print(zc.real) [ 1. 2. 2. 2.] >>> print(zc.imag) [ 1. 1. 1. 2.] >>> print(zr) [ 1. 3. 4.] # Get tolerance from dtype of input # Sort by real part, magnitude of imaginary part (speed up further sorting) # Split reals from conjugate pairs # Input is entirely real # Split positive and negative halves of conjugates # Find runs of (approximately) the same real part # Sort each run by their imaginary parts # Check that negatives match positives # Average out numerical inaccuracy in real vs imag parts of pairs Get the next closest real or complex element based on distance Return second-order sections from zeros, poles, and gain of a system Parameters ---------- z : array_like Zeros of the transfer function. p : array_like Poles of the transfer function. k : float System gain. pairing : {'nearest', 'keep_odd'}, optional The method to use to combine pairs of poles and zeros into sections. See Notes below. Returns ------- sos : ndarray Array of second-order filter coefficients, with shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. See Also -------- sosfilt Notes ----- The algorithm used to convert ZPK to SOS format is designed to minimize errors due to numerical precision issues. The pairing algorithm attempts to minimize the peak gain of each biquadratic section. This is done by pairing poles with the nearest zeros, starting with the poles closest to the unit circle. *Algorithms* The current algorithms are designed specifically for use with digital filters. Although they can operate on analog filters, the results may be sub-optimal. 
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'`` algorithms are mostly shared. The ``nearest`` algorithm attempts to minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under the constraint that odd-order systems should retain one section as first order. The algorithm steps and are as follows: As a pre-processing step, add poles or zeros to the origin as necessary to obtain the same number of poles and zeros for pairing. If ``pairing == 'nearest'`` and there are an odd number of poles, add an additional pole and a zero at the origin. The following steps are then iterated over until no more poles or zeros remain: 1. Take the (next remaining) pole (complex or real) closest to the unit circle to begin a new filter section. 2. If the pole is real and there are no other remaining real poles [#]_, add the closest real zero to the section and leave it as a first order section. Note that after this step we are guaranteed to be left with an even number of real poles, complex poles, real zeros, and complex zeros for subsequent pairing iterations. 3. Else: 1. If the pole is complex and the zero is the only remaining real zero*, then pair the pole with the *next* closest zero (guaranteed to be complex). This is necessary to ensure that there will be a real zero remaining to eventually create a first-order section (thus keeping the odd order). 2. Else pair the pole with the closest remaining zero (complex or real). 3. Proceed to complete the second-order section by adding another pole and zero to the current pole and zero in the section: 1. If the current pole and zero are both complex, add their conjugates. 2. Else if the pole is complex and the zero is real, add the conjugate pole and the next closest real zero. 3. Else if the pole is real and the zero is complex, add the conjugate zero and the real pole closest to those zeros. 4. Else (we must have a real pole and real zero) add the next real pole closest to the unit circle, and then add the real zero closest to that pole. .. [#] This conditional can only be met for specific odd-order inputs with the ``pairing == 'keep_odd'`` method. Examples -------- Design a 6th order low-pass elliptic digital filter for a system with a sampling rate of 8000 Hz that has a pass-band corner frequency of 1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and the attenuation in the stop-band should be at least 90 dB. In the following call to `signal.ellip`, we could use ``output='sos'``, but for this example, we'll use ``output='zpk'``, and then convert to SOS format with `zpk2sos`: >>> from scipy import signal >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk') Now convert to SOS format. >>> sos = _zpk2sos(z, p, k) The coefficents of the numerators of the sections: >>> sos[:, :3] array([[ 0.0014154 , 0.00248707, 0.0014154 ], [ 1. , 0.72965193, 1. ], [ 1. , 0.17594966, 1. ]]) The symmetry in the coefficients occurs because all the zeros are on the unit circle. The coefficients of the denominators of the sections: >>> sos[:, 3:] array([[ 1. , -1.32543251, 0.46989499], [ 1. , -1.26117915, 0.6262586 ], [ 1. , -1.25707217, 0.86199667]]) The next example shows the effect of the `pairing` option. We have a system with three poles and three zeros, so the SOS array will have shape (2, 6). The means there is, in effect, an extra pole and an extra zero at the origin in the SOS representation. 
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j]) >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j]) With ``pairing='nearest'`` (the default), we obtain >>> _zpk2sos(z1, p1, 1) array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ], [ 1. , 1. , 0. , 1. , -1.6 , 0.65]]) The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles {0, 0.75}, and the second section has the zeros {-1, 0} and poles {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin have been assigned to different sections. With ``pairing='keep_odd'``, we obtain: >>> _zpk2sos(z1, p1, 1, pairing='keep_odd') array([[ 1. , 1. , 0. , 1. , -0.75, 0. ], [ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]]) The extra pole and zero at the origin are in the same section. The first section is, in effect, a first-order section. # TODO in the near future: # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259). # 2. Make `decimate` use `sosfilt` instead of `lfilter`. # 3. Make sosfilt automatically simplify sections to first order # when possible. Note this might make `sosfiltfilt` a bit harder (ICs). # 4. Further optimizations of the section ordering / pole-zero pairing. # See the wiki for other potential issues. # ensure we have the same number of poles and zeros, and make copies # Ensure we have complex conjugate pairs # (note that _cplxreal only gives us one element of each complex pair): # Select the next "worst" pole # Pair that pole with a zero # Special case to set a first-order section # Special case to ensure we choose a complex zero to pair # with so later (setting up a first-order section) # Pair the pole with the closest zero (real or complex) # Now that we have p1 and z1, figure out what p2 and z2 need to be # complex pole, complex zero # complex pole, real zero # real pole, complex zero # real pole, real zero # pick the next "worst" pole to use # find a real zero to match the added pole # we've consumed all poles and zeros # Construct the system, reversing order so the "worst" are last Filter data along one dimension using cascaded second-order sections Filter a data sequence, `x`, using a digital IIR filter defined by `sos`. This is implemented by performing `lfilter` for each second-order section. See `lfilter` for details. Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. Each row corresponds to a second-order section, with the first three columns providing the numerator coefficients and the last three providing the denominator coefficients. x : array_like An N-dimensional input array. axis : int, optional The axis of the input data array along which to apply the linear filter. The filter is applied to each subarray along this axis. Default is -1. zi : array_like, optional Initial conditions for the cascaded filter delays. It is a (at least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]`` replaced by 2. If `zi` is None or is not given then initial rest (i.e. all zeros) is assumed. Note that these initial conditions are *not* the same as the initial conditions given by `lfiltic` or `lfilter_zi`. Returns ------- y : ndarray The output of the digital filter. zf : ndarray, optional If `zi` is None, this is not returned, otherwise, `zf` holds the final filter delay values. See Also -------- zpk2sos, sos2zpk, sosfilt_zi Notes ----- The filter function is implemented as a series of second-order filters with direct-form II transposed structure. 
It is designed to minimize numerical precision errors for high-order filters. Examples -------- Plot a 13th-order filter's impulse response using both `lfilter` and `sosfilt`, showing the instability that results from trying to do a 13th-order filter in a single stage (the numerical error pushes some poles outside of the unit circle): >>> import matplotlib.pyplot as plt >>> from scipy import signal >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba') >>> z, p, k = signal.ellip(13, 0.009, 80, 0.05, output='zpk') >>> sos = _zpk2sos(z, p, k) >>> x = np.zeros(700) >>> x[0] = 1. >>> y_tf = signal.lfilter(b, a, x) >>> y_sos = _sosfilt(sos, x) >>> plt.figure() # doctest: +ELLIPSIS <...Figure ...> >>> plt.plot(y_tf, 'r', label='TF') # doctest: +ELLIPSIS [<matplotlib.lines.Line2D object at ...>] >>> plt.plot(y_sos, 'k', label='SOS') # doctest: +ELLIPSIS [<matplotlib.lines.Line2D object at ...>] >>> plt.legend(loc='best') # doctest: +ELLIPSIS <matplotlib.legend.Legend object at ...> >>> plt.show() | 2.323811 | 2 |
example/migrations/0001_initial.py | tim-mccurrach/django-s3-upload | 26 | 6632828 | # Generated by Django 3.1 on 2020-08-25 12:15
import django.db.models.deletion
from django.db import migrations, models
import s3upload.fields
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Cat",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"custom_filename",
s3upload.fields.S3UploadField(blank=True, dest="custom_filename"),
),
],
),
migrations.CreateModel(
name="Kitten",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("video", s3upload.fields.S3UploadField(blank=True, dest="vids")),
("image", s3upload.fields.S3UploadField(blank=True, dest="imgs")),
("pdf", s3upload.fields.S3UploadField(blank=True, dest="files")),
(
"mother",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="example.cat"
),
),
],
),
]
| # Generated by Django 3.1 on 2020-08-25 12:15
import django.db.models.deletion
from django.db import migrations, models
import s3upload.fields
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Cat",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"custom_filename",
s3upload.fields.S3UploadField(blank=True, dest="custom_filename"),
),
],
),
migrations.CreateModel(
name="Kitten",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("video", s3upload.fields.S3UploadField(blank=True, dest="vids")),
("image", s3upload.fields.S3UploadField(blank=True, dest="imgs")),
("pdf", s3upload.fields.S3UploadField(blank=True, dest="files")),
(
"mother",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="example.cat"
),
),
],
),
]
| en | 0.751062 | # Generated by Django 3.1 on 2020-08-25 12:15 | 1.714233 | 2 |
samples/model-builder/create_batch_prediction_job_sample_test.py | dizcology/python-aiplatform | 180 | 6632829 | <gh_stars>100-1000
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import create_batch_prediction_job_sample
import test_constants as constants
def test_create_batch_prediction_job_sample(
mock_sdk_init, mock_model, mock_init_model, mock_batch_predict_model
):
create_batch_prediction_job_sample.create_batch_prediction_job_sample(
project=constants.PROJECT,
location=constants.LOCATION,
model_resource_name=constants.MODEL_NAME,
job_display_name=constants.DISPLAY_NAME,
gcs_source=constants.GCS_SOURCES,
gcs_destination=constants.GCS_DESTINATION,
)
mock_sdk_init.assert_called_once_with(
project=constants.PROJECT, location=constants.LOCATION
)
mock_init_model.assert_called_once_with(constants.MODEL_NAME)
mock_batch_predict_model.assert_called_once_with(
job_display_name=constants.DISPLAY_NAME,
gcs_source=constants.GCS_SOURCES,
gcs_destination_prefix=constants.GCS_DESTINATION,
sync=True,
)
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import create_batch_prediction_job_sample
import test_constants as constants
def test_create_batch_prediction_job_sample(
mock_sdk_init, mock_model, mock_init_model, mock_batch_predict_model
):
create_batch_prediction_job_sample.create_batch_prediction_job_sample(
project=constants.PROJECT,
location=constants.LOCATION,
model_resource_name=constants.MODEL_NAME,
job_display_name=constants.DISPLAY_NAME,
gcs_source=constants.GCS_SOURCES,
gcs_destination=constants.GCS_DESTINATION,
)
mock_sdk_init.assert_called_once_with(
project=constants.PROJECT, location=constants.LOCATION
)
mock_init_model.assert_called_once_with(constants.MODEL_NAME)
mock_batch_predict_model.assert_called_once_with(
job_display_name=constants.DISPLAY_NAME,
gcs_source=constants.GCS_SOURCES,
gcs_destination_prefix=constants.GCS_DESTINATION,
sync=True,
) | en | 0.859583 | # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.934279 | 2 |
python_autocomplete/dataset/__init__.py | wwnew/python_autocomplete | 116 | 6632830 | <filename>python_autocomplete/dataset/__init__.py<gh_stars>100-1000
import string
from typing import Dict, List, Tuple
ID_CHARS = set(string.ascii_letters + string.digits + '_')
class Tokenizer:
n_tokens: int
itos: List[str]
stoi: Dict[str, int]
is_trained: int
def encode(self, data: str, *, is_silent: bool = True):
raise NotImplementedError
def train(self, data: str):
pass
def rstrip(self, data: str) -> Tuple[str, List[int]]:
return data, self.encode(data)
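# Illustrative sketch, not part of the original package: a minimal
# character-level subclass showing how the Tokenizer interface above is meant
# to be filled in. The class name and the skip-unknown-characters policy are
# assumptions made for this example.
class _CharTokenizerExample(Tokenizer):
    def __init__(self):
        self.itos = sorted(ID_CHARS)
        self.stoi = {c: i for i, c in enumerate(self.itos)}
        self.n_tokens = len(self.itos)
        self.is_trained = 1

    def encode(self, data: str, *, is_silent: bool = True):
        # Characters outside the vocabulary are silently skipped here.
        return [self.stoi[c] for c in data if c in self.stoi]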
| <filename>python_autocomplete/dataset/__init__.py<gh_stars>100-1000
import string
from typing import Dict, List, Tuple
ID_CHARS = set(string.ascii_letters + string.digits + '_')
class Tokenizer:
n_tokens: int
itos: List[str]
stoi: Dict[str, int]
is_trained: int
def encode(self, data: str, *, is_silent: bool = True):
raise NotImplementedError
def train(self, data: str):
pass
def rstrip(self, data: str) -> Tuple[str, List[int]]:
return data, self.encode(data)
| none | 1 | 2.741866 | 3 |
|
python/FindAllAnagramsinaString.py | JumHorn/leetcode | 1 | 6632831 | <gh_stars>1-10
from typing import List
class Solution:
def findAnagrams(self, s: str, p: str) -> List[int]:
shash, phash, res = [0] * 26, [0] * 26, []
slen, plen = len(s), len(p)
if slen < plen:
return res
for c in p:
phash[ord(c) - ord('a')] += 1
for i in range(plen):
shash[ord(s[i]) - ord('a')] += 1
if shash == phash:
res.append(0)
for i in range(slen - plen):
shash[ord(s[i]) - ord('a')] -= 1
shash[ord(s[i+plen]) - ord('a')] += 1
if shash == phash:
res.append(i + 1)
return res
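# Illustrative usage sketch, not part of the original solution file: the
# classic example from the problem statement.
if __name__ == "__main__":
    # Anagrams of "abc" start at indices 0 ("cba") and 6 ("bac").
    print(Solution().findAnagrams("cbaebabacd", "abc"))  # [0, 6]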
| from typing import List
class Solution:
def findAnagrams(self, s: str, p: str) -> List[int]:
shash, phash, res = [0] * 26, [0] * 26, []
slen, plen = len(s), len(p)
if slen < plen:
return res
for c in p:
phash[ord(c) - ord('a')] += 1
for i in range(plen):
shash[ord(s[i]) - ord('a')] += 1
if shash == phash:
res.append(0)
for i in range(slen - plen):
shash[ord(s[i]) - ord('a')] -= 1
shash[ord(s[i+plen]) - ord('a')] += 1
if shash == phash:
res.append(i + 1)
return res | none | 1 | 3.212631 | 3 |
|
olympic_games/migrations/0002_auto_20190514_2130.py | PircK/OPB | 0 | 6632832 | <gh_stars>0
# Generated by Django 2.2.1 on 2019-05-14 21:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('olympic_games', '0001_initial'),
]
operations = [
]
| # Generated by Django 2.2.1 on 2019-05-14 21:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('olympic_games', '0001_initial'),
]
operations = [
] | en | 0.731575 | # Generated by Django 2.2.1 on 2019-05-14 21:30 | 1.343032 | 1 |
tensorflow_lattice/python/pwl_calibration_lib.py | liar7252/lattice | 0 | 6632833 | <filename>tensorflow_lattice/python/pwl_calibration_lib.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of algorithms required for PWL calibration layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from enum import Enum
import six
import tensorflow as tf
class BoundConstraintsType(Enum):
"""Type of bound constraints for PWL calibration.
- NONE: no constraints.
- BOUND: output range can be anywhere within bounds.
- CLAMPED: output range must exactly match bounds.
"""
NONE = 0
BOUND = 1
CLAMPED = 2
def convert_all_constraints(output_min, output_max, clamp_min, clamp_max):
"""Converts parameters of PWL calibration layer to internal format.
Args:
output_min: None for unconstrained bound or some numeric value.
output_max: None for unconstrained bound or some numeric value.
clamp_min: Whether to clamp pwl calibrator to value if `output_min` is not
None.
clamp_max: Whether to clamp pwl calibrator to value if `output_max` is not
None.
Returns:
"value" as float and appropriate value of
`tfl.pwl_calibration_lib.BoundConstraintsType` enum which corresponds to
`output_min(max)` and `clamp_min(max)`.
"""
if output_min is None:
output_max, output_max_constraints = _convert_constraints(
output_max, clamp_max)
output_min = output_max
output_min_constraints = BoundConstraintsType.NONE
elif output_max is None:
output_min, output_min_constraints = _convert_constraints(
output_min, clamp_min)
output_max = output_min
output_max_constraints = BoundConstraintsType.NONE
else:
output_min, output_min_constraints = _convert_constraints(
output_min, clamp_min)
output_max, output_max_constraints = _convert_constraints(
output_max, clamp_max)
return output_min, output_max, output_min_constraints, output_max_constraints
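# Illustrative sketch, not part of the original library code: how the public
# layer arguments map onto the internal representation. The numeric bound is
# an arbitrary example value.
def _demo_convert_all_constraints():
  result = convert_all_constraints(
      output_min=0.0, output_max=None, clamp_min=True, clamp_max=False)
  # Returns (0.0, 0.0, BoundConstraintsType.CLAMPED, BoundConstraintsType.NONE):
  # the missing upper bound is mirrored from the lower one but marked as
  # unconstrained.
  return result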
def _convert_constraints(value, clamp_to_value):
"""Converts constraints for output_min/max to internal format.
Args:
value: None for unconstrained bound or some numeric value.
clamp_to_value: Whether to clamp pwl calibrator to value if value isn't None
Returns:
"value" as float and appropriate value of
`tfl.pwl_calibration_lib.BoundConstraintsType` enum which
corresponds to `value` and `clamp_to_value`.
"""
if value is None:
return 0.0, BoundConstraintsType.NONE
else:
value = float(value)
if clamp_to_value:
return value, BoundConstraintsType.CLAMPED
else:
return value, BoundConstraintsType.BOUND
def compute_interpolation_weights(inputs, keypoints, lengths):
"""Computes weights for PWL calibration.
Args:
    inputs: Tensor of shape: `(D0, D1, ..., DN, 1)` which represents inputs to
      the pwl function. A typical shape is: `(batch_size, 1)`.
keypoints: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
left keypoint of pieces of piecewise linear function along X axis.
lengths: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
lengths of pieces of piecewise linear function along X axis.
Returns:
Interpolation weights tensor of shape: `(D0, D1, ..., DN, num_keypoints)`.
"""
weights = (inputs - keypoints) / lengths
weights = tf.minimum(weights, 1.0)
weights = tf.maximum(weights, 0.0)
# Prepend 1.0 at the beginning to add bias unconditionally.
return tf.concat([tf.ones_like(inputs), weights], axis=-1)
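# Illustrative sketch, not part of the original library code: interpolation
# weights for a calibrator with input keypoints [0, 1, 3], i.e. left keypoints
# [0, 1] and piece lengths [1, 2]. The input values are arbitrary examples.
def _demo_interpolation_weights():
  inputs = tf.constant([[0.5], [2.0]], dtype=tf.float32)
  keypoints = tf.constant([0.0, 1.0], dtype=tf.float32)
  lengths = tf.constant([1.0, 2.0], dtype=tf.float32)
  # Rows are [bias, fraction of piece 1, fraction of piece 2]:
  # input 0.5 -> [1.0, 0.5, 0.0]; input 2.0 -> [1.0, 1.0, 0.5].
  return compute_interpolation_weights(inputs, keypoints, lengths)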
def linear_initializer(shape,
output_min,
output_max,
monotonicity,
keypoints=None,
dtype=None):
"""Initializes PWL calibration layer to represent linear function.
  PWL calibration layer weights have shape `(num_keypoints, units)`. The first
  row represents the bias. All remaining rows represent the delta in y-value
  compared to the previous point, i.e. the heights of the segments.
Args:
shape: Requested shape. Must be `(num_keypoints, units)`.
output_min: Minimum value of PWL calibration output after initialization.
output_max: Maximum value of PWL calibration output after initialization.
monotonicity: If one of {0, 1}, the returned function will go from
`(input_min, output_min)` to `(input_max, output_max)`. If set to -1, the
returned function will go from `(input_min, output_max)` to `(input_max,
output_min)`.
keypoints: If not provided (None or []), all pieces of returned function
will have equal heights (i.e. `y[i+1] - y[i]` is constant). If provided,
all pieces of returned function will have equal slopes (i.e. `(y[i+1] -
y[i]) / (x[i+1] - x[i])` is constant).
dtype: dtype.
Returns:
PWLCalibration layer weights initialized according to params.
Raises:
ValueError: If given parameters are inconsistent.
"""
verify_hyperparameters(
input_keypoints=keypoints,
output_min=output_min,
output_max=output_max,
monotonicity=monotonicity,
weights_shape=shape)
num_keypoints, units = int(shape[0]), int(shape[1])
if keypoints is None:
# Subtract 1 for bias which will be handled separately.
num_pieces = num_keypoints - 1
segment_height = (output_max - output_min) / num_pieces
heights_tensor = tf.constant(
[segment_height] * num_pieces, shape=[num_pieces, 1], dtype=dtype)
else:
keypoints_tensor = tf.constant(
keypoints, shape=[num_keypoints, 1], dtype=dtype)
lengths_tensor = keypoints_tensor[1:] - keypoints_tensor[0:-1]
output_range = output_max - output_min
heights_tensor = (
lengths_tensor * (output_range / tf.reduce_sum(lengths_tensor)))
if units > 1:
heights_tensor = tf.tile(heights_tensor, multiples=[1, units])
if monotonicity == -1:
bias = output_max
heights_tensor = -heights_tensor
else:
bias = output_min
bias_tensor = tf.constant(bias, shape=[1, units], dtype=dtype)
return tf.concat([bias_tensor, heights_tensor], axis=0)
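# Illustrative sketch, not part of the original library code: a single-unit
# calibrator with 3 keypoints initialized to an increasing linear function
# from 0 to 1. The resulting weights column is [0.0, 0.5, 0.5]: bias first,
# then equal segment heights.
def _demo_linear_initializer():
  return linear_initializer(
      shape=(3, 1), output_min=0.0, output_max=1.0, monotonicity=1,
      dtype=tf.float32)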
def _approximately_project_bounds_only(bias, heights, output_min, output_max,
output_min_constraints,
output_max_constraints):
"""Bounds constraints implementation for PWL calibration layer.
Maps given weights of PWL calibration layer into some point which satisfies
given bounds by capping the function based on the bounds. This is not an exact
projection in L2 norm, but it is sufficiently accurate and efficient in
practice for non monotonic functions.
Args:
bias: `(1, units)`-shape tensor which represents bias.
heights: `(num_heights, units)`-shape tensor which represents heights.
output_min: Minimum possible output of pwl function.
output_max: Maximum possible output of pwl function.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
Raises:
ValueError: If `output_min(max)_constraints` is set to "CLAMPED" which is
not supported.
Returns:
Projected bias and heights.
"""
if (output_min_constraints == BoundConstraintsType.CLAMPED or
output_max_constraints == BoundConstraintsType.CLAMPED):
raise ValueError("Clamping is not implemented for non monotonic functions.")
if (output_min_constraints == BoundConstraintsType.NONE and
output_max_constraints == BoundConstraintsType.NONE):
return bias, heights
# Compute cumulative sums - they correspond to our calibrator outputs at
# keypoints. Simply clip them according to config and compute new heights
# using clipped cumulative sums.
sums = tf.cumsum(tf.concat([bias, heights], axis=0))
if output_min_constraints == BoundConstraintsType.BOUND:
sums = tf.maximum(sums, output_min)
if output_max_constraints == BoundConstraintsType.BOUND:
sums = tf.minimum(sums, output_max)
bias = sums[0:1]
heights = sums[1:] - sums[:-1]
return bias, heights
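# Minimal sketch (hypothetical helper and values): demonstrates the
# cumulative-sum clipping above. A calibrator whose keypoint outputs are
# [0.5, 1.5, 2.5] gets capped at output_max=2.0, which shortens only the last
# segment.
def _example_approximate_bounds_projection():
  bias = tf.constant([[0.5]])
  heights = tf.constant([[1.0], [1.0]])  # Keypoint outputs: 0.5, 1.5, 2.5.
  new_bias, new_heights = _approximately_project_bounds_only(
      bias=bias,
      heights=heights,
      output_min=0.0,
      output_max=2.0,
      output_min_constraints=BoundConstraintsType.BOUND,
      output_max_constraints=BoundConstraintsType.BOUND)
  # Clipped keypoint outputs become 0.5, 1.5, 2.0, so the new heights are
  # [[1.0], [0.5]].
  return new_bias, new_heights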
def _project_bounds_considering_monotonicity(bias, heights, monotonicity,
output_min, output_max,
output_min_constraints,
output_max_constraints):
"""Bounds projection given monotonicity constraints.
Projects weights of PWLCalibration layer into nearest in terms of l2 distance
point which satisfies bounds constraints taking into account that function
is monotonic.
Algorithm:
To minimize L2 distance to projected point we want to distribute update
through heights as evenly as possible. A simplified description of the
  algorithm, considering only an increasing function, is as follows:
```
delta = (output_max - (bias + sum(heights[:]))) / (num_heights + 1)
bias = max(bias + delta, output_min)
heights[:] += delta
```
Some details which were omitted above:
  * If `output_min_constraints == "CLAMPED"` then the `bias` variable becomes
    constant (this means we can't add delta to it).
  * If `output_max_constraints != "CLAMPED"` we look only for a negative delta
    because we are not required to stretch the function to meet the upper
    bound.
* If function is decreasing we multiply everything by -1 and switch min and
max to make it increasing.
Args:
bias: `(1, units)`-shape tensor which represents bias.
heights: `(num_heights, units)`-shape tensor which represents heights.
monotonicity: 1 for increasing, -1 for decreasing.
output_min: Lower bound constraint of PWL calibration layer.
output_max: Upper bound constraint of PWL calibration layer.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
Returns:
Projected bias and heights tensors.
Raises:
ValueError: If monotonicity is not in: {-1, 1}
"""
if monotonicity not in [-1, 1]:
raise ValueError("Monotonicity should be one of: [-1, 1]. It is: " +
str(monotonicity))
if monotonicity == -1:
# Reduce computation of projection of decreasing function to computation of
# projection of increasing function by multiplying everything by -1 and
# swapping maximums and minimums.
(projected_bias,
projected_heights) = _project_bounds_considering_monotonicity(
bias=-bias,
heights=-heights,
monotonicity=1,
output_min=None if output_max is None else -output_max,
output_max=None if output_min is None else -output_min,
output_min_constraints=output_max_constraints,
output_max_constraints=output_min_constraints)
return -projected_bias, -projected_heights
bct = BoundConstraintsType
if output_max_constraints != bct.NONE:
num_heights = float(heights.shape.dims[0].value)
sum_heights = tf.reduce_sum(heights, axis=0)
# For each possible output_min_constraints value compute projected bias and
# heights_delta.
if output_min_constraints == bct.CLAMPED:
# If output_min is clamped - bias must have fixed value and number of free
# parameters is equal to number of heights.
bias = tf.constant(output_min, shape=bias.shape, dtype=bias.dtype)
heights_delta = (output_max - (bias + sum_heights)) / num_heights
elif output_min_constraints == bct.BOUND:
# If output_min is not clamped then number of free parameters is
# num_heights + 1.
bias_delta = (output_max - (bias + sum_heights)) / (num_heights + 1)
if output_max_constraints != bct.CLAMPED:
# If output_max is not clamped - there is no need to stretch our
# function. We need only to squeeze it.
bias_delta = tf.minimum(bias_delta, 0.0)
bias = tf.maximum(bias + bias_delta, output_min)
# For this branch compute heights delta _after_ we applied bias projection
# because heights are not bound by output_min constraint unlike bias.
heights_delta = (output_max - (bias + sum_heights)) / num_heights
else:
bias_delta = (output_max - (bias + sum_heights)) / (num_heights + 1)
# For this branch heights delta and bias delta are same because none of
# them are bounded from below.
heights_delta = bias_delta
if output_max_constraints != bct.CLAMPED:
# If output_max is not clamped - there is no need to stretch our
# function. We need only to squeeze it.
bias_delta = tf.minimum(bias_delta, 0.0)
bias += bias_delta
if output_max_constraints != bct.CLAMPED:
# If output_max is not clamped - there is no need to stretch our function.
# We need only to squeeze it.
heights_delta = tf.minimum(heights_delta, 0.0)
heights += heights_delta
else:
# No need to do anything with heights if there are no output_max
# constraints.
if output_min_constraints == bct.CLAMPED:
bias = tf.constant(output_min, shape=bias.shape, dtype=bias.dtype)
elif output_min_constraints == bct.BOUND:
bias = tf.maximum(bias, output_min)
return bias, heights
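# Hedged example (hypothetical helper and numbers): an increasing calibrator
# with keypoint outputs [0.0, 2.0, 4.0] and bounds [0.0, 1.0]. The bias cannot
# drop below output_min, so it stays at 0.0 and the remaining excess is split
# evenly over the two heights, giving projected keypoint outputs
# [0.0, 0.5, 1.0].
def _example_bounds_projection_with_monotonicity():
  bias = tf.constant([[0.0]])
  heights = tf.constant([[2.0], [2.0]])
  return _project_bounds_considering_monotonicity(
      bias=bias,
      heights=heights,
      monotonicity=1,
      output_min=0.0,
      output_max=1.0,
      output_min_constraints=BoundConstraintsType.BOUND,
      output_max_constraints=BoundConstraintsType.BOUND)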
def _project_convexity(heights, lengths, convexity, constraint_group):
"""Convexity projection for given 'constraint_group'.
Since an exact single step projection is not possible for convexity
constraints, we break the constraints into two independent groups and apply
Dykstra's alternating projections algorithm. Each group consists of a list of
  pairs where each pair represents constraints on 2 consecutive heights.
Groups:
```
g0 = [(h0, h1), (h2, h3), (h4, h5), ...]
g1 = [(h1, h2), (h3, h4), (h5, h6), ...]
```
We know how to project single pair of adjacent heights:
h0_prime = min/max(h0, (l0 / (l0 + l1)) * (h0 + h1))
h1_prime = min/max(h1, (l1 / (l0 + l1)) * (h0 + h1))
  where l0 and l1 stand for the lengths of the segments which correspond to h0
  and h1, and the choice of min or max depends on the convexity direction.
  All pairs within the same group are independent, so we know how to project
  such a group of constraints in a single pass.
This function breaks heights and their lengths into given constraint group
and does projection for this group.
Args:
heights: `(num_heights, units)`-shape tensor which represents heights.
lengths: `(num_heights)`-shape tensor which represents lengths of segments
which correspond to heights.
convexity: -1 or 1 where 1 stands for convex function and -1 for concave.
constraint_group: 0 or 1 which represent group from description above.
Returns:
Projected heights for given constraint group.
"""
verify_hyperparameters(
convexity=convexity,
lengths=lengths,
weights_shape=[heights.shape[0] + 1, heights.shape[1]])
if constraint_group not in [0, 1]:
raise ValueError("constraint_group must be one of: [0, 1]. "
"Given: %s" % constraint_group)
if convexity == 0 or heights.shape[0] == 1:
return heights
num_heights = heights.shape.dims[0].value
# To avoid broadcasting when performing math ops with 'heights'.
lengths = tf.reshape(lengths, shape=(-1, 1))
  # Split heights and lengths into pairs which correspond to given constraint
# group. In order to do this we need to split heights into odd and even. We
# can possibly omit last element of larger set to ensure that both sets have
# same number of elements.
num_0 = (num_heights - constraint_group + 1) // 2
num_1 = (num_heights - constraint_group) // 2
if num_1 == num_0:
last_index = None
else:
last_index = -1
heights_0 = heights[constraint_group:last_index:2]
lengths_0 = lengths[constraint_group:last_index:2]
heights_1 = heights[constraint_group + 1::2]
lengths_1 = lengths[constraint_group + 1::2]
# h0_prime = (l0 / (l0 + l1)) * (h0 + h1) = l0 * base
# h1_prime = (l1 / (l0 + l1)) * (h0 + h1) = l1 * base
base = (heights_0 + heights_1) / (lengths_0 + lengths_1)
heights_0_prime = lengths_0 * base
heights_1_prime = lengths_1 * base
if convexity == 1:
heights_0 = tf.minimum(heights_0, heights_0_prime)
heights_1 = tf.maximum(heights_1, heights_1_prime)
else:
heights_0 = tf.maximum(heights_0, heights_0_prime)
heights_1 = tf.minimum(heights_1, heights_1_prime)
# Now we need to merge heights in such way that elements from 'heights_0' and
# 'heights_1' alternate:
# merged = [heights_0[0], heights_1[0], heights_0[1], heights_1[1], ...]
# Achieve this by concatenating along axis=1 so after concatenation elements
# from 'heights_0' and 'heights_1' will alternate in memory and reshape will
# give us desired result.
projected_heights = tf.reshape(
tf.concat([heights_0, heights_1], axis=1), shape=[-1, heights.shape[1]])
weights_pieces = [projected_heights]
if constraint_group == 1:
# First height was skipped during initial split.
weights_pieces = [heights[0:1]] + weights_pieces
if last_index == -1:
# Last height was skipped during initial split.
weights_pieces.append(heights[-1:])
if len(weights_pieces) == 1:
return weights_pieces[0]
else:
return tf.concat(weights_pieces, axis=0)
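# Illustrative sketch (hypothetical helper and data): alternates the two
# constraint groups described above on a 4-piece convex projection with
# unit-length segments.
def _example_convexity_groups():
  heights = tf.constant([[3.0], [1.0], [2.0], [0.5]])
  lengths = tf.constant([1.0, 1.0, 1.0, 1.0])
  heights = _project_convexity(heights, lengths, convexity=1, constraint_group=0)
  heights = _project_convexity(heights, lengths, convexity=1, constraint_group=1)
  # Each call only enforces convexity within its own group of pairs; repeatedly
  # alternating the groups (as Dykstra's algorithm does in
  # project_all_constraints) is what drives the heights toward full convexity.
  return heights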
def _project_monotonicity(heights, monotonicity):
"""Projects into monotonic function."""
if monotonicity == 0:
return heights
elif monotonicity == 1:
return tf.maximum(heights, 0.0)
else:
return tf.minimum(heights, 0.0)
def project_all_constraints(weights,
monotonicity,
output_min,
output_max,
output_min_constraints,
output_max_constraints,
convexity,
lengths,
num_projection_iterations=8):
"""Jointly projects into all supported constraints.
For all combinations of constraints except the case where bounds constraints
are specified without monotonicity constraints we properly project into
nearest point with respect to L2 norm. For later case we use a heuristic to
map input point into some feasible point with no guarantees on how close this
point is to the true projection.
If only bounds or only monotonicity constraints are specified there will be a
single step projection. For all other combinations of constraints we use
num_projection_iterations iterations of Dykstra's alternating projection
algorithm to jointly project onto all the given constraints. Dykstra's
algorithm gives us proper projection with respect to L2 norm but approaches it
from "wrong" side. That's why in order to ensure that constraints are strictly
met we'll do approximate projections in the end which project strictly into
feasible space, but it's not an exact projection with respect to the L2 norm.
With enough iterations of the Dykstra's algorithm, the impact of such
approximate projection should be negligible.
With bound and convexity constraints and no specified monotonicity, this
  method does not fully satisfy the constraints. Increasing the number of
iterations can reduce the constraint violation in such cases.
Args:
weights: `(num_keypoints, units)`-shape tensor which represents weights of
PWL calibration layer.
monotonicity: 1 for increasing, -1 for decreasing, 0 for no monotonicity
constraints.
output_min: Lower bound constraint of PWL calibration layer.
output_max: Upper bound constraint of PWL calibration layer.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
convexity: 1 for convex, -1 for concave, 0 for no convexity constraints.
lengths: Lengths of pieces of piecewise linear function. Needed only if
convexity projection is specified.
num_projection_iterations: Number of iterations of Dykstra's alternating
projection algorithm.
Returns:
Projected weights tensor.
"""
bias = weights[0:1]
heights = weights[1:]
def body(projection_counter, bias, heights, last_bias_change,
last_heights_change):
"""The body of tf.while_loop implementing a step of Dykstra's projection.
Args:
projection_counter: The counter tensor or number at the beginning of the
iteration.
bias: Bias tensor at the beginning of the iteration.
heights: Heights tensor at the beginning of the iteration.
last_bias_change: Dict that stores the last change in the bias after
projecting onto each subset of constraints.
last_heights_change: Dict that stores the last change in the heights after
projecting onto each subset of constraints.
Returns:
      The tuple `(projection_counter, bias, heights, last_bias_change,
last_heights_change)` at the end of the iteration.
"""
last_bias_change = copy.copy(last_bias_change)
last_heights_change = copy.copy(last_heights_change)
num_projections = 0
# ******************** BOUNDS *********************
bct = BoundConstraintsType
if output_min_constraints != bct.NONE or output_max_constraints != bct.NONE:
rolled_back_bias = bias - last_bias_change["BOUNDS"]
rolled_back_heights = heights - last_heights_change["BOUNDS"]
if monotonicity != 0:
bias, heights = _project_bounds_considering_monotonicity(
bias=rolled_back_bias,
heights=rolled_back_heights,
monotonicity=monotonicity,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints)
else:
bias, heights = _approximately_project_bounds_only(
bias=rolled_back_bias,
heights=rolled_back_heights,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints)
last_bias_change["BOUNDS"] = bias - rolled_back_bias
last_heights_change["BOUNDS"] = heights - rolled_back_heights
num_projections += 1
# ******************** MONOTONICITY *********************
if monotonicity != 0:
rolled_back_heights = heights - last_heights_change["MONOTONICITY"]
heights = _project_monotonicity(
heights=rolled_back_heights, monotonicity=monotonicity)
last_heights_change["MONOTONICITY"] = heights - rolled_back_heights
num_projections += 1
# ******************** CONVEXITY *********************
if convexity != 0:
if heights.shape[0] >= 2:
rolled_back_heights = heights - last_heights_change["CONVEXITY_0"]
heights = _project_convexity(
heights=rolled_back_heights,
lengths=lengths,
convexity=convexity,
constraint_group=0)
last_heights_change["CONVEXITY_0"] = heights - rolled_back_heights
num_projections += 1
if heights.shape[0] >= 3:
rolled_back_heights = heights - last_heights_change["CONVEXITY_1"]
heights = _project_convexity(
heights=rolled_back_heights,
lengths=lengths,
convexity=convexity,
constraint_group=1)
last_heights_change["CONVEXITY_1"] = heights - rolled_back_heights
num_projections += 1
return (projection_counter + num_projections, bias, heights,
last_bias_change, last_heights_change)
# Call the body of the loop once to see if Dykstra's is needed.
# If there is only one set of projections, apply it without a loop.
# Running the body of the loop also finds the required last_bias_change
# and last_heights_change keys. The set of keys in the input and output of the
# body of tf.while_loop must be the same across iterations.
zero_bias = tf.zeros_like(bias)
zero_heights = tf.zeros_like(heights)
last_bias_change = collections.defaultdict(lambda: zero_bias)
last_heights_change = collections.defaultdict(lambda: zero_heights)
(num_projections, projected_bias, projected_heights, last_bias_change,
last_heights_change) = body(0, bias, heights, last_bias_change,
last_heights_change)
if num_projections <= 1:
return tf.concat([projected_bias, projected_heights], axis=0)
def cond(projection_counter, bias, heights, last_bias_change,
last_heights_change):
del bias, heights, last_bias_change, last_heights_change
return tf.less(projection_counter,
num_projection_iterations * num_projections)
# Apply Dykstra's algorithm with tf.while_loop.
projection_counter = tf.constant(0)
last_bias_change = {k: zero_bias for k in last_bias_change}
last_heights_change = {k: zero_heights for k in last_heights_change}
(_, bias, heights, _,
_) = tf.while_loop(cond, body, (projection_counter, bias, heights,
last_bias_change, last_heights_change))
# Since Dykstra's algorithm is iterative in order to strictly meet constraints
# we use approximate projection algorithm to finalize them.
return _finalize_constraints(
bias=bias,
heights=heights,
monotonicity=monotonicity,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints,
convexity=convexity,
lengths=lengths)
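# Usage sketch (hypothetical helper and weights): jointly project a 4-keypoint,
# single-unit calibrator onto "increasing" plus "output in [0, 1]" constraints.
# This mirrors what a PWL calibration layer does to its kernel after a gradient
# update.
def _example_project_all_constraints():
  weights = tf.constant([[0.2], [-0.5], [0.8], [0.6]])
  return project_all_constraints(
      weights=weights,
      monotonicity=1,
      output_min=0.0,
      output_max=1.0,
      output_min_constraints=BoundConstraintsType.BOUND,
      output_max_constraints=BoundConstraintsType.BOUND,
      convexity=0,
      lengths=None,
      num_projection_iterations=8)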
def _squeeze_by_scaling(bias, heights, monotonicity, output_min, output_max,
output_min_constraints, output_max_constraints):
"""Squeezes monotonic calibrators by scaling in order to meet bounds.
Projection by scaling is not exact with respect to the L2 norm, but maintains
convexity unlike projection by shift.
Args:
bias: `(1, units)`-shape tensor which represents bias.
heights: `(num_heights, units)`-shape tensor which represents heights.
monotonicity: 1 for increasing, -1 for decreasing.
output_min: Lower bound constraint of PWL calibration layer.
output_max: Upper bound constraint of PWL calibration layer.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
Returns:
Projected bias and heights.
"""
if monotonicity == -1:
if output_min_constraints == BoundConstraintsType.NONE:
return bias, heights
# Reduce computation of projection of decreasing function to computation of
# projection of increasing function by multiplying everything by -1 and
# swapping maximums and minimums.
bias, heights = _squeeze_by_scaling(
bias=-bias,
heights=-heights,
monotonicity=1,
output_min=None if output_max is None else -output_max,
output_max=None if output_min is None else -output_min,
output_min_constraints=output_max_constraints,
output_max_constraints=output_min_constraints)
return -bias, -heights
if output_max_constraints == BoundConstraintsType.NONE:
return bias, heights
delta = output_max - bias
# For better stability use tf.where rather than the more standard approach:
# heights *= tf.reduce_sum(heights) / max(delta, eps)
  # in order to keep everything strictly unchanged for small deltas, rather
  # than increasing heights by a factor of 1/eps while still not meeting the
  # constraints.
scaling_factor = tf.where(delta > 0.001,
tf.reduce_sum(heights, axis=0) / delta,
tf.ones_like(delta))
heights = heights / tf.maximum(scaling_factor, 1.0)
return bias, heights
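# Hedged sketch (hypothetical helper and values): an increasing calibrator
# spanning [bias, bias + sum(heights)] = [0.0, 2.0] is squeezed to respect
# output_max=1.0 by scaling every height by 0.5, which preserves convexity.
def _example_squeeze_by_scaling():
  bias = tf.constant([[0.0]])
  heights = tf.constant([[1.5], [0.5]])
  return _squeeze_by_scaling(
      bias=bias,
      heights=heights,
      monotonicity=1,
      output_min=0.0,
      output_max=1.0,
      output_min_constraints=BoundConstraintsType.BOUND,
      output_max_constraints=BoundConstraintsType.BOUND)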
def _approximately_project_convexity(heights, lengths, convexity):
"""Strictly projects convexity, but is not exact with respect to the L2 norm.
Projects by iterating over pieces of piecewise linear function left to right
and aligning current slope with previous one if it violates convexity.
Args:
heights: `(num_heights, units)`-shape tensor which represents heights.
lengths: `(num_heights)`-shape tensor which represents lengths of segments
which correspond to heights.
convexity: -1 or 1 where 1 stands for convex function and -1 for concave.
Returns:
Projected heights.
"""
if convexity == 0:
return heights
heights = tf.unstack(heights, axis=0)
lengths = tf.unstack(lengths, axis=0)
for i in range(1, len(heights)):
temp = heights[i - 1] * (lengths[i] / lengths[i - 1])
if convexity == 1:
heights[i] = tf.maximum(heights[i], temp)
else:
heights[i] = tf.minimum(heights[i], temp)
return tf.stack(heights, axis=0)
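# Small sketch (hypothetical helper and data): with unit-length segments each
# height equals the segment slope, so enforcing convexity simply raises any
# slope that falls below its predecessor: [2.0, 1.0, 3.0] -> [2.0, 2.0, 3.0].
def _example_approximate_convexity_projection():
  heights = tf.constant([[2.0], [1.0], [3.0]])
  lengths = tf.constant([1.0, 1.0, 1.0])
  return _approximately_project_convexity(heights, lengths, convexity=1)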
def _finalize_constraints(bias, heights, monotonicity, output_min, output_max,
output_min_constraints, output_max_constraints,
convexity, lengths):
"""Strictly projects onto the given constraint, approximate w.r.t the L2 norm.
Dykstra's algorithm gives us proper projection with respect to L2 norm but
approaches it from "wrong" side. In order to ensure that constraints are
strictly met we'll do approximate projections in the end which project
strictly into feasible space, but it's not an exact projection with respect to
the L2 norm. With enough iterations of the Dykstra's algorithm, the impact of
such approximate projection should be negligible.
With bound and convexity constraints and no specified monotonicity, this
  method does not fully satisfy the constraints. Increasing the number of
  iterations can reduce the constraint violation in such cases. Fortunately
  this does not seem to be a common configuration.
Args:
bias: `(1, units)`-shape tensor which represents bias.
heights: `(num_heights, units)`-shape tensor which represents heights.
monotonicity: 1 for increasing, -1 for decreasing, 0 for no monotonicity
constraints.
output_min: Lower bound constraint of PWL calibration layer.
output_max: Upper bound constraint of PWL calibration layer.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
convexity: 1 for convex, -1 for concave, 0 for no convexity constraints.
lengths: Lengths of pieces of piecewise linear function. Needed only if
convexity projection is specified.
Returns:
Projected weights tensor.
"""
# Convexity and monotonicity projections don't violate each other, but both
# might lead to bounds violation, so do them first and fix bounds after.
if monotonicity != 0:
heights = _project_monotonicity(heights=heights, monotonicity=monotonicity)
if convexity != 0:
heights = _approximately_project_convexity(
heights=heights, lengths=lengths, convexity=convexity)
bct = BoundConstraintsType
if output_min_constraints != bct.NONE or output_max_constraints != bct.NONE:
if monotonicity != 0 and convexity != 0:
# Both monotonicity and convexity projection can only increase upper bound
# so we only need to take care of decreasing it back.
bias, heights = _squeeze_by_scaling(
bias=bias,
heights=heights,
monotonicity=monotonicity,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints)
else:
      # This bounds projection might violate convexity. Unfortunately, bounds
      # projections with convexity but without monotonicity are difficult to
      # achieve strictly and might be violated, so we ignore this for now. In
      # order to minimize the projection error consider increasing
      # num_projection_iterations.
if output_min_constraints == bct.CLAMPED:
output_min_constraints = bct.BOUND
if output_max_constraints == bct.CLAMPED:
output_max_constraints = bct.BOUND
bias, heights = _approximately_project_bounds_only(
bias=bias,
heights=heights,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints)
return tf.concat([bias, heights], axis=0)
def assert_constraints(outputs,
monotonicity,
output_min,
output_max,
clamp_min=False,
clamp_max=False,
debug_tensors=None,
eps=1e-6):
"""Asserts that 'outputs' satisfiy constraints.
Args:
outputs: Tensor of shape `(num_output_values, units)` which represents
outputs of pwl calibration layer which will be tested against the given
constraints. If monotonicity is specified these outputs must be for
consequtive inputs.
monotonicity: One of {-1, 0, 1}. -1 for decreasing, 1 for increasing 0 means
no monotonicity checks.
output_min: Lower bound or None.
output_max: Upper bound or None.
clamp_min: Whether one of outputs must match output_min.
    clamp_max: Whether one of outputs must match output_max.
debug_tensors: None or list of anything convertible to tensor (for example
tensors or strings) which will be printed in case of constraints
violation.
eps: Allowed constraints violation.
Raises:
ValueError: If monotonicity is not one of {-1, 0, 1}
Returns:
    List of assertion ops in graph mode, or immediately asserts in eager mode.
"""
info = ["Outputs: ", outputs, "Epsilon: ", eps]
if debug_tensors:
info += debug_tensors
asserts = []
if output_min is not None:
min_output = tf.reduce_min(outputs, axis=0)
if clamp_min:
asserts.append(
tf.Assert(
tf.reduce_all(tf.abs(min_output - output_min) <= eps),
data=["Clamp_min violation.", "output_min:", output_min] + info,
summarize=outputs.shape[0]))
else:
asserts.append(
tf.Assert(
tf.reduce_all(min_output >= output_min - eps),
data=["Lower bound violation.", "output_min:", output_min] + info,
summarize=outputs.shape[0]))
if output_max is not None:
max_output = tf.reduce_max(outputs, axis=0)
if clamp_max:
asserts.append(
tf.Assert(
tf.reduce_all(tf.abs(max_output - output_max) <= eps),
data=["Clamp_max violation.", "output_max:", output_max] + info,
summarize=outputs.shape[0]))
else:
asserts.append(
tf.Assert(
tf.reduce_all(max_output <= output_max + eps),
data=["Upper bound violation.", "output_max:", output_max] + info,
summarize=outputs.shape[0]))
if monotonicity not in [-1, 0, 1]:
raise ValueError("'monotonicity' must be one of: [-1, 0, 1]. It is: %s" %
monotonicity)
if monotonicity != 0:
diffs = (outputs[1:] - outputs[0:-1])
asserts.append(
tf.Assert(
tf.reduce_min(diffs * monotonicity) >= -eps,
data=["Monotonicity violation.", "monotonicity:", monotonicity] +
info,
summarize=outputs.shape[0]))
return asserts
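# Hedged usage sketch (hypothetical helper): in graph mode the returned ops
# would be added as control dependencies; in eager mode (assumed here) the
# asserts fire immediately. These outputs satisfy all requested constraints,
# so no assertion triggers.
def _example_assert_constraints():
  outputs = tf.constant([[0.0], [0.4], [1.0]])
  return assert_constraints(
      outputs=outputs,
      monotonicity=1,
      output_min=0.0,
      output_max=1.0,
      clamp_min=True,
      clamp_max=True)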
def verify_hyperparameters(input_keypoints=None,
output_min=None,
output_max=None,
monotonicity=None,
convexity=None,
is_cyclic=False,
lengths=None,
weights_shape=None):
"""Verifies that all given hyperparameters are consistent.
See PWLCalibration class level comment for detailed description of arguments.
Args:
input_keypoints: `input_keypoints` of PWLCalibration layer.
output_min: Smallest output of PWLCalibration layer.
output_max: Largest output of PWLCalibration layer.
monotonicity: `monotonicity` hyperparameter of PWLCalibration layer.
convexity: `convexity` hyperparameter of PWLCalibration layer.
is_cyclic: `is_cyclic` hyperparameter of PWLCalibration layer.
lengths: Lengths of pieces of piecewise linear function.
weights_shape: Shape of weights of PWLCalibration layer.
Raises:
ValueError: If something is inconsistent.
"""
if input_keypoints is not None:
if tf.is_tensor(input_keypoints):
if len(input_keypoints.shape) != 1 or input_keypoints.shape[0] < 2:
raise ValueError("Input keypoints must be rank-1 tensor of size at "
"least 2. It is: " + str(input_keypoints))
else:
if len(input_keypoints) < 2:
raise ValueError("At least 2 input keypoints must be provided. "
"Given: " + str(input_keypoints))
if not all(input_keypoints[i] < input_keypoints[i + 1]
for i in range(len(input_keypoints) - 1)):
raise ValueError("Keypoints must be strictly increasing. They are: " +
str(input_keypoints))
if output_min is not None and output_max is not None:
if output_max < output_min:
raise ValueError("If specified output_max must be greater than "
"output_min. "
"They are: ({}, {})".format(output_min, output_max))
# It also raises errors if monotonicities specified incorrectly.
monotonicity = canonicalize_monotonicity(monotonicity)
convexity = canonicalize_convexity(convexity)
if is_cyclic and (monotonicity or convexity):
raise ValueError("'is_cyclic' can not be specified together with "
"'monotonicity'({}) or 'convexity'({}).".format(
monotonicity, convexity))
if weights_shape is not None:
if len(weights_shape) != 2 or weights_shape[0] < 2:
raise ValueError("PWLCalibrator weights must have shape: [k, units] where"
" k > 1. It is: " + str(weights_shape))
if lengths is not None and weights_shape is not None:
if tf.is_tensor(lengths):
num_lengths = lengths.shape[0]
else:
num_lengths = len(lengths)
if num_lengths + 1 != weights_shape[0]:
raise ValueError("Number of lengths must be equal to number of weights "
"minus one. Lengths: %s, weights_shape: %s" %
(lengths, weights_shape))
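# Illustrative sketch (hypothetical helper): the checks above are typically
# invoked with keyword arguments for whichever hyperparameters are known at
# call time. This combination passes; swapping output_min and output_max would
# raise a ValueError.
def _example_verify_hyperparameters():
  verify_hyperparameters(
      input_keypoints=[0.0, 0.5, 1.0],
      output_min=0.0,
      output_max=1.0,
      monotonicity="increasing",
      convexity="none")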
def canonicalize_monotonicity(monotonicity):
"""Converts string constants representing monotonicity into integers.
Args:
monotonicity: monotonicity hyperparameter of `PWLCalibration` layer.
Raises:
ValueError if monotonicity is invalid.
Returns:
monotonicity represented as -1, 0 or 1.
"""
if monotonicity is None:
return None
if monotonicity in [-1, 0, 1]:
return monotonicity
elif isinstance(monotonicity, six.string_types):
if monotonicity.lower() == "decreasing":
return -1
if monotonicity.lower() == "none":
return 0
if monotonicity.lower() == "increasing":
return 1
raise ValueError("'monotonicities' must be from: [-1, 0, 1, 'decreasing', "
"'none', 'increasing']. Given: %s" % monotonicity)
def canonicalize_convexity(convexity):
"""Converts string constants representing convexity into integers.
Args:
convexity: convexity hyperparameter of `PWLCalibration` layer.
Raises:
ValueError if convexity is invalid.
Returns:
convexity represented as -1, 0 or 1.
"""
if convexity is None:
return None
if convexity in [-1, 0, 1]:
return convexity
elif isinstance(convexity, six.string_types):
if convexity.lower() == "concave":
return -1
if convexity.lower() == "none":
return 0
if convexity.lower() == "convex":
return 1
raise ValueError("'convexity' must be from: [-1, 0, 1, 'concave', "
"'none', 'convex']. Given: %s" % convexity)
| <filename>tensorflow_lattice/python/pwl_calibration_lib.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of algorithms required for PWL calibration layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from enum import Enum
import six
import tensorflow as tf
class BoundConstraintsType(Enum):
"""Type of bound constraints for PWL calibration.
- NONE: no constraints.
- BOUND: output range can be anywhere within bounds.
- CLAMPED: output range must exactly match bounds.
"""
NONE = 0
BOUND = 1
CLAMPED = 2
def convert_all_constraints(output_min, output_max, clamp_min, clamp_max):
"""Converts parameters of PWL calibration layer to internal format.
Args:
output_min: None for unconstrained bound or some numeric value.
output_max: None for unconstrained bound or some numeric value.
clamp_min: Whether to clamp pwl calibrator to value if `output_min` is not
None.
clamp_max: Whether to clamp pwl calibrator to value if `output_max` is not
None.
Returns:
"value" as float and appropriate value of
`tfl.pwl_calibration_lib.BoundConstraintsType` enum which corresponds to
`output_min(max)` and `clamp_min(max)`.
"""
if output_min is None:
output_max, output_max_constraints = _convert_constraints(
output_max, clamp_max)
output_min = output_max
output_min_constraints = BoundConstraintsType.NONE
elif output_max is None:
output_min, output_min_constraints = _convert_constraints(
output_min, clamp_min)
output_max = output_min
output_max_constraints = BoundConstraintsType.NONE
else:
output_min, output_min_constraints = _convert_constraints(
output_min, clamp_min)
output_max, output_max_constraints = _convert_constraints(
output_max, clamp_max)
return output_min, output_max, output_min_constraints, output_max_constraints
def _convert_constraints(value, clamp_to_value):
"""Converts constraints for output_min/max to internal format.
Args:
value: None for unconstrained bound or some numeric value.
clamp_to_value: Whether to clamp pwl calibrator to value if value isn't None
Returns:
"value" as float and appropriate value of
`tfl.pwl_calibration_lib.BoundConstraintsType` enum which
corresponds to `value` and `clamp_to_value`.
"""
if value is None:
return 0.0, BoundConstraintsType.NONE
else:
value = float(value)
if clamp_to_value:
return value, BoundConstraintsType.CLAMPED
else:
return value, BoundConstraintsType.BOUND
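# Hedged example (hypothetical helper): converts the user-facing
# (output_min, output_max, clamp_min, clamp_max) arguments into the internal
# representation used by the projection functions.
def _example_convert_all_constraints():
  # Expected result: (1.0, 1.0, BoundConstraintsType.NONE,
  # BoundConstraintsType.CLAMPED) -- an unconstrained lower bound (its value is
  # copied from output_max and unused) and a clamped upper bound.
  return convert_all_constraints(
      output_min=None, output_max=1.0, clamp_min=False, clamp_max=True)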
def compute_interpolation_weights(inputs, keypoints, lengths):
"""Computes weights for PWL calibration.
Args:
inputs: Tensor of shape: `(D0, D1, ..., DN, 1)` which represents inputs to
to the pwl function. A typical shape is: `(batch_size, 1)`.
keypoints: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
left keypoint of pieces of piecewise linear function along X axis.
lengths: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
lengths of pieces of piecewise linear function along X axis.
Returns:
Interpolation weights tensor of shape: `(D0, D1, ..., DN, num_keypoints)`.
"""
weights = (inputs - keypoints) / lengths
weights = tf.minimum(weights, 1.0)
weights = tf.maximum(weights, 0.0)
# Prepend 1.0 at the beginning to add bias unconditionally.
return tf.concat([tf.ones_like(inputs), weights], axis=-1)
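# Minimal sketch (hypothetical helper and keypoints): for input keypoints
# [0, 1, 3] the pieces start at x=0 and x=1 with lengths 1 and 2, so an input
# of 2.0 fully saturates the first piece and is halfway through the second.
def _example_compute_interpolation_weights():
  inputs = tf.constant([[2.0]])
  keypoints = tf.constant([0.0, 1.0])  # Left keypoints of each piece.
  lengths = tf.constant([1.0, 2.0])
  # Expected weights: [[1.0, 1.0, 0.5]] -- bias column, then per-piece
  # saturation.
  return compute_interpolation_weights(inputs, keypoints, lengths)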
def linear_initializer(shape,
output_min,
output_max,
monotonicity,
keypoints=None,
dtype=None):
"""Initializes PWL calibration layer to represent linear function.
  PWL calibration layer weights have shape `(num_keypoints, units)`. The first
  row represents the bias. All remaining rows represent the delta in y-value
  compared to the previous point, i.e. the heights of the segments.
Args:
shape: Requested shape. Must be `(num_keypoints, units)`.
output_min: Minimum value of PWL calibration output after initialization.
output_max: Maximum value of PWL calibration output after initialization.
monotonicity: If one of {0, 1}, the returned function will go from
`(input_min, output_min)` to `(input_max, output_max)`. If set to -1, the
returned function will go from `(input_min, output_max)` to `(input_max,
output_min)`.
keypoints: If not provided (None or []), all pieces of returned function
will have equal heights (i.e. `y[i+1] - y[i]` is constant). If provided,
all pieces of returned function will have equal slopes (i.e. `(y[i+1] -
y[i]) / (x[i+1] - x[i])` is constant).
dtype: dtype.
Returns:
PWLCalibration layer weights initialized according to params.
Raises:
ValueError: If given parameters are inconsistent.
"""
verify_hyperparameters(
input_keypoints=keypoints,
output_min=output_min,
output_max=output_max,
monotonicity=monotonicity,
weights_shape=shape)
num_keypoints, units = int(shape[0]), int(shape[1])
if keypoints is None:
# Subtract 1 for bias which will be handled separately.
num_pieces = num_keypoints - 1
segment_height = (output_max - output_min) / num_pieces
heights_tensor = tf.constant(
[segment_height] * num_pieces, shape=[num_pieces, 1], dtype=dtype)
else:
keypoints_tensor = tf.constant(
keypoints, shape=[num_keypoints, 1], dtype=dtype)
lengths_tensor = keypoints_tensor[1:] - keypoints_tensor[0:-1]
output_range = output_max - output_min
heights_tensor = (
lengths_tensor * (output_range / tf.reduce_sum(lengths_tensor)))
if units > 1:
heights_tensor = tf.tile(heights_tensor, multiples=[1, units])
if monotonicity == -1:
bias = output_max
heights_tensor = -heights_tensor
else:
bias = output_min
bias_tensor = tf.constant(bias, shape=[1, units], dtype=dtype)
return tf.concat([bias_tensor, heights_tensor], axis=0)
def _approximately_project_bounds_only(bias, heights, output_min, output_max,
output_min_constraints,
output_max_constraints):
"""Bounds constraints implementation for PWL calibration layer.
Maps given weights of PWL calibration layer into some point which satisfies
given bounds by capping the function based on the bounds. This is not an exact
projection in L2 norm, but it is sufficiently accurate and efficient in
practice for non monotonic functions.
Args:
bias: `(1, units)`-shape tensor which represents bias.
heights: `(num_heights, units)`-shape tensor which represents heights.
output_min: Minimum possible output of pwl function.
output_max: Maximum possible output of pwl function.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
Raises:
ValueError: If `output_min(max)_constraints` is set to "CLAMPED" which is
not supported.
Returns:
Projected bias and heights.
"""
if (output_min_constraints == BoundConstraintsType.CLAMPED or
output_max_constraints == BoundConstraintsType.CLAMPED):
raise ValueError("Clamping is not implemented for non monotonic functions.")
if (output_min_constraints == BoundConstraintsType.NONE and
output_max_constraints == BoundConstraintsType.NONE):
return bias, heights
# Compute cumulative sums - they correspond to our calibrator outputs at
# keypoints. Simply clip them according to config and compute new heights
# using clipped cumulative sums.
sums = tf.cumsum(tf.concat([bias, heights], axis=0))
if output_min_constraints == BoundConstraintsType.BOUND:
sums = tf.maximum(sums, output_min)
if output_max_constraints == BoundConstraintsType.BOUND:
sums = tf.minimum(sums, output_max)
bias = sums[0:1]
heights = sums[1:] - sums[:-1]
return bias, heights
def _project_bounds_considering_monotonicity(bias, heights, monotonicity,
output_min, output_max,
output_min_constraints,
output_max_constraints):
"""Bounds projection given monotonicity constraints.
Projects weights of PWLCalibration layer into nearest in terms of l2 distance
point which satisfies bounds constraints taking into account that function
is monotonic.
Algorithm:
To minimize L2 distance to projected point we want to distribute update
through heights as evenly as possible. A simplified description of the
  algorithm, considering only an increasing function, is as follows:
```
delta = (output_max - (bias + sum(heights[:]))) / (num_heights + 1)
bias = max(bias + delta, output_min)
heights[:] += delta
```
Some details which were omitted above:
  * If `output_min_constraints == "CLAMPED"` then the `bias` variable becomes
    constant (this means we can't add delta to it).
  * If `output_max_constraints != "CLAMPED"` we look only for a negative delta
    because we are not required to stretch the function to meet the upper
    bound.
* If function is decreasing we multiply everything by -1 and switch min and
max to make it increasing.
Args:
bias: `(1, units)`-shape tensor which represents bias.
heights: `(num_heights, units)`-shape tensor which represents heights.
monotonicity: 1 for increasing, -1 for decreasing.
output_min: Lower bound constraint of PWL calibration layer.
output_max: Upper bound constraint of PWL calibration layer.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
Returns:
Projected bias and heights tensors.
Raises:
ValueError: If monotonicity is not in: {-1, 1}
"""
if monotonicity not in [-1, 1]:
raise ValueError("Monotonicity should be one of: [-1, 1]. It is: " +
str(monotonicity))
if monotonicity == -1:
# Reduce computation of projection of decreasing function to computation of
# projection of increasing function by multiplying everything by -1 and
# swapping maximums and minimums.
(projected_bias,
projected_heights) = _project_bounds_considering_monotonicity(
bias=-bias,
heights=-heights,
monotonicity=1,
output_min=None if output_max is None else -output_max,
output_max=None if output_min is None else -output_min,
output_min_constraints=output_max_constraints,
output_max_constraints=output_min_constraints)
return -projected_bias, -projected_heights
bct = BoundConstraintsType
if output_max_constraints != bct.NONE:
num_heights = float(heights.shape.dims[0].value)
sum_heights = tf.reduce_sum(heights, axis=0)
# For each possible output_min_constraints value compute projected bias and
# heights_delta.
if output_min_constraints == bct.CLAMPED:
# If output_min is clamped - bias must have fixed value and number of free
# parameters is equal to number of heights.
bias = tf.constant(output_min, shape=bias.shape, dtype=bias.dtype)
heights_delta = (output_max - (bias + sum_heights)) / num_heights
elif output_min_constraints == bct.BOUND:
# If output_min is not clamped then number of free parameters is
# num_heights + 1.
bias_delta = (output_max - (bias + sum_heights)) / (num_heights + 1)
if output_max_constraints != bct.CLAMPED:
# If output_max is not clamped - there is no need to stretch our
# function. We need only to squeeze it.
bias_delta = tf.minimum(bias_delta, 0.0)
bias = tf.maximum(bias + bias_delta, output_min)
# For this branch compute heights delta _after_ we applied bias projection
# because heights are not bound by output_min constraint unlike bias.
heights_delta = (output_max - (bias + sum_heights)) / num_heights
else:
bias_delta = (output_max - (bias + sum_heights)) / (num_heights + 1)
# For this branch heights delta and bias delta are same because none of
# them are bounded from below.
heights_delta = bias_delta
if output_max_constraints != bct.CLAMPED:
# If output_max is not clamped - there is no need to stretch our
# function. We need only to squeeze it.
bias_delta = tf.minimum(bias_delta, 0.0)
bias += bias_delta
if output_max_constraints != bct.CLAMPED:
# If output_max is not clamped - there is no need to stretch our function.
# We need only to squeeze it.
heights_delta = tf.minimum(heights_delta, 0.0)
heights += heights_delta
else:
# No need to do anything with heights if there are no output_max
# constraints.
if output_min_constraints == bct.CLAMPED:
bias = tf.constant(output_min, shape=bias.shape, dtype=bias.dtype)
elif output_min_constraints == bct.BOUND:
bias = tf.maximum(bias, output_min)
return bias, heights
def _project_convexity(heights, lengths, convexity, constraint_group):
"""Convexity projection for given 'constraint_group'.
Since an exact single step projection is not possible for convexity
constraints, we break the constraints into two independent groups and apply
Dykstra's alternating projections algorithm. Each group consists of a list of
  pairs where each pair represents constraints on 2 consecutive heights.
Groups:
```
g0 = [(h0, h1), (h2, h3), (h4, h5), ...]
g1 = [(h1, h2), (h3, h4), (h5, h6), ...]
```
We know how to project single pair of adjacent heights:
h0_prime = min/max(h0, (l0 / (l0 + l1)) * (h0 + h1))
h1_prime = min/max(h1, (l1 / (l0 + l1)) * (h0 + h1))
  where l0 and l1 stand for the lengths of the segments which correspond to h0
  and h1, and the choice of min or max depends on the convexity direction.
  All pairs within the same group are independent, so we know how to project
  such a group of constraints in a single pass.
This function breaks heights and their lengths into given constraint group
and does projection for this group.
Args:
heights: `(num_heights, units)`-shape tensor which represents heights.
lengths: `(num_heights)`-shape tensor which represents lengths of segments
which correspond to heights.
convexity: -1 or 1 where 1 stands for convex function and -1 for concave.
constraint_group: 0 or 1 which represent group from description above.
Returns:
Projected heights for given constraint group.
"""
verify_hyperparameters(
convexity=convexity,
lengths=lengths,
weights_shape=[heights.shape[0] + 1, heights.shape[1]])
if constraint_group not in [0, 1]:
raise ValueError("constraint_group must be one of: [0, 1]. "
"Given: %s" % constraint_group)
if convexity == 0 or heights.shape[0] == 1:
return heights
num_heights = heights.shape.dims[0].value
# To avoid broadcasting when performing math ops with 'heights'.
lengths = tf.reshape(lengths, shape=(-1, 1))
  # Split heights and lengths into pairs which correspond to given constraint
# group. In order to do this we need to split heights into odd and even. We
# can possibly omit last element of larger set to ensure that both sets have
# same number of elements.
num_0 = (num_heights - constraint_group + 1) // 2
num_1 = (num_heights - constraint_group) // 2
if num_1 == num_0:
last_index = None
else:
last_index = -1
heights_0 = heights[constraint_group:last_index:2]
lengths_0 = lengths[constraint_group:last_index:2]
heights_1 = heights[constraint_group + 1::2]
lengths_1 = lengths[constraint_group + 1::2]
# h0_prime = (l0 / (l0 + l1)) * (h0 + h1) = l0 * base
# h1_prime = (l1 / (l0 + l1)) * (h0 + h1) = l1 * base
base = (heights_0 + heights_1) / (lengths_0 + lengths_1)
heights_0_prime = lengths_0 * base
heights_1_prime = lengths_1 * base
if convexity == 1:
heights_0 = tf.minimum(heights_0, heights_0_prime)
heights_1 = tf.maximum(heights_1, heights_1_prime)
else:
heights_0 = tf.maximum(heights_0, heights_0_prime)
heights_1 = tf.minimum(heights_1, heights_1_prime)
# Now we need to merge heights in such way that elements from 'heights_0' and
# 'heights_1' alternate:
# merged = [heights_0[0], heights_1[0], heights_0[1], heights_1[1], ...]
# Achieve this by concatenating along axis=1 so after concatenation elements
# from 'heights_0' and 'heights_1' will alternate in memory and reshape will
# give us desired result.
projected_heights = tf.reshape(
tf.concat([heights_0, heights_1], axis=1), shape=[-1, heights.shape[1]])
weights_pieces = [projected_heights]
if constraint_group == 1:
# First height was skipped during initial split.
weights_pieces = [heights[0:1]] + weights_pieces
if last_index == -1:
# Last height was skipped during initial split.
weights_pieces.append(heights[-1:])
if len(weights_pieces) == 1:
return weights_pieces[0]
else:
return tf.concat(weights_pieces, axis=0)
def _project_monotonicity(heights, monotonicity):
"""Projects into monotonic function."""
if monotonicity == 0:
return heights
elif monotonicity == 1:
return tf.maximum(heights, 0.0)
else:
return tf.minimum(heights, 0.0)
def project_all_constraints(weights,
monotonicity,
output_min,
output_max,
output_min_constraints,
output_max_constraints,
convexity,
lengths,
num_projection_iterations=8):
"""Jointly projects into all supported constraints.
For all combinations of constraints except the case where bounds constraints
are specified without monotonicity constraints we properly project into
nearest point with respect to L2 norm. For later case we use a heuristic to
map input point into some feasible point with no guarantees on how close this
point is to the true projection.
If only bounds or only monotonicity constraints are specified there will be a
single step projection. For all other combinations of constraints we use
num_projection_iterations iterations of Dykstra's alternating projection
algorithm to jointly project onto all the given constraints. Dykstra's
algorithm gives us proper projection with respect to L2 norm but approaches it
from "wrong" side. That's why in order to ensure that constraints are strictly
met we'll do approximate projections in the end which project strictly into
feasible space, but it's not an exact projection with respect to the L2 norm.
With enough iterations of the Dykstra's algorithm, the impact of such
approximate projection should be negligible.
With bound and convexity constraints and no specified monotonicity, this
  method does not fully satisfy the constraints. Increasing the number of
iterations can reduce the constraint violation in such cases.
Args:
weights: `(num_keypoints, units)`-shape tensor which represents weights of
PWL calibration layer.
monotonicity: 1 for increasing, -1 for decreasing, 0 for no monotonicity
constraints.
output_min: Lower bound constraint of PWL calibration layer.
output_max: Upper bound constraint of PWL calibration layer.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
convexity: 1 for convex, -1 for concave, 0 for no convexity constraints.
lengths: Lengths of pieces of piecewise linear function. Needed only if
convexity projection is specified.
num_projection_iterations: Number of iterations of Dykstra's alternating
projection algorithm.
Returns:
Projected weights tensor.
"""
bias = weights[0:1]
heights = weights[1:]
def body(projection_counter, bias, heights, last_bias_change,
last_heights_change):
"""The body of tf.while_loop implementing a step of Dykstra's projection.
Args:
projection_counter: The counter tensor or number at the beginning of the
iteration.
bias: Bias tensor at the beginning of the iteration.
heights: Heights tensor at the beginning of the iteration.
last_bias_change: Dict that stores the last change in the bias after
projecting onto each subset of constraints.
last_heights_change: Dict that stores the last change in the heights after
projecting onto each subset of constraints.
Returns:
      The tuple `(projection_counter, bias, heights, last_bias_change,
last_heights_change)` at the end of the iteration.
"""
last_bias_change = copy.copy(last_bias_change)
last_heights_change = copy.copy(last_heights_change)
num_projections = 0
# ******************** BOUNDS *********************
bct = BoundConstraintsType
if output_min_constraints != bct.NONE or output_max_constraints != bct.NONE:
rolled_back_bias = bias - last_bias_change["BOUNDS"]
rolled_back_heights = heights - last_heights_change["BOUNDS"]
if monotonicity != 0:
bias, heights = _project_bounds_considering_monotonicity(
bias=rolled_back_bias,
heights=rolled_back_heights,
monotonicity=monotonicity,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints)
else:
bias, heights = _approximately_project_bounds_only(
bias=rolled_back_bias,
heights=rolled_back_heights,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints)
last_bias_change["BOUNDS"] = bias - rolled_back_bias
last_heights_change["BOUNDS"] = heights - rolled_back_heights
num_projections += 1
# ******************** MONOTONICITY *********************
if monotonicity != 0:
rolled_back_heights = heights - last_heights_change["MONOTONICITY"]
heights = _project_monotonicity(
heights=rolled_back_heights, monotonicity=monotonicity)
last_heights_change["MONOTONICITY"] = heights - rolled_back_heights
num_projections += 1
# ******************** CONVEXITY *********************
if convexity != 0:
if heights.shape[0] >= 2:
rolled_back_heights = heights - last_heights_change["CONVEXITY_0"]
heights = _project_convexity(
heights=rolled_back_heights,
lengths=lengths,
convexity=convexity,
constraint_group=0)
last_heights_change["CONVEXITY_0"] = heights - rolled_back_heights
num_projections += 1
if heights.shape[0] >= 3:
rolled_back_heights = heights - last_heights_change["CONVEXITY_1"]
heights = _project_convexity(
heights=rolled_back_heights,
lengths=lengths,
convexity=convexity,
constraint_group=1)
last_heights_change["CONVEXITY_1"] = heights - rolled_back_heights
num_projections += 1
return (projection_counter + num_projections, bias, heights,
last_bias_change, last_heights_change)
# Call the body of the loop once to see if Dykstra's is needed.
# If there is only one set of projections, apply it without a loop.
# Running the body of the loop also finds the required last_bias_change
# and last_heights_change keys. The set of keys in the input and output of the
# body of tf.while_loop must be the same across iterations.
zero_bias = tf.zeros_like(bias)
zero_heights = tf.zeros_like(heights)
last_bias_change = collections.defaultdict(lambda: zero_bias)
last_heights_change = collections.defaultdict(lambda: zero_heights)
(num_projections, projected_bias, projected_heights, last_bias_change,
last_heights_change) = body(0, bias, heights, last_bias_change,
last_heights_change)
if num_projections <= 1:
return tf.concat([projected_bias, projected_heights], axis=0)
def cond(projection_counter, bias, heights, last_bias_change,
last_heights_change):
del bias, heights, last_bias_change, last_heights_change
return tf.less(projection_counter,
num_projection_iterations * num_projections)
# Apply Dykstra's algorithm with tf.while_loop.
projection_counter = tf.constant(0)
last_bias_change = {k: zero_bias for k in last_bias_change}
last_heights_change = {k: zero_heights for k in last_heights_change}
(_, bias, heights, _,
_) = tf.while_loop(cond, body, (projection_counter, bias, heights,
last_bias_change, last_heights_change))
# Since Dykstra's algorithm is iterative in order to strictly meet constraints
# we use approximate projection algorithm to finalize them.
return _finalize_constraints(
bias=bias,
heights=heights,
monotonicity=monotonicity,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints,
convexity=convexity,
lengths=lengths)
def _squeeze_by_scaling(bias, heights, monotonicity, output_min, output_max,
output_min_constraints, output_max_constraints):
"""Squeezes monotonic calibrators by scaling in order to meet bounds.
Projection by scaling is not exact with respect to the L2 norm, but maintains
convexity unlike projection by shift.
Args:
bias: `(1, units)`-shape tensor which represents bias.
heights: `(num_heights, units)`-shape tensor which represents heights.
monotonicity: 1 for increasing, -1 for decreasing.
output_min: Lower bound constraint of PWL calibration layer.
output_max: Upper bound constraint of PWL calibration layer.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
Returns:
Projected bias and heights.
"""
if monotonicity == -1:
if output_min_constraints == BoundConstraintsType.NONE:
return bias, heights
# Reduce computation of projection of decreasing function to computation of
# projection of increasing function by multiplying everything by -1 and
# swapping maximums and minimums.
bias, heights = _squeeze_by_scaling(
bias=-bias,
heights=-heights,
monotonicity=1,
output_min=None if output_max is None else -output_max,
output_max=None if output_min is None else -output_min,
output_min_constraints=output_max_constraints,
output_max_constraints=output_min_constraints)
return -bias, -heights
if output_max_constraints == BoundConstraintsType.NONE:
return bias, heights
delta = output_max - bias
# For better stability use tf.where rather than the more standard approach:
# heights *= tf.reduce_sum(heights) / max(delta, eps)
  # in order to keep everything strictly unchanged for small deltas, rather
  # than increasing heights by a factor of 1/eps while still not meeting the
  # constraints.
scaling_factor = tf.where(delta > 0.001,
tf.reduce_sum(heights, axis=0) / delta,
tf.ones_like(delta))
heights = heights / tf.maximum(scaling_factor, 1.0)
return bias, heights
def _approximately_project_convexity(heights, lengths, convexity):
"""Strictly projects convexity, but is not exact with respect to the L2 norm.
Projects by iterating over pieces of piecewise linear function left to right
and aligning current slope with previous one if it violates convexity.
Args:
heights: `(num_heights, units)`-shape tensor which represents heights.
lengths: `(num_heights)`-shape tensor which represents lengths of segments
which correspond to heights.
convexity: -1 or 1 where 1 stands for convex function and -1 for concave.
Returns:
Projected heights.
"""
if convexity == 0:
return heights
heights = tf.unstack(heights, axis=0)
lengths = tf.unstack(lengths, axis=0)
for i in range(1, len(heights)):
temp = heights[i - 1] * (lengths[i] / lengths[i - 1])
if convexity == 1:
heights[i] = tf.maximum(heights[i], temp)
else:
heights[i] = tf.minimum(heights[i], temp)
return tf.stack(heights, axis=0)
def _finalize_constraints(bias, heights, monotonicity, output_min, output_max,
output_min_constraints, output_max_constraints,
convexity, lengths):
"""Strictly projects onto the given constraint, approximate w.r.t the L2 norm.
Dykstra's algorithm gives us proper projection with respect to L2 norm but
approaches it from "wrong" side. In order to ensure that constraints are
strictly met we'll do approximate projections in the end which project
strictly into feasible space, but it's not an exact projection with respect to
the L2 norm. With enough iterations of the Dykstra's algorithm, the impact of
such approximate projection should be negligible.
With bound and convexity constraints and no specified monotonicity, this
  method does not fully satisfy the constraints. Increasing the number of
  iterations can reduce the constraint violation in such cases. Fortunately
  this does not seem to be a common configuration.
Args:
bias: `(1, units)`-shape tensor which represents bias.
heights: `(num_heights, units)`-shape tensor which represents heights.
monotonicity: 1 for increasing, -1 for decreasing, 0 for no monotonicity
constraints.
output_min: Lower bound constraint of PWL calibration layer.
output_max: Upper bound constraint of PWL calibration layer.
output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's minimum value.
output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
describing the constraints on the layer's maximum value.
convexity: 1 for convex, -1 for concave, 0 for no convexity constraints.
lengths: Lengths of pieces of piecewise linear function. Needed only if
convexity projection is specified.
Returns:
Projected weights tensor.
"""
# Convexity and monotonicity projections don't violate each other, but both
# might lead to bounds violation, so do them first and fix bounds after.
if monotonicity != 0:
heights = _project_monotonicity(heights=heights, monotonicity=monotonicity)
if convexity != 0:
heights = _approximately_project_convexity(
heights=heights, lengths=lengths, convexity=convexity)
bct = BoundConstraintsType
if output_min_constraints != bct.NONE or output_max_constraints != bct.NONE:
if monotonicity != 0 and convexity != 0:
# Both monotonicity and convexity projection can only increase upper bound
# so we only need to take care of decreasing it back.
bias, heights = _squeeze_by_scaling(
bias=bias,
heights=heights,
monotonicity=monotonicity,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints)
else:
# This bounds projection might violate convexity. Unfortunately bounds
      # projections with convexity and without monotonicity are difficult to
      # achieve strictly and might be violated, so we ignore this for now. In order
# to minimize projection error consider increasing
# num_projection_iterations.
if output_min_constraints == bct.CLAMPED:
output_min_constraints = bct.BOUND
if output_max_constraints == bct.CLAMPED:
output_max_constraints = bct.BOUND
bias, heights = _approximately_project_bounds_only(
bias=bias,
heights=heights,
output_min=output_min,
output_max=output_max,
output_min_constraints=output_min_constraints,
output_max_constraints=output_max_constraints)
return tf.concat([bias, heights], axis=0)
def assert_constraints(outputs,
monotonicity,
output_min,
output_max,
clamp_min=False,
clamp_max=False,
debug_tensors=None,
eps=1e-6):
"""Asserts that 'outputs' satisfiy constraints.
Args:
outputs: Tensor of shape `(num_output_values, units)` which represents
outputs of pwl calibration layer which will be tested against the given
constraints. If monotonicity is specified these outputs must be for
      consecutive inputs.
    monotonicity: One of {-1, 0, 1}. -1 for decreasing, 1 for increasing, 0 means
no monotonicity checks.
output_min: Lower bound or None.
output_max: Upper bound or None.
    clamp_min: Whether one of the outputs must match output_min.
    clamp_max: Whether one of the outputs must match output_max.
debug_tensors: None or list of anything convertible to tensor (for example
tensors or strings) which will be printed in case of constraints
violation.
eps: Allowed constraints violation.
Raises:
ValueError: If monotonicity is not one of {-1, 0, 1}
Returns:
    List of assertion ops in graph mode, or immediate assertions in eager mode.
"""
info = ["Outputs: ", outputs, "Epsilon: ", eps]
if debug_tensors:
info += debug_tensors
asserts = []
if output_min is not None:
min_output = tf.reduce_min(outputs, axis=0)
if clamp_min:
asserts.append(
tf.Assert(
tf.reduce_all(tf.abs(min_output - output_min) <= eps),
data=["Clamp_min violation.", "output_min:", output_min] + info,
summarize=outputs.shape[0]))
else:
asserts.append(
tf.Assert(
tf.reduce_all(min_output >= output_min - eps),
data=["Lower bound violation.", "output_min:", output_min] + info,
summarize=outputs.shape[0]))
if output_max is not None:
max_output = tf.reduce_max(outputs, axis=0)
if clamp_max:
asserts.append(
tf.Assert(
tf.reduce_all(tf.abs(max_output - output_max) <= eps),
data=["Clamp_max violation.", "output_max:", output_max] + info,
summarize=outputs.shape[0]))
else:
asserts.append(
tf.Assert(
tf.reduce_all(max_output <= output_max + eps),
data=["Upper bound violation.", "output_max:", output_max] + info,
summarize=outputs.shape[0]))
if monotonicity not in [-1, 0, 1]:
raise ValueError("'monotonicity' must be one of: [-1, 0, 1]. It is: %s" %
monotonicity)
if monotonicity != 0:
diffs = (outputs[1:] - outputs[0:-1])
asserts.append(
tf.Assert(
tf.reduce_min(diffs * monotonicity) >= -eps,
data=["Monotonicity violation.", "monotonicity:", monotonicity] +
info,
summarize=outputs.shape[0]))
return asserts
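# Hedged usage sketch (not part of the original module): in eager mode the
# assertions fire immediately, so simply calling the function checks the
# outputs; the values below are illustrative.
def _example_assert_constraints():
  outputs = tf.constant([[0.0], [0.5], [1.0]])  # consecutive calibrator outputs
  return assert_constraints(
      outputs=outputs,
      monotonicity=1,
      output_min=0.0,
      output_max=1.0,
      clamp_min=True,
      clamp_max=True)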
def verify_hyperparameters(input_keypoints=None,
output_min=None,
output_max=None,
monotonicity=None,
convexity=None,
is_cyclic=False,
lengths=None,
weights_shape=None):
"""Verifies that all given hyperparameters are consistent.
See PWLCalibration class level comment for detailed description of arguments.
Args:
input_keypoints: `input_keypoints` of PWLCalibration layer.
output_min: Smallest output of PWLCalibration layer.
output_max: Largest output of PWLCalibration layer.
monotonicity: `monotonicity` hyperparameter of PWLCalibration layer.
convexity: `convexity` hyperparameter of PWLCalibration layer.
is_cyclic: `is_cyclic` hyperparameter of PWLCalibration layer.
lengths: Lengths of pieces of piecewise linear function.
weights_shape: Shape of weights of PWLCalibration layer.
Raises:
ValueError: If something is inconsistent.
"""
if input_keypoints is not None:
if tf.is_tensor(input_keypoints):
if len(input_keypoints.shape) != 1 or input_keypoints.shape[0] < 2:
raise ValueError("Input keypoints must be rank-1 tensor of size at "
"least 2. It is: " + str(input_keypoints))
else:
if len(input_keypoints) < 2:
raise ValueError("At least 2 input keypoints must be provided. "
"Given: " + str(input_keypoints))
if not all(input_keypoints[i] < input_keypoints[i + 1]
for i in range(len(input_keypoints) - 1)):
raise ValueError("Keypoints must be strictly increasing. They are: " +
str(input_keypoints))
if output_min is not None and output_max is not None:
if output_max < output_min:
raise ValueError("If specified output_max must be greater than "
"output_min. "
"They are: ({}, {})".format(output_min, output_max))
# It also raises errors if monotonicities specified incorrectly.
monotonicity = canonicalize_monotonicity(monotonicity)
convexity = canonicalize_convexity(convexity)
if is_cyclic and (monotonicity or convexity):
raise ValueError("'is_cyclic' can not be specified together with "
"'monotonicity'({}) or 'convexity'({}).".format(
monotonicity, convexity))
if weights_shape is not None:
if len(weights_shape) != 2 or weights_shape[0] < 2:
raise ValueError("PWLCalibrator weights must have shape: [k, units] where"
" k > 1. It is: " + str(weights_shape))
if lengths is not None and weights_shape is not None:
if tf.is_tensor(lengths):
num_lengths = lengths.shape[0]
else:
num_lengths = len(lengths)
if num_lengths + 1 != weights_shape[0]:
raise ValueError("Number of lengths must be equal to number of weights "
"minus one. Lengths: %s, weights_shape: %s" %
(lengths, weights_shape))
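# Hedged usage sketch (not part of the original module): a consistent set of
# hyperparameters passes silently, while an inconsistent one raises ValueError.
def _example_verify_hyperparameters():
  verify_hyperparameters(
      input_keypoints=[0.0, 0.5, 1.0],
      output_min=0.0,
      output_max=1.0,
      monotonicity="increasing",
      convexity="none")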
def canonicalize_monotonicity(monotonicity):
"""Converts string constants representing monotonicity into integers.
Args:
monotonicity: monotonicity hyperparameter of `PWLCalibration` layer.
Raises:
ValueError if monotonicity is invalid.
Returns:
monotonicity represented as -1, 0 or 1.
"""
if monotonicity is None:
return None
if monotonicity in [-1, 0, 1]:
return monotonicity
elif isinstance(monotonicity, six.string_types):
if monotonicity.lower() == "decreasing":
return -1
if monotonicity.lower() == "none":
return 0
if monotonicity.lower() == "increasing":
return 1
raise ValueError("'monotonicities' must be from: [-1, 0, 1, 'decreasing', "
"'none', 'increasing']. Given: %s" % monotonicity)
def canonicalize_convexity(convexity):
"""Converts string constants representing convexity into integers.
Args:
convexity: convexity hyperparameter of `PWLCalibration` layer.
Raises:
ValueError if convexity is invalid.
Returns:
convexity represented as -1, 0 or 1.
"""
if convexity is None:
return None
if convexity in [-1, 0, 1]:
return convexity
elif isinstance(convexity, six.string_types):
if convexity.lower() == "concave":
return -1
if convexity.lower() == "none":
return 0
if convexity.lower() == "convex":
return 1
raise ValueError("'convexity' must be from: [-1, 0, 1, 'concave', "
"'none', 'convex']. Given: %s" % convexity)
| en | 0.834779 | # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Implementation of algorithms required for PWL calibration layer. Type of bound constraints for PWL calibration. - NONE: no constraints. - BOUND: output range can be anywhere within bounds. - CLAMPED: output range must exactly match bounds. Converts parameters of PWL calibration layer to internal format. Args: output_min: None for unconstrained bound or some numeric value. output_max: None for unconstrained bound or some numeric value. clamp_min: Whether to clamp pwl calibrator to value if `output_min` is not None. clamp_max: Whether to clamp pwl calibrator to value if `output_max` is not None. Returns: "value" as float and appropriate value of `tfl.pwl_calibration_lib.BoundConstraintsType` enum which corresponds to `output_min(max)` and `clamp_min(max)`. Converts constraints for output_min/max to internal format. Args: value: None for unconstrained bound or some numeric value. clamp_to_value: Whether to clamp pwl calibrator to value if value isn't None Returns: "value" as float and appropriate value of `tfl.pwl_calibration_lib.BoundConstraintsType` enum which corresponds to `value` and `clamp_to_value`. Computes weights for PWL calibration. Args: inputs: Tensor of shape: `(D0, D1, ..., DN, 1)` which represents inputs to to the pwl function. A typical shape is: `(batch_size, 1)`. keypoints: Rank-1 tensor of shape `(num_keypoints - 1)` which represents left keypoint of pieces of piecewise linear function along X axis. lengths: Rank-1 tensor of shape `(num_keypoints - 1)` which represents lengths of pieces of piecewise linear function along X axis. Returns: Interpolation weights tensor of shape: `(D0, D1, ..., DN, num_keypoints)`. # Prepend 1.0 at the beginning to add bias unconditionally. Initializes PWL calibration layer to represent linear function. PWL calibration layer weights have shape `(knum_keypoints, units)`. First row represents bias. All remaining represent delta in y-value compare to previous point. Aka heights of segments. Args: shape: Requested shape. Must be `(num_keypoints, units)`. output_min: Minimum value of PWL calibration output after initialization. output_max: Maximum value of PWL calibration output after initialization. monotonicity: If one of {0, 1}, the returned function will go from `(input_min, output_min)` to `(input_max, output_max)`. If set to -1, the returned function will go from `(input_min, output_max)` to `(input_max, output_min)`. keypoints: If not provided (None or []), all pieces of returned function will have equal heights (i.e. `y[i+1] - y[i]` is constant). If provided, all pieces of returned function will have equal slopes (i.e. `(y[i+1] - y[i]) / (x[i+1] - x[i])` is constant). dtype: dtype. Returns: PWLCalibration layer weights initialized according to params. Raises: ValueError: If given parameters are inconsistent. # Subtract 1 for bias which will be handled separately. Bounds constraints implementation for PWL calibration layer. 
Maps given weights of PWL calibration layer into some point which satisfies given bounds by capping the function based on the bounds. This is not an exact projection in L2 norm, but it is sufficiently accurate and efficient in practice for non monotonic functions. Args: bias: `(1, units)`-shape tensor which represents bias. heights: `(num_heights, units)`-shape tensor which represents heights. output_min: Minimum possible output of pwl function. output_max: Maximum possible output of pwl function. output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's minimum value. output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's maximum value. Raises: ValueError: If `output_min(max)_constraints` is set to "CLAMPED" which is not supported. Returns: Projected bias and heights. # Compute cumulative sums - they correspond to our calibrator outputs at # keypoints. Simply clip them according to config and compute new heights # using clipped cumulative sums. Bounds projection given monotonicity constraints. Projects weights of PWLCalibration layer into nearest in terms of l2 distance point which satisfies bounds constraints taking into account that function is monotonic. Algorithm: To minimize L2 distance to projected point we want to distribute update through heights as evenly as possible. A simplified description of the algorithm for and increasing function is as follows: Consider only increasing function. ``` delta = (output_max - (bias + sum(heights[:]))) / (num_heights + 1) bias = max(bias + delta, output_min) heights[:] += delta ``` Some details which were omitted above: * If `output_min_constraints == "CAPPED"` then `bias` variable becomes constant (this means we can't add delta to it). * if `output_max_constraints != "CAPPED"` we are looking only for negative delta because we are not required to stretch function to meet upper bound. * If function is decreasing we multiply everything by -1 and switch min and max to make it increasing. Args: bias: `(1, units)`-shape tensor which represents bias. heights: `(num_heights, units)`-shape tensor which represents heights. monotonicity: 1 for increasing, -1 for decreasing. output_min: Lower bound constraint of PWL calibration layer. output_max: Upper bound constraint of PWL calibration layer. output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's minimum value. output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's maximum value. Returns: Projected bias and heights tensors. Raises: ValueError: If monotonicity is not in: {-1, 1} # Reduce computation of projection of decreasing function to computation of # projection of increasing function by multiplying everything by -1 and # swapping maximums and minimums. # For each possible output_min_constraints value compute projected bias and # heights_delta. # If output_min is clamped - bias must have fixed value and number of free # parameters is equal to number of heights. # If output_min is not clamped then number of free parameters is # num_heights + 1. # If output_max is not clamped - there is no need to stretch our # function. We need only to squeeze it. # For this branch compute heights delta _after_ we applied bias projection # because heights are not bound by output_min constraint unlike bias. 
# For this branch heights delta and bias delta are same because none of # them are bounded from below. # If output_max is not clamped - there is no need to stretch our # function. We need only to squeeze it. # If output_max is not clamped - there is no need to stretch our function. # We need only to squeeze it. # No need to do anything with heights if there are no output_max # constraints. Convexity projection for given 'constraint_group'. Since an exact single step projection is not possible for convexity constraints, we break the constraints into two independent groups and apply Dykstra's alternating projections algorithm. Each group consists of a list of pairs where each pair represents constraints on 2 consequtive heights. Groups: ``` g0 = [(h0, h1), (h2, h3), (h4, h5), ...] g1 = [(h1, h2), (h3, h4), (h5, h6), ...] ``` We know how to project single pair of adjacent heights: h0_prime = min/max(h0, (l0 / (l0 + l1)) * (h0 + h1)) h1_prime = min/max(h1, (l1 / (l0 + l1)) * (h0 + h1)) where l0 and l1 stand for lengths of segment which correspond to h0 and h1 and choise of min or max functions depends on convexity direction. We can see that all pairs within same group are independent so we know how to project such group of constraints in single pass. This function breaks heights and their lengths into given constraint group and does projection for this group. Args: heights: `(num_heights, units)`-shape tensor which represents heights. lengths: `(num_heights)`-shape tensor which represents lengths of segments which correspond to heights. convexity: -1 or 1 where 1 stands for convex function and -1 for concave. constraint_group: 0 or 1 which represent group from description above. Returns: Projected heights for given constraint group. # To avoid broadcasting when performing math ops with 'heights'. # Split heigths and lengths into pairs which correspond to given constraint # group. In order to do this we need to split heights into odd and even. We # can possibly omit last element of larger set to ensure that both sets have # same number of elements. # h0_prime = (l0 / (l0 + l1)) * (h0 + h1) = l0 * base # h1_prime = (l1 / (l0 + l1)) * (h0 + h1) = l1 * base # Now we need to merge heights in such way that elements from 'heights_0' and # 'heights_1' alternate: # merged = [heights_0[0], heights_1[0], heights_0[1], heights_1[1], ...] # Achieve this by concatenating along axis=1 so after concatenation elements # from 'heights_0' and 'heights_1' will alternate in memory and reshape will # give us desired result. # First height was skipped during initial split. # Last height was skipped during initial split. Projects into monotonic function. Jointly projects into all supported constraints. For all combinations of constraints except the case where bounds constraints are specified without monotonicity constraints we properly project into nearest point with respect to L2 norm. For later case we use a heuristic to map input point into some feasible point with no guarantees on how close this point is to the true projection. If only bounds or only monotonicity constraints are specified there will be a single step projection. For all other combinations of constraints we use num_projection_iterations iterations of Dykstra's alternating projection algorithm to jointly project onto all the given constraints. Dykstra's algorithm gives us proper projection with respect to L2 norm but approaches it from "wrong" side. 
That's why in order to ensure that constraints are strictly met we'll do approximate projections in the end which project strictly into feasible space, but it's not an exact projection with respect to the L2 norm. With enough iterations of the Dykstra's algorithm, the impact of such approximate projection should be negligible. With bound and convexity constraints and no specified monotonicity, this method does not fully satisfy the constrains. Increasing the number of iterations can reduce the constraint violation in such cases. Args: weights: `(num_keypoints, units)`-shape tensor which represents weights of PWL calibration layer. monotonicity: 1 for increasing, -1 for decreasing, 0 for no monotonicity constraints. output_min: Lower bound constraint of PWL calibration layer. output_max: Upper bound constraint of PWL calibration layer. output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's minimum value. output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's maximum value. convexity: 1 for convex, -1 for concave, 0 for no convexity constraints. lengths: Lengths of pieces of piecewise linear function. Needed only if convexity projection is specified. num_projection_iterations: Number of iterations of Dykstra's alternating projection algorithm. Returns: Projected weights tensor. The body of tf.while_loop implementing a step of Dykstra's projection. Args: projection_counter: The counter tensor or number at the beginning of the iteration. bias: Bias tensor at the beginning of the iteration. heights: Heights tensor at the beginning of the iteration. last_bias_change: Dict that stores the last change in the bias after projecting onto each subset of constraints. last_heights_change: Dict that stores the last change in the heights after projecting onto each subset of constraints. Returns: The tuple `(num_projection_counter, bias, heights, last_bias_change, last_heights_change)` at the end of the iteration. # ******************** BOUNDS ********************* # ******************** MONOTONICITY ********************* # ******************** CONVEXITY ********************* # Call the body of the loop once to see if Dykstra's is needed. # If there is only one set of projections, apply it without a loop. # Running the body of the loop also finds the required last_bias_change # and last_heights_change keys. The set of keys in the input and output of the # body of tf.while_loop must be the same across iterations. # Apply Dykstra's algorithm with tf.while_loop. # Since Dykstra's algorithm is iterative in order to strictly meet constraints # we use approximate projection algorithm to finalize them. Squeezes monotonic calibrators by scaling in order to meet bounds. Projection by scaling is not exact with respect to the L2 norm, but maintains convexity unlike projection by shift. Args: bias: `(1, units)`-shape tensor which represents bias. heights: `(num_heights, units)`-shape tensor which represents heights. monotonicity: 1 for increasing, -1 for decreasing. output_min: Lower bound constraint of PWL calibration layer. output_max: Upper bound constraint of PWL calibration layer. output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's minimum value. output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's maximum value. Returns: Projected bias and heights. 
# Reduce computation of projection of decreasing function to computation of # projection of increasing function by multiplying everything by -1 and # swapping maximums and minimums. # For better stability use tf.where rather than the more standard approach: # heights *= tf.reduce_sum(heights) / max(delta, eps) # in order to keep everything strictly unchanged for small deltas, rather than # increase heights by factor 1/eps and still don't meet constraints. Strictly projects convexity, but is not exact with respect to the L2 norm. Projects by iterating over pieces of piecewise linear function left to right and aligning current slope with previous one if it violates convexity. Args: heights: `(num_heights, units)`-shape tensor which represents heights. lengths: `(num_heights)`-shape tensor which represents lengths of segments which correspond to heights. convexity: -1 or 1 where 1 stands for convex function and -1 for concave. Returns: Projected heights. Strictly projects onto the given constraint, approximate w.r.t the L2 norm. Dykstra's algorithm gives us proper projection with respect to L2 norm but approaches it from "wrong" side. In order to ensure that constraints are strictly met we'll do approximate projections in the end which project strictly into feasible space, but it's not an exact projection with respect to the L2 norm. With enough iterations of the Dykstra's algorithm, the impact of such approximate projection should be negligible. With bound and convexity constraints and no specified monotonicity, this method does not fully satisfy the constrains. Increasing the number of iterations can reduce the constraint violation in such cases. Fortunately it does not seem to be common config. Args: bias: `(1, units)`-shape tensor which represents bias. heights: `(num_heights, units)`-shape tensor which represents heights. monotonicity: 1 for increasing, -1 for decreasing, 0 for no monotonicity constraints. output_min: Lower bound constraint of PWL calibration layer. output_max: Upper bound constraint of PWL calibration layer. output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's minimum value. output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` describing the constraints on the layer's maximum value. convexity: 1 for convex, -1 for concave, 0 for no convexity constraints. lengths: Lengths of pieces of piecewise linear function. Needed only if convexity projection is specified. Returns: Projected weights tensor. # Convexity and monotonicity projections don't violate each other, but both # might lead to bounds violation, so do them first and fix bounds after. # Both monotonicity and convexity projection can only increase upper bound # so we only need to take care of decreasing it back. # This bounds projection might violate convexity. Unfortunately bounds # projections with convexity and without monotonicity are are difficult to # achieve strictly and might be violated. so ignore this for now. In order # to minimize projection error consider increasing # num_projection_iterations. Asserts that 'outputs' satisfiy constraints. Args: outputs: Tensor of shape `(num_output_values, units)` which represents outputs of pwl calibration layer which will be tested against the given constraints. If monotonicity is specified these outputs must be for consequtive inputs. monotonicity: One of {-1, 0, 1}. -1 for decreasing, 1 for increasing 0 means no monotonicity checks. output_min: Lower bound or None. 
output_max: Upper bound or None. clamp_min: Whether one of outputs must match output_min. clamp_max: Whther one of outputs must match output_max. debug_tensors: None or list of anything convertible to tensor (for example tensors or strings) which will be printed in case of constraints violation. eps: Allowed constraints violation. Raises: ValueError: If monotonicity is not one of {-1, 0, 1} Returns: List of assertion ops in graph mode or immideately asserts in eager mode. Verifies that all given hyperparameters are consistent. See PWLCalibration class level comment for detailed description of arguments. Args: input_keypoints: `input_keypoints` of PWLCalibration layer. output_min: Smallest output of PWLCalibration layer. output_max: Largest output of PWLCalibration layer. monotonicity: `monotonicity` hyperparameter of PWLCalibration layer. convexity: `convexity` hyperparameter of PWLCalibration layer. is_cyclic: `is_cyclic` hyperparameter of PWLCalibration layer. lengths: Lengths of pieces of piecewise linear function. weights_shape: Shape of weights of PWLCalibration layer. Raises: ValueError: If something is inconsistent. # It also raises errors if monotonicities specified incorrectly. Converts string constants representing monotonicity into integers. Args: monotonicity: monotonicity hyperparameter of `PWLCalibration` layer. Raises: ValueError if monotonicity is invalid. Returns: monotonicity represented as -1, 0 or 1. Converts string constants representing convexity into integers. Args: convexity: convexity hyperparameter of `PWLCalibration` layer. Raises: ValueError if convexity is invalid. Returns: convexity represented as -1, 0 or 1. | 2.295175 | 2 |
JumpscaleLibsExtra/tools/threefold_simulation/notebooks/hardware/supermicro_compute.py | threefoldtech/jumpscaleX_libs_extra | 1 | 6632834 | from Jumpscale import j
# as used in bancadati farm, compute frontend to archive
def bom_calc(environment):
from hardware.components.components_supermicro import bom_populate
environment.bom = bom_populate(environment.bom)
# see the bill of material sheet to define the devices
# we get a device starting from a template
server = environment.bom.device_get("archive_compute_frontend", environment=environment)
# the switch added to the node
switch = environment.bom.device_get("switch_48", environment=environment)
# an environment to simulate the overhead per node (eg. 1 switch per node)
environment.device_node_add("compute", server, 20)
environment.device_overhead_add("switch", switch, 2)
    server.calc(bom=environment.bom)
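# Hedged usage sketch (not part of the original notebook): how this helper is
# typically wired up. The environment factory below is illustrative only and
# not a real Jumpscale API call.
# environment = simulation.environment_get("bancadati_archive")  # hypothetical
# bom_calc(environment)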
| en | 0.833211 | # as used in bancadati farm, compute frontend to archive # see the bill of material sheet to define the devices # we get a device starting from a template # the switch added to the node # an environment to simulate the overhead per node (eg. 1 switch per node) | 2.491335 | 2 |
libraries/botbuilder-ai/botbuilder/ai/qna/models/ranker_types.py | Fl4v/botbuilder-python | 388 | 6632835 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class RankerTypes:
""" Default Ranker Behaviour. i.e. Ranking based on Questions and Answer. """
DEFAULT = "Default"
""" Ranker based on question Only. """
QUESTION_ONLY = "QuestionOnly"
""" Ranker based on Autosuggest for question field only. """
AUTO_SUGGEST_QUESTION = "AutoSuggestQuestion"
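# Hedged usage sketch (not part of the original module): RankerTypes values are
# plain strings that are passed through QnAMaker query options. The import and
# keyword argument below are assumed from botbuilder-ai; check the installed
# version for the exact signature.
# from botbuilder.ai.qna import QnAMakerOptions
# options = QnAMakerOptions(ranker_type=RankerTypes.QUESTION_ONLY)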
| en | 0.935715 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. Default Ranker Behaviour. i.e. Ranking based on Questions and Answer. Ranker based on question Only. Ranker based on Autosuggest for question field only. | 1.55711 | 2 |
asv/console.py | jni/asv | 0 | 6632836 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A set of utilities for writing output to the console.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import codecs
import contextlib
import locale
import logging
import os
import sys
import textwrap
import time
import six
from six.moves import xrange, input
WIN = (os.name == "nt")
def isatty(file):
"""
Returns `True` if `file` is a tty.
Most built-in Python file-like objects have an `isatty` member,
but some user-defined types may not, so this assumes those are not
ttys.
"""
if hasattr(file, 'isatty'):
return file.isatty()
return False
def _decode_preferred_encoding(s):
"""
Decode the supplied byte string using the preferred encoding for
the locale (`locale.getpreferredencoding`) or, if the default
encoding is invalid, fall back first on utf-8, then on latin-1 if
the message cannot be decoded with utf-8.
"""
if six.PY3 and isinstance(s, bytes):
enc = locale.getpreferredencoding()
try:
try:
return s.decode(enc)
except LookupError:
enc = 'utf-8'
return s.decode(enc)
except UnicodeDecodeError:
return s.decode('latin-1', 'replace')
return s
def _color_text(text, color):
"""
Returns a string wrapped in ANSI color codes for coloring the
text in a terminal::
colored_text = color_text('Here is a message', 'blue')
    This won't actually affect the text until it is printed to the
terminal.
Parameters
----------
text : str
The string to return, bounded by the color codes.
color : str
An ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
"""
color_mapping = {
'black': '0;30',
'red': '0;31',
'green': '0;32',
'brown': '0;33',
'blue': '0;34',
'magenta': '0;35',
'cyan': '0;36',
'lightgrey': '0;37',
'default': '0;39',
'darkgrey': '1;30',
'lightred': '1;31',
'lightgreen': '1;32',
'yellow': '1;33',
'lightblue': '1;34',
'lightmagenta': '1;35',
'lightcyan': '1;36',
'white': '1;37'}
color_code = color_mapping.get(color, '0;39')
return '\033[{0}m{1}\033[0m'.format(color_code, text)
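# Hedged illustration (not part of the original module): the returned string is
# just the message wrapped in ANSI escape codes.
def _example_color_text():
    assert _color_text("ok", "green") == "\033[0;32mok\033[0m"
    # Unknown color names fall back to the terminal default code '0;39'.
    assert _color_text("ok", "no-such-color") == "\033[0;39mok\033[0m"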
# This is a table of Unicode characters that we want to have
# reasonable representations in ascii so they aren't just replaced
# with '?'. A complete solution to this problem would involve a
# third-party library such as "unidecode", but this handles the common
# cases of stuff coming from asv.
#
# You can find the characters that need an entry using:
# grep -P -n '[^\x00-\x7F]' -r *
# in the `asv` source directory.
_unicode_translations = {
ord('μ'): 'u',
ord('·'): '-',
ord('±'): '~'
}
def _write_with_fallback(s, write, fileobj):
"""
Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding
in case of a UnicodeEncodeError. Failing that attempt to write
with 'utf-8' or 'latin-1'. *fileobj* can be text or byte stream,
*s* can be unicode or bytes.
"""
try:
write(s)
return write
except (UnicodeEncodeError, TypeError):
# Let's try the next approach...
pass
enc = locale.getpreferredencoding()
try:
Writer = codecs.getwriter(enc)
except LookupError:
Writer = codecs.getwriter('utf-8')
if isinstance(fileobj, io.TextIOBase):
# Get the byte stream
fileobj = fileobj.buffer
if six.PY3 and isinstance(s, bytes):
# Writers expect unicode input
s = _decode_preferred_encoding(s)
f = Writer(fileobj)
write = f.write
try:
write(s)
return write
except UnicodeEncodeError:
Writer = codecs.getwriter('latin-1')
f = Writer(fileobj)
write = f.write
if six.PY3:
s = s.translate(_unicode_translations)
else:
for key, val in _unicode_translations.iteritems():
s = s.replace(unichr(key), val)
# If this doesn't work let the exception bubble up; I'm out of ideas
try:
write(s)
return write
except UnicodeEncodeError:
write(s.encode('ascii', 'replace').decode('ascii'))
return write
def color_print(*args, **kwargs):
"""
    Prints colors and styles to the terminal using ANSI escape
sequences.
::
color_print('This is the color ', 'default', 'GREEN', 'green')
Parameters
----------
positional args : str
The positional arguments come in pairs (*msg*, *color*), where
*msg* is the string to display and *color* is the color to
display it in.
*color* is an ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
file : writeable file-like object, optional
Where to write to. Defaults to `sys.stdout`. If file is not
a tty (as determined by calling its `isatty` member, if one
exists), no coloring will be included.
end : str, optional
The ending of the message. Defaults to ``\\n``. The end will
be printed after resetting any color or font state.
"""
file = kwargs.get('file', sys.stdout)
end = kwargs.get('end', '\n')
write = file.write
if isatty(file) and not WIN:
for i in xrange(0, len(args), 2):
msg = args[i]
if i + 1 == len(args):
color = ''
else:
color = args[i + 1]
if color:
msg = _color_text(msg, color)
msg = _decode_preferred_encoding(msg)
write = _write_with_fallback(msg, write, file)
write(end)
else:
for i in xrange(0, len(args), 2):
msg = args[i]
msg = _decode_preferred_encoding(msg)
write = _write_with_fallback(msg, write, file)
write(end)
def get_answer_default(prompt, default, use_defaults=False):
color_print("{0} [{1}]: ".format(prompt, default), end='')
if use_defaults:
return default
x = input()
if x.strip() == '':
return default
return x
def truncate_left(s, l):
if len(s) > l:
return '...' + s[-(l - 3):]
else:
return s
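# Hedged illustration (not part of the original module): truncation keeps the
# right-hand end of the string and prefixes an ellipsis.
def _example_truncate_left():
    assert truncate_left("abcdefghij", 8) == "...fghij"
    assert truncate_left("short", 8) == "short"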
class Log(object):
def __init__(self):
self._indent = 1
self._total = 0
self._count = 0
self._logger = logging.getLogger()
self._needs_newline = False
self._last_dot = time.time()
def _stream_formatter(self, record):
'''
The formatter for standard output
'''
if self._needs_newline:
color_print('')
parts = record.msg.split('\n', 1)
first_line = parts[0]
if len(parts) == 1:
rest = None
else:
rest = parts[1]
if self._total:
color_print('[{0:6.02f}%] '.format(
(float(self._count) / self._total) * 100.0), end='')
color_print('·' * self._indent, end='')
color_print(' ', end='')
if record.levelno < logging.DEBUG:
color = 'default'
elif record.levelno < logging.INFO:
color = 'default'
elif record.levelno < logging.WARN:
if self._indent == 1:
color = 'green'
elif self._indent == 2:
color = 'blue'
else:
color = 'default'
elif record.levelno < logging.ERROR:
color = 'brown'
else:
color = 'red'
indent = self._indent + 11
spaces = ' ' * indent
color_print(first_line, color, end='')
if rest is not None:
color_print('')
detail = textwrap.dedent(rest)
for line in detail.split('\n'):
color_print(spaces, end='')
color_print(line)
self._needs_newline = True
sys.stdout.flush()
@contextlib.contextmanager
def indent(self):
"""
A context manager to increase the indentation level.
"""
self._indent += 1
yield
self._indent -= 1
def dot(self):
if isatty(sys.stdout):
if time.time() > self._last_dot + 1.0:
color_print('.', 'darkgrey', end='')
sys.stdout.flush()
self._last_dot = time.time()
def set_nitems(self, n):
"""
Set the number of items in a lengthy process. Each of these
steps should be incremented through using `step`.
"""
self._total = n
def step(self):
"""
Write that a step has been completed. A percentage is
displayed along with it.
"""
self._count += 1
def enable(self, verbose=False):
sh = logging.StreamHandler()
sh.emit = self._stream_formatter
self._logger.addHandler(sh)
if verbose:
self._logger.setLevel(logging.DEBUG)
else:
self._logger.setLevel(logging.INFO)
@contextlib.contextmanager
def set_level(self, level):
orig_level = self._logger.level
self._logger.setLevel(level)
try:
yield
finally:
self._logger.setLevel(orig_level)
def is_debug_enabled(self):
return self._logger.getEffectiveLevel() <= logging.DEBUG
def info(self, *args, **kwargs):
self._logger.info(*args, **kwargs)
def warn(self, *args, **kwargs):
self._logger.warn(*args, **kwargs)
def debug(self, *args, **kwargs):
self._logger.debug(*args, **kwargs)
def error(self, *args, **kwargs):
self._logger.error(*args, **kwargs)
def add(self, msg):
_write_with_fallback(msg, sys.stdout.write, sys.stdout)
sys.stdout.flush()
log = Log()
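# Hedged usage sketch (not part of the original module): typical flow with the
# module-level `log` singleton defined above.
# log.enable(verbose=True)
# log.set_nitems(2)
# with log.indent():
#     log.info("running benchmarks")
#     log.step()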
| en | 0.769586 | # -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst A set of utilities for writing output to the console. Returns `True` if `file` is a tty. Most built-in Python file-like objects have an `isatty` member, but some user-defined types may not, so this assumes those are not ttys. Decode the supplied byte string using the preferred encoding for the locale (`locale.getpreferredencoding`) or, if the default encoding is invalid, fall back first on utf-8, then on latin-1 if the message cannot be decoded with utf-8. Returns a string wrapped in ANSI color codes for coloring the text in a terminal:: colored_text = color_text('Here is a message', 'blue') This won't actually effect the text until it is printed to the terminal. Parameters ---------- text : str The string to return, bounded by the color codes. color : str An ANSI terminal color name. Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string). # This is a table of Unicode characters that we want to have # reasonable representations in ascii so they aren't just replaced # with '?'. A complete solution to this problem would involve a # third-party library such as "unidecode", but this handles the common # cases of stuff coming from asv. # # You can find the characters that need an entry using: # grep -P -n '[^\x00-\x7F]' -r * # in the `asv` source directory. Write the supplied string with the given write function like ``write(s)``, but use a writer for the locale's preferred encoding in case of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or 'latin-1'. *fileobj* can be text or byte stream, *s* can be unicode or bytes. # Let's try the next approach... # Get the byte stream # Writers expect unicode input # If this doesn't work let the exception bubble up; I'm out of ideas Prints colors and styles to the terminal uses ANSI escape sequences. :: color_print('This is the color ', 'default', 'GREEN', 'green') Parameters ---------- positional args : str The positional arguments come in pairs (*msg*, *color*), where *msg* is the string to display and *color* is the color to display it in. *color* is an ANSI terminal color name. Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string). file : writeable file-like object, optional Where to write to. Defaults to `sys.stdout`. If file is not a tty (as determined by calling its `isatty` member, if one exists), no coloring will be included. end : str, optional The ending of the message. Defaults to ``\\n``. The end will be printed after resetting any color or font state. The formatter for standard output A context manager to increase the indentation level. Set the number of items in a lengthy process. Each of these steps should be incremented through using `step`. Write that a step has been completed. A percentage is displayed along with it. | 3.233775 | 3 |
controller/annotations.py | euxhenh/cellar | 0 | 6632837 | import dash
import numpy as np
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from app import app, dbroot, logger
from .multiplexer import MultiplexerOutput
from .notifications import _prep_notification
def _fill_annotation(adata, cluster_id, value):
"""
Set the annotation for all points with label 'cluster_id' to 'value'.
"""
if isinstance(cluster_id, list):
cluster_id = cluster_id[0]
if isinstance(value, list):
value = value[0]
value = str(value)[:200]
if cluster_id.startswith('main-cluster'):
cluster_id = cluster_id[len('main-cluster'):]
elif cluster_id.startswith('side-cluster'):
cluster_id = cluster_id[len('side-cluster'):]
cluster_id = int(cluster_id)
if 'annotations' not in adata.obs:
adata.obs['annotations'] = np.array(
[""] * adata.shape[0], dtype='U200')
annotations_copy = adata.obs['annotations'].to_numpy().copy()
annotations_copy[adata.obs['labels'].to_numpy() == cluster_id] = value
adata.obs['annotations'] = annotations_copy
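# Hedged illustration (not part of the original module): exercises
# _fill_annotation with a minimal stand-in exposing only the pieces the helper
# touches (an `.obs` table and a `.shape`); purely illustrative.
def _example_fill_annotation():
    import types
    import pandas as pd
    fake = types.SimpleNamespace(
        shape=(4, 2),
        obs=pd.DataFrame({'labels': [0, 0, 1, 1]}))
    _fill_annotation(fake, 'main-cluster1', 'T cells')
    return list(fake.obs['annotations'])  # ['', '', 'T cells', 'T cells']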
@app.callback(
Output("annotation-signal", "data"),
MultiplexerOutput("push-notification", "data"),
Input("annotation-store-btn", "n_clicks"),
State("main-annotation-select", "value"),
State("side-annotation-select", "value"),
State("annotation-input", "value"),
State("active-plot", "data"),
prevent_initial_call=True
)
def signal_annotation_change(n1, id1, id2, value, actp):
ctx = dash.callback_context
if not ctx.triggered or n1 is None:
raise PreventUpdate
an = 'a1' if actp == 1 else 'a2'
if an not in dbroot.adatas:
raise PreventUpdate
if 'adata' not in dbroot.adatas[an]:
raise PreventUpdate
if len(value) == 0:
return dash.no_update, _prep_notification(
"Empty annotation field.", "warning")
cluster_id = id1 if actp == 1 else id2
try:
_fill_annotation(dbroot.adatas[an]['adata'], cluster_id, value)
except Exception as e:
logger.error(str(e))
error_msg = "An error occurred when storing annotations."
logger.error(error_msg)
return dash.no_update, _prep_notification(error_msg, "danger")
return 1, dash.no_update
def get_update_annotation_table(prefix, an):
def _func(s1, s2, s3):
"""
Return a table of annotations based on the keys
obs['labels'] and obs['annotations']. Only the clusters
for which annotations exist (i.e., != "") will be displayed.
"""
ctx = dash.callback_context
if not ctx.triggered:
raise PreventUpdate
data = [
{
"cluster_id": "N/A",
"annotation": "N/A"
}
]
if an not in dbroot.adatas:
return data
if 'adata' not in dbroot.adatas[an]:
return data
# Need labels and annotations keys to be populated
if 'labels' not in dbroot.adatas[an]['adata'].obs:
# logger.warn("No labels found in adata.")
return data
if 'annotations' not in dbroot.adatas[an]['adata'].obs:
# logger.warn("No annotations found in adata.")
return data
# Get cluster ID's and the first index for each
# so that we can also get annotations
unq_labels, unq_indices = np.unique(
dbroot.adatas[an]['adata'].obs['labels'].to_numpy(),
return_index=True)
unq_annotations = dbroot.adatas[an][
'adata'][unq_indices].obs['annotations']
data = [
{'cluster_id': str(i), 'annotation': str(j)}
for i, j in zip(unq_labels, unq_annotations)
if j != ""
]
return data
return _func
for prefix, an in zip(['main', 'side'], ['a1', 'a2']):
app.callback(
Output(prefix + "-annotation-table", "data"),
Input("annotation-signal", "data"),
Input("data-loaded-annotation-table-signal", "data"),
Input(prefix + "-cluster-list-signal", "data"),
prevent_initial_call=True
)(get_update_annotation_table(prefix, an))
| en | 0.786363 | Set the annotation for all points with label 'cluster_id' to 'value'. Return a table of annotations based on the keys obs['labels'] and obs['annotations']. Only the clusters for which annotations exist (i.e., != "") will be displayed. # Need labels and annotations keys to be populated # logger.warn("No labels found in adata.") # logger.warn("No annotations found in adata.") # Get cluster ID's and the first index for each # so that we can also get annotations | 2.19364 | 2 |
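A minimal, self-contained sketch of the annotation-fill step used in the record above, with a plain pandas DataFrame standing in for adata.obs so it runs without Dash or AnnData; the column names mirror the original, everything else here is illustrative.

import numpy as np
import pandas as pd

# Mirror of the _fill_annotation step: write 'value' into every row whose label
# matches cluster_id.  A plain DataFrame stands in for adata.obs here.
def fill_annotation(obs, cluster_id, value):
    value = str(value)[:200]                      # annotations are capped at 200 chars
    if 'annotations' not in obs:
        obs['annotations'] = np.array([""] * len(obs), dtype='U200')
    annotations = obs['annotations'].to_numpy().copy()
    annotations[obs['labels'].to_numpy() == int(cluster_id)] = value
    obs['annotations'] = annotations

obs = pd.DataFrame({'labels': [0, 0, 1, 2, 1]})
fill_annotation(obs, 1, "T cells")
print(obs)   # rows labelled 1 now carry "T cells", all others stay empty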
pytar/__init__.py | douglas-archives/pytar | 0 | 6632838 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0.dev2'
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0.dev2'
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.023668 | 1 |
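The record above is only package metadata; a common way such a dunder version string gets consumed is a regex read in setup.py, sketched below (this helper is hypothetical and not part of the pytar repository).

import re

# Hypothetical helper: read the version string out of pytar/__init__.py without
# importing the package, as setup.py files commonly do.
def read_version(path="pytar/__init__.py"):
    with open(path) as handle:
        match = re.search(r"^__version__\s*=\s*['\"]([^'\"]+)['\"]", handle.read(), re.M)
    if match is None:
        raise RuntimeError("version string not found")
    return match.group(1)

print(read_version())   # 0.1.0.dev2, given the file above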
glue_genomics_viewers/genome_track/qt/data_viewer.py | gluesolutions/glue-genomics-viewers | 0 | 6632839 | import logging
import os
from echo import delay_callback
from matplotlib.axes._base import _TransformedBoundsLocator
import numpy as np
import pandas as pd
from glue.core import Subset
from glue.utils import nonpartial, defer_draw, decorate_all_methods
from glue.viewers.matplotlib.qt.data_viewer import MatplotlibDataViewer
import coolbox.api as cb
from coolbox.utilities.bed import ReadBed
from qtpy import QtWidgets
from ...data import BedgraphData, BedGraph, GenomeRange
from ...subsets import GenomicRangeSubsetState
from .layer_style_editor import GenomeTrackLayerStyleEditor
from .options_widget import GenomeTrackOptionsWidget
from ..layer_artist import GenomeProfileLayerArtist, GenomeLoopLayerArtist, GenomeTrackLayerArtist
from ..state import GenomeTrackState
from ..utils import PanTrackerMixin
__all__ = ['GenomeTrackViewer']
@decorate_all_methods(defer_draw)
class GenomeTrackViewer(MatplotlibDataViewer, PanTrackerMixin):
LABEL = 'Genome Track Viewer'
_layer_style_widget_cls = GenomeTrackLayerStyleEditor
_options_cls = GenomeTrackOptionsWidget
_state_cls = GenomeTrackState
large_data_size = 2e7
tools = ['select:xrange']
def __init__(self, session, parent=None, state=None):
super().__init__(session, parent=parent, state=state)
self._layer_artist_container.on_changed(self.reflow_tracks)
self.init_pan_tracking(self.axes)
self._setup_annotation_track()
self.state.add_callback('chr', self._redraw_annotation)
self.state.add_callback('start', self._redraw_annotation)
self.state.add_callback('end', self._redraw_annotation)
self.state.add_callback('show_annotations', self.reflow_tracks)
self._setup_zoom_to_layer_action()
def _setup_zoom_to_layer_action(self):
layer_artist_view = self._view.layer_list
act = QtWidgets.QAction('Zoom to Layer', layer_artist_view)
act.triggered.connect(nonpartial(self._zoom_to_layer))
layer_artist_view.addAction(act)
def _zoom_to_layer(self):
layer = self._view.layer_list.current_artist().layer
if not isinstance(layer, Subset):
return
try:
chr, start, end = layer.subset_state.extent()
except AttributeError:
return
with delay_callback(self.state, 'chr', 'start', 'end'):
self.state.chr = chr.lstrip('chr')
self.state.start = start
self.state.end = end
def _zoom_to_data_bounds(self, data):
bounds = data.get_chrom_bounds()
if not bounds: # empty dataset
return
with delay_callback(self.state, 'chr', 'start', 'end'):
chr, start, end = bounds[0]
self.state.chr = chr.lstrip('chr')
self.state.start = start
self.state.end = start + (end - start) / 1
def get_data_layer_artist(self, layer=None, layer_state=None):
cls = GenomeProfileLayerArtist if isinstance(layer, BedgraphData) else GenomeLoopLayerArtist
result = self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
result.state.add_callback('zorder', self.reflow_tracks)
result.state.add_callback('visible', self.reflow_tracks)
# Update view bounds on first layer
if not len(self._layer_artist_container):
self._zoom_to_data_bounds(layer.data)
return result
def get_subset_layer_artist(self, layer=None, layer_state=None):
data = layer.data if layer else None
cls = GenomeProfileLayerArtist if isinstance(data, BedgraphData) else GenomeLoopLayerArtist
result = self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
result.state.add_callback('zorder', self.reflow_tracks)
result.state.add_callback('visible', self.reflow_tracks)
return result
def on_pan_end(self):
self.reflow_tracks()
def apply_roi(self, roi, override_mode=None):
if len(self.layers) == 0:
return
x = roi.to_polygon()[0]
chr = self.state.chr
if not chr.startswith('chr'):
chr = 'chr' + chr
start, end = min(x), max(x)
state = GenomicRangeSubsetState(chr, start, end)
self.apply_subset_state(state, override_mode=override_mode)
def _setup_annotation_track(self):
self.annotations_ax = GenomeTrackLayerArtist._setup_track_axes(
self.axes, 'annotations', self.state)
annotation_path = os.environ.get('GLUEGENES_GENE_FILE', 'gencodeVM23_bed12.bed.bgz')
if os.path.exists(annotation_path) and annotation_path is not None:
pass
else:
annotation_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),'..','data','gencodeVM23_bed12.bed.bgz')
print(f'ANNOTATION PATH: {annotation_path}')
self.annotation = cb.BED(
annotation_path,
num_rows=16,
gene_style='simple',
bed_type='bed12',
fontsize=8,
)
self.annotations_ax.get_yaxis().set_visible(False)
self.annotations_ax.spines['left'].set_visible(False)
def _redraw_annotation(self, *args):
if self.panning:
return
self.annotations_ax.set_visible(self.state.show_annotations)
if not self.state.show_annotations:
self.axes.figure.canvas.draw_idle()
return
self.annotations_ax.clear()
start = max(int(self.state.start), 1)
end = max(int(self.state.end), 1)
start, end = min(start, end), max(start, end)
if end - start < 5e6: #Annotations over very large regions are slow and illegible
gr = GenomeRange('chr' + self.state.chr, start, end)
intervals = BedGraph._tabix_query(self.annotation.properties['file'], gr)
else:
intervals = None
if intervals:
parsed = list(ReadBed('\t'.join(l) for l in intervals))
bed_type = 'bed12'
df = self.annotation.intervals2dataframe(parsed, bed_type)
df.bed_type = bed_type
else:
df = pd.DataFrame()
self.annotation.is_draw_labels = True
self.annotation.plot_genes(
self.annotations_ax,
cb.GenomeRange(f"{self.state.chr}:{start}-{end}"),
df
)
self.axes.figure.canvas.draw_idle()
def reflow_tracks(self, *args):
"""
Reorder each track top->bottom sorted by zorder, removing any gaps of non-visible tracks.
"""
axes = {}
for artist in self._layer_artist_container.artists:
if not artist.state.visible:
continue
axes[artist.track_axes] = min(axes.get(artist.track_axes, np.inf), artist.zorder)
axes = list(sorted(axes, key=axes.get))
if self.state.show_annotations:
axes = [self.annotations_ax] + axes
track_cnt = len(axes)
height = 0.9 / track_cnt
for i, ax in enumerate(axes):
bounds = _TransformedBoundsLocator([0, i / track_cnt, 1, height], ax.axes.transAxes)
ax.set_axes_locator(bounds)
self._redraw_annotation()
| import logging
import os
from echo import delay_callback
from matplotlib.axes._base import _TransformedBoundsLocator
import numpy as np
import pandas as pd
from glue.core import Subset
from glue.utils import nonpartial, defer_draw, decorate_all_methods
from glue.viewers.matplotlib.qt.data_viewer import MatplotlibDataViewer
import coolbox.api as cb
from coolbox.utilities.bed import ReadBed
from qtpy import QtWidgets
from ...data import BedgraphData, BedGraph, GenomeRange
from ...subsets import GenomicRangeSubsetState
from .layer_style_editor import GenomeTrackLayerStyleEditor
from .options_widget import GenomeTrackOptionsWidget
from ..layer_artist import GenomeProfileLayerArtist, GenomeLoopLayerArtist, GenomeTrackLayerArtist
from ..state import GenomeTrackState
from ..utils import PanTrackerMixin
__all__ = ['GenomeTrackViewer']
@decorate_all_methods(defer_draw)
class GenomeTrackViewer(MatplotlibDataViewer, PanTrackerMixin):
LABEL = 'Genome Track Viewer'
_layer_style_widget_cls = GenomeTrackLayerStyleEditor
_options_cls = GenomeTrackOptionsWidget
_state_cls = GenomeTrackState
large_data_size = 2e7
tools = ['select:xrange']
def __init__(self, session, parent=None, state=None):
super().__init__(session, parent=parent, state=state)
self._layer_artist_container.on_changed(self.reflow_tracks)
self.init_pan_tracking(self.axes)
self._setup_annotation_track()
self.state.add_callback('chr', self._redraw_annotation)
self.state.add_callback('start', self._redraw_annotation)
self.state.add_callback('end', self._redraw_annotation)
self.state.add_callback('show_annotations', self.reflow_tracks)
self._setup_zoom_to_layer_action()
def _setup_zoom_to_layer_action(self):
layer_artist_view = self._view.layer_list
act = QtWidgets.QAction('Zoom to Layer', layer_artist_view)
act.triggered.connect(nonpartial(self._zoom_to_layer))
layer_artist_view.addAction(act)
def _zoom_to_layer(self):
layer = self._view.layer_list.current_artist().layer
if not isinstance(layer, Subset):
return
try:
chr, start, end = layer.subset_state.extent()
except AttributeError:
return
with delay_callback(self.state, 'chr', 'start', 'end'):
self.state.chr = chr.lstrip('chr')
self.state.start = start
self.state.end = end
def _zoom_to_data_bounds(self, data):
bounds = data.get_chrom_bounds()
if not bounds: # empty dataset
return
with delay_callback(self.state, 'chr', 'start', 'end'):
chr, start, end = bounds[0]
self.state.chr = chr.lstrip('chr')
self.state.start = start
self.state.end = start + (end - start) / 1
def get_data_layer_artist(self, layer=None, layer_state=None):
cls = GenomeProfileLayerArtist if isinstance(layer, BedgraphData) else GenomeLoopLayerArtist
result = self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
result.state.add_callback('zorder', self.reflow_tracks)
result.state.add_callback('visible', self.reflow_tracks)
# Update view bounds on first layer
if not len(self._layer_artist_container):
self._zoom_to_data_bounds(layer.data)
return result
def get_subset_layer_artist(self, layer=None, layer_state=None):
data = layer.data if layer else None
cls = GenomeProfileLayerArtist if isinstance(data, BedgraphData) else GenomeLoopLayerArtist
result = self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
result.state.add_callback('zorder', self.reflow_tracks)
result.state.add_callback('visible', self.reflow_tracks)
return result
def on_pan_end(self):
self.reflow_tracks()
def apply_roi(self, roi, override_mode=None):
if len(self.layers) == 0:
return
x = roi.to_polygon()[0]
chr = self.state.chr
if not chr.startswith('chr'):
chr = 'chr' + chr
start, end = min(x), max(x)
state = GenomicRangeSubsetState(chr, start, end)
self.apply_subset_state(state, override_mode=override_mode)
def _setup_annotation_track(self):
self.annotations_ax = GenomeTrackLayerArtist._setup_track_axes(
self.axes, 'annotations', self.state)
annotation_path = os.environ.get('GLUEGENES_GENE_FILE', 'gencodeVM23_bed12.bed.bgz')
if os.path.exists(annotation_path) and annotation_path is not None:
pass
else:
annotation_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),'..','data','gencodeVM23_bed12.bed.bgz')
print(f'ANNOTATION PATH: {annotation_path}')
self.annotation = cb.BED(
annotation_path,
num_rows=16,
gene_style='simple',
bed_type='bed12',
fontsize=8,
)
self.annotations_ax.get_yaxis().set_visible(False)
self.annotations_ax.spines['left'].set_visible(False)
def _redraw_annotation(self, *args):
if self.panning:
return
self.annotations_ax.set_visible(self.state.show_annotations)
if not self.state.show_annotations:
self.axes.figure.canvas.draw_idle()
return
self.annotations_ax.clear()
start = max(int(self.state.start), 1)
end = max(int(self.state.end), 1)
start, end = min(start, end), max(start, end)
if end - start < 5e6: #Annotations over very large regions are slow and illegible
gr = GenomeRange('chr' + self.state.chr, start, end)
intervals = BedGraph._tabix_query(self.annotation.properties['file'], gr)
else:
intervals = None
if intervals:
parsed = list(ReadBed('\t'.join(l) for l in intervals))
bed_type = 'bed12'
df = self.annotation.intervals2dataframe(parsed, bed_type)
df.bed_type = bed_type
else:
df = pd.DataFrame()
self.annotation.is_draw_labels = True
self.annotation.plot_genes(
self.annotations_ax,
cb.GenomeRange(f"{self.state.chr}:{start}-{end}"),
df
)
self.axes.figure.canvas.draw_idle()
def reflow_tracks(self, *args):
"""
Reorder each track top->bottom sorted by zorder, removing any gaps of non-visible tracks.
"""
axes = {}
for artist in self._layer_artist_container.artists:
if not artist.state.visible:
continue
axes[artist.track_axes] = min(axes.get(artist.track_axes, np.inf), artist.zorder)
axes = list(sorted(axes, key=axes.get))
if self.state.show_annotations:
axes = [self.annotations_ax] + axes
track_cnt = len(axes)
height = 0.9 / track_cnt
for i, ax in enumerate(axes):
bounds = _TransformedBoundsLocator([0, i / track_cnt, 1, height], ax.axes.transAxes)
ax.set_axes_locator(bounds)
self._redraw_annotation()
| en | 0.87334 | # empty dataset # Update view bounds on first layer #Annotations over very large regions are slow and illegible Reorder each track top->bottom sorted by zorder, removing any gaps of non-visible tracks. | 1.833156 | 2 |
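The reflow_tracks method above stacks every visible track into equal-height slices of the figure; this stand-alone sketch reproduces just that layout arithmetic, with plain tuples standing in for matplotlib bounds locators so it runs without glue or a canvas.

# n visible tracks get equal slices of the figure, bottom to top, filling 0.9 of
# the height, exactly as in reflow_tracks.
def track_bounds(n_tracks, fill=0.9):
    height = fill / n_tracks
    return [(0.0, i / n_tracks, 1.0, height) for i in range(n_tracks)]

for x0, y0, width, height in track_bounds(3):
    print(x0, y0, width, height)   # axes-relative (x0, y0, w, h) for each track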
api/tacticalrmm/alerts/migrations/0001_initial.py | infinite8co/tacticalrmm | 903 | 6632840 | # Generated by Django 3.1 on 2020-08-15 15:31
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("agents", "0012_auto_20200810_0544"),
]
operations = [
migrations.CreateModel(
name="Alert",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("subject", models.TextField(blank=True, null=True)),
("message", models.TextField(blank=True, null=True)),
("alert_time", models.DateTimeField(blank=True, null=True)),
("snooze_until", models.DateTimeField(blank=True, null=True)),
("resolved", models.BooleanField(default=False)),
(
"agent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="agent",
to="agents.agent",
),
),
],
),
] | # Generated by Django 3.1 on 2020-08-15 15:31
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("agents", "0012_auto_20200810_0544"),
]
operations = [
migrations.CreateModel(
name="Alert",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("subject", models.TextField(blank=True, null=True)),
("message", models.TextField(blank=True, null=True)),
("alert_time", models.DateTimeField(blank=True, null=True)),
("snooze_until", models.DateTimeField(blank=True, null=True)),
("resolved", models.BooleanField(default=False)),
(
"agent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="agent",
to="agents.agent",
),
),
],
),
] | en | 0.81096 | # Generated by Django 3.1 on 2020-08-15 15:31 | 1.648244 | 2 |
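For readers unfamiliar with Django migrations, the operations above correspond roughly to a model of the following shape. This is a reconstruction for illustration only (the real Alert model in tacticalrmm may differ), and it has to live inside an installed app such as alerts/models.py before `python manage.py makemigrations` would regenerate a migration like the one shown.

from django.db import models

class Alert(models.Model):
    # Approximate source model for the auto-generated migration above.
    subject = models.TextField(blank=True, null=True)
    message = models.TextField(blank=True, null=True)
    alert_time = models.DateTimeField(blank=True, null=True)
    snooze_until = models.DateTimeField(blank=True, null=True)
    resolved = models.BooleanField(default=False)
    agent = models.ForeignKey(
        "agents.Agent",
        related_name="agent",
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )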
teuthology/task/clock.py | julpark-rh/teuthology | 117 | 6632841 | <reponame>julpark-rh/teuthology<filename>teuthology/task/clock.py<gh_stars>100-1000
"""
Clock synchronizer
"""
import logging
import contextlib
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Sync or skew clock
This will initially sync the clocks. Eventually it should let us also
skew by some number of seconds.
example::
tasks:
- clock:
- ceph:
- interactive:
to sync.
:param ctx: Context
:param config: Configuration
"""
log.info('Syncing clocks and checking initial clock skew...')
run.wait(
ctx.cluster.run(
args = [
'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'),
'sudo', 'systemctl', 'stop', 'ntpd.service', run.Raw('||'),
'sudo', 'systemctl', 'stop', 'chronyd.service',
run.Raw(';'),
'sudo', 'ntpd', '-gq', run.Raw('||'),
'sudo', 'chronyc', 'makestep',
run.Raw(';'),
'sudo', 'systemctl', 'start', 'ntp.service', run.Raw('||'),
'sudo', 'systemctl', 'start', 'ntpd.service', run.Raw('||'),
'sudo', 'systemctl', 'start', 'chronyd.service',
run.Raw(';'),
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
timeout = 360,
wait=False,
)
)
try:
yield
finally:
log.info('Checking final clock skew...')
run.wait(
ctx.cluster.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
wait=False,
)
)
@contextlib.contextmanager
def check(ctx, config):
"""
Run ntpq at the start and the end of the task.
:param ctx: Context
:param config: Configuration
"""
log.info('Checking initial clock skew...')
run.wait(
ctx.cluster.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
wait=False,
)
)
try:
yield
finally:
log.info('Checking final clock skew...')
run.wait(
ctx.cluster.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
wait=False,
)
)
| """
Clock synchronizer
"""
import logging
import contextlib
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Sync or skew clock
This will initially sync the clocks. Eventually it should let us also
skew by some number of seconds.
example::
tasks:
- clock:
- ceph:
- interactive:
to sync.
:param ctx: Context
:param config: Configuration
"""
log.info('Syncing clocks and checking initial clock skew...')
run.wait(
ctx.cluster.run(
args = [
'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'),
'sudo', 'systemctl', 'stop', 'ntpd.service', run.Raw('||'),
'sudo', 'systemctl', 'stop', 'chronyd.service',
run.Raw(';'),
'sudo', 'ntpd', '-gq', run.Raw('||'),
'sudo', 'chronyc', 'makestep',
run.Raw(';'),
'sudo', 'systemctl', 'start', 'ntp.service', run.Raw('||'),
'sudo', 'systemctl', 'start', 'ntpd.service', run.Raw('||'),
'sudo', 'systemctl', 'start', 'chronyd.service',
run.Raw(';'),
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
timeout = 360,
wait=False,
)
)
try:
yield
finally:
log.info('Checking final clock skew...')
run.wait(
ctx.cluster.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
wait=False,
)
)
@contextlib.contextmanager
def check(ctx, config):
"""
Run ntpq at the start and the end of the task.
:param ctx: Context
:param config: Configuration
"""
log.info('Checking initial clock skew...')
run.wait(
ctx.cluster.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
wait=False,
)
)
try:
yield
finally:
log.info('Checking final clock skew...')
run.wait(
ctx.cluster.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
wait=False,
)
) | en | 0.73688 | Clock synchronizer Sync or skew clock This will initially sync the clocks. Eventually it should let us also skew by some number of seconds. example:: tasks: - clock: - ceph: - interactive: to sync. :param ctx: Context :param config: Configuration Run ntpq at the start and the end of the task. :param ctx: Context :param config: Configuration | 2.837746 | 3 |
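The clock task follows teuthology's usual contextmanager shape: everything before the yield runs at task start, the finally block runs at teardown. A minimal stand-alone illustration of that pattern, with logging standing in for the remote ntpq/chronyc calls:

import contextlib
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")

@contextlib.contextmanager
def demo_task(ctx, config):
    log.info("Syncing clocks and checking initial clock skew...")   # setup stage
    try:
        yield
    finally:
        log.info("Checking final clock skew...")                    # teardown stage

with demo_task(ctx=None, config=None):
    log.info("nested tasks (ceph, interactive, ...) would run here")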
addvalue_id.py | Theropod/seaice-data-web-visualization | 0 | 6632842 | <reponame>Theropod/seaice-data-web-visualization
import json
inputGeojson='./asi-AMSR2-n6250-20141201-v5.4.reprojected.geo.json'
outputGeojson='asi-AMSR2-n6250-20141201-v5.4.reprojected_values.geo.json'
with open(inputGeojson, 'r') as f:
gdal_json = json.load(f)
i=0
for feature in gdal_json["features"]:
feature["id"]=i
i+=1
feature["value"]=feature["properties"]["DN"]
with open(outputGeojson, 'w') as f:
json.dump(gdal_json,f) | import json
inputGeojson='./asi-AMSR2-n6250-20141201-v5.4.reprojected.geo.json'
outputGeojson='asi-AMSR2-n6250-20141201-v5.4.reprojected_values.geo.json'
with open(inputGeojson, 'r') as f:
gdal_json = json.load(f)
i=0
for feature in gdal_json["features"]:
feature["id"]=i
i+=1
feature["value"]=feature["properties"]["DN"]
with open(outputGeojson, 'w') as f:
json.dump(gdal_json,f) | none | 1 | 2.450126 | 2 |
|
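The script above only adds an id and a value field to each GeoJSON feature; here is the same transformation on an in-memory FeatureCollection, so it can be tried without the AMSR2 input file (the DN values below are made up).

import json

# Two dummy features stand in for the AMSR2 polygons.
gdal_json = {"type": "FeatureCollection", "features": [
    {"type": "Feature", "properties": {"DN": 87}, "geometry": None},
    {"type": "Feature", "properties": {"DN": 12}, "geometry": None},
]}
for i, feature in enumerate(gdal_json["features"]):
    feature["id"] = i                                  # sequential id, as in the script
    feature["value"] = feature["properties"]["DN"]     # value mirrors the DN property
print(json.dumps(gdal_json, indent=2))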
bin/bin_onePT/extra/mivr-4-fit-zAll.py | JohanComparat/nbody-npt-functions | 4 | 6632843 | import glob
import sys
import cPickle
from os.path import join
import numpy as n
import astropy.io.fits as fits
import os
import matplotlib
#matplotlib.use('pdf')
matplotlib.rcParams['font.size']=12
import matplotlib.pyplot as p
from scipy.optimize import minimize
dir='..'
dir_04 = join(dir,"MD_0.4Gpc")
dir_10 = join(dir,"MD_1Gpc")
dir_25 = join(dir,"MD_2.5Gpc")
dir_40 = join(dir,"MD_4Gpc")
dir_25N = join(dir,"MD_2.5GpcNW")
dir_40N = join(dir,"MD_4GpcNW")
data = fits.open( join("..", "M200c", "MD_M200c_summary.fits") )[1].data
NDecimal = 3
errorLog = 0.03
NminCount = 10
Npmin = 300
limits_04 = [Npmin*9.63 * 10**7, 5e12]
limits_10 = [Npmin*1.51 * 10**9., 5e13]
limits_25 = [Npmin*2.359 * 10**10., 5e14]
limits_40 = [Npmin* 9.6 * 10**10. , 5e15]
MPART = n.array([9.63 * 10**7, 1.51 * 10**9, 2.359 * 10**10, 9.6 * 10**10])
names = n.array(["SMD", "MDPL", "BigMD", "HMD", "BigMDNW", "HMDNW"])
zmin = -0.01
zmax = 0.01
def fitDataAll(qty = 'M200c', cos = "cen", zmin = -0.01, zmax = 2.3, p0 = n.array([ 0.65, 12.2, -23.3, -0.9, -9.8, 14.9, -0.2, 1.23, -6.7, -11.6, 0.03, -0.33, 1.3 ])):
"""
Plots the data to be used in the fits later in the analysis.
"""
# redshift selection
zSel = (data["redshift"]>zmin)&(data["redshift"]<zmax)
# mass selection
if cos == "cen":
mSel = ((data["boxLength"]==400.)&(data["log_"+qty+"_min"]>n.log10(limits_04[0]))) | ((data["boxLength"]==1000.)&(data["log_"+qty+"_min"]>n.log10(limits_10[0]))) | ((data["boxLength"]==2500.)&(data["log_"+qty+"_min"]>n.log10(limits_25[0]))) | ((data["boxLength"]==4000.)&(data["log_"+qty+"_min"]>n.log10(limits_40[0])))
if cos == "sat":
mSel = ((data["boxLength"]==400.)&(data["log_"+qty+"_min"]>n.log10(limits_04[0]))) | ((data["boxLength"]==1000.)&(data["log_"+qty+"_min"]>n.log10(limits_10[0]))) #| ((data["boxLength"]==2500.)&(data["log_"+qty+"_min"]>n.log10(limits_25[0]))) | ((data["boxLength"]==4000.)&(data["log_"+qty+"_min"]>n.log10(limits_40[0])))
# minimum number counts selection
nSel = (data['dN_counts_'+cos]>NminCount)
# data selection
ok = (zSel) & (mSel) & (nSel)
# axis definition
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
lg_1pz = n.log10(1+ data["redshift"][ok])
# fitting function definition
# loads redshift 0 best parameters
f=open(join(dir,qty,"M200c-"+cos+"-cumulative-function-z0-params.pkl"), 'r')
res = cPickle.load(f)
f.close()
pInit = res.x
print pInit
# create redshift varying functions for the parameters
A_pr = lambda lz, A1, A2, A3 : 10**(pInit[0] + A1 * lz + A2 * lz**2. + A3 * lz**3.)
M_pr = lambda lz, m1, m2, m3 : 10**(pInit[1] + m1 * lz + m2 *lz**2. + m3 * lz**3.)
a_pr = lambda lz, a1, a2, a3, a4 : 10**(pInit[2] + a1 * lz + a2 *lz**2. + a3 *lz**3. + a4 *lz**4.)
b_pr = lambda lz, b1, b2, b3 : -10**(pInit[3] + b1 * lz + b2 *lz**2.+ b3 *lz**3.)
# generalized fitting function
vfG = lambda lg_v, lg_z, ps : n.log10( A_pr(lg_z, ps[0], ps[1], ps[2]) * (10**lg_v/M_pr(lg_z, ps[3], ps[4], ps[5]))**b_pr(lg_z, ps[10], ps[11], ps[12]) * n.e**(- (10**lg_v/M_pr(lg_z, ps[3], ps[4], ps[5]))**a_pr (lg_z, ps[6], ps[7], ps[8], ps[9]) ) )
# defines chi2
chi2fun = lambda ps : n.sum( (vfG(lg_M200c, lg_1pz, ps) - lg_MF_c)**2. / (errorLog)**2. )/(len(lg_MF_c) - len(ps))
# fits the parameters
res = minimize(chi2fun, p0, method='Powell',options={'xtol': 1e-8, 'disp': True, 'maxiter' : 5000000000000})
pOpt = res.x
cov = res.direc
chi2perpoint = lambda ps : (vfG(lg_M200c, lg_1pz, ps) - lg_MF_c)**2. / (errorLog)**2.
chi2pp = chi2perpoint(pOpt)
print pOpt
lg_M200c_model = n.arange(n.min(lg_M200c),n.max(lg_M200c)+0.1,0.1)
X,Y = n.meshgrid(lg_M200c_model, n.arange(zmin, zmax+0.02,0.02))
Z = vfG(X,n.log10(1+Y),pOpt)
outPointFile = join(dir,qty,"M200c-"+cos+"-cumulative-function-z0-model-pts.txt")
n.savetxt(outPointFile ,n.transpose([n.hstack((X)), n.hstack((Y)), n.hstack((Z))]) )
f=open(join(dir,qty,"M200c-"+cos+"-cumulative-function-zAll-params.pkl"), 'w')
cPickle.dump(res, f)
f.close()
X,Y,Z = n.loadtxt(outPointFile, unpack=True)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(X, Z, c=Y, s=5, marker='o',label="model", rasterized=True)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'log$_{10}[M_{200c}/(h^{-1}M_\odot)]$')
p.ylabel(r'log n(>M)')
gl = p.legend(loc=3,fontsize=10)
gl.set_frame_on(False)
p.ylim((-8, 1))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,qty,"M200c-"+cos+"-cumulative-function-model-zAll.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(lg_M200c, lg_MF_c, c=chi2pp, s=5, marker='o',label="chi2", rasterized=True)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("chi2 per point")
p.xlabel(r'log$_{10}[M_{200c}/(h^{-1}M_\odot)]$')
p.ylabel(r'log n(>M)')
gl = p.legend(loc=3,fontsize=10)
gl.set_frame_on(False)
p.ylim((-8, 1))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,qty,"M200c-"+cos+"-cumulative-function-chi2PP-zAll.png"))
p.clf()
# saves table of parameters fitted
f=open(join(dir,qty, "latex-parameters-"+qty+"-"+cos+".txt"),'w')
f.write( "$A(z)$ & " + str(n.round(pInit[0],NDecimal))+ " & "+str(n.round(pOpt[0],NDecimal))+" & "+str(n.round(pOpt[1],NDecimal))+ " & "+str(n.round(pOpt[2],NDecimal))+ "\\\\ \n")
f.write( "$M_{cut}(z)$ & " + str(n.round(pInit[1],NDecimal))+ " & "+str(n.round(pOpt[3],NDecimal))+" & "+str(n.round(pOpt[4],NDecimal))+ " & "+str(n.round(pOpt[5],NDecimal))+ "\\\\ \n")
f.write( "$\alpha(z)$ & " + str(n.round(pInit[2],NDecimal))+ " & "+str(n.round(pOpt[6],NDecimal))+" & "+str(n.round(pOpt[7],NDecimal))+ " & "+str(n.round(pOpt[8],NDecimal))+ " & "+str(n.round(pOpt[9],NDecimal))+ "\\\\ \n")
f.write( "$\beta(z)$ & " + str(n.round(pInit[3],NDecimal))+ " & "+str(n.round(pOpt[10],NDecimal))+" & "+str(n.round(pOpt[11],NDecimal))+ " & "+str(n.round(pOpt[12],NDecimal))+ "\\\\ \n")
f.close()
print "centrals"
fitDataAll(qty = 'M200c', cos = "cen", zmin = -0.01, zmax = 2.3, p0 =n.array([ 0.5, 13.1, -20.8, -0.89, -10.4, 13.6, -0.2, 0.84, -4.1, 6.5, 0.11, -0.82, 1.77 ]) )
print "satellites"
fitDataAll(qty = 'M200c', cos = "sat", zmin = -0.01, zmax = 2.3, p0 = n.array([ 0.8, 12., -22., -0.88, -9.8, 15., -0.28, 0.6, -0.3, 0.86, 0.15, -0.9, 1.8]))
| import glob
import sys
import cPickle
from os.path import join
import numpy as n
import astropy.io.fits as fits
import os
import matplotlib
#matplotlib.use('pdf')
matplotlib.rcParams['font.size']=12
import matplotlib.pyplot as p
from scipy.optimize import minimize
dir='..'
dir_04 = join(dir,"MD_0.4Gpc")
dir_10 = join(dir,"MD_1Gpc")
dir_25 = join(dir,"MD_2.5Gpc")
dir_40 = join(dir,"MD_4Gpc")
dir_25N = join(dir,"MD_2.5GpcNW")
dir_40N = join(dir,"MD_4GpcNW")
data = fits.open( join("..", "M200c", "MD_M200c_summary.fits") )[1].data
NDecimal = 3
errorLog = 0.03
NminCount = 10
Npmin = 300
limits_04 = [Npmin*9.63 * 10**7, 5e12]
limits_10 = [Npmin*1.51 * 10**9., 5e13]
limits_25 = [Npmin*2.359 * 10**10., 5e14]
limits_40 = [Npmin* 9.6 * 10**10. , 5e15]
MPART = n.array([9.63 * 10**7, 1.51 * 10**9, 2.359 * 10**10, 9.6 * 10**10])
names = n.array(["SMD", "MDPL", "BigMD", "HMD", "BigMDNW", "HMDNW"])
zmin = -0.01
zmax = 0.01
def fitDataAll(qty = 'M200c', cos = "cen", zmin = -0.01, zmax = 2.3, p0 = n.array([ 0.65, 12.2, -23.3, -0.9, -9.8, 14.9, -0.2, 1.23, -6.7, -11.6, 0.03, -0.33, 1.3 ])):
"""
Plots the data to be used in the fits later in the analysis.
"""
# redshift selection
zSel = (data["redshift"]>zmin)&(data["redshift"]<zmax)
# mass selection
if cos == "cen":
mSel = ((data["boxLength"]==400.)&(data["log_"+qty+"_min"]>n.log10(limits_04[0]))) | ((data["boxLength"]==1000.)&(data["log_"+qty+"_min"]>n.log10(limits_10[0]))) | ((data["boxLength"]==2500.)&(data["log_"+qty+"_min"]>n.log10(limits_25[0]))) | ((data["boxLength"]==4000.)&(data["log_"+qty+"_min"]>n.log10(limits_40[0])))
if cos == "sat":
mSel = ((data["boxLength"]==400.)&(data["log_"+qty+"_min"]>n.log10(limits_04[0]))) | ((data["boxLength"]==1000.)&(data["log_"+qty+"_min"]>n.log10(limits_10[0]))) #| ((data["boxLength"]==2500.)&(data["log_"+qty+"_min"]>n.log10(limits_25[0]))) | ((data["boxLength"]==4000.)&(data["log_"+qty+"_min"]>n.log10(limits_40[0])))
# minimum number counts selection
nSel = (data['dN_counts_'+cos]>NminCount)
# data selection
ok = (zSel) & (mSel) & (nSel)
# axis definition
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
lg_1pz = n.log10(1+ data["redshift"][ok])
# fitting function definition
# loads redshift 0 best parameters
f=open(join(dir,qty,"M200c-"+cos+"-cumulative-function-z0-params.pkl"), 'r')
res = cPickle.load(f)
f.close()
pInit = res.x
print pInit
# create redshift varying functions for the parameters
A_pr = lambda lz, A1, A2, A3 : 10**(pInit[0] + A1 * lz + A2 * lz**2. + A3 * lz**3.)
M_pr = lambda lz, m1, m2, m3 : 10**(pInit[1] + m1 * lz + m2 *lz**2. + m3 * lz**3.)
a_pr = lambda lz, a1, a2, a3, a4 : 10**(pInit[2] + a1 * lz + a2 *lz**2. + a3 *lz**3. + a4 *lz**4.)
b_pr = lambda lz, b1, b2, b3 : -10**(pInit[3] + b1 * lz + b2 *lz**2.+ b3 *lz**3.)
# generalized fitting function
vfG = lambda lg_v, lg_z, ps : n.log10( A_pr(lg_z, ps[0], ps[1], ps[2]) * (10**lg_v/M_pr(lg_z, ps[3], ps[4], ps[5]))**b_pr(lg_z, ps[10], ps[11], ps[12]) * n.e**(- (10**lg_v/M_pr(lg_z, ps[3], ps[4], ps[5]))**a_pr (lg_z, ps[6], ps[7], ps[8], ps[9]) ) )
# defines chi2
chi2fun = lambda ps : n.sum( (vfG(lg_M200c, lg_1pz, ps) - lg_MF_c)**2. / (errorLog)**2. )/(len(lg_MF_c) - len(ps))
# fits the parameters
res = minimize(chi2fun, p0, method='Powell',options={'xtol': 1e-8, 'disp': True, 'maxiter' : 5000000000000})
pOpt = res.x
cov = res.direc
chi2perpoint = lambda ps : (vfG(lg_M200c, lg_1pz, ps) - lg_MF_c)**2. / (errorLog)**2.
chi2pp = chi2perpoint(pOpt)
print pOpt
lg_M200c_model = n.arange(n.min(lg_M200c),n.max(lg_M200c)+0.1,0.1)
X,Y = n.meshgrid(lg_M200c_model, n.arange(zmin, zmax+0.02,0.02))
Z = vfG(X,n.log10(1+Y),pOpt)
outPointFile = join(dir,qty,"M200c-"+cos+"-cumulative-function-z0-model-pts.txt")
n.savetxt(outPointFile ,n.transpose([n.hstack((X)), n.hstack((Y)), n.hstack((Z))]) )
f=open(join(dir,qty,"M200c-"+cos+"-cumulative-function-zAll-params.pkl"), 'w')
cPickle.dump(res, f)
f.close()
X,Y,Z = n.loadtxt(outPointFile, unpack=True)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(X, Z, c=Y, s=5, marker='o',label="model", rasterized=True)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'log$_{10}[M_{200c}/(h^{-1}M_\odot)]$')
p.ylabel(r'log n(>M)')
gl = p.legend(loc=3,fontsize=10)
gl.set_frame_on(False)
p.ylim((-8, 1))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,qty,"M200c-"+cos+"-cumulative-function-model-zAll.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(lg_M200c, lg_MF_c, c=chi2pp, s=5, marker='o',label="chi2", rasterized=True)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("chi2 per point")
p.xlabel(r'log$_{10}[M_{200c}/(h^{-1}M_\odot)]$')
p.ylabel(r'log n(>M)')
gl = p.legend(loc=3,fontsize=10)
gl.set_frame_on(False)
p.ylim((-8, 1))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,qty,"M200c-"+cos+"-cumulative-function-chi2PP-zAll.png"))
p.clf()
# saves table of parameters fitted
f=open(join(dir,qty, "latex-parameters-"+qty+"-"+cos+".txt"),'w')
f.write( "$A(z)$ & " + str(n.round(pInit[0],NDecimal))+ " & "+str(n.round(pOpt[0],NDecimal))+" & "+str(n.round(pOpt[1],NDecimal))+ " & "+str(n.round(pOpt[2],NDecimal))+ "\\\\ \n")
f.write( "$M_{cut}(z)$ & " + str(n.round(pInit[1],NDecimal))+ " & "+str(n.round(pOpt[3],NDecimal))+" & "+str(n.round(pOpt[4],NDecimal))+ " & "+str(n.round(pOpt[5],NDecimal))+ "\\\\ \n")
f.write( "$\alpha(z)$ & " + str(n.round(pInit[2],NDecimal))+ " & "+str(n.round(pOpt[6],NDecimal))+" & "+str(n.round(pOpt[7],NDecimal))+ " & "+str(n.round(pOpt[8],NDecimal))+ " & "+str(n.round(pOpt[9],NDecimal))+ "\\\\ \n")
f.write( "$\beta(z)$ & " + str(n.round(pInit[3],NDecimal))+ " & "+str(n.round(pOpt[10],NDecimal))+" & "+str(n.round(pOpt[11],NDecimal))+ " & "+str(n.round(pOpt[12],NDecimal))+ "\\\\ \n")
f.close()
print "centrals"
fitDataAll(qty = 'M200c', cos = "cen", zmin = -0.01, zmax = 2.3, p0 =n.array([ 0.5, 13.1, -20.8, -0.89, -10.4, 13.6, -0.2, 0.84, -4.1, 6.5, 0.11, -0.82, 1.77 ]) )
print "satellites"
fitDataAll(qty = 'M200c', cos = "sat", zmin = -0.01, zmax = 2.3, p0 = n.array([ 0.8, 12., -22., -0.88, -9.8, 15., -0.28, 0.6, -0.3, 0.86, 0.15, -0.9, 1.8]))
| en | 0.342936 | #matplotlib.use('pdf') Plots the data to be used in the fits later in the analysis. # redshift selection # mass selection #| ((data["boxLength"]==2500.)&(data["log_"+qty+"_min"]>n.log10(limits_25[0]))) | ((data["boxLength"]==4000.)&(data["log_"+qty+"_min"]>n.log10(limits_40[0]))) # minimum number counts selection # data selection # axis definition # fitting function definition # loads redshift 0 best parameters # create redshift varying functions for the parameters # generalized fitting function # defines chi2 # fits the parameters # saves table of parameters fitted | 1.904563 | 2 |
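A reduced sketch of the fitting pattern in the script above: the mass function is a Schechter-like form fitted by chi-square minimisation with scipy's Powell method. Synthetic points replace the MultiDark measurements and the parameter values are toy numbers, so only the mechanics carry over.

import numpy as np
from scipy.optimize import minimize

log_m = np.linspace(11.0, 15.0, 40)
true_p = [-3.0, 13.5, 1.2, -1.0]     # log10 A, log10 Mcut, alpha, beta (toy values)

# log10 of A * (M/Mcut)**beta * exp(-(M/Mcut)**alpha), written directly in log10 mass
def model(lm, p):
    return p[0] + p[3] * (lm - p[1]) - (10 ** (lm - p[1])) ** p[2] / np.log(10)

data = model(log_m, true_p) + np.random.normal(0.0, 0.03, log_m.size)
chi2 = lambda p: np.sum((model(log_m, p) - data) ** 2 / 0.03 ** 2) / (data.size - len(p))

res = minimize(chi2, x0=[-2.5, 13.0, 1.0, -0.8], method="Powell")
print(res.x)   # best-fit parameters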
gae_mini_profiler/unformatter/__init__.py | danieldanciu/schoggi | 481 | 6632844 | <filename>gae_mini_profiler/unformatter/__init__.py<gh_stars>100-1000
import pprint
import re
import sys
_STRING = re.compile(r"^\s*(['\"])")
_NUMBER = re.compile(r"^\s*(\d+L?)\s*")
_BOOLEAN = re.compile(r"^\s*(True|False)\s*")
_DETAILS_OMITTED = re.compile(r"^\s*\.\.\.\s*")
_LIST = re.compile(r"^\s*\[")
_LIST_SEPARATOR = re.compile(r"^\s*,\s*")
_LIST_END = re.compile(r"^\s*]\s*")
_DICT = re.compile(r"^\s*([\w-]+)<")
_DICT_FIELD = re.compile(r"^\s*([\w-]+)\s*=\s*")
_DICT_SEPARATOR = _LIST_SEPARATOR
_DICT_END = re.compile(r"^\s*>\s*")
def _consume_re(re, text):
m = re.match(text)
if not m:
return None, text
return m, text[m.end(0):]
def _parse_string(text, quote_char):
end = text.index(quote_char)
while text[end-1] == "\\":
end = text.index(quote_char, end+1)
return text[:end].decode('string_escape', 'ignore'), text[end+1:]
def _parse_list(text):
result = []
while True:
m, text = _consume_re(_LIST_END, text)
if m:
return result, text
element, text = _parse(text)
result.append(element)
_, text = _consume_re(_LIST_SEPARATOR, text)
def _parse_dict(text, name):
args = []
kwargs = {}
while True:
m, text = _consume_re(_DICT_END, text)
if m:
if args and kwargs:
obj = { 'args': args }
obj.update(kwargs)
elif args:
obj = args if len(args) > 1 else args[0]
elif kwargs:
obj = kwargs
else:
obj = None
return {name: obj}, text
m, text = _consume_re(_DICT_SEPARATOR, text)
if m:
continue
m, text = _consume_re(_DICT_FIELD, text)
if m:
element, text = _parse(text)
kwargs[ m.group(1).strip("_") ] = element
continue
element, text = _parse(text)
args.append(element)
def _parse(text):
m, text = _consume_re(_STRING, text)
if m:
return _parse_string(text, m.group(1))
m, text = _consume_re(_NUMBER, text)
if m:
value = long(m.group(1)[:-1]) if m.group(1).endswith("L") else int(m.group(1))
return value, text
m, text = _consume_re(_BOOLEAN, text)
if m:
return m.group(1) == "True", text
    m, text = _consume_re(_DETAILS_OMITTED, text)
if m:
return "...", text
m, text = _consume_re(_LIST, text)
if m:
return _parse_list(text)
m, text = _consume_re(_DICT, text)
if m:
return _parse_dict(text, m.group(1))
raise ValueError(text)
def unformat(text):
result, remainder = _parse(text)
assert remainder == ""
return result
def main():
from io import StringIO
f = open('examples.txt', 'r')
for line in f:
result = unformat(line.strip())
pprint.pprint(result)
raw_input('cont?')
f.close()
if __name__ == '__main__':
main()
| <filename>gae_mini_profiler/unformatter/__init__.py<gh_stars>100-1000
import pprint
import re
import sys
_STRING = re.compile(r"^\s*(['\"])")
_NUMBER = re.compile(r"^\s*(\d+L?)\s*")
_BOOLEAN = re.compile(r"^\s*(True|False)\s*")
_DETAILS_OMITTED = re.compile(r"^\s*\.\.\.\s*")
_LIST = re.compile(r"^\s*\[")
_LIST_SEPARATOR = re.compile(r"^\s*,\s*")
_LIST_END = re.compile(r"^\s*]\s*")
_DICT = re.compile(r"^\s*([\w-]+)<")
_DICT_FIELD = re.compile(r"^\s*([\w-]+)\s*=\s*")
_DICT_SEPARATOR = _LIST_SEPARATOR
_DICT_END = re.compile(r"^\s*>\s*")
def _consume_re(re, text):
m = re.match(text)
if not m:
return None, text
return m, text[m.end(0):]
def _parse_string(text, quote_char):
end = text.index(quote_char)
while text[end-1] == "\\":
end = text.index(quote_char, end+1)
return text[:end].decode('string_escape', 'ignore'), text[end+1:]
def _parse_list(text):
result = []
while True:
m, text = _consume_re(_LIST_END, text)
if m:
return result, text
element, text = _parse(text)
result.append(element)
_, text = _consume_re(_LIST_SEPARATOR, text)
def _parse_dict(text, name):
args = []
kwargs = {}
while True:
m, text = _consume_re(_DICT_END, text)
if m:
if args and kwargs:
obj = { 'args': args }
obj.update(kwargs)
elif args:
obj = args if len(args) > 1 else args[0]
elif kwargs:
obj = kwargs
else:
obj = None
return {name: obj}, text
m, text = _consume_re(_DICT_SEPARATOR, text)
if m:
continue
m, text = _consume_re(_DICT_FIELD, text)
if m:
element, text = _parse(text)
kwargs[ m.group(1).strip("_") ] = element
continue
element, text = _parse(text)
args.append(element)
def _parse(text):
m, text = _consume_re(_STRING, text)
if m:
return _parse_string(text, m.group(1))
m, text = _consume_re(_NUMBER, text)
if m:
value = long(m.group(1)[:-1]) if m.group(1).endswith("L") else int(m.group(1))
return value, text
m, text = _consume_re(_BOOLEAN, text)
if m:
return m.group(1) == "True", text
    m, text = _consume_re(_DETAILS_OMITTED, text)
if m:
return "...", text
m, text = _consume_re(_LIST, text)
if m:
return _parse_list(text)
m, text = _consume_re(_DICT, text)
if m:
return _parse_dict(text, m.group(1))
raise ValueError(text)
def unformat(text):
result, remainder = _parse(text)
assert remainder == ""
return result
def main():
from io import StringIO
f = open('examples.txt', 'r')
for line in f:
result = unformat(line.strip())
pprint.pprint(result)
raw_input('cont?')
f.close()
if __name__ == '__main__':
main()
| none | 1 | 2.308692 | 2 |
|
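A usage illustration for the parser above, assuming a Python 2 interpreter (the module relies on long(), raw_input() and str.decode('string_escape')); the appstats-style input line is invented for the example.

# Example input/output for unformat(): a name<field=value, ...> line parses into
# nested dicts, with strings, numbers and booleans converted along the way.
text = "datastore_v3<Query<kind='Greeting', limit=10, keys_only=False>>"
result = unformat(text)
# result == {'datastore_v3': {'Query': {'kind': 'Greeting',
#                                       'limit': 10,
#                                       'keys_only': False}}}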
pymono/models/Player.py | MrKomish/pymono | 1 | 6632845 | class Player:
def __init__(self, color):
self.color = color
self.cell_index = 0
self.money = 1500
self.is_current = False
self.freeze_turns_left = 0
self.is_in_jail = False
| class Player:
def __init__(self, color):
self.color = color
self.cell_index = 0
self.money = 1500
self.is_current = False
self.freeze_turns_left = 0
self.is_in_jail = False
| none | 1 | 2.516388 | 3 |
|
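A tiny usage sketch for the Player model above; the board size, dice roll and price are invented for illustration.

BOARD_CELLS = 40   # made-up board length

red, blue = Player("red"), Player("blue")
red.is_current = True
red.cell_index = (red.cell_index + 7) % BOARD_CELLS   # advance 7 cells, wrapping past GO
red.money -= 200                                      # pay for a property
print(red.cell_index, red.money)                      # 7 1300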
umm/cli/umm.py | zachcoleman/umm-cli-bot | 0 | 6632846 | import subprocess
from typing import List
import click
import requests
from pygments import console
from umm.cli.client import add_request, confirm_request, umm_request
from umm.server.__main__ import main
@click.command()
@click.option("--start", "-s", is_flag=True)
@click.option("--add", is_flag=True)
@click.argument("tags", nargs=-1)
def umm(start: bool, add: bool, tags: List[str]):
"""
Args:
start:
add:
tags:
Returns:
None
"""
if add:
msg = "input command"
msg = console.colorize("blue", msg)
command = click.prompt(msg)
msg = "input tags [/-separated list]"
msg = console.colorize("blue", msg)
tags = click.prompt(msg)
print(add_request(command, tags.split("/")))
return
if start:
print("umm server starting")
main()
return
try:
candidates = umm_request(tags)
except requests.exceptions.ConnectionError:
raise ConnectionError("can't connect to umm.server try `umm --start`")
if len(candidates["commands"]) == 0:
print("no candidate commands found")
return
for command in candidates["commands"]:
msg = f"{command['command']} [y/n/c/p/q]?"
msg = console.colorize("green", msg)
action_str = click.prompt(msg, default="y")
while action_str not in ["y", "n", "c", "p", "q"]:
print("invalid input. use [y/n/c/p/q]")
action_str = click.prompt(msg, default="y")
# continue and exit conditions
if action_str == "q":
return
elif action_str != "n":
break
# no candidate command selected found
if action_str == "n":
print("no command selected")
return
# if command has prompts
prompts = command.get("prompts", [])
in_data = []
for prompt in prompts:
in_data.append(click.prompt(f"{prompt} = ?"))
# build command
command_str = command["command"]
for i, value in enumerate(in_data):
command_str = command_str.replace(f"${i+1}", value)
# get action
command_actions = {
"y": ([command_str], {"shell": True}),
"c": ([f"echo '{command_str}' |pbcopy"], {"shell": True}),
"p": ([f"echo '{command_str}'"], {"shell": True}),
}
cmd, kwargs = command_actions[action_str]
subprocess.run(cmd, **kwargs)
_ = confirm_request(command["id"])
| import subprocess
from typing import List
import click
import requests
from pygments import console
from umm.cli.client import add_request, confirm_request, umm_request
from umm.server.__main__ import main
@click.command()
@click.option("--start", "-s", is_flag=True)
@click.option("--add", is_flag=True)
@click.argument("tags", nargs=-1)
def umm(start: bool, add: bool, tags: List[str]):
"""
Args:
start:
add:
tags:
Returns:
None
"""
if add:
msg = "input command"
msg = console.colorize("blue", msg)
command = click.prompt(msg)
msg = "input tags [/-separated list]"
msg = console.colorize("blue", msg)
tags = click.prompt(msg)
print(add_request(command, tags.split("/")))
return
if start:
print("umm server starting")
main()
return
try:
candidates = umm_request(tags)
except requests.exceptions.ConnectionError:
raise ConnectionError("can't connect to umm.server try `umm --start`")
if len(candidates["commands"]) == 0:
print("no candidate commands found")
return
for command in candidates["commands"]:
msg = f"{command['command']} [y/n/c/p/q]?"
msg = console.colorize("green", msg)
action_str = click.prompt(msg, default="y")
while action_str not in ["y", "n", "c", "p", "q"]:
print("invalid input. use [y/n/c/p/q]")
action_str = click.prompt(msg, default="y")
# continue and exit conditions
if action_str == "q":
return
elif action_str != "n":
break
# no candidate command selected found
if action_str == "n":
print("no command selected")
return
# if command has prompts
prompts = command.get("prompts", [])
in_data = []
for prompt in prompts:
in_data.append(click.prompt(f"{prompt} = ?"))
# build command
command_str = command["command"]
for i, value in enumerate(in_data):
command_str = command_str.replace(f"${i+1}", value)
# get action
command_actions = {
"y": ([command_str], {"shell": True}),
"c": ([f"echo '{command_str}' |pbcopy"], {"shell": True}),
"p": ([f"echo '{command_str}'"], {"shell": True}),
}
cmd, kwargs = command_actions[action_str]
subprocess.run(cmd, **kwargs)
_ = confirm_request(command["id"])
| en | 0.735051 | Args: start: add: tags: Returns: None # continue and exit conditions # no candidate command selected found # if command has prompts # build command # get action | 2.883229 | 3 |
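The stored commands above may contain $1, $2, ... placeholders that are filled from the user's prompt answers; here is that substitution loop in isolation, with an invented tar command as input.

# Replace '$1', '$2', ... in a stored command template by the prompt answers, in order.
def build_command(template, answers):
    for i, value in enumerate(answers):
        template = template.replace(f"${i + 1}", value)
    return template

print(build_command("tar -czvf $1 $2", ["backup.tar.gz", "./data"]))
# -> tar -czvf backup.tar.gz ./data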
vmaig_auth/forms.py | a1401358759/vmaig_blog | 1 | 6632847 | # -*- coding: utf-8 -*-
from django import forms
from vmaig_auth.models import VmaigUser
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.core.mail import send_mail
import base64
import logging
logger = logging.getLogger(__name__)
# Adapted from django.contrib.auth.forms.UserCreationForm
class VmaigUserCreationForm(forms.ModelForm):
    # Error messages
error_messages = {
'duplicate_username': u"此用户已存在.",
'password_mismatch': u"两次密码不相等.",
'duplicate_email': u'此email已经存在.'
}
    # Error messages: 'invalid' is shown when the username is ill-formed,
    # 'required' is shown when the field is left empty
username = forms.RegexField(
max_length=30,
regex=r'^[\w.@+-]+$',
error_messages={
'invalid': u"该值只能包含字母、数字和字符@/./+/-/_",
'required': u"用户名未填"
}
)
email = forms.EmailField(
error_messages={
'invalid': u"email格式错误",
'required': u'email未填'}
)
password1 = forms.CharField(
widget=forms.PasswordInput,
error_messages={
'required': u"密码未填"
}
)
password2 = forms.CharField(
widget=forms.PasswordInput,
error_messages={
'required': u"确认密码未填"
}
)
class Meta:
model = VmaigUser
fields = ("username", "email")
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
VmaigUser._default_manager.get(username=username)
except VmaigUser.DoesNotExist:
return username
raise forms.ValidationError(
self.error_messages["duplicate_username"]
)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages["password_mismatch"]
)
return password2
def clean_email(self):
email = self.cleaned_data["email"]
        # Check whether a user with this email already exists
try:
VmaigUser._default_manager.get(email=email)
except VmaigUser.DoesNotExist:
return email
raise forms.ValidationError(
self.error_messages["duplicate_email"]
)
def save(self, commit=True):
user = super(VmaigUserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["<PASSWORD>"])
if commit:
user.save()
return user
class VmaigPasswordRestForm(forms.Form):
    # Error messages
error_messages = {
'email_error': u"此用户不存在或者用户名与email不对应.",
}
    # Error messages: 'invalid' is shown when the username is ill-formed,
    # 'required' is shown when the field is left empty
username = forms.RegexField(
max_length=30,
regex=r'^[\w.@+-]+$',
error_messages={
'invalid': u"该值只能包含字母、数字和字符@/./+/-/_",
'required': u"用户名未填"}
)
email = forms.EmailField(
error_messages={
'invalid': u"email格式错误",
'required': u'email未填'}
)
def clean(self):
username = self.cleaned_data.get('username')
email = self.cleaned_data.get('email')
if username and email:
try:
self.user = VmaigUser.objects.get(
username=username, email=email, is_active=True
)
except VmaigUser.DoesNotExist:
raise forms.ValidationError(
self.error_messages["email_error"]
)
return self.cleaned_data
def save(self, from_email=None, request=None,
token_generator=default_token_generator):
email = self.cleaned_data['email']
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
uid = base64.urlsafe_b64encode(
force_bytes(self.user.pk)
).rstrip(b'\n=')
token = token_generator.make_token(self.user)
protocol = 'http'
title = u"重置 {} 的密码".format(site_name)
message = "".join([
u"你收到这封信是因为你请求重置你在网站 {} 上的账户密码\n\n".format(
site_name
),
u"请访问该页面并输入新密码:\n\n",
"{}://{}/resetpassword/{}/{}/\n\n".format(
protocol, domain, uid, token
),
u"你的用户名,如果已经忘记的话: {}\n\n".format(
self.user.username
),
u"感谢使用我们的站点!\n\n",
u"{} 团队\n\n\n".format(site_name)
])
try:
send_mail(title, message, from_email, [self.user.email])
except Exception as e:
logger.error(
u'[UserControl]用户重置密码邮件发送失败:[{}]/[{}]'.format(
                    self.user.username, email
)
)
| # -*- coding: utf-8 -*-
from django import forms
from vmaig_auth.models import VmaigUser
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.core.mail import send_mail
import base64
import logging
logger = logging.getLogger(__name__)
# Adapted from django.contrib.auth.forms.UserCreationForm
class VmaigUserCreationForm(forms.ModelForm):
    # Error messages
error_messages = {
'duplicate_username': u"此用户已存在.",
'password_mismatch': u"两次密码不相等.",
'duplicate_email': u'此email已经存在.'
}
    # Error messages: 'invalid' is shown when the username is ill-formed,
    # 'required' is shown when the field is left empty
username = forms.RegexField(
max_length=30,
regex=r'^[\w.@+-]+$',
error_messages={
'invalid': u"该值只能包含字母、数字和字符@/./+/-/_",
'required': u"用户名未填"
}
)
email = forms.EmailField(
error_messages={
'invalid': u"email格式错误",
'required': u'email未填'}
)
password1 = forms.CharField(
widget=forms.PasswordInput,
error_messages={
'required': u"密码未填"
}
)
password2 = forms.CharField(
widget=forms.PasswordInput,
error_messages={
'required': u"确认密码未填"
}
)
class Meta:
model = VmaigUser
fields = ("username", "email")
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
VmaigUser._default_manager.get(username=username)
except VmaigUser.DoesNotExist:
return username
raise forms.ValidationError(
self.error_messages["duplicate_username"]
)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages["password_mismatch"]
)
return password2
def clean_email(self):
email = self.cleaned_data["email"]
        # Check whether a user with this email already exists
try:
VmaigUser._default_manager.get(email=email)
except VmaigUser.DoesNotExist:
return email
raise forms.ValidationError(
self.error_messages["duplicate_email"]
)
def save(self, commit=True):
user = super(VmaigUserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["<PASSWORD>"])
if commit:
user.save()
return user
class VmaigPasswordRestForm(forms.Form):
    # Error messages
error_messages = {
'email_error': u"此用户不存在或者用户名与email不对应.",
}
    # Error messages: 'invalid' is shown when the username is ill-formed,
    # 'required' is shown when the field is left empty
username = forms.RegexField(
max_length=30,
regex=r'^[\w.@+-]+$',
error_messages={
'invalid': u"该值只能包含字母、数字和字符@/./+/-/_",
'required': u"用户名未填"}
)
email = forms.EmailField(
error_messages={
'invalid': u"email格式错误",
'required': u'email未填'}
)
def clean(self):
username = self.cleaned_data.get('username')
email = self.cleaned_data.get('email')
if username and email:
try:
self.user = VmaigUser.objects.get(
username=username, email=email, is_active=True
)
except VmaigUser.DoesNotExist:
raise forms.ValidationError(
self.error_messages["email_error"]
)
return self.cleaned_data
def save(self, from_email=None, request=None,
token_generator=default_token_generator):
email = self.cleaned_data['email']
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
uid = base64.urlsafe_b64encode(
force_bytes(self.user.pk)
).rstrip(b'\n=')
token = token_generator.make_token(self.user)
protocol = 'http'
title = u"重置 {} 的密码".format(site_name)
message = "".join([
u"你收到这封信是因为你请求重置你在网站 {} 上的账户密码\n\n".format(
site_name
),
u"请访问该页面并输入新密码:\n\n",
"{}://{}/resetpassword/{}/{}/\n\n".format(
protocol, domain, uid, token
),
u"你的用户名,如果已经忘记的话: {}\n\n".format(
self.user.username
),
u"感谢使用我们的站点!\n\n",
u"{} 团队\n\n\n".format(site_name)
])
try:
send_mail(title, message, from_email, [self.user.email])
except Exception as e:
logger.error(
u'[UserControl]用户重置密码邮件发送失败:[{}]/[{}]'.format(
                    self.user.username, email
)
)
| zh | 0.521661 | # -*- coding: utf-8 -*- # 参考自django.contrib.auth.forms.UserCreationForm # 错误信息 # 错误信息 invalid 表示username不合法的错误信息, # required 表示没填的错误信息 # Since User.username is unique, this check is redundant, # but it sets a nicer error message than the ORM. See #13147. # 判断是这个email 用户是否存在 # 错误信息 # 错误信息 invalid 表示username不合法的错误信息, # required 表示没填的错误信息 | 2.238331 | 2 |
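The reset e-mail above embeds the user's primary key as urlsafe base64 with the padding stripped; below is a minimal, Django-free sketch of that encode/decode round trip (the token half would still need default_token_generator and a real user instance).

import base64

def encode_uid(pk):
    # Same scheme as the form above: urlsafe base64 of the primary key, padding stripped.
    return base64.urlsafe_b64encode(str(pk).encode()).rstrip(b"=").decode()

def decode_uid(uid):
    # The confirmation view has to restore the stripped '=' padding before decoding.
    padded = uid + "=" * (-len(uid) % 4)
    return int(base64.urlsafe_b64decode(padded).decode())

uid = encode_uid(42)
print(uid, decode_uid(uid))   # NDI 42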
caniusepython3/pylint_checker.py | macleodbroad-wf/caniusepython3 | 276 | 6632848 | # Copyright 2014 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pylint checker to enforce Python 2/3 compatible syntax.
See the documentation for what checkers pylint includes by default
which compliment this file.
"""
from __future__ import absolute_import, print_function
import token
import tokenize
from pylint import checkers, interfaces
class StrictPython3Checker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = 'python3'
msgs = {
# Errors for what will syntactically break in Python 3, warnings for
# everything else.
# Retired:
# 'W6001': 'filter built-in referenced'
# 'W6002': 'map built-in referenced'
# 'W6003': 'range built-in referenced'
# 'W6004': 'zip built-in referenced'
'W6005': ('open built-in referenced',
'open-builtin',
'Used when the open built-in function is referenced '
'(semantics different in Python 3; '
'use `from io import open`)',
{'maxversion': (3, 0)}),
}
_changed_builtins = frozenset(['open'])
def visit_name(self, node):
if hasattr(node, 'name') and getattr(node.lookup(node.name)[0], 'name', '') == '__builtin__':
if node.name in self._changed_builtins:
self.add_message(node.name + '-builtin', node=node)
class UnicodeChecker(checkers.BaseTokenChecker):
__implements__ = interfaces.ITokenChecker
name = 'python3'
msgs = {
'W6100': ('native string literal',
'native-string',
'Used when a string has no `b`/`u` prefix and '
'`from __future__ import unicode_literals` is not found '
'(strings w/ no prefix in Python 3 are Unicode)',
{'maxversion': (3, 0)}),
}
def process_tokens(self, tokens):
# Module docstring can be a native string.
# Also use as a flag to notice when __future__ statements are no longer
# valid to avoid wasting time checking every NAME token
# (which is < STRING).
module_start = True
line_num = 1
for type_, val, start, end, line in tokens:
if type_ in (token.NEWLINE, tokenize.NL):
line_num += 1
# Anything else means we are past the first string in the module,
# any comments (e.g. shebang), and no more __future__ statements
# are possible.
if type_ > token.NEWLINE and type_ < token.N_TOKENS:
module_start = False
elif type_ == token.STRING:
line_num += val.count('\n')
if not module_start and not val.startswith(('u', 'b')):
self.add_message('native-string', line=line_num)
elif module_start and type_ == token.NAME:
if len(line) >= 39: # Fast-fail check
if u'__future__' in line and u'unicode_literals' in line:
return
def register(linter):
linter.register_checker(StrictPython3Checker(linter))
linter.register_checker(UnicodeChecker(linter))
| # Copyright 2014 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pylint checker to enforce Python 2/3 compatible syntax.
See the documentation for what checkers pylint includes by default
which compliment this file.
"""
from __future__ import absolute_import, print_function
import token
import tokenize
from pylint import checkers, interfaces
class StrictPython3Checker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = 'python3'
msgs = {
# Errors for what will syntactically break in Python 3, warnings for
# everything else.
# Retired:
# 'W6001': 'filter built-in referenced'
# 'W6002': 'map built-in referenced'
# 'W6003': 'range built-in referenced'
# 'W6004': 'zip built-in referenced'
'W6005': ('open built-in referenced',
'open-builtin',
'Used when the open built-in function is referenced '
'(semantics different in Python 3; '
'use `from io import open`)',
{'maxversion': (3, 0)}),
}
_changed_builtins = frozenset(['open'])
def visit_name(self, node):
if hasattr(node, 'name') and getattr(node.lookup(node.name)[0], 'name', '') == '__builtin__':
if node.name in self._changed_builtins:
self.add_message(node.name + '-builtin', node=node)
class UnicodeChecker(checkers.BaseTokenChecker):
__implements__ = interfaces.ITokenChecker
name = 'python3'
msgs = {
'W6100': ('native string literal',
'native-string',
'Used when a string has no `b`/`u` prefix and '
'`from __future__ import unicode_literals` is not found '
'(strings w/ no prefix in Python 3 are Unicode)',
{'maxversion': (3, 0)}),
}
def process_tokens(self, tokens):
# Module docstring can be a native string.
# Also use as a flag to notice when __future__ statements are no longer
# valid to avoid wasting time checking every NAME token
# (which is < STRING).
module_start = True
line_num = 1
for type_, val, start, end, line in tokens:
if type_ in (token.NEWLINE, tokenize.NL):
line_num += 1
# Anything else means we are past the first string in the module,
# any comments (e.g. shebang), and no more __future__ statements
# are possible.
if type_ > token.NEWLINE and type_ < token.N_TOKENS:
module_start = False
elif type_ == token.STRING:
line_num += val.count('\n')
if not module_start and not val.startswith(('u', 'b')):
self.add_message('native-string', line=line_num)
elif module_start and type_ == token.NAME:
if len(line) >= 39: # Fast-fail check
if u'__future__' in line and u'unicode_literals' in line:
return
def register(linter):
linter.register_checker(StrictPython3Checker(linter))
linter.register_checker(UnicodeChecker(linter))
| en | 0.804327 | # Copyright 2014 Google Inc. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Pylint checker to enforce Python 2/3 compatible syntax. See the documentation for what checkers pylint includes by default which compliment this file. # Errors for what will syntactically break in Python 3, warnings for # everything else. # Retired: # 'W6001': 'filter built-in referenced' # 'W6002': 'map built-in referenced' # 'W6003': 'range built-in referenced' # 'W6004': 'zip built-in referenced' # Module docstring can be a native string. # Also use as a flag to notice when __future__ statements are no longer # valid to avoid wasting time checking every NAME token # (which is < STRING). # Anything else means we are past the first string in the module, # any comments (e.g. shebang), and no more __future__ statements # are possible. # Fast-fail check | 1.982241 | 2 |
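A minimal, runnable sketch of the token stream that the UnicodeChecker above walks. The sample source string and printed labels are assumptions for illustration, not part of this dataset row; only the stdlib token and tokenize modules the checker itself imports are used.
# Illustration only: inspect string tokens the way process_tokens does.
import io
import token
import tokenize

source = 'greeting = "hello"\nname = u"world"\n'
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == token.STRING:
        # "hello" has no b/u prefix, so the checker would emit W6100 (native-string);
        # u"world" carries a prefix and passes.
        prefixed = tok.string.startswith(('u', 'b'))
        print(tok.start[0], tok.string, 'prefixed' if prefixed else 'native')
Running it prints one line per string literal with its source line number, mirroring the line_num bookkeeping in process_tokens.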
wikis/urls.py | aristo-master/uzuwiki | 0 | 6632849 | """uzuwiki URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import include, path
from django.http import HttpResponseBadRequest
urlpatterns = [
path('', include('wikis.histories.urls')),
path('', include('wikis.maintenances.urls')),
path('', include('wikis.comments.urls')),
path('', include('wikis.attachments.urls')),
path('', include('wikis.pages.urls')),
]
| """uzuwiki URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import include, path
from django.http import HttpResponseBadRequest
urlpatterns = [
path('', include('wikis.histories.urls')),
path('', include('wikis.maintenances.urls')),
path('', include('wikis.comments.urls')),
path('', include('wikis.attachments.urls')),
path('', include('wikis.pages.urls')),
]
| en | 0.544413 | uzuwiki URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 2.610378 | 3 |
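The docstring in the row above describes pulling in per-app URLconfs via include(); a sketch of what one such included module might look like follows. The module path, view name, and route are assumptions for illustration and are not taken from the uzuwiki repository.
# Hypothetical wikis/pages/urls.py, one of the modules referenced by include() above.
from django.urls import path

from . import views  # assumed to define a page_detail view

urlpatterns = [
    path('<str:page_name>/', views.page_detail, name='page-detail'),
]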
shortener/models.py | mariob0y/URLshortener | 1 | 6632850 | from django.db import models
from django.conf import settings
class UrlMap(models.Model):
'''Storing of full and short URL'''
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
full_url = models.CharField(max_length=500)
short_url = models.CharField(max_length=50, unique=True, db_index=True)
usage_count = models.IntegerField(default=0)
max_count = models.IntegerField(default=-1)
lifespan = models.IntegerField(default=-1)
date_created = models.DateTimeField(auto_now_add=True)
date_expired = models.DateTimeField()
def __str__(self):
return '{} - {} - {}'.format(self.user, self.full_url, self.short_url)
class UrlProfile(models.Model):
    '''Settings for URL shortening usage'''
user = models.OneToOneField(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
enabled = models.BooleanField(default=True, null=True)
max_urls = models.IntegerField(default=-1, null=True, blank=True)
max_concurrent_urls = models.IntegerField(default=100, null=True,
blank=True)
default_lifespan = models.IntegerField(default=120, null=True,
blank=True)
default_max_uses = models.IntegerField(default=-1, null=True,
blank=True)
def __str__(self):
return '{}'.format(self.user)
| from django.db import models
from django.conf import settings
class UrlMap(models.Model):
'''Storing of full and short URL'''
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
full_url = models.CharField(max_length=500)
short_url = models.CharField(max_length=50, unique=True, db_index=True)
usage_count = models.IntegerField(default=0)
max_count = models.IntegerField(default=-1)
lifespan = models.IntegerField(default=-1)
date_created = models.DateTimeField(auto_now_add=True)
date_expired = models.DateTimeField()
def __str__(self):
return '{} - {} - {}'.format(self.user, self.full_url, self.short_url)
class UrlProfile(models.Model):
    '''Settings for URL shortening usage'''
user = models.OneToOneField(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
enabled = models.BooleanField(default=True, null=True)
max_urls = models.IntegerField(default=-1, null=True, blank=True)
max_concurrent_urls = models.IntegerField(default=100, null=True,
blank=True)
default_lifespan = models.IntegerField(default=120, null=True,
blank=True)
default_max_uses = models.IntegerField(default=-1, null=True,
blank=True)
def __str__(self):
return '{}'.format(self.user) | en | 0.769665 | Storing of full and short URL Settings for URS shortening usage | 2.194466 | 2 |