deep_rl/agent/PPO_agent.py | pladosz/MOHQA
#######################################################################
# Copyright (C) 2017 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from ..network import *
from ..component import *
from .BaseAgent import *
class PPOAgent_L2M_Mem(BaseAgent):
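    """
    PPO agent augmented with a simple state-memory trace.

    Besides the current observations, the policy/value network receives a
    `memory` tensor that is updated every step as an exponential moving
    average of past (normalized) states, controlled by `mem_update_rate`.
    The optimization itself follows standard clipped-objective PPO.
    """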
def __init__(self, config):
BaseAgent.__init__(self, config)
self.config = config
self.task = config.task_fn()
self.network = config.network_fn(self.task.state_dim, self.task.action_dim)
self.opt = config.optimizer_fn(self.network.parameters())
self.total_steps = 0
self.episode_rewards = np.zeros(config.num_workers)
self.last_episode_rewards = np.zeros(config.num_workers)
self.states = self.task.reset()
self.states = config.state_normalizer(self.states)
self.memory = self.task.reset()
self.memory = config.state_normalizer(self.states)
self.mem_update_rate = 0.1
def iteration(self):
config = self.config
rollout = []
states = self.states
memory = self.memory
for _ in range(config.rollout_length):
actions, log_probs, _, values = self.network.predict(states,memory)
next_states, rewards, terminals, _ = self.task.step(actions.cpu().detach().numpy())
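            # memory is an exponential moving average of past states:
            # memory <- (1 - mem_update_rate) * memory + mem_update_rate * states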
memory = tensor((1-self.mem_update_rate)*memory+self.mem_update_rate*states)
self.episode_rewards += rewards
rewards = config.reward_normalizer(rewards)
for i, terminal in enumerate(terminals):
if terminals[i]:
self.last_episode_rewards[i] = self.episode_rewards[i]
self.episode_rewards[i] = 0
next_states = config.state_normalizer(next_states)
memory = config.state_normalizer(memory)
rollout.append([states, memory, values.detach(), actions.detach(), log_probs.detach(), rewards, 1 - terminals])
states = next_states
self.states = states
self.memory = memory
pending_value = self.network.predict(states,memory)[-1]
rollout.append([states, memory, pending_value, None, None, None, None])
processed_rollout = [None] * (len(rollout) - 1)
advantages = tensor(np.zeros((config.num_workers, 1)))
returns = pending_value.detach()
for i in reversed(range(len(rollout) - 1)):
states, memory, value, actions, log_probs, rewards, terminals = rollout[i]
terminals = tensor(terminals).unsqueeze(1)
rewards = tensor(rewards).unsqueeze(1)
actions = tensor(actions)
states = tensor(states)
memory = tensor(memory)
next_value = rollout[i + 1][2]
returns = rewards + config.discount * terminals * returns
if not config.use_gae:
advantages = returns - value.detach()
else:
td_error = rewards + config.discount * terminals * next_value.detach() - value.detach()
advantages = advantages * config.gae_tau * config.discount * terminals + td_error
processed_rollout[i] = [states, memory, actions, log_probs, returns, advantages]
states, memory, actions, log_probs_old, returns, advantages = map(lambda x: torch.cat(x, dim = 0), zip(*processed_rollout))
advantages = (advantages - advantages.mean()) / advantages.std()
batcher = Batcher(states.size(0) // config.num_mini_batches, [np.arange(states.size(0))])
for _ in range(config.optimization_epochs):
batcher.shuffle()
while not batcher.end():
batch_indices = batcher.next_batch()[0]
batch_indices = tensor(batch_indices).long()
sampled_states = states[batch_indices]
sampled_mem = memory[batch_indices]
sampled_actions = actions[batch_indices]
sampled_log_probs_old = log_probs_old[batch_indices]
sampled_returns = returns[batch_indices]
sampled_advantages = advantages[batch_indices]
_, log_probs, entropy_loss, values = self.network.predict(sampled_states, sampled_mem, sampled_actions)
ratio = (log_probs - sampled_log_probs_old).exp()
obj = ratio * sampled_advantages
obj_clipped = ratio.clamp(1.0 - self.config.ppo_ratio_clip,
1.0 + self.config.ppo_ratio_clip) * sampled_advantages
policy_loss = -torch.min(obj, obj_clipped).mean(0) - config.entropy_weight * entropy_loss.mean()
value_loss = 0.5 * (sampled_returns - values).pow(2).mean()
self.opt.zero_grad()
(policy_loss + value_loss).backward()
nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)
self.opt.step()
steps = config.rollout_length * config.num_workers
self.total_steps += steps
class PPOAgent(BaseAgent):
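    """
    Standard PPO agent: collects fixed-length rollouts from parallel workers,
    computes returns and advantages (optionally with GAE), then optimizes the
    clipped surrogate objective plus a value loss over shuffled mini-batches.
    """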
def __init__(self, config):
BaseAgent.__init__(self, config)
self.config = config
self.task = config.task_fn()
self.network = config.network_fn(self.task.state_dim, self.task.action_dim)
self.opt = config.optimizer_fn(self.network.parameters())
self.total_steps = 0
self.episode_rewards = np.zeros(config.num_workers)
self.last_episode_rewards = np.zeros(config.num_workers)
self.states = self.task.reset()
self.states = config.state_normalizer(self.states)
def iteration(self):
config = self.config
rollout = []
states = self.states
for _ in range(config.rollout_length):
actions, log_probs, _, values = self.network.predict(states)
next_states, rewards, terminals, _ = self.task.step(actions.cpu().detach().numpy())
self.episode_rewards += rewards
rewards = config.reward_normalizer(rewards)
for i, terminal in enumerate(terminals):
if terminals[i]:
self.last_episode_rewards[i] = self.episode_rewards[i]
self.episode_rewards[i] = 0
next_states = config.state_normalizer(next_states)
rollout.append([states, values.detach(), actions.detach(), log_probs.detach(), rewards, 1 - terminals])
states = next_states
self.states = states
pending_value = self.network.predict(states)[-1]
rollout.append([states, pending_value, None, None, None, None])
processed_rollout = [None] * (len(rollout) - 1)
advantages = tensor(np.zeros((config.num_workers, 1)))
returns = pending_value.detach()
for i in reversed(range(len(rollout) - 1)):
states, value, actions, log_probs, rewards, terminals = rollout[i]
terminals = tensor(terminals).unsqueeze(1)
rewards = tensor(rewards).unsqueeze(1)
actions = tensor(actions)
states = tensor(states)
next_value = rollout[i + 1][1]
returns = rewards + config.discount * terminals * returns
if not config.use_gae:
advantages = returns - value.detach()
else:
td_error = rewards + config.discount * terminals * next_value.detach() - value.detach()
advantages = advantages * config.gae_tau * config.discount * terminals + td_error
processed_rollout[i] = [states, actions, log_probs, returns, advantages]
states, actions, log_probs_old, returns, advantages = map(lambda x: torch.cat(x, dim = 0), zip(*processed_rollout))
advantages = (advantages - advantages.mean()) / advantages.std()
batcher = Batcher(states.size(0) // config.num_mini_batches, [np.arange(states.size(0))])
for _ in range(config.optimization_epochs):
batcher.shuffle()
while not batcher.end():
batch_indices = batcher.next_batch()[0]
batch_indices = tensor(batch_indices).long()
sampled_states = states[batch_indices]
sampled_actions = actions[batch_indices]
sampled_log_probs_old = log_probs_old[batch_indices]
sampled_returns = returns[batch_indices]
sampled_advantages = advantages[batch_indices]
_, log_probs, entropy_loss, values = self.network.predict(sampled_states, sampled_actions)
ratio = (log_probs - sampled_log_probs_old).exp()
obj = ratio * sampled_advantages
obj_clipped = ratio.clamp(1.0 - self.config.ppo_ratio_clip,
1.0 + self.config.ppo_ratio_clip) * sampled_advantages
policy_loss = -torch.min(obj, obj_clipped).mean(0) - config.entropy_weight * entropy_loss.mean()
value_loss = 0.5 * (sampled_returns - values).pow(2).mean()
self.opt.zero_grad()
(policy_loss + value_loss).backward()
nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)
self.opt.step()
steps = config.rollout_length * config.num_workers
self.total_steps += steps
setup.py | cbrentharris/bricklayer
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='Bricklayer',
version='1.0',
description='Lego Digital Designer Education Tool',
author='<NAME>',
author_email='<EMAIL>',
url='https://bitbucket.org/pbergero/deep-impac',
packages=find_packages(),
entry_points = {
'console_scripts' : [
'bricklayer = bricklayer:main'
]
},
test_suite='nose.collector',
tests_require=['nose'],
include_package_data=True,
)
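# Note: installing this package (e.g. with `pip install .`) is expected to expose
# a `bricklayer` console script that dispatches to the `main` function of the
# `bricklayer` package, per the console_scripts entry point above.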
api/files/api/app/generate_csv.py | trackit/trackit-legacy
import csv
import StringIO
def _get_cost(rgg, name, tag_name, sub_header_name, tagged):
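    """Return the cost recorded under `name` in the resource-group list `rgg`,
    narrowed to the tag `tag_name` when `tagged` is true; 0 if not found."""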
for rg in rgg:
if rg[sub_header_name].lower() == name:
if not tagged:
return rg['cost']
else:
for tagl in rg['tags']:
if tagl['name'] == tag_name:
return tagl['cost']
return 0
def _process(splitted_res, header_column, header_name, sub_header_name, tagged, available_tag, account):
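    """Yield one CSV line per month (and per tag when `tagged` is true),
    prefixed with the account name and id when `account` is true."""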
fsplitted_res = splitted_res['months'] if account else splitted_res
for r in fsplitted_res:
if tagged:
for atag in available_tag:
gen = [_get_cost(r[header_name], p, atag, sub_header_name, tagged) for p in header_column]
if sum(gen):
yield '{}{},{}{},{}\n'.format(
'{},{},'.format(splitted_res['account_name'], splitted_res['account_id']) if account else '',
r['month'],
str(sum(gen)),
',{}'.format(atag) if tagged else '',
','.join(str(g) for g in gen))
else:
gen = [_get_cost(r[header_name], p, None, sub_header_name, tagged) for p in header_column]
yield '{}{},{},{}\n'.format(
'{},{},'.format(splitted_res['account_name'], splitted_res['account_id']) if account else '',
r['month'],
str(sum(gen)),
','.join(str(g) for g in gen))
def generate_csv_clean(data, header_name):
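    """Serialize `data` (an iterable of dicts) to a CSV string, using
    `header_name` as the ordered list of column names."""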
si = StringIO.StringIO()
writer = csv.DictWriter(si, header_name)
writer.writeheader()
for row in data:
writer.writerow(row)
return si.getvalue()
def generate_csv(data, header_name, sub_header_name, account=False, tagged=False):
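    """Generate CSV lines (header row first) that break monthly costs down by
    the values found under `header_name`/`sub_header_name`, optionally split
    per account and/or per tag.

    Illustrative use (the field names here are hypothetical):
        csv_text = ''.join(generate_csv(data, 'products', 'product'))
    """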
header_column = set(
hc[sub_header_name].lower()
for r in data
for hc in r[header_name]
) if not account else set(
hc[sub_header_name].lower()
for p in data
for r in p['months']
for hc in r[header_name]
)
available_tag = set(
t['name']
for p in data
for r in p['months']
for hc in r[header_name]
for t in hc['tags']
) if tagged else None
yield '{}month,total{},{}\n'.format('account_name,account_id,' if account else '',
',tag' if tagged else '',
','.join(header_column))
if account:
for splitted_res in data:
for p in _process(splitted_res, header_column, header_name, sub_header_name, tagged, available_tag, account):
yield p
else:
for p in _process(data, header_column, header_name, sub_header_name, tagged, available_tag, account):
yield p
def get_csv_links():
res = [
{
'name': 'Monthly Cost By Region',
'link': 'monthlycostbyregion',
},
{
'name': 'Monthly Cost By Region By Account',
'link': 'monthlycostbyregionbyaccount',
},
{
'name': 'Monthly Cost By Region By Tag By Account',
'link': 'monthlycostbyregionbytagbyaccount',
},
{
'name': 'Monthly Cost By Product',
'link': 'monthlycostbyproduct',
},
{
'name': 'Monthly Cost By Product By Account',
'link': 'monthlycostbyproductbyaccount',
},
{
'name': 'Monthly Cost By Product By Tag By Account',
'link': 'monthlycostbyproductbytagbyaccount',
},
{
'name': 'Sizes of S3 buckets per name',
'link': 's3bucketsizepername',
},
{
'name': 'Sizes of S3 buckets per tag',
'link': 's3bucketsizepertag'
}
]
return sorted(res, key=lambda x: x['name'])
models/sphere_net_PFE.py | DorisWZG/Probabilistic-Face-Embeddings
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
import tensorflow.contrib.slim as slim
model_params = {
'4': ([0, 0, 0, 0], [64, 128, 256, 512]),
'10': ([0, 1, 2, 0], [64, 128, 256, 512]),
'20': ([1, 2, 4, 1], [64, 128, 256, 512]),
'36': ([2, 4, 8, 2], [64, 128, 256, 512]),
'64': ([3, 8, 16, 3], [64, 128, 256, 512]),
}
batch_norm_params_last = {
'decay': 0.995,
'epsilon': 0.001,
'center': True,
'scale': False,
'updates_collections': None,
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
def parametric_relu(x):
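    """PReLU activation with a learnable per-channel slope for negative inputs."""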
num_channels = x.shape[-1].value
with tf.variable_scope('p_re_lu'):
alpha = tf.get_variable('alpha', (1,1,num_channels),
initializer=tf.constant_initializer(0.0),
dtype=tf.float32)
return tf.nn.relu(x) + alpha * tf.minimum(0.0, x)
def se_module(input_net, ratio=16, reuse = None, scope = None):
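    """Squeeze-and-Excitation block: global average pooling followed by a
    two-layer bottleneck (reduction factor `ratio`) whose sigmoid output
    rescales the input channels."""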
with tf.variable_scope(scope, 'SE', [input_net], reuse=reuse):
h,w,c = tuple([dim.value for dim in input_net.shape[1:4]])
assert c % ratio == 0
hidden_units = int(c / ratio)
squeeze = slim.avg_pool2d(input_net, [h,w], padding='VALID')
excitation = slim.flatten(squeeze)
excitation = slim.fully_connected(excitation, hidden_units, scope='se_fc1',
weights_regularizer=None,
weights_initializer=slim.xavier_initializer(),
activation_fn=tf.nn.relu)
excitation = slim.fully_connected(excitation, c, scope='se_fc2',
weights_regularizer=None,
weights_initializer=slim.xavier_initializer(),
activation_fn=tf.nn.sigmoid)
excitation = tf.reshape(excitation, [-1,1,1,c])
output_net = input_net * excitation
return output_net
def conv_module(net, num_res_layers, num_kernels, trans_kernel_size=3, trans_stride=2,
use_se=False, reuse=None, scope=None):
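    """Strided transition convolution followed by `num_res_layers` residual
    blocks of two 3x3 convolutions, optionally with SE recalibration."""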
with tf.variable_scope(scope, 'conv', [net], reuse=reuse):
net = slim.conv2d(net, num_kernels, kernel_size=trans_kernel_size, stride=trans_stride, padding='SAME',
weights_initializer=slim.xavier_initializer())
shortcut = net
for i in range(num_res_layers):
net = slim.conv2d(net, num_kernels, kernel_size=3, stride=1, padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=None)
net = slim.conv2d(net, num_kernels, kernel_size=3, stride=1, padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=None)
print('| ---- block_%d' % i)
if use_se:
net = se_module(net)
net = net + shortcut
shortcut = net
return net
def inference(images, embedding_size=512, reuse=None, scope='SphereNet'):
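    """Build the SphereNet-64 backbone. Returns the L2-normalized embedding
    `mu` and the flattened final convolutional features used by PFE."""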
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(0.0),
normalizer_fn=None,
normalizer_params=None,
activation_fn=parametric_relu):
with tf.variable_scope('SphereNet', [images], reuse=reuse):
# Fix the moving mean and std when training PFE
with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=False):
print('SphereNet input shape:', [dim.value for dim in images.shape])
model_version = '64'
num_layers, num_kernels = model_params[model_version]
net = conv_module(images, num_layers[0], num_kernels[0], scope='conv1')
print('module_1 shape:', [dim.value for dim in net.shape])
net = conv_module(net, num_layers[1], num_kernels[1], scope='conv2')
print('module_2 shape:', [dim.value for dim in net.shape])
net = conv_module(net, num_layers[2], num_kernels[2], scope='conv3')
print('module_3 shape:', [dim.value for dim in net.shape])
net = conv_module(net, num_layers[3], num_kernels[3], scope='conv4')
print('module_4 shape:', [dim.value for dim in net.shape])
net_ = net
net = slim.flatten(net)
prelogits = slim.fully_connected(net, embedding_size, scope='Bottleneck',
weights_initializer=slim.xavier_initializer(),
normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params_last,
activation_fn=None)
# Output used for PFE
mu = tf.nn.l2_normalize(prelogits, axis=1)
conv_final = net
return mu, conv_final
edtslib/opaque_types.py | dartharnold/EDTS
import json
from .dist import Lightyears
class OpaqEncoder(json.JSONEncoder):
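    """JSON encoder that serializes Opaq instances via their to_opaq() hook."""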
def default(self, obj):
try:
return obj.to_opaq()
except Exception as e:
return "Don't know how to serialise {}: {}".format(type(obj), e)
class Opaq(object):
def __repr__(self):
return str(vars(self))
def to_opaq(self):
return vars(self)
class Fuel(Opaq):
def __init__(self, **args):
for attr in ['min', 'max', 'initial', 'cost', 'final']:
setattr(self, attr, args.get(attr, 0.0))
class Refuel(Opaq):
def __init__(self, **args):
self.amount = args.get('amount', 0.0)
self.percent = args.get('percent', 0)
class Jumps(Opaq):
def __init__(self, **args):
self.min = args.get('min', 1)
self.max = args.get('max', 1)
class Location(Opaq):
def __init__(self, **args):
self.system = args.get('system')
self.station = args.get('station')
class WaypointTime(Opaq):
def __init__(self, **args):
self.accurate = args.get('accurate', True)
self.cruise = args.get('cruise', 0)
self.jumps = args.get('jumps', Jumps(min = 0, max = 0))
class Waypoint(Opaq):
def __init__(self, **args):
self.distance = args.get('distance', Lightyears(0))
self.direct = args.get('direct', self.distance)
self.jumps = args.get('jumps', Jumps())
self.time = args.get('time', WaypointTime())
def to_opaq(self):
return vars(self)
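# Illustrative use (values are hypothetical): any of the classes above can be
# serialized with the custom encoder, e.g.
#   json.dumps(Waypoint(distance=Lightyears(12.5)), cls=OpaqEncoder)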
lib/xslt/reader/__init__.py | zepheira/amara
########################################################################
# amara/xslt/reader/__init__.py
"""
Classes for the creation of a stylesheet object
"""
import cStringIO
from xml.dom import Node
from xml.sax import SAXParseException
from xml.sax.handler import property_dom_node
from amara import sax
from amara.lib import IriError, inputsource
from amara.lib.xmlstring import isspace
from amara.namespaces import XML_NAMESPACE, XMLNS_NAMESPACE, XSL_NAMESPACE
from amara.xslt import XsltError, XsltStaticError
from amara.xslt import extensions, exslt
from amara.xslt.tree import *
__all__ = ['stylesheet_reader']
# Whitespace stripping rules for a stylesheet:
# preserve all whitespace within xsl:text elements;
# strip whitespace from all other elements
_XSLT_WHITESPACE_STRIPPING = ((XSL_NAMESPACE, 'text', False), (None, '*', True))
# pseudo-nodes for save/restore of variable bindings
class push_variables_node(xslt_node):
pseudo_node = True
def __init__(self, root, scope):
xslt_node.__init__(self, root)
self._scope = scope
return
def instantiate(self, context):
variables = context.variables
self._scope.append(variables)
context.variables = variables.copy()
return
class pop_variables_node(xslt_node):
pseudo_node = True
def __init__(self, root, scope):
xslt_node.__init__(self, root)
self._scope = scope
return
def instantiate(self, context):
scope = self._scope
context.variables = scope[-1]
del scope[-1]
return
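# mapping from XSLT element local names to their implementing classes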
ELEMENT_CLASSES = {
'apply-imports': apply_imports_element.apply_imports_element,
'apply-templates': apply_templates_element.apply_templates_element,
'attribute': attribute_element.attribute_element,
'call-template': call_template_element.call_template_element,
'choose': choose_elements.choose_element,
'otherwise': choose_elements.otherwise_element,
'when': choose_elements.when_element,
'comment': comment_element.comment_element,
'copy': copy_element.copy_element,
'copy-of': copy_of_element.copy_of_element,
'element': element_element.element_element,
'fallback': fallback_elements.fallback_element,
'for-each': for_each_element.for_each_element,
'if': if_element.if_element,
'message': message_element.message_element,
'number': number_element.number_element,
'processing-instruction':
processing_instruction_element.processing_instruction_element,
'stylesheet': transform_element.transform_element,
'transform': transform_element.transform_element,
'template': template_element.template_element,
'text': text_element.text_element,
'value-of': value_of_element.value_of_element,
'variable': variable_elements.variable_element,
'param': variable_elements.param_element,
'sort': sort_element.sort_element,
'with-param': with_param_element.with_param_element,
'import': declaration_elements.import_element,
'include': declaration_elements.include_element,
'strip-space': declaration_elements.strip_space_element,
'preserve-space': declaration_elements.preserve_space_element,
'output': declaration_elements.output_element,
'key': declaration_elements.key_element,
'decimal-format': declaration_elements.decimal_format_element,
'namespace-alias': declaration_elements.namespace_alias_element,
'attribute-set': declaration_elements.attribute_set_element,
}
# The XSL attributes allowed on literal elements
_RESULT_ELEMENT_XSL_ATTRS = {
'exclude-result-prefixes' : attribute_types.prefixes(),
'extension-element-prefixes' : attribute_types.prefixes(),
'use-attribute-sets' : attribute_types.qnames(),
'version' : attribute_types.number(),
}
_RESULT_ELEMENT_ATTR_INFO = attribute_types.any_avt()
_root_content_model = content_model.alt(
content_model.qname(XSL_NAMESPACE, 'xsl:stylesheet'),
content_model.qname(XSL_NAMESPACE, 'xsl:transform'),
content_model.result_elements)
_XSLT_ROOT_VALIDATION = _root_content_model.compile()
_LITERAL_ELEMENT_VALIDATION = content_model.template.compile()
class parse_state:
"""
Stores the current state of the parser.
Constructor arguments/instance variables:
validation - validation state for the current containing node.
localVariables - set of in-scope variable bindings to determine
variable shadowing.
forwardsCompatible - flag indicating whether or not forwards-compatible
processing is enabled.
currentNamespaces - set of in-scope namespaces for the current node.
extensionNamespaces - set of namespaces defining extension elements
outputNamespaces - set of in-scope namespaces for literal result elements
"""
def __init__(self, node, validation, localVariables, forwardsCompatible,
currentNamespaces, extensionNamespaces, outputNamespaces):
self.node = node
self.validation = validation
self.localVariables = localVariables
self.forwardsCompatible = forwardsCompatible
self.currentNamespaces = currentNamespaces
self.extensionNamespaces = extensionNamespaces
self.outputNamespaces = outputNamespaces
return
class stylesheet_reader(object):
"""
This class can be used to read, from a variety of sources, a
stylesheet and all its included and imported stylesheets, building
from them a single, compact representation of an XSLT stylesheet
tree (an Ft.Xml.Xslt.Stylesheet.Stylesheet object).
This is done with the most efficient parsing method available, and
avoids creating a Domlette document for each document it reads.
"""
# runtime instance variables
_input_source = None
_locator = None
_stylesheet = None
_root = None
def __init__(self):
self._import_index = 0
self._global_vars = {}
self._visited_stylesheet_uris = {}
self._document_state_stack = []
self._element_state_stack = []
self._extelements = {}
self._extelements.update(exslt.extension_elements)
self._extelements.update(extensions.extension_elements)
self._extelement_cache = {}
return
def reset(self):
self._root = None
self._import_index = 0
self._global_vars = {}
self._visited_stylesheet_uris = {}
self._document_state_stack = []
self._element_state_stack = []
return
def addExtensionElementMapping(self, elementMapping):
"""
Add a mapping of extension element names to classes to the
existing mapping of extension elements.
This should only be used for standalone uses of this class. The
only known standalone use for this class is for creating compiled
stylesheets. The benefits of compiled stylesheets are now so minor
that this use case may also disappear and then so will this function.
You have been warned.
"""
self._extelements.update(elementMapping)
for name in elementMapping:
if name in self._extelement_cache:
del self._extelement_cache[name]
return
# -- ContentHandler interface --------------------------------------
def setDocumentLocator(self, locator):
"""
Callback interface for SAX.
"""
# Save the current document state for nested parsing (inclusions)
document_state = (self._locator, self._stylesheet)
self._document_state_stack.append(document_state)
self._locator = locator
self._stylesheet = None
return
def startDocument(self):
"""
Callback interface for SAX.
"""
# Our root is always a document
# We use a document for this because of error checking and
# because we explicitly pass ownerDocument to the nodes as
# they are created
document_uri = self._locator.getSystemId()
root = xslt_root(document_uri)
if not self._root:
self._root = root
self._element_state_stack.append(
parse_state(node=root,
validation=_XSLT_ROOT_VALIDATION,
localVariables={},
forwardsCompatible=False,
currentNamespaces={'xml': XML_NAMESPACE, None: None},
extensionNamespaces={},
outputNamespaces={},
)
)
# for recursive include checks for xsl:include/xsl:import
self._visited_stylesheet_uris[document_uri] = True
# namespaces added for the next element
self._new_namespaces = {}
return
def endDocument(self):
"""
Callback interface for SAX.
"""
stack = self._element_state_stack
state = stack[-1]
del stack[-1]
root = state.node
# ----------------------------------------------------------
# remove URI from recursive inclusion checking
del self._visited_stylesheet_uris[root.baseUri]
# ----------------------------------------------------------
# finalize the children for the document
#root.children = tuple(state.nodes)
# ----------------------------------------------------------
# finalize the stylesheet AST
if stack:
# An xsl:import or xsl:include
# Merge the top-level elements into the "parent" stylesheet
# IMPLEMENTATION NOTE: stack[-1] is the import/include element,
# stack[-2] is the "parent" stylesheet
stack[-2].node._merge(self._stylesheet)
#parent_node = stack[-2].node
#for child in self._stylesheet.children:
# child.parent = parent_node
else:
# A top-most stylesheet
stylesheet = self._root.stylesheet
if stylesheet is not self._stylesheet:
# An additional stylesheet (e.g., an <?xml-stylesheet ...?>);
# treat it as an xsl:import into the "master" stylesheet.
stylesheet.reset()
# Always update the precedence from the included stylesheet
# because it may have contained imports thus increasing its
# import precedence.
self._import_index += 1
stylesheet.import_precedence = self._import_index
# Merge the top-level elements into the "master" stylesheet
stylesheet._merge(self._stylesheet)
#stylesheet.children += self._stylesheet.children
#for child in self._stylesheet.children:
# child.parent = stylesheet
else:
# Prepare for a possible subsequent parse.
self._import_index += 1
# Prepare the "master" stylesheet
stylesheet.setup()
document_state = self._document_state_stack[-1]
del self._document_state_stack[-1]
self._locator, self._stylesheet = document_state
return
def startPrefixMapping(self, prefix, uri):
"""
Callback interface for SAX.
"""
self._new_namespaces[prefix] = uri
return
def startElementNS(self, expandedName, qualifiedName, attribs,
_literal_element=literal_element.literal_element,
_element_classes=ELEMENT_CLASSES,
_element_cache={}, ):
"""
Callback interface for SAX.
"""
parent_state = self._element_state_stack[-1]
state = parse_state(**parent_state.__dict__)
self._element_state_stack.append(state)
# ----------------------------------------------------------
# update in-scope namespaces
if self._new_namespaces:
d = state.currentNamespaces = state.currentNamespaces.copy()
d.update(self._new_namespaces)
d = state.outputNamespaces = state.outputNamespaces.copy()
for prefix, uri in self._new_namespaces.iteritems():
if uri not in (XML_NAMESPACE, XSL_NAMESPACE):
d[prefix] = uri
# reset for next element
self._new_namespaces = {}
# ----------------------------------------------------------
# get the class defining this element
namespace, local = expandedName
xsl_class = ext_class = None
if namespace == XSL_NAMESPACE:
try:
xsl_class, validation, validation_token, legal_attrs = \
_element_cache[local]
except KeyError:
# We need to try to import (and cache) it
try:
xsl_class = _element_classes[local]
except KeyError:
if not state.forwardsCompatible:
raise XsltStaticError(XsltError.XSLT_ILLEGAL_ELEMENT,
parent_state.node, element=local)
xsl_class = fallback_elements.undefined_xslt_element
validation_token = content_model.RESULT_ELEMENT
else:
validation_token = expandedName
validation = xsl_class.content_model.compile()
legal_attrs = xsl_class.attribute_types.items()
_element_cache[local] = (
xsl_class, validation, validation_token, legal_attrs)
elif namespace in state.extensionNamespaces:
try:
ext_class, validation, legal_attrs = \
self._extelement_cache[expandedName]
except KeyError:
try:
ext_class = self._extelements[expandedName]
except KeyError:
ext_class = fallback_elements.undefined_extension_element
validation = ext_class.content_model.compile()
legal_attrs = ext_class.attribute_types
if legal_attrs is not None:
legal_attrs = legal_attrs.items()
self._extelement_cache[expandedName] = (
ext_class, validation, legal_attrs)
validation_token = content_model.RESULT_ELEMENT
else:
validation = _LITERAL_ELEMENT_VALIDATION
validation_token = content_model.RESULT_ELEMENT
state.validation = validation
# ----------------------------------------------------------
# verify that this element can be declared here
try:
next = parent_state.validation[validation_token]
except KeyError:
#self._debug_validation(expandedName)
# ignore whatever elements are defined within an undefined
# element as an exception will occur when/if this element
# is actually instantiated
if not isinstance(parent_state.node,
fallback_elements.undefined_extension_element):
raise XsltStaticError(XsltError.ILLEGAL_ELEMENT_CHILD,
parent_state.node, element=qualifiedName)
else:
# save this state for next go round
parent_state.validation = next
# ----------------------------------------------------------
# create the instance defining this element
klass = (xsl_class or ext_class or _literal_element)
state.node = instance = klass(self._root, expandedName, qualifiedName,
state.currentNamespaces)
instance.baseUri = self._locator.getSystemId()
instance.lineNumber = self._locator.getLineNumber()
instance.columnNumber = self._locator.getColumnNumber()
instance.import_precedence = self._import_index
if xsl_class: # -- XSLT element --------------------------------
# Handle attributes in the null-namespace
standand_attributes = local in ('stylesheet', 'transform')
inst_dict = instance.__dict__
for attr_name, attr_info in legal_attrs:
attr_expanded = (None, attr_name)
if attr_expanded in attribs:
value = attribs[attr_expanded]
del attribs[attr_expanded]
elif attr_info.required:
raise XsltStaticError(XsltError.MISSING_REQUIRED_ATTRIBUTE,
instance, element=qualifiedName,
attribute=attr_name)
else:
value = None
try:
value = attr_info.prepare(instance, value)
except XsltError, e:
#raise self._mutate_exception(e, qualifiedName)
raise
if standand_attributes:
self._stylesheet = instance
self._handle_standard_attr(state, instance, attr_name,
value)
else:
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
inst_dict['_' + attr_name] = value
if attribs:
# Process attributes with a namespace-uri and check for
# any illegal attributes in the null-namespace
for expanded in attribs:
attr_ns, attr_name = expanded
if attr_ns is None:
if not state.forwardsCompatible:
raise XsltStaticError(
XsltError.ILLEGAL_NULL_NAMESPACE_ATTR, instance,
attribute=attr_name, element=qualifiedName)
else:
instance.setAttribute(attr_ns, attr_name,
attribs[expanded])
# XSLT Spec 2.6 - Combining Stylesheets
if local in ('import', 'include'):
self._combine_stylesheet(instance, (local == 'import'))
elif ext_class: # -- extension element -------------------------
validate_attributes = (legal_attrs is not None)
if validate_attributes:
# Handle attributes in the null-namespace
inst_dict = instance.__dict__
for attr_name, attr_info in legal_attrs:
attr_expanded = (None, attr_name)
if attr_expanded in attribs:
value = attribs[attr_expanded]
del attribs[attr_expanded]
elif attr_info.required:
raise XsltStaticError(
XsltError.MISSING_REQUIRED_ATTRIBUTE, instance,
element=qualifiedName, attribute=attr_name)
else:
value = None
try:
value = attr_info.prepare(instance, value)
except XsltError, e:
#raise self._mutate_exception(e, qualifiedName)
raise
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
inst_dict['_' + attr_name] = value
# Process attributes with a namespace-uri and check for
# any illegal attributes in the null-namespace
if attribs:
for expanded in attribs:
attr_ns, attr_name = expanded
value = attribs[expanded]
if validate_attributes and attr_ns is None:
raise XsltStaticError(
XsltError.ILLEGAL_NULL_NAMESPACE_ATTR, instance,
attribute=attr_name, element=qualifiedName)
elif attr_ns == XSL_NAMESPACE:
self._handle_result_element_attr(state, instance,
qualifiedName,
attr_name, value)
else:
instance.setAttribute(attr_ns, attr_name, value)
else: # -- literal result element ------------------------------
output_attrs = []
for expanded in attribs:
attr_ns, attr_local = expanded
value = attribs[expanded]
if attr_ns == XSL_NAMESPACE:
self._handle_result_element_attr(state, instance,
qualifiedName,
attr_local, value)
else:
# prepare attributes for literal output
value = _RESULT_ELEMENT_ATTR_INFO.prepare(instance, value)
attr_qname = attribs.getQNameByName(expanded)
output_attrs.append((attr_qname, attr_ns, value))
# save information for literal output
instance._output_namespace = namespace
instance._output_nss = state.outputNamespaces
instance._output_attrs = output_attrs
# Check for top-level result-element in null namespace
if parent_state.node is self._stylesheet and \
not namespace and not state.forwardsCompatible:
raise XsltStaticError(XsltError.ILLEGAL_ELEMENT_CHILD,
parent_state.node, element=qualifiedName)
return
def endElementNS(self, expandedName, qualifiedName,
_literal_element=literal_element.literal_element,
_variable_element=variable_elements.variable_element):
"""
Callback interface for SAX.
"""
stack = self._element_state_stack
state = stack[-1]
del stack[-1]
parent_state = stack[-1]
element = state.node
# ----------------------------------------------------------
# verify that this element has all required content
try:
state.validation[content_model.END_ELEMENT]
except KeyError:
if expandedName == (XSL_NAMESPACE, u'choose'):
raise XsltStaticError(XsltError.MISSING_REQUIRED_ELEMENT,
element, element=element.nodeName,
child='xsl:when')
raise
# ----------------------------------------------------------
# setup variable context
if state.localVariables is not parent_state.localVariables:
# add context save/restore nodes
binding_stack = []
node = push_variables_node(self._root, binding_stack)
element.insertChild(0, node)
node = pop_variables_node(self._root, binding_stack)
element.appendChild(node)
# ----------------------------------------------------------
# finalize the children for this element
#element.children = tuple(state.nodes)
#for child in element.children:
# if child.doesSetup:
#s child.setup()
del state
# ----------------------------------------------------------
# update parent state
parent_node = parent_state.node
if self._stylesheet is None and parent_node is element.root:
# a literal result element as stylesheet
assert isinstance(element, _literal_element), element
try:
version = element._version
except AttributeError:
raise XsltStaticError(XsltError.LITERAL_RESULT_MISSING_VERSION,
element)
# Reset the root's validation as it has already seen an element.
parent_state.validation = _XSLT_ROOT_VALIDATION
# FIXME: use the prefix from the document for the XSL namespace
stylesheet = (XSL_NAMESPACE, u'stylesheet')
self.startElementNS(stylesheet, u'xsl:stylesheet',
{(None, u'version') : version})
template = (XSL_NAMESPACE, u'template')
self.startElementNS(template, u'xsl:template',
{(None, u'match') : u'/'})
# make this element the template's content
# Note, this MUST index the stack as the stack has changed
# due to the startElementNS() calls.
stack[-1].node.appendChild(element)
self.endElementNS(template, u'xsl:template')
self.endElementNS(stylesheet, u'xsl:stylesheet')
return
parent_node.appendChild(element)
if isinstance(element, _variable_element):
name = element._name
if parent_node is self._stylesheet:
# global variables
if name in self._global_vars:
existing = self._global_vars[name]
if self._import_index > existing:
self._global_vars[name] = self._import_index
elif self._import_index == existing:
raise XsltStaticError(XsltError.DUPLICATE_TOP_LEVEL_VAR,
element, variable=name)
else:
self._global_vars[name] = self._import_index
else:
# local variables
# it is safe to ignore import precedence here
local_vars = parent_state.localVariables
if name in local_vars:
raise XsltStaticError(XsltError.ILLEGAL_SHADOWING,
element, variable=name)
# Copy on use
if local_vars is stack[-2].localVariables:
local_vars = local_vars.copy()
parent_state.localVariables = local_vars
local_vars[name] = True
return
def characters(self, data):
"""
Callback interface for SAX.
"""
parent_state = self._element_state_stack[-1]
# verify that the current element can have text children
try:
next = parent_state.validation[content_model.TEXT_NODE]
except KeyError:
# If the parent can have element children, but not text nodes,
# ignore pure whitespace nodes. This clarification is from
# XSLT 2.0 [3.4] Whitespace Stripping.
# e.g. xsl:stylesheet, xsl:apply-templates, xsl:choose
#self._debug_validation(content_model.TEXT_NODE)
#if (content_model.EMPTY in parent_state.validation or
# not isspace(data)):
if 1:
if len(data) > 10:
data = data[:10] + '...'
raise XsltStaticError(XsltError.ILLEGAL_TEXT_CHILD,
parent_state.node, data=data,
element=parent_state.node.nodeName)
#self._debug_validation(content_model.TEXT_NODE)
else:
# update validation
parent_state.validation = next
node = xslt_text(self._root, data)
parent_state.node.appendChild(node)
return
# -- utility functions ---------------------------------------------
def _combine_stylesheet(self, element, is_import):
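        """
        Resolve and parse the stylesheet referenced by an xsl:include or
        xsl:import element, guarding against circular inclusion, and update
        the import precedence accordingly.
        """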
href = element._href
try:
new_source = self._input_source.resolve(href,
self._input_source.uri)
except (OSError, IriError):
# FIXME: create special inputsource for 4xslt command-line
#for uri in self._alt_base_uris:
# try:
# new_href = self._input_source.getUriResolver().normalize(href, uri)
# #Do we need to figure out a way to pass the hint here?
# new_source = self._input_source.factory.fromUri(new_href)
# break
# except (OSError, IriError):
# pass
#else:
raise XsltStaticError(XsltError.INCLUDE_NOT_FOUND, element,
uri=href, base=self._locator.getSystemId())
# XSLT Spec 2.6.1, Detect circular references in stylesheets
# Note, it is NOT an error to include/import the same stylesheet
# multiple times, rather that it may lead to duplicate definitions
# which are handled regardless (variables, params, templates, ...)
if new_source.uri in self._visited_stylesheet_uris:
raise XsltStaticError(XsltError.CIRCULAR_INCLUDE, element,
uri=new_source.uri)
self.parse(new_source)
self._import_index += is_import
# Always update the precedence as the included stylesheet may have
# contained imports thus increasing the import precedence.
self._stylesheet.import_precedence = self._import_index
return
def _handle_standard_attr(self, state, instance, name, value):
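        """
        Apply the standard XSLT attributes (extension-element-prefixes,
        exclude-result-prefixes, version); any other attribute is stored on
        the instance as a '_'-prefixed attribute.
        """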
if name == 'extension-element-prefixes':
# a whitespace separated list of prefixes
ext = state.extensionNamespaces = state.extensionNamespaces.copy()
out = state.outputNamespaces = state.outputNamespaces.copy()
for prefix in value:
# add the namespace URI to the set of extension namespaces
try:
uri = instance.namespaces[prefix]
except KeyError:
raise XsltStaticError(XsltError.UNDEFINED_PREFIX, instance,
prefix=prefix or '#default')
ext[uri] = True
# remove all matching namespace URIs
for output_prefix, output_uri in out.items():
if output_uri == uri:
del out[output_prefix]
elif name == 'exclude-result-prefixes':
# a whitespace separated list of prefixes
out = state.outputNamespaces = state.outputNamespaces.copy()
for prefix in value:
try:
uri = instance.namespaces[prefix]
except KeyError:
raise XsltStaticError(XsltError.UNDEFINED_PREFIX, instance,
prefix=prefix or '#default')
# remove all matching namespace URIs
for output_prefix, output_uri in out.items():
if output_uri == uri:
del out[output_prefix]
elif name == 'version':
# XSLT Spec 2.5 - Forwards-Compatible Processing
state.forwardsCompatible = (value != 1.0)
instance._version = value
else:
if '-' in name:
name = name.replace('-', '_')
instance.__dict__['_' + name] = value
return
def _handle_result_element_attr(self, state, instance, elementName,
attributeName, value):
try:
attr_info = _RESULT_ELEMENT_XSL_ATTRS[attributeName]
except KeyError:
raise XsltStaticError(XsltError.ILLEGAL_XSL_NAMESPACE_ATTR,
instance, attribute=attributeName,
element=elementName)
value = attr_info.prepare(instance, value)
self._handle_standard_attr(state, instance, attributeName, value)
return
def _mutate_exception(self, exception, elementName):
assert isinstance(exception, XsltError)
exception.message = MessageSource.EXPRESSION_POSITION_INFO % (
self._locator.getSystemId(), self._locator.getLineNumber(),
self._locator.getColumnNumber(), elementName, exception.message)
return exception
# -- debugging routines --------------------------------------------
def _debug_validation(self, token=None):
from pprint import pprint
state = self._element_state_stack[-1]
parent = state.node
print '='*60
print 'parent =',parent
print 'parent class =',parent.__class__
print 'parent content =', parent.content_model
print 'initial validation'
pprint(parent.content_model.compile())
print 'current validation'
pprint(state.validation)
if token:
print 'token', token
print '='*60
return
# -- parsing routines ----------------------------------------------
def fromDocument(self, document, baseUri='', factory=None):
"""
Read in a stylesheet source document from a Domlette and add it to
the stylesheet tree. If a document with the same URI has already been
read, the cached version will be used instead (so duplicate imports,
includes, or stylesheet appends do not result in multiple reads).
"""
if not baseUri:
if hasattr(document, 'documentURI'):
baseUri = document.documentURI
elif hasattr(document, 'baseURI'):
baseUri = document.baseURI
else:
raise TypeError('baseUri required')
if factory is None:
factory = inputsource.default_factory
# check cache
if self._root is not None:
# We prefer to use an already-parsed doc, as it has had its
# external entities and XIncludes resolved already
            if baseUri in self._root.sourceNodes:
document = self._root.sourceNodes[baseUri]
# It's OK to use cached string content, but we have no idea
# whether we're using the same InputSource class as was used to
# parse it the first time, and we don't cache external entities
# or XIncludes, so there is the possibility of those things
# being resolved differently this time around. Oh well.
            elif baseUri in self._root.sources:
content = self._root.sources[baseUri]
isrc = factory.fromString(content, baseUri)
# temporarily uncache it so `parse()` will process it;
# `parse()` will add it back to the cache when finished
del self._root.sources[baseUri]
return self.parse(isrc)
isrc = factory.fromStream(None, baseUri)
features = []
properties = [(property_dom_node, document)]
stylesheet = self._parseSrc(isrc, features, properties)
# Cache for XSLT document() function
self._root.sourceNodes[baseUri] = document
return stylesheet
def parse(self, source):
"""
Read in a stylesheet source document from an InputSource and add it to
the stylesheet tree. If a document with the same URI has already been
read, the cached version will be used instead (so duplicate imports,
includes, or stylesheet appends do not result in multiple reads).
"""
uri = source.uri
#Check cache
content = ''
if self._root is not None:
# We prefer to use an already-parsed doc, as it has had its
# external entities and XIncludes resolved already
if uri in self._root.sourceNodes:
doc = self._root.sourceNodes[uri]
# temporarily uncache it so fromDocument will process it;
# fromDocument will add it back to the cache when finished
del self._root.sourceNodes[uri]
return self.fromDocument(doc, baseUri=uri)
# It's OK to use cached string content, but we have no idea
# whether we're using the same InputSource class as was used to
# parse it the first time, and we don't cache external entities
# or XIncludes, so there is the possibility of those things
# being resolved differently this time around. Oh well.
elif uri in self._root.sources:
content = self._root.sources[uri]
source = inputsource(content, uri)
if not content:
content = source.stream.read()
source = inputsource(cStringIO.StringIO(content), source.uri)
#features = [(sax.FEATURE_PROCESS_XINCLUDES, True)]
features, properties = [], []
stylesheet = self._parseSrc(source, features, properties)
# Cache the string content for subsequent uses
# e.g., xsl:import/xsl:include and document()
self._root.sources[uri] = content
return stylesheet
def _parseSrc(self, isrc, features, properties):
parser = sax.create_parser()
parser.setContentHandler(self)
for featurename, value in features:
parser.setFeature(featurename, value)
# Always set whitespace rules property
parser.setProperty(sax.PROPERTY_WHITESPACE_RULES,
_XSLT_WHITESPACE_STRIPPING)
for propertyname, value in properties:
parser.setProperty(propertyname, value)
prev_source = self._input_source
try:
self._input_source = isrc
try:
parser.parse(isrc)
except SAXParseException, e:
e = e.getException() or e
if isinstance(e, XsltError):
raise e
raise XsltError(XsltError.STYLESHEET_PARSE_ERROR,
uri=isrc.uri, text=str(e))
finally:
self._input_source = prev_source
return self._root.stylesheet
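# -- editor's usage note (illustrative, not part of the original module) -----
# A minimal sketch of how this reader is typically driven, mirroring the
# inputsource(content, uri) call pattern used in parse() above; the stylesheet
# string and URI below are hypothetical placeholders.
#
#     from amara.lib import inputsource
#     reader = stylesheet_reader()
#     xslt_src = "<xsl:stylesheet version='1.0' ...>...</xsl:stylesheet>"
#     compiled = reader.parse(inputsource(xslt_src, 'urn:example:identity'))
#     reader.reset()  # clear per-parse state before reusing the reader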
| ########################################################################
# amara/xslt/reader/__init__.py
"""
Classes for the creation of a stylesheet object
"""
import cStringIO
from xml.dom import Node
from xml.sax import SAXParseException
from xml.sax.handler import property_dom_node
from amara import sax
from amara.lib import IriError, inputsource
from amara.lib.xmlstring import isspace
from amara.namespaces import XML_NAMESPACE, XMLNS_NAMESPACE, XSL_NAMESPACE
from amara.xslt import XsltError, XsltStaticError
from amara.xslt import extensions, exslt
from amara.xslt.tree import *
__all__ = ['stylesheet_reader']
# Whitespace stripping rules for a stylesheet:
# preserve all whitespace within xsl:text elements;
# strip whitespace from all other elements
_XSLT_WHITESPACE_STRIPPING = ((XSL_NAMESPACE, 'text', False), (None, '*', True))
# pseudo-nodes for save/restore of variable bindings
class push_variables_node(xslt_node):
pseudo_node = True
def __init__(self, root, scope):
xslt_node.__init__(self, root)
self._scope = scope
return
def instantiate(self, context):
variables = context.variables
self._scope.append(variables)
context.variables = variables.copy()
return
class pop_variables_node(xslt_node):
pseudo_node = True
def __init__(self, root, scope):
xslt_node.__init__(self, root)
self._scope = scope
return
def instantiate(self, context):
scope = self._scope
context.variables = scope[-1]
del scope[-1]
return
ELEMENT_CLASSES = {
'apply-imports': apply_imports_element.apply_imports_element,
'apply-templates': apply_templates_element.apply_templates_element,
'attribute': attribute_element.attribute_element,
'call-template': call_template_element.call_template_element,
'choose': choose_elements.choose_element,
'otherwise': choose_elements.otherwise_element,
'when': choose_elements.when_element,
'comment': comment_element.comment_element,
'copy': copy_element.copy_element,
'copy-of': copy_of_element.copy_of_element,
'element': element_element.element_element,
'fallback': fallback_elements.fallback_element,
'for-each': for_each_element.for_each_element,
'if': if_element.if_element,
'message': message_element.message_element,
'number': number_element.number_element,
'processing-instruction':
processing_instruction_element.processing_instruction_element,
'stylesheet': transform_element.transform_element,
'transform': transform_element.transform_element,
'template': template_element.template_element,
'text': text_element.text_element,
'value-of': value_of_element.value_of_element,
'variable': variable_elements.variable_element,
'param': variable_elements.param_element,
'sort': sort_element.sort_element,
'with-param': with_param_element.with_param_element,
'import': declaration_elements.import_element,
'include': declaration_elements.include_element,
'strip-space': declaration_elements.strip_space_element,
'preserve-space': declaration_elements.preserve_space_element,
'output': declaration_elements.output_element,
'key': declaration_elements.key_element,
'decimal-format': declaration_elements.decimal_format_element,
'namespace-alias': declaration_elements.namespace_alias_element,
'attribute-set': declaration_elements.attribute_set_element,
}
# The XSL attributes allowed on literal elements
_RESULT_ELEMENT_XSL_ATTRS = {
'exclude-result-prefixes' : attribute_types.prefixes(),
'extension-element-prefixes' : attribute_types.prefixes(),
'use-attribute-sets' : attribute_types.qnames(),
'version' : attribute_types.number(),
}
_RESULT_ELEMENT_ATTR_INFO = attribute_types.any_avt()
_root_content_model = content_model.alt(
content_model.qname(XSL_NAMESPACE, 'xsl:stylesheet'),
content_model.qname(XSL_NAMESPACE, 'xsl:transform'),
content_model.result_elements)
_XSLT_ROOT_VALIDATION = _root_content_model.compile()
_LITERAL_ELEMENT_VALIDATION = content_model.template.compile()
class parse_state:
"""
Stores the current state of the parser.
Constructor arguments/instance variables:
validation - validation state for the current containing node.
localVariables - set of in-scope variable bindings to determine
variable shadowing.
forwardsCompatible - flag indicating whether or not forwards-compatible
processing is enabled.
currentNamespaces - set of in-scope namespaces for the current node.
extensionNamespaces - set of namespaces defining extension elements
outputNamespaces - set of in-scope namespaces for literal result elements
"""
def __init__(self, node, validation, localVariables, forwardsCompatible,
currentNamespaces, extensionNamespaces, outputNamespaces):
self.node = node
self.validation = validation
self.localVariables = localVariables
self.forwardsCompatible = forwardsCompatible
self.currentNamespaces = currentNamespaces
self.extensionNamespaces = extensionNamespaces
self.outputNamespaces = outputNamespaces
return
class stylesheet_reader(object):
"""
This class can be used to read, from a variety of sources, a
stylesheet and all its included and imported stylesheets, building
from them a single, compact representation of an XSLT stylesheet
tree (an Ft.Xml.Xslt.Stylesheet.Stylesheet object).
This is done with the most efficient parsing method available, and
avoids creating a Domlette document for each document it reads.
"""
# runtime instance variables
_input_source = None
_locator = None
_stylesheet = None
_root = None
def __init__(self):
self._import_index = 0
self._global_vars = {}
self._visited_stylesheet_uris = {}
self._document_state_stack = []
self._element_state_stack = []
self._extelements = {}
self._extelements.update(exslt.extension_elements)
self._extelements.update(extensions.extension_elements)
self._extelement_cache = {}
return
def reset(self):
self._root = None
self._import_index = 0
self._global_vars = {}
self._visited_stylesheet_uris = {}
self._document_state_stack = []
self._element_state_stack = []
return
def addExtensionElementMapping(self, elementMapping):
"""
Add a mapping of extension element names to classes to the
existing mapping of extension elements.
This should only be used for standalone uses of this class. The
only known standalone use for this class is for creating compiled
stylesheets. The benefits of compiled stylesheets are now so minor
that this use case may also disappear and then so will this function.
You have been warned.
"""
self._extelements.update(elementMapping)
for name in elementMapping:
if name in self._extelement_cache:
del self._extelement_cache[name]
return
# -- ContentHandler interface --------------------------------------
def setDocumentLocator(self, locator):
"""
Callback interface for SAX.
"""
# Save the current document state for nested parsing (inclusions)
document_state = (self._locator, self._stylesheet)
self._document_state_stack.append(document_state)
self._locator = locator
self._stylesheet = None
return
def startDocument(self):
"""
Callback interface for SAX.
"""
# Our root is always a document
# We use a document for this because of error checking and
# because we explicitly pass ownerDocument to the nodes as
# they are created
document_uri = self._locator.getSystemId()
root = xslt_root(document_uri)
if not self._root:
self._root = root
self._element_state_stack.append(
parse_state(node=root,
validation=_XSLT_ROOT_VALIDATION,
localVariables={},
forwardsCompatible=False,
currentNamespaces={'xml': XML_NAMESPACE, None: None},
extensionNamespaces={},
outputNamespaces={},
)
)
# for recursive include checks for xsl:include/xsl:import
self._visited_stylesheet_uris[document_uri] = True
# namespaces added for the next element
self._new_namespaces = {}
return
def endDocument(self):
"""
Callback interface for SAX.
"""
stack = self._element_state_stack
state = stack[-1]
del stack[-1]
root = state.node
# ----------------------------------------------------------
# remove URI from recursive inclusion checking
del self._visited_stylesheet_uris[root.baseUri]
# ----------------------------------------------------------
# finalize the children for the document
#root.children = tuple(state.nodes)
# ----------------------------------------------------------
# finalize the stylesheet AST
if stack:
# An xsl:import or xsl:include
# Merge the top-level elements into the "parent" stylesheet
# IMPLEMENTATION NOTE: stack[-1] is the import/include element,
# stack[-2] is the "parent" stylesheet
stack[-2].node._merge(self._stylesheet)
#parent_node = stack[-2].node
#for child in self._stylesheet.children:
# child.parent = parent_node
else:
# A top-most stylesheet
stylesheet = self._root.stylesheet
if stylesheet is not self._stylesheet:
# An additional stylesheet (e.g., an <?xml-stylesheet ...?>);
# treat it as an xsl:import into the "master" stylesheet.
stylesheet.reset()
# Always update the precedence from the included stylesheet
# because it may have contained imports thus increasing its
# import precedence.
self._import_index += 1
stylesheet.import_precedence = self._import_index
# Merge the top-level elements into the "master" stylesheet
stylesheet._merge(self._stylesheet)
#stylesheet.children += self._stylesheet.children
#for child in self._stylesheet.children:
# child.parent = stylesheet
else:
# Prepare for a possible subsequent parse.
self._import_index += 1
# Prepare the "master" stylesheet
stylesheet.setup()
document_state = self._document_state_stack[-1]
del self._document_state_stack[-1]
self._locator, self._stylesheet = document_state
return
def startPrefixMapping(self, prefix, uri):
"""
Callback interface for SAX.
"""
self._new_namespaces[prefix] = uri
return
def startElementNS(self, expandedName, qualifiedName, attribs,
_literal_element=literal_element.literal_element,
_element_classes=ELEMENT_CLASSES,
_element_cache={}, ):
"""
Callback interface for SAX.
"""
parent_state = self._element_state_stack[-1]
state = parse_state(**parent_state.__dict__)
self._element_state_stack.append(state)
# ----------------------------------------------------------
# update in-scope namespaces
if self._new_namespaces:
d = state.currentNamespaces = state.currentNamespaces.copy()
d.update(self._new_namespaces)
d = state.outputNamespaces = state.outputNamespaces.copy()
for prefix, uri in self._new_namespaces.iteritems():
if uri not in (XML_NAMESPACE, XSL_NAMESPACE):
d[prefix] = uri
# reset for next element
self._new_namespaces = {}
# ----------------------------------------------------------
# get the class defining this element
namespace, local = expandedName
xsl_class = ext_class = None
if namespace == XSL_NAMESPACE:
try:
xsl_class, validation, validation_token, legal_attrs = \
_element_cache[local]
except KeyError:
# We need to try to import (and cache) it
try:
xsl_class = _element_classes[local]
except KeyError:
if not state.forwardsCompatible:
raise XsltStaticError(XsltError.XSLT_ILLEGAL_ELEMENT,
parent_state.node, element=local)
xsl_class = fallback_elements.undefined_xslt_element
validation_token = content_model.RESULT_ELEMENT
else:
validation_token = expandedName
validation = xsl_class.content_model.compile()
legal_attrs = xsl_class.attribute_types.items()
_element_cache[local] = (
xsl_class, validation, validation_token, legal_attrs)
elif namespace in state.extensionNamespaces:
try:
ext_class, validation, legal_attrs = \
self._extelement_cache[expandedName]
except KeyError:
try:
ext_class = self._extelements[expandedName]
except KeyError:
ext_class = fallback_elements.undefined_extension_element
validation = ext_class.content_model.compile()
legal_attrs = ext_class.attribute_types
if legal_attrs is not None:
legal_attrs = legal_attrs.items()
self._extelement_cache[expandedName] = (
ext_class, validation, legal_attrs)
validation_token = content_model.RESULT_ELEMENT
else:
validation = _LITERAL_ELEMENT_VALIDATION
validation_token = content_model.RESULT_ELEMENT
state.validation = validation
# ----------------------------------------------------------
# verify that this element can be declared here
try:
next = parent_state.validation[validation_token]
except KeyError:
#self._debug_validation(expandedName)
# ignore whatever elements are defined within an undefined
# element as an exception will occur when/if this element
# is actually instantiated
if not isinstance(parent_state.node,
fallback_elements.undefined_extension_element):
raise XsltStaticError(XsltError.ILLEGAL_ELEMENT_CHILD,
parent_state.node, element=qualifiedName)
else:
# save this state for next go round
parent_state.validation = next
# ----------------------------------------------------------
# create the instance defining this element
klass = (xsl_class or ext_class or _literal_element)
state.node = instance = klass(self._root, expandedName, qualifiedName,
state.currentNamespaces)
instance.baseUri = self._locator.getSystemId()
instance.lineNumber = self._locator.getLineNumber()
instance.columnNumber = self._locator.getColumnNumber()
instance.import_precedence = self._import_index
if xsl_class: # -- XSLT element --------------------------------
# Handle attributes in the null-namespace
standand_attributes = local in ('stylesheet', 'transform')
inst_dict = instance.__dict__
for attr_name, attr_info in legal_attrs:
attr_expanded = (None, attr_name)
if attr_expanded in attribs:
value = attribs[attr_expanded]
del attribs[attr_expanded]
elif attr_info.required:
raise XsltStaticError(XsltError.MISSING_REQUIRED_ATTRIBUTE,
instance, element=qualifiedName,
attribute=attr_name)
else:
value = None
try:
value = attr_info.prepare(instance, value)
except XsltError, e:
#raise self._mutate_exception(e, qualifiedName)
raise
if standand_attributes:
self._stylesheet = instance
self._handle_standard_attr(state, instance, attr_name,
value)
else:
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
inst_dict['_' + attr_name] = value
if attribs:
# Process attributes with a namespace-uri and check for
# any illegal attributes in the null-namespace
for expanded in attribs:
attr_ns, attr_name = expanded
if attr_ns is None:
if not state.forwardsCompatible:
raise XsltStaticError(
XsltError.ILLEGAL_NULL_NAMESPACE_ATTR, instance,
attribute=attr_name, element=qualifiedName)
else:
instance.setAttribute(attr_ns, attr_name,
attribs[expanded])
# XSLT Spec 2.6 - Combining Stylesheets
if local in ('import', 'include'):
self._combine_stylesheet(instance, (local == 'import'))
elif ext_class: # -- extension element -------------------------
validate_attributes = (legal_attrs is not None)
if validate_attributes:
# Handle attributes in the null-namespace
inst_dict = instance.__dict__
for attr_name, attr_info in legal_attrs:
attr_expanded = (None, attr_name)
if attr_expanded in attribs:
value = attribs[attr_expanded]
del attribs[attr_expanded]
elif attr_info.required:
raise XsltStaticError(
XsltError.MISSING_REQUIRED_ATTRIBUTE, instance,
element=qualifiedName, attribute=attr_name)
else:
value = None
try:
value = attr_info.prepare(instance, value)
except XsltError, e:
#raise self._mutate_exception(e, qualifiedName)
raise
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
inst_dict['_' + attr_name] = value
# Process attributes with a namespace-uri and check for
# any illegal attributes in the null-namespace
if attribs:
for expanded in attribs:
attr_ns, attr_name = expanded
value = attribs[expanded]
if validate_attributes and attr_ns is None:
raise XsltStaticError(
XsltError.ILLEGAL_NULL_NAMESPACE_ATTR, instance,
attribute=attr_name, element=qualifiedName)
elif attr_ns == XSL_NAMESPACE:
self._handle_result_element_attr(state, instance,
qualifiedName,
attr_name, value)
else:
instance.setAttribute(attr_ns, attr_name, value)
else: # -- literal result element ------------------------------
output_attrs = []
for expanded in attribs:
attr_ns, attr_local = expanded
value = attribs[expanded]
if attr_ns == XSL_NAMESPACE:
self._handle_result_element_attr(state, instance,
qualifiedName,
attr_local, value)
else:
# prepare attributes for literal output
value = _RESULT_ELEMENT_ATTR_INFO.prepare(instance, value)
attr_qname = attribs.getQNameByName(expanded)
output_attrs.append((attr_qname, attr_ns, value))
# save information for literal output
instance._output_namespace = namespace
instance._output_nss = state.outputNamespaces
instance._output_attrs = output_attrs
# Check for top-level result-element in null namespace
if parent_state.node is self._stylesheet and \
not namespace and not state.forwardsCompatible:
raise XsltStaticError(XsltError.ILLEGAL_ELEMENT_CHILD,
parent_state.node, element=qualifiedName)
return
def endElementNS(self, expandedName, qualifiedName,
_literal_element=literal_element.literal_element,
_variable_element=variable_elements.variable_element):
"""
Callback interface for SAX.
"""
stack = self._element_state_stack
state = stack[-1]
del stack[-1]
parent_state = stack[-1]
element = state.node
# ----------------------------------------------------------
# verify that this element has all required content
try:
state.validation[content_model.END_ELEMENT]
except KeyError:
if expandedName == (XSL_NAMESPACE, u'choose'):
raise XsltStaticError(XsltError.MISSING_REQUIRED_ELEMENT,
element, element=element.nodeName,
child='xsl:when')
raise
# ----------------------------------------------------------
# setup variable context
if state.localVariables is not parent_state.localVariables:
# add context save/restore nodes
binding_stack = []
node = push_variables_node(self._root, binding_stack)
element.insertChild(0, node)
node = pop_variables_node(self._root, binding_stack)
element.appendChild(node)
# ----------------------------------------------------------
# finalize the children for this element
#element.children = tuple(state.nodes)
#for child in element.children:
# if child.doesSetup:
#s child.setup()
del state
# ----------------------------------------------------------
# update parent state
parent_node = parent_state.node
if self._stylesheet is None and parent_node is element.root:
# a literal result element as stylesheet
assert isinstance(element, _literal_element), element
try:
version = element._version
except AttributeError:
raise XsltStaticError(XsltError.LITERAL_RESULT_MISSING_VERSION,
element)
# Reset the root's validation as it has already seen an element.
parent_state.validation = _XSLT_ROOT_VALIDATION
# FIXME: use the prefix from the document for the XSL namespace
stylesheet = (XSL_NAMESPACE, u'stylesheet')
self.startElementNS(stylesheet, u'xsl:stylesheet',
{(None, u'version') : version})
template = (XSL_NAMESPACE, u'template')
self.startElementNS(template, u'xsl:template',
{(None, u'match') : u'/'})
# make this element the template's content
# Note, this MUST index the stack as the stack has changed
# due to the startElementNS() calls.
stack[-1].node.appendChild(element)
self.endElementNS(template, u'xsl:template')
self.endElementNS(stylesheet, u'xsl:stylesheet')
return
parent_node.appendChild(element)
if isinstance(element, _variable_element):
name = element._name
if parent_node is self._stylesheet:
# global variables
if name in self._global_vars:
existing = self._global_vars[name]
if self._import_index > existing:
self._global_vars[name] = self._import_index
elif self._import_index == existing:
raise XsltStaticError(XsltError.DUPLICATE_TOP_LEVEL_VAR,
element, variable=name)
else:
self._global_vars[name] = self._import_index
else:
# local variables
# it is safe to ignore import precedence here
local_vars = parent_state.localVariables
if name in local_vars:
raise XsltStaticError(XsltError.ILLEGAL_SHADOWING,
element, variable=name)
# Copy on use
if local_vars is stack[-2].localVariables:
local_vars = local_vars.copy()
parent_state.localVariables = local_vars
local_vars[name] = True
return
def characters(self, data):
"""
Callback interface for SAX.
"""
parent_state = self._element_state_stack[-1]
# verify that the current element can have text children
try:
next = parent_state.validation[content_model.TEXT_NODE]
except KeyError:
# If the parent can have element children, but not text nodes,
# ignore pure whitespace nodes. This clarification is from
# XSLT 2.0 [3.4] Whitespace Stripping.
# e.g. xsl:stylesheet, xsl:apply-templates, xsl:choose
#self._debug_validation(content_model.TEXT_NODE)
#if (content_model.EMPTY in parent_state.validation or
# not isspace(data)):
if 1:
if len(data) > 10:
data = data[:10] + '...'
raise XsltStaticError(XsltError.ILLEGAL_TEXT_CHILD,
parent_state.node, data=data,
element=parent_state.node.nodeName)
#self._debug_validation(content_model.TEXT_NODE)
else:
# update validation
parent_state.validation = next
node = xslt_text(self._root, data)
parent_state.node.appendChild(node)
return
# -- utility functions ---------------------------------------------
def _combine_stylesheet(self, element, is_import):
href = element._href
try:
new_source = self._input_source.resolve(href,
self._input_source.uri)
except (OSError, IriError):
# FIXME: create special inputsource for 4xslt command-line
#for uri in self._alt_base_uris:
# try:
# new_href = self._input_source.getUriResolver().normalize(href, uri)
# #Do we need to figure out a way to pass the hint here?
# new_source = self._input_source.factory.fromUri(new_href)
# break
# except (OSError, IriError):
# pass
#else:
raise XsltStaticError(XsltError.INCLUDE_NOT_FOUND, element,
uri=href, base=self._locator.getSystemId())
# XSLT Spec 2.6.1, Detect circular references in stylesheets
# Note, it is NOT an error to include/import the same stylesheet
# multiple times, rather that it may lead to duplicate definitions
# which are handled regardless (variables, params, templates, ...)
if new_source.uri in self._visited_stylesheet_uris:
raise XsltStaticError(XsltError.CIRCULAR_INCLUDE, element,
uri=new_source.uri)
self.parse(new_source)
self._import_index += is_import
# Always update the precedence as the included stylesheet may have
# contained imports thus increasing the import precedence.
self._stylesheet.import_precedence = self._import_index
return
def _handle_standard_attr(self, state, instance, name, value):
if name == 'extension-element-prefixes':
# a whitespace separated list of prefixes
ext = state.extensionNamespaces = state.extensionNamespaces.copy()
out = state.outputNamespaces = state.outputNamespaces.copy()
for prefix in value:
# add the namespace URI to the set of extension namespaces
try:
uri = instance.namespaces[prefix]
except KeyError:
raise XsltStaticError(XsltError.UNDEFINED_PREFIX, instance,
prefix=prefix or '#default')
ext[uri] = True
# remove all matching namespace URIs
for output_prefix, output_uri in out.items():
if output_uri == uri:
del out[output_prefix]
elif name == 'exclude-result-prefixes':
# a whitespace separated list of prefixes
out = state.outputNamespaces = state.outputNamespaces.copy()
for prefix in value:
try:
uri = instance.namespaces[prefix]
except KeyError:
raise XsltStaticError(XsltError.UNDEFINED_PREFIX, instance,
prefix=prefix or '#default')
# remove all matching namespace URIs
for output_prefix, output_uri in out.items():
if output_uri == uri:
del out[output_prefix]
elif name == 'version':
# XSLT Spec 2.5 - Forwards-Compatible Processing
state.forwardsCompatible = (value != 1.0)
instance._version = value
else:
if '-' in name:
name = name.replace('-', '_')
instance.__dict__['_' + name] = value
return
def _handle_result_element_attr(self, state, instance, elementName,
attributeName, value):
try:
attr_info = _RESULT_ELEMENT_XSL_ATTRS[attributeName]
except KeyError:
raise XsltStaticError(XsltError.ILLEGAL_XSL_NAMESPACE_ATTR,
instance, attribute=attributeName,
element=elementName)
value = attr_info.prepare(instance, value)
self._handle_standard_attr(state, instance, attributeName, value)
return
def _mutate_exception(self, exception, elementName):
assert isinstance(exception, XsltError)
exception.message = MessageSource.EXPRESSION_POSITION_INFO % (
self._locator.getSystemId(), self._locator.getLineNumber(),
self._locator.getColumnNumber(), elementName, exception.message)
return exception
# -- debugging routines --------------------------------------------
def _debug_validation(self, token=None):
from pprint import pprint
state = self._element_state_stack[-1]
parent = state.node
print '='*60
print 'parent =',parent
print 'parent class =',parent.__class__
print 'parent content =', parent.content_model
print 'initial validation'
pprint(parent.content_model.compile())
print 'current validation'
pprint(state.validation)
if token:
print 'token', token
print '='*60
return
# -- parsing routines ----------------------------------------------
def fromDocument(self, document, baseUri='', factory=None):
"""
Read in a stylesheet source document from a Domlette and add it to
the stylesheet tree. If a document with the same URI has already been
read, the cached version will be used instead (so duplicate imports,
includes, or stylesheet appends do not result in multiple reads).
"""
if not baseUri:
if hasattr(document, 'documentURI'):
baseUri = document.documentURI
elif hasattr(document, 'baseURI'):
baseUri = document.baseURI
else:
raise TypeError('baseUri required')
if factory is None:
factory = inputsource.default_factory
# check cache
if self._root is not None:
# We prefer to use an already-parsed doc, as it has had its
# external entities and XIncludes resolved already
            if baseUri in self._root.sourceNodes:
document = self._root.sourceNodes[baseUri]
# It's OK to use cached string content, but we have no idea
# whether we're using the same InputSource class as was used to
# parse it the first time, and we don't cache external entities
# or XIncludes, so there is the possibility of those things
# being resolved differently this time around. Oh well.
            elif baseUri in self._root.sources:
content = self._root.sources[baseUri]
isrc = factory.fromString(content, baseUri)
# temporarily uncache it so `parse()` will process it;
# `parse()` will add it back to the cache when finished
del self._root.sources[baseUri]
return self.parse(isrc)
isrc = factory.fromStream(None, baseUri)
features = []
properties = [(property_dom_node, document)]
stylesheet = self._parseSrc(isrc, features, properties)
# Cache for XSLT document() function
self._root.sourceNodes[baseUri] = document
return stylesheet
def parse(self, source):
"""
Read in a stylesheet source document from an InputSource and add it to
the stylesheet tree. If a document with the same URI has already been
read, the cached version will be used instead (so duplicate imports,
includes, or stylesheet appends do not result in multiple reads).
"""
uri = source.uri
#Check cache
content = ''
if self._root is not None:
# We prefer to use an already-parsed doc, as it has had its
# external entities and XIncludes resolved already
if uri in self._root.sourceNodes:
doc = self._root.sourceNodes[uri]
# temporarily uncache it so fromDocument will process it;
# fromDocument will add it back to the cache when finished
del self._root.sourceNodes[uri]
return self.fromDocument(doc, baseUri=uri)
# It's OK to use cached string content, but we have no idea
# whether we're using the same InputSource class as was used to
# parse it the first time, and we don't cache external entities
# or XIncludes, so there is the possibility of those things
# being resolved differently this time around. Oh well.
elif uri in self._root.sources:
content = self._root.sources[uri]
source = inputsource(content, uri)
if not content:
content = source.stream.read()
source = inputsource(cStringIO.StringIO(content), source.uri)
#features = [(sax.FEATURE_PROCESS_XINCLUDES, True)]
features, properties = [], []
stylesheet = self._parseSrc(source, features, properties)
# Cache the string content for subsequent uses
# e.g., xsl:import/xsl:include and document()
self._root.sources[uri] = content
return stylesheet
def _parseSrc(self, isrc, features, properties):
parser = sax.create_parser()
parser.setContentHandler(self)
for featurename, value in features:
parser.setFeature(featurename, value)
# Always set whitespace rules property
parser.setProperty(sax.PROPERTY_WHITESPACE_RULES,
_XSLT_WHITESPACE_STRIPPING)
for propertyname, value in properties:
parser.setProperty(propertyname, value)
prev_source = self._input_source
try:
self._input_source = isrc
try:
parser.parse(isrc)
except SAXParseException, e:
e = e.getException() or e
if isinstance(e, XsltError):
raise e
raise XsltError(XsltError.STYLESHEET_PARSE_ERROR,
uri=isrc.uri, text=str(e))
finally:
self._input_source = prev_source
return self._root.stylesheet
| en | 0.655522 | ######################################################################## # amara/xslt/reader/__init__.py Classes for the creation of a stylesheet object # Whitespace stripping rules for a stylesheet: # preserve all whitespace within xsl:text elements; # strip whitespace from all other elements # pseudo-nodes for save/restore of variable bindings # The XSL attributes allowed on literal elements Stores the current state of the parser. Constructor arguments/instance variables: validation - validation state for the current containing node. localVariables - set of in-scope variable bindings to determine variable shadowing. forwardsCompatible - flag indicating whether or not forwards-compatible processing is enabled. currentNamespaces - set of in-scope namespaces for the current node. extensionNamespaces - set of namespaces defining extension elements outputNamespaces - set of in-scope namespaces for literal result elements This class can be used to read, from a variety of sources, a stylesheet and all its included and imported stylesheets, building from them a single, compact representation of an XSLT stylesheet tree (an Ft.Xml.Xslt.Stylesheet.Stylesheet object). This is done with the most efficient parsing method available, and avoids creating a Domlette document for each document it reads. # runtime instance variables Add a mapping of extension element names to classes to the existing mapping of extension elements. This should only be used for standalone uses of this class. The only known standalone use for this class is for creating compiled stylesheets. The benefits of compiled stylesheets are now so minor that this use case may also disappear and then so will this function. You have been warned. # -- ContentHandler interface -------------------------------------- Callback interface for SAX. # Save the current document state for nested parsing (inclusions) Callback interface for SAX. # Our root is always a document # We use a document for this because of error checking and # because we explicitly pass ownerDocument to the nodes as # they are created # for recursive include checks for xsl:include/xsl:import # namespaces added for the next element Callback interface for SAX. # ---------------------------------------------------------- # remove URI from recursive inclusion checking # ---------------------------------------------------------- # finalize the children for the document #root.children = tuple(state.nodes) # ---------------------------------------------------------- # finalize the stylesheet AST # An xsl:import or xsl:include # Merge the top-level elements into the "parent" stylesheet # IMPLEMENTATION NOTE: stack[-1] is the import/include element, # stack[-2] is the "parent" stylesheet #parent_node = stack[-2].node #for child in self._stylesheet.children: # child.parent = parent_node # A top-most stylesheet # An additional stylesheet (e.g., an <?xml-stylesheet ...?>); # treat it as an xsl:import into the "master" stylesheet. # Always update the precedence from the included stylesheet # because it may have contained imports thus increasing its # import precedence. # Merge the top-level elements into the "master" stylesheet #stylesheet.children += self._stylesheet.children #for child in self._stylesheet.children: # child.parent = stylesheet # Prepare for a possible subsequent parse. # Prepare the "master" stylesheet Callback interface for SAX. Callback interface for SAX. 
# ---------------------------------------------------------- # update in-scope namespaces # reset for next element # ---------------------------------------------------------- # get the class defining this element # We need to try to import (and cache) it # ---------------------------------------------------------- # verify that this element can be declared here #self._debug_validation(expandedName) # ignore whatever elements are defined within an undefined # element as an exception will occur when/if this element # is actually instantiated # save this state for next go round # ---------------------------------------------------------- # create the instance defining this element # -- XSLT element -------------------------------- # Handle attributes in the null-namespace #raise self._mutate_exception(e, qualifiedName) # Process attributes with a namespace-uri and check for # any illegal attributes in the null-namespace # XSLT Spec 2.6 - Combining Stylesheets # -- extension element ------------------------- # Handle attributes in the null-namespace #raise self._mutate_exception(e, qualifiedName) # Process attributes with a namespace-uri and check for # any illegal attributes in the null-namespace # -- literal result element ------------------------------ # prepare attributes for literal output # save information for literal output # Check for top-level result-element in null namespace Callback interface for SAX. # ---------------------------------------------------------- # verify that this element has all required content # ---------------------------------------------------------- # setup variable context # add context save/restore nodes # ---------------------------------------------------------- # finalize the children for this element #element.children = tuple(state.nodes) #for child in element.children: # if child.doesSetup: #s child.setup() # ---------------------------------------------------------- # update parent state # a literal result element as stylesheet # Reset the root's validation as it has already seen an element. # FIXME: use the prefix from the document for the XSL namespace # make this element the template's content # Note, this MUST index the stack as the stack has changed # due to the startElementNS() calls. # global variables # local variables # it is safe to ignore import precedence here # Copy on use Callback interface for SAX. # verify that the current element can have text children # If the parent can have element children, but not text nodes, # ignore pure whitespace nodes. This clarification is from # XSLT 2.0 [3.4] Whitespace Stripping. # e.g. xsl:stylesheet, xsl:apply-templates, xsl:choose #self._debug_validation(content_model.TEXT_NODE) #if (content_model.EMPTY in parent_state.validation or # not isspace(data)): #self._debug_validation(content_model.TEXT_NODE) # update validation # -- utility functions --------------------------------------------- # FIXME: create special inputsource for 4xslt command-line #for uri in self._alt_base_uris: # try: # new_href = self._input_source.getUriResolver().normalize(href, uri) # #Do we need to figure out a way to pass the hint here? # new_source = self._input_source.factory.fromUri(new_href) # break # except (OSError, IriError): # pass #else: # XSLT Spec 2.6.1, Detect circular references in stylesheets # Note, it is NOT an error to include/import the same stylesheet # multiple times, rather that it may lead to duplicate definitions # which are handled regardless (variables, params, templates, ...) 
# Always update the precedence as the included stylesheet may have # contained imports thus increasing the import precedence. # a whitespace separated list of prefixes # add the namespace URI to the set of extension namespaces # remove all matching namespace URIs # a whitespace separated list of prefixes # remove all matching namespace URIs # XSLT Spec 2.5 - Forwards-Compatible Processing # -- debugging routines -------------------------------------------- # -- parsing routines ---------------------------------------------- Read in a stylesheet source document from a Domlette and add it to the stylesheet tree. If a document with the same URI has already been read, the cached version will be used instead (so duplicate imports, includes, or stylesheet appends do not result in multiple reads). # check cache # We prefer to use an already-parsed doc, as it has had its # external entities and XIncludes resolved already # It's OK to use cached string content, but we have no idea # whether we're using the same InputSource class as was used to # parse it the first time, and we don't cache external entities # or XIncludes, so there is the possibility of those things # being resolved differently this time around. Oh well. # temporarily uncache it so `parse()` will process it; # `parse()` will add it back to the cache when finished # Cache for XSLT document() function Read in a stylesheet source document from an InputSource and add it to the stylesheet tree. If a document with the same URI has already been read, the cached version will be used instead (so duplicate imports, includes, or stylesheet appends do not result in multiple reads). #Check cache # We prefer to use an already-parsed doc, as it has had its # external entities and XIncludes resolved already # temporarily uncache it so fromDocument will process it; # fromDocument will add it back to the cache when finished # It's OK to use cached string content, but we have no idea # whether we're using the same InputSource class as was used to # parse it the first time, and we don't cache external entities # or XIncludes, so there is the possibility of those things # being resolved differently this time around. Oh well. #features = [(sax.FEATURE_PROCESS_XINCLUDES, True)] # Cache the string content for subsequent uses # e.g., xsl:import/xsl:include and document() # Always set whitespace rules property | 2.439318 | 2 |
tests/onegov/election_day/models/test_subscriber.py | politbuero-kampagnen/onegov-cloud | 0 | 6632157 | from onegov.election_day.models import EmailSubscriber
from onegov.election_day.models import SmsSubscriber
from onegov.election_day.models import Subscriber
def test_subscriber(session):
session.add(Subscriber(address='endpoint', locale='de_CH'))
session.add(EmailSubscriber(address='<EMAIL>', locale='fr_CH'))
session.add(SmsSubscriber(address='+41791112233', locale='it_CH'))
session.flush()
assert session.query(Subscriber).count() == 3
subscriber = session.query(EmailSubscriber).one()
assert subscriber.id
assert subscriber.address == '<EMAIL>'
assert subscriber.locale == 'fr_CH'
subscriber = session.query(SmsSubscriber).one()
assert subscriber.id
assert subscriber.address == '+41791112233'
assert subscriber.locale == 'it_CH'
| from onegov.election_day.models import EmailSubscriber
from onegov.election_day.models import SmsSubscriber
from onegov.election_day.models import Subscriber
def test_subscriber(session):
session.add(Subscriber(address='endpoint', locale='de_CH'))
session.add(EmailSubscriber(address='<EMAIL>', locale='fr_CH'))
session.add(SmsSubscriber(address='+41791112233', locale='it_CH'))
session.flush()
assert session.query(Subscriber).count() == 3
subscriber = session.query(EmailSubscriber).one()
assert subscriber.id
assert subscriber.address == '<EMAIL>'
assert subscriber.locale == 'fr_CH'
subscriber = session.query(SmsSubscriber).one()
assert subscriber.id
assert subscriber.address == '+41791112233'
assert subscriber.locale == 'it_CH'
| none | 1 | 2.394027 | 2 |
|
groundup/urls.py | onhan/free-site | 0 | 6632158 | <filename>groundup/urls.py
"""groundup URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.urls import include, path
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import TemplateView
from django.views.generic.base import RedirectView
from django.contrib.flatpages.sitemaps import FlatPageSitemap
from django.contrib.flatpages import views
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sitemaps.views import sitemap
from newsroom.models import Article
from newsroom.models import Author
from gallery.models import Photograph
from filebrowser.sites import site
from ajax_select import urls as ajax_select_urls
article_dict = {
'queryset': Article.objects.published(),
'date_field': 'published',
}
author_dict = {
'queryset': Author.objects.all(),
'date_field': 'modified',
}
photo_dict = {
'queryset': Photograph.objects.all(),
'date_field': 'modified',
}
app_name = 'groundup'
urlpatterns = [
path('admin/filebrowser/', site.urls),
path('grappelli/', include('grappelli.urls')),
url(r'^ajax_select/', include(ajax_select_urls)),
path('admin/login/', RedirectView.as_view(url='/accounts/login/')),
path('admin/', admin.site.urls),
path('imagegallery/', include('gallery.urls')),
path('', include('newsroom.urls')),
path('', include('payment.urls')),
path('', include('letters.urls')),
path('', include('agony.urls')),
path('', include('security.urls')),
path('', include('allauth_2fa.urls')),
path('accounts/', include('allauth.urls')),
url(r'^sitemap\.xml$', sitemap,
{'sitemaps':
{'articles': GenericSitemap(article_dict,
priority=0.5),
'authors': GenericSitemap(author_dict,
priority=0.5,
changefreq='weekly'),
'photos': GenericSitemap(photo_dict,
priority=0.5),
'flatpages': FlatPageSitemap}},
name='django.contrib.sitemaps.views.sitemap'),
path('cache/', include('clearcache.urls', namespace="cache")),
url(r'^robots\.txt',
TemplateView.as_view(template_name='robots.txt',
content_type='text/plain'),
name='robots.txt'),
url(r'^404testing',
TemplateView.as_view(template_name='404.html'),
name='test404'),
url(r'^500testing',
TemplateView.as_view(template_name='500.html'),
name='test500'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
path('__debug__/', include(debug_toolbar.urls)),
]
urlpatterns += [
url(r'^(?P<url>.*/)$', views.flatpage),
]
| <filename>groundup/urls.py
"""groundup URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.urls import include, path
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import TemplateView
from django.views.generic.base import RedirectView
from django.contrib.flatpages.sitemaps import FlatPageSitemap
from django.contrib.flatpages import views
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sitemaps.views import sitemap
from newsroom.models import Article
from newsroom.models import Author
from gallery.models import Photograph
from filebrowser.sites import site
from ajax_select import urls as ajax_select_urls
article_dict = {
'queryset': Article.objects.published(),
'date_field': 'published',
}
author_dict = {
'queryset': Author.objects.all(),
'date_field': 'modified',
}
photo_dict = {
'queryset': Photograph.objects.all(),
'date_field': 'modified',
}
app_name = 'groundup'
urlpatterns = [
path('admin/filebrowser/', site.urls),
path('grappelli/', include('grappelli.urls')),
url(r'^ajax_select/', include(ajax_select_urls)),
path('admin/login/', RedirectView.as_view(url='/accounts/login/')),
path('admin/', admin.site.urls),
path('imagegallery/', include('gallery.urls')),
path('', include('newsroom.urls')),
path('', include('payment.urls')),
path('', include('letters.urls')),
path('', include('agony.urls')),
path('', include('security.urls')),
path('', include('allauth_2fa.urls')),
path('accounts/', include('allauth.urls')),
url(r'^sitemap\.xml$', sitemap,
{'sitemaps':
{'articles': GenericSitemap(article_dict,
priority=0.5),
'authors': GenericSitemap(author_dict,
priority=0.5,
changefreq='weekly'),
'photos': GenericSitemap(photo_dict,
priority=0.5),
'flatpages': FlatPageSitemap}},
name='django.contrib.sitemaps.views.sitemap'),
path('cache/', include('clearcache.urls', namespace="cache")),
url(r'^robots\.txt',
TemplateView.as_view(template_name='robots.txt',
content_type='text/plain'),
name='robots.txt'),
url(r'^404testing',
TemplateView.as_view(template_name='404.html'),
name='test404'),
url(r'^500testing',
TemplateView.as_view(template_name='500.html'),
name='test500'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
path('__debug__/', include(debug_toolbar.urls)),
]
urlpatterns += [
url(r'^(?P<url>.*/)$', views.flatpage),
]
| en | 0.614838 | groundup URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) | 2.435251 | 2 |
android/deps.bzl | Dig-Doug/rules_proto | 0 | 6632159 | load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
load(
"//:deps.bzl",
"build_bazel_rules_android",
"com_google_protobuf",
"com_google_protobuf_lite",
"io_grpc_grpc_java",
)
load(
"//protobuf:deps.bzl",
"protobuf",
)
def com_google_guava_guava_android(**kwargs):
if "com_google_guava_guava_android" not in native.existing_rules():
jvm_maven_import_external(
name = "com_google_guava_guava_android",
artifact = "com.google.guava:guava:27.0.1-android",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "caf0955aed29a1e6d149f85cfb625a89161b5cf88e0e246552b7ffa358204e28",
)
def android_proto_compile(**kwargs):
protobuf(**kwargs)
com_google_protobuf_lite(**kwargs)
def android_grpc_compile(**kwargs):
android_proto_compile(**kwargs)
io_grpc_grpc_java(**kwargs)
def android_proto_library(**kwargs):
android_proto_compile(**kwargs)
build_bazel_rules_android(**kwargs)
com_google_guava_guava_android(**kwargs)
def android_grpc_library(**kwargs):
android_grpc_compile(**kwargs)
android_proto_library(**kwargs)
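# -- editor's usage note (illustrative, not part of the original file) -------
# These macros are meant to be called from a consuming WORKSPACE so that each
# rule's external dependencies are declared at most once; the repository name
# in the load label below is an assumption for illustration.
#
#     load("@rules_proto//android:deps.bzl", "android_grpc_library")
#     android_grpc_library()  # pulls in protobuf, grpc-java, rules_android
#                             # and guava-android if not already defined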
| load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
load(
"//:deps.bzl",
"build_bazel_rules_android",
"com_google_protobuf",
"com_google_protobuf_lite",
"io_grpc_grpc_java",
)
load(
"//protobuf:deps.bzl",
"protobuf",
)
def com_google_guava_guava_android(**kwargs):
if "com_google_guava_guava_android" not in native.existing_rules():
jvm_maven_import_external(
name = "com_google_guava_guava_android",
artifact = "com.google.guava:guava:27.0.1-android",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "caf0955aed29a1e6d149f85cfb625a89161b5cf88e0e246552b7ffa358204e28",
)
def android_proto_compile(**kwargs):
protobuf(**kwargs)
com_google_protobuf_lite(**kwargs)
def android_grpc_compile(**kwargs):
android_proto_compile(**kwargs)
io_grpc_grpc_java(**kwargs)
def android_proto_library(**kwargs):
android_proto_compile(**kwargs)
build_bazel_rules_android(**kwargs)
com_google_guava_guava_android(**kwargs)
def android_grpc_library(**kwargs):
android_grpc_compile(**kwargs)
android_proto_library(**kwargs)
| none | 1 | 1.518277 | 2 |
|
experiments/voe_mogaze.py | anish-pratheepkumar/anish-pratheepkumar.github.io | 1 | 6632160 | import os
import sys
import click
project_dir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, project_dir)
os.environ['PATH'] += os.pathsep + project_dir
# -------------------------
from experiments import config
@click.command()
@click.option("--architecture", "-a", help='select one architecture: red OR prednet', default="prednet")
@click.option("--stage_1", help='Execute Prediction Stage', default=True)
@click.option("--avoid_goal", help='True for avoiding goal', default=False)
def run(architecture, stage_1=True, avoid_goal=False):
print("running, {} stage 1: {}, avoid_goal {}".format(architecture, stage_1, avoid_goal))
config.ARCHITECTURE = architecture
config.ACTION = "p1_1"
config.VIZ_VOE_CHULL = False
config.AVOID_GOAL = avoid_goal
from utils.evaluation.mogaze_vis import apply_mogaze_settings
apply_mogaze_settings(avoid_goal=avoid_goal)
from utils.evaluation import mogaze_gen_qpos_for_hulls
if stage_1:
mogaze_gen_qpos_for_hulls.run()
config.reset_tf()
from utils.data_mogaze_vis.mogaze_utils import build_env
    # deferred import kept after config.reset_tf(), but it must precede its use below
    from utils.evaluation import chull_viz_vol_occc_error
    chull_viz_vol_occc_error.play(build_env, force_update=False, scenarios=[""])
if __name__ == '__main__':
run()
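# -- editor's usage note (illustrative, not from the original repo) ----------
# With the click options above, the evaluations summarised below would be
# launched roughly as follows (how click parses the boolean defaults is an
# assumption; check --help for the exact flag syntax):
#
#     python experiments/voe_mogaze.py --architecture red
#     python experiments/voe_mogaze.py -a prednet                     # MSM
#     python experiments/voe_mogaze.py -a prednet --avoid_goal True   # MSMwg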
# =====================================================RED==============================================================
# voc_avg_step_error = 52.050476178331344
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error ± 0.5std (%) | 33.354 ± 8.13| 36.354 ± 8.60| 40.999 ± 9.90| 42.783 ± 10.25| 46.690 ± 11.05| 48.500 ± 11.27| 50.737 ± 11.45| 52.050 ± 11.39|
# voc_error ± 0.5std (%) &$ 33.4 \pm 8.1$&$ 36.4 \pm 8.6$&$ 41.0 \pm 9.9$&$ 42.8 \pm 10.3$&$ 46.7 \pm 11.1$&$ 48.5 \pm 11.3$&$ 50.7 \pm 11.5$&$ 52.1 \pm 11.4$\\
# End time: 1624159094.5830145
# =====================================================PredNet==========================================================
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error ± std (%) | 14.832 ± 6.68| 17.682 ± 6.19| 23.536 ± 8.29| 26.041 ± 9.50| 31.637 ± 11.02| 34.106 ± 11.63| 37.335 ± 12.23| 39.510 ± 12.47|
# voc_error ± std (%) $&$ 14.8 \pm 6.7$&$ 17.7 \pm 6.19$&$ 23.6 \pm 8.3$&$ 26.1 \pm 9.5$&$ 31.6 \pm 11.0$&$ 34.1 \pm 11.6$&$ 37.3 \pm 12.2$&$ 39.5 \pm 12.5$\\
# End time: 1624193836.6961882
# run("red")
# run("red", True, False)
# run("prednet", True, False) # Prednet MSM
# run("prednet", False, True) #Prednet MSMwg
# ==================================NEW======================================================================
# ==================Prednet MSM=============
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error ± std (%) | 15.71 & 18.57 & 23.97 & 26.74 & 32.06 & 34.44 & 37.70 & 39.82 \\
# ==================Prednet MSMwG=============
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error & 15.10 & 17.54 & 23.73 & 26.47 & 33.17 & 35.48 & 38.32 & 40.31 \\
# ==================RED=======================
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error & 32.94 & 35.78 & 40.53 & 42.36 & 46.36 & 48.02 & 50.21 & 51.56
| import os
import sys
import click
project_dir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, project_dir)
os.environ['PATH'] += os.pathsep + project_dir
# -------------------------
from experiments import config
@click.command()
@click.option("--architecture", "-a", help='select one architecture: red OR prednet', default="prednet")
@click.option("--stage_1", help='Execute Prediction Stage', default=True)
@click.option("--avoid_goal", help='True for avoiding goal', default=False)
def run(architecture, stage_1=True, avoid_goal=False):
print("running, {} stage 1: {}, avoid_goal {}".format(architecture, stage_1, avoid_goal))
config.ARCHITECTURE = architecture
config.ACTION = "p1_1"
config.VIZ_VOE_CHULL = False
config.AVOID_GOAL = avoid_goal
from utils.evaluation.mogaze_vis import apply_mogaze_settings
apply_mogaze_settings(avoid_goal=avoid_goal)
from utils.evaluation import mogaze_gen_qpos_for_hulls
if stage_1:
mogaze_gen_qpos_for_hulls.run()
config.reset_tf()
from utils.data_mogaze_vis.mogaze_utils import build_env
    from utils.evaluation import chull_viz_vol_occc_error
    chull_viz_vol_occc_error.play(build_env, force_update=False, scenarios=[""])
if __name__ == '__main__':
run()
# =====================================================RED==============================================================
# voc_avg_step_error = 52.050476178331344
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error ± 0.5std (%) | 33.354 ± 8.13| 36.354 ± 8.60| 40.999 ± 9.90| 42.783 ± 10.25| 46.690 ± 11.05| 48.500 ± 11.27| 50.737 ± 11.45| 52.050 ± 11.39|
# voc_error ± 0.5std (%) &$ 33.4 \pm 8.1$&$ 36.4 \pm 8.6$&$ 41.0 \pm 9.9$&$ 42.8 \pm 10.3$&$ 46.7 \pm 11.1$&$ 48.5 \pm 11.3$&$ 50.7 \pm 11.5$&$ 52.1 \pm 11.4$\\
# End time: 1624159094.5830145
# =====================================================PredNet==========================================================
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error ± std (%) | 14.832 ± 6.68| 17.682 ± 6.19| 23.536 ± 8.29| 26.041 ± 9.50| 31.637 ± 11.02| 34.106 ± 11.63| 37.335 ± 12.23| 39.510 ± 12.47|
# voc_error ± std (%) $&$ 14.8 \pm 6.7$&$ 17.7 \pm 6.19$&$ 23.6 \pm 8.3$&$ 26.1 \pm 9.5$&$ 31.6 \pm 11.0$&$ 34.1 \pm 11.6$&$ 37.3 \pm 12.2$&$ 39.5 \pm 12.5$\\
# End time: 1624193836.6961882
# run("red")
# run("red", True, False)
# run("prednet", True, False) # Prednet MSM
# run("prednet", False, True) #Prednet MSMwg
# ==================================NEW======================================================================
# ==================Prednet MSM=============
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error ± std (%) | 15.71 & 18.57 & 23.97 & 26.74 & 32.06 & 34.44 & 37.70 & 39.82 \\
# ==================Prednet MSMwG=============
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error & 15.10 & 17.54 & 23.73 & 26.47 & 33.17 & 35.48 & 38.32 & 40.31 \\
# ==================RED=======================
# prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 |
# voc_error & 32.94 & 35.78 & 40.53 & 42.36 & 46.36 & 48.02 & 50.21 & 51.56
| en | 0.227106 | # ------------------------- # =====================================================RED============================================================== # voc_avg_step_error = 52.050476178331344 # prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 | # voc_error ± 0.5std (%) | 33.354 ± 8.13| 36.354 ± 8.60| 40.999 ± 9.90| 42.783 ± 10.25| 46.690 ± 11.05| 48.500 ± 11.27| 50.737 ± 11.45| 52.050 ± 11.39| # voc_error ± 0.5std (%) &$ 33.4 \pm 8.1$&$ 36.4 \pm 8.6$&$ 41.0 \pm 9.9$&$ 42.8 \pm 10.3$&$ 46.7 \pm 11.1$&$ 48.5 \pm 11.3$&$ 50.7 \pm 11.5$&$ 52.1 \pm 11.4$\\ # End time: 1624159094.5830145 # =====================================================PredNet========================================================== # prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 | # voc_error ± std (%) | 14.832 ± 6.68| 17.682 ± 6.19| 23.536 ± 8.29| 26.041 ± 9.50| 31.637 ± 11.02| 34.106 ± 11.63| 37.335 ± 12.23| 39.510 ± 12.47| # voc_error ± std (%) $&$ 14.8 \pm 6.7$&$ 17.7 \pm 6.19$&$ 23.6 \pm 8.3$&$ 26.1 \pm 9.5$&$ 31.6 \pm 11.0$&$ 34.1 \pm 11.6$&$ 37.3 \pm 12.2$&$ 39.5 \pm 12.5$\\ # End time: 1624193836.6961882 # run("red") # run("red", True, False) # run("prednet", True, False) # Prednet MSM # run("prednet", False, True) #Prednet MSMwg # ==================================NEW====================================================================== # ==================Prednet MSM============= # prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 | # voc_error ± std (%) | 15.71 & 18.57 & 23.97 & 26.74 & 32.06 & 34.44 & 37.70 & 39.82 \\ # ==================Prednet MSMwG============= # prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 | # voc_error & 15.10 & 17.54 & 23.73 & 26.47 & 33.17 & 35.48 & 38.32 & 40.31 \\ # ==================RED======================= # prediction_step | 80 | 160 | 320 | 400 | 600 | 720 | 880 | 1000 | # voc_error & 32.94 & 35.78 & 40.53 & 42.36 & 46.36 & 48.02 & 50.21 & 51.56 | 2.010335 | 2 |
Exam6-practice/specialNumbers.py | nikolayvutov/Python | 0 | 6632161 | <reponame>nikolayvutov/Python
n = int(input())
for i in range(1, 10):
for j in range(1, 10):
for k in range(1, 10):
for l in range(1, 10):
if n % i == 0 and n % j == 0 and n % k == 0 and n % l == 0:
print('{0}{1}{2}{3}'.format(i, j, k ,l), end=' ') | n = int(input())
for i in range(1, 10):
for j in range(1, 10):
for k in range(1, 10):
for l in range(1, 10):
if n % i == 0 and n % j == 0 and n % k == 0 and n % l == 0:
print('{0}{1}{2}{3}'.format(i, j, k ,l), end=' ') | none | 1 | 3.445095 | 3 |
|
vision/camcalib/fusion.py | Photon26/wrs-main-210414 | 0 | 6632162 | import cv2
import math
import yaml
import numpy as np
from cv2 import aruco
from sklearn import cluster
import utiltools.robotmath as rm
from pandaplotutils import pandactrl
def trackobject_multicamfusion(camcaps, cammtxs, camdists, camrelhomos, aruco_dict, arucomarkersize = 100, nframe = 5, denoise = True, bandwidth=10):
"""
:param camcaps: a list of cv2.VideoCaptures
:param cammtxs: a list of mtx for each of the camcaps
    :param camdists: a list of dist for each of the camcaps
    :param camrelhomos: a list of relative homogeneous matrices
    :param aruco_dict: NOTE this is not a constant like aruco.DICT_6X6_250; it is the return value of aruco.Dictionary_get
    :param nframe: number of frames used for fusion
    :param denoise: whether to denoise the pose lists before averaging
    :param bandwidth: the bandwidth for meanshift; a large bandwidth leads to tracking instead of clustering, a small bandwidth will be very costly
:return:
author: weiwei
date: 20190422
"""
parameters = aruco.DetectorParameters_create()
framelist = {}
for i in range(nframe):
for capid, cap in enumerate(camcaps):
ret, frame = cap.read()
            corners, ids, rejectedImgPoints = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)
            if ids is not None:
rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, arucomarkersize, cammtxs[capid], camdists[capid])
for i in range(ids.size):
rot = cv2.Rodrigues(rvecs[i])[0]
pos = tvecs[i][0].ravel()
if capid > 0:
matinb = np.dot(rm.homoinverse(camrelhomos[capid-1]), rm.homobuild(pos, rot))
rot = matinb[:3, :3]
pos = matinb[:3, 3]
idslist = ids.ravel().tolist()
if idslist[i] in framelist:
framelist[idslist[i]].append([pos, rot])
else:
framelist[idslist[i]] = [[pos, rot]]
import time
frameavglist = {}
for id in framelist:
posveclist = [frame[0] for frame in framelist[id]]
rotmatlist = [frame[1] for frame in framelist[id]]
if len(posveclist) >= nframe:
posvecavg = rm.posvec_average(posveclist, bandwidth, denoise)
rotmatavg = rm.rotmat_average(rotmatlist, bandwidth, denoise)
frameavglist[id] = [posvecavg, rotmatavg]
return frameavglist
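# Minimal numpy sketch of the pose-fusion step above, assuming rm.homobuild(pos, rot)
# stacks the rotation/translation into a 4x4 homogeneous matrix and rm.homoinverse
# returns the rigid-transform inverse. It mirrors the matinb computation in the loop:
# a marker pose seen by a side camera is re-expressed through the relative homo matrix.
# The helper name below is illustrative and is not used elsewhere in this module.
def _fuse_pose_sketch(pos, rot, camrelhomo):
    homo_marker = np.eye(4)
    homo_marker[:3, :3] = rot
    homo_marker[:3, 3] = pos
    # inverse of a rigid transform: [R | t]^-1 = [R^T | -R^T t]
    homo_inv = np.eye(4)
    homo_inv[:3, :3] = camrelhomo[:3, :3].T
    homo_inv[:3, 3] = -camrelhomo[:3, :3].T.dot(camrelhomo[:3, 3])
    matinb = homo_inv.dot(homo_marker)
    return matinb[:3, 3], matinb[:3, :3]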
if __name__=='__main__':
# squaremarkersize = 40
#
# calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs0/', savename='cam0_calib.yaml')
# calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs2/', savename='cam2_calib.yaml')
# calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs4/', savename='cam4_calib.yaml')
# find_rhomo(basecamyamlpath = 'cam0_calib.yaml', relcamyamlpath = 'cam2_calib.yaml', savename = 'homo_rb20.yaml')
# find_rhomo(basecamyamlpath = 'cam0_calib.yaml', relcamyamlpath = 'cam4_calib.yaml', savename = 'homo_rb40.yaml')
base = pandactrl.World(camp=[2700, 300, 2700], lookatpos=[0, 0, 0])
# framenp = base.pggen.genAxis()
# framenp.reparentTo(base.render)
# base.run()
base.pggen.plotAxis(base.render)
homo_rb20 = yaml.load(open('homo_rb20.yaml', 'r'), Loader=yaml.UnsafeLoader)
homo_rb40 = yaml.load(open('homo_rb40.yaml', 'r'), Loader=yaml.UnsafeLoader)
# draw in 3d to validate
pandamat4homo_r2 = base.pg.np4ToMat4(rm.homoinverse(homo_rb20))
base.pggen.plotAxis(base.render, spos = pandamat4homo_r2.getRow3(3), pandamat3 = pandamat4homo_r2.getUpper3())
pandamat4homo_r4 = base.pg.np4ToMat4(rm.homoinverse(homo_rb40))
base.pggen.plotAxis(base.render, spos = pandamat4homo_r4.getRow3(3), pandamat3 = pandamat4homo_r4.getUpper3())
# show in videos
mtx0, dist0, rvecs0, tvecs0, candfiles0 = yaml.load(open('cam0_calib.yaml', 'r'), Loader=yaml.UnsafeLoader)
mtx2, dist2, rvecs2, tvecs2, candfiles2 = yaml.load(open('cam2_calib.yaml', 'r'), Loader=yaml.UnsafeLoader)
mtx4, dist4, rvecs4, tvecs4, candfiles4 = yaml.load(open('cam4_calib.yaml', 'r'), Loader=yaml.UnsafeLoader)
import time
# arucomarkersize = int(40*.57)
# aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
cap0 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(2)
cap4 = cv2.VideoCapture(4)
camcaps = [cap0, cap2, cap4]
cammtxs = [mtx0, mtx2, mtx4]
camdists = [dist0, dist2, dist4]
camrelhomos = [homo_rb20, homo_rb40]
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
arucomarkersize = 100
nframe = 2
denoise = True
framenplist = [[]]
def updateview(framenplist, task):
if len(framenplist[0]) > 0:
for axisnp in framenplist[0]:
axisnp.removeNode()
framenplist[0] = []
tic = time.time()
frameavglist = trackobject_multicamfusion(camcaps, cammtxs, camdists, camrelhomos, aruco_dict, arucomarkersize, nframe, denoise, bandwidth=arucomarkersize*.05)
print(time.time()-tic)
for id in frameavglist:
posvecavg = frameavglist[id][0]
rotmatavg = frameavglist[id][1]
framenp = base.pggen.genAxis(spos=base.pg.npToV3(posvecavg), pandamat3=base.pg.npToMat3(rotmatavg))
framenp.reparentTo(base.render)
framenplist[0].append(framenp)
return task.again
taskMgr.doMethodLater(0.01, updateview, "updateview", extraArgs=[framenplist], appendTask=True)
base.run() | import cv2
import math
import yaml
import numpy as np
from cv2 import aruco
from sklearn import cluster
import utiltools.robotmath as rm
from pandaplotutils import pandactrl
def trackobject_multicamfusion(camcaps, cammtxs, camdists, camrelhomos, aruco_dict, arucomarkersize = 100, nframe = 5, denoise = True, bandwidth=10):
"""
:param camcaps: a list of cv2.VideoCaptures
:param cammtxs: a list of mtx for each of the camcaps
    :param camdists: a list of dist for each of the camcaps
    :param camrelhomos: a list of relative homogeneous matrices
    :param aruco_dict: NOTE this is not a constant like aruco.DICT_6X6_250; it is the return value of aruco.Dictionary_get
    :param nframe: number of frames used for fusion
    :param denoise: whether to denoise the pose lists before averaging
    :param bandwidth: the bandwidth for meanshift; a large bandwidth leads to tracking instead of clustering, a small bandwidth will be very costly
:return:
author: weiwei
date: 20190422
"""
parameters = aruco.DetectorParameters_create()
framelist = {}
for i in range(nframe):
for capid, cap in enumerate(camcaps):
ret, frame = cap.read()
            corners, ids, rejectedImgPoints = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)
            if ids is not None:
rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, arucomarkersize, cammtxs[capid], camdists[capid])
for i in range(ids.size):
rot = cv2.Rodrigues(rvecs[i])[0]
pos = tvecs[i][0].ravel()
if capid > 0:
matinb = np.dot(rm.homoinverse(camrelhomos[capid-1]), rm.homobuild(pos, rot))
rot = matinb[:3, :3]
pos = matinb[:3, 3]
idslist = ids.ravel().tolist()
if idslist[i] in framelist:
framelist[idslist[i]].append([pos, rot])
else:
framelist[idslist[i]] = [[pos, rot]]
import time
frameavglist = {}
for id in framelist:
posveclist = [frame[0] for frame in framelist[id]]
rotmatlist = [frame[1] for frame in framelist[id]]
if len(posveclist) >= nframe:
posvecavg = rm.posvec_average(posveclist, bandwidth, denoise)
rotmatavg = rm.rotmat_average(rotmatlist, bandwidth, denoise)
frameavglist[id] = [posvecavg, rotmatavg]
return frameavglist
if __name__=='__main__':
# squaremarkersize = 40
#
# calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs0/', savename='cam0_calib.yaml')
# calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs2/', savename='cam2_calib.yaml')
# calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs4/', savename='cam4_calib.yaml')
# find_rhomo(basecamyamlpath = 'cam0_calib.yaml', relcamyamlpath = 'cam2_calib.yaml', savename = 'homo_rb20.yaml')
# find_rhomo(basecamyamlpath = 'cam0_calib.yaml', relcamyamlpath = 'cam4_calib.yaml', savename = 'homo_rb40.yaml')
base = pandactrl.World(camp=[2700, 300, 2700], lookatpos=[0, 0, 0])
# framenp = base.pggen.genAxis()
# framenp.reparentTo(base.render)
# base.run()
base.pggen.plotAxis(base.render)
homo_rb20 = yaml.load(open('homo_rb20.yaml', 'r'), Loader=yaml.UnsafeLoader)
homo_rb40 = yaml.load(open('homo_rb40.yaml', 'r'), Loader=yaml.UnsafeLoader)
# draw in 3d to validate
pandamat4homo_r2 = base.pg.np4ToMat4(rm.homoinverse(homo_rb20))
base.pggen.plotAxis(base.render, spos = pandamat4homo_r2.getRow3(3), pandamat3 = pandamat4homo_r2.getUpper3())
pandamat4homo_r4 = base.pg.np4ToMat4(rm.homoinverse(homo_rb40))
base.pggen.plotAxis(base.render, spos = pandamat4homo_r4.getRow3(3), pandamat3 = pandamat4homo_r4.getUpper3())
# show in videos
mtx0, dist0, rvecs0, tvecs0, candfiles0 = yaml.load(open('cam0_calib.yaml', 'r'), Loader=yaml.UnsafeLoader)
mtx2, dist2, rvecs2, tvecs2, candfiles2 = yaml.load(open('cam2_calib.yaml', 'r'), Loader=yaml.UnsafeLoader)
mtx4, dist4, rvecs4, tvecs4, candfiles4 = yaml.load(open('cam4_calib.yaml', 'r'), Loader=yaml.UnsafeLoader)
import time
# arucomarkersize = int(40*.57)
# aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
cap0 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(2)
cap4 = cv2.VideoCapture(4)
camcaps = [cap0, cap2, cap4]
cammtxs = [mtx0, mtx2, mtx4]
camdists = [dist0, dist2, dist4]
camrelhomos = [homo_rb20, homo_rb40]
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
arucomarkersize = 100
nframe = 2
denoise = True
framenplist = [[]]
def updateview(framenplist, task):
if len(framenplist[0]) > 0:
for axisnp in framenplist[0]:
axisnp.removeNode()
framenplist[0] = []
tic = time.time()
frameavglist = trackobject_multicamfusion(camcaps, cammtxs, camdists, camrelhomos, aruco_dict, arucomarkersize, nframe, denoise, bandwidth=arucomarkersize*.05)
print(time.time()-tic)
for id in frameavglist:
posvecavg = frameavglist[id][0]
rotmatavg = frameavglist[id][1]
framenp = base.pggen.genAxis(spos=base.pg.npToV3(posvecavg), pandamat3=base.pg.npToMat3(rotmatavg))
framenp.reparentTo(base.render)
framenplist[0].append(framenp)
return task.again
taskMgr.doMethodLater(0.01, updateview, "updateview", extraArgs=[framenplist], appendTask=True)
base.run() | en | 0.572291 | :param camcaps: a list of cv2.VideoCaptures :param cammtxs: a list of mtx for each of the camcaps :param camdists: as list of dist for each of the camcaps :param camrelhomos: a list of relative homogeneous matrices :param aruco_dict: NOTE this is not things like aruco.DICT_6x6_250, instead, it is the return value of aruco.Dictionary_get :param nframe: number of frames used for fusion :param denoise: :param bandwidth: the bandwidth for meanshift, a large bandwidth leads to tracking instead of clustering, a small bandwith will be very costly :return: author: weiwei date: 20190422 # squaremarkersize = 40 # # calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs0/', savename='cam0_calib.yaml') # calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs2/', savename='cam2_calib.yaml') # calibcharucoboard(7,5, squaremarkersize=squaremarkersize, imgspath='./camimgs4/', savename='cam4_calib.yaml') # find_rhomo(basecamyamlpath = 'cam0_calib.yaml', relcamyamlpath = 'cam2_calib.yaml', savename = 'homo_rb20.yaml') # find_rhomo(basecamyamlpath = 'cam0_calib.yaml', relcamyamlpath = 'cam4_calib.yaml', savename = 'homo_rb40.yaml') # framenp = base.pggen.genAxis() # framenp.reparentTo(base.render) # base.run() # draw in 3d to validate # show in videos # arucomarkersize = int(40*.57) # aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250) | 2.262472 | 2 |
HydrothermalCoordination_Metaheuristics/EvolutionaryParticleSwarmOptimization/Functions/mutate.py | anaSilva2018/TryingPy | 0 | 6632163 | # -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
def _popmut(cpar, nper, mdupl, indxa, indxb):
mmut = np.zeros([indxa, 2*cpar.pop])
munif = np.zeros([indxb, 2*cpar.pop])
mgauss = np.zeros([indxb, 2*cpar.pop])
for i in range(indxb):
for j in range(cpar.pop):
mmut[i, j] = mdupl[i, j]
for j in range(cpar.pop, 2*cpar.pop, 1):
munif[nper, j] = np.random.uniform(0, 1, 1)
mgauss[nper, j] = (np.power((-2*np.log(munif[nper, j])), 0.5))*np.cos(2*np.pi* munif[nper, j])
mmut[nper, j] = mdupl[nper, j] + cpar.tau*mgauss[nper, j]
for i in range(nper):
munif[i, j] = np.random.uniform(0, 1, 1)
mgauss[i, j] = (np.power((-2*np.log(munif[i, j])), 0.5))*np.cos(2*np.pi* munif[i, j])
mmut[i, j] = mdupl[i, j] + mmut[nper, j]*mgauss[i, j]
if mmut[i, j] > 1:
mmut[i, j] = 1
elif mmut[i, j] < 0:
mmut[i, j] = 0
return mmut
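# Sketch of the Box-Muller transform that the mutation above approximates when turning
# uniform draws (munif) into Gaussian perturbations (mgauss). The textbook form uses two
# independent uniforms; the loop above reuses a single draw for both factors.
# The helper name is illustrative and is not referenced elsewhere in this module.
def _box_muller_sketch(u1, u2):
    # two independent uniform(0, 1) samples -> one standard normal sample
    return np.sqrt(-2.0*np.log(u1))*np.cos(2.0*np.pi*u2)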
| # -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
def _popmut(cpar, nper, mdupl, indxa, indxb):
mmut = np.zeros([indxa, 2*cpar.pop])
munif = np.zeros([indxb, 2*cpar.pop])
mgauss = np.zeros([indxb, 2*cpar.pop])
for i in range(indxb):
for j in range(cpar.pop):
mmut[i, j] = mdupl[i, j]
for j in range(cpar.pop, 2*cpar.pop, 1):
munif[nper, j] = np.random.uniform(0, 1, 1)
mgauss[nper, j] = (np.power((-2*np.log(munif[nper, j])), 0.5))*np.cos(2*np.pi* munif[nper, j])
mmut[nper, j] = mdupl[nper, j] + cpar.tau*mgauss[nper, j]
for i in range(nper):
munif[i, j] = np.random.uniform(0, 1, 1)
mgauss[i, j] = (np.power((-2*np.log(munif[i, j])), 0.5))*np.cos(2*np.pi* munif[i, j])
mmut[i, j] = mdupl[i, j] + mmut[nper, j]*mgauss[i, j]
if mmut[i, j] > 1:
mmut[i, j] = 1
elif mmut[i, j] < 0:
mmut[i, j] = 0
return mmut
| en | 0.609409 | # -*- coding: utf-8 -*- @author: <NAME> | 2.051955 | 2 |
crawling_yes24.py | sourcery-ai-bot/github-action-with-python | 0 | 6632164 | <gh_stars>0
import requests
from bs4 import BeautifulSoup
def parsing_beautifulsoup(url):
"""
    Parse the given URL with BeautifulSoup.
    :param url: URL to parse. Here, the YES24 link
:return: BeautifulSoup soup Object
"""
data = requests.get(url)
html = data.text
return BeautifulSoup(html, 'html.parser')
def extract_book_data(soup):
"""
    Extract book data from a BeautifulSoup Object.
:param soup: BeautifulSoup soup Object
:return: contents(str)
"""
upload_contents = ''
new_books = soup.select(".goodsTxtInfo")
url_prefix = "http://www.yes24.com"
for new_book in new_books:
book_name = new_book.select("a")[0].text
url_suffix = new_book.select("a")[1].attrs['href']
url = url_prefix + url_suffix
price = new_book.select(".priceB")[0].text
content = f"<a href={url}>" + book_name + "</a>" + ", " + price + "<br/>\n"
upload_contents += content
return upload_contents
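if __name__ == "__main__":
    # Minimal usage sketch chaining the two helpers above. NEW_BOOKS_URL is a
    # placeholder, not a real endpoint taken from this repository; point it at a
    # YES24 new-books listing page before running.
    NEW_BOOKS_URL = "http://www.yes24.com"  # hypothetical listing URL
    soup = parsing_beautifulsoup(NEW_BOOKS_URL)
    print(extract_book_data(soup))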
| import requests
from bs4 import BeautifulSoup
def parsing_beautifulsoup(url):
"""
    Parse the given URL with BeautifulSoup.
    :param url: URL to parse. Here, the YES24 link
:return: BeautifulSoup soup Object
"""
data = requests.get(url)
html = data.text
return BeautifulSoup(html, 'html.parser')
def extract_book_data(soup):
"""
    Extract book data from a BeautifulSoup Object.
:param soup: BeautifulSoup soup Object
:return: contents(str)
"""
upload_contents = ''
new_books = soup.select(".goodsTxtInfo")
url_prefix = "http://www.yes24.com"
for new_book in new_books:
book_name = new_book.select("a")[0].text
url_suffix = new_book.select("a")[1].attrs['href']
url = url_prefix + url_suffix
price = new_book.select(".priceB")[0].text
content = f"<a href={url}>" + book_name + "</a>" + ", " + price + "<br/>\n"
upload_contents += content
return upload_contents | ko | 0.697257 | 뷰티풀 수프로 파싱하는 함수 :param url: paring할 URL. 여기선 YES24 Link :return: BeautifulSoup soup Object BeautifulSoup Object에서 book data를 추출하는 함수 :param soup: BeautifulSoup soup Object :return: contents(str) | 3.505109 | 4 |
test/alignat_tst.py | ikucan/MathsMonkey | 0 | 6632165 | import numpy as np
from pylatex import Document, Section, Subsection, Tabular, Math, TikZ, Axis, \
Plot, Figure, Matrix, Alignat
from pylatex.utils import italic
import os
if __name__ == '__main__':
image_filename = os.path.join(os.path.dirname(__file__), 'kitten.jpg')
geometry_options = {"tmargin": "1cm", "lmargin": "10cm"}
doc = Document(geometry_options=geometry_options)
with doc.create(Section('The simple stuff')):
doc.append('Some regular text and some')
doc.append(italic('italic text. '))
doc.append('\nAlso some crazy characters: $&#{}')
with doc.create(Subsection('Math that is incorrect')):
doc.append(Math(data=['2*3', '=', 9]))
with doc.create(Subsection('Table of something')):
with doc.create(Tabular('rc|cl')) as table:
table.add_hline()
table.add_row((1, 2, 3, 4))
table.add_hline(1, 2)
table.add_empty_row()
table.add_row((4, 5, 6, 7))
a = np.array([[100, 10, 20]]).T
M = np.matrix([[2, 3, 4],
[0, 0, 1],
[0, 0, 2]])
with doc.create(Section('The fancy stuff')):
with doc.create(Subsection('Correct matrix equations')):
doc.append(Math(data=[Matrix(M), Matrix(a), '=', Matrix(M * a)]))
with doc.create(Subsection('Alignat math environment')):
with doc.create(Alignat(numbering=False, escape=False)) as agn:
agn.append(r'\frac{a}{b} &= 0 \\')
agn.append(r'\frac{a}{b} &= 0 \\')
agn.extend([Matrix(M), Matrix(a), '&=', Matrix(M * a)])
with doc.create(Subsection('Beautiful graphs')):
with doc.create(TikZ()):
plot_options = 'height=4cm, width=6cm, grid=major'
with doc.create(Axis(options=plot_options)) as plot:
plot.append(Plot(name='model', func='-x^5 - 242'))
coordinates = [
(-4.77778, 2027.60977),
(-3.55556, 347.84069),
(-2.33333, 22.58953),
(-1.11111, -493.50066),
(0.11111, 46.66082),
(1.33333, -205.56286),
(2.55556, -341.40638),
(3.77778, -1169.24780),
(5.00000, -3269.56775),
]
plot.append(Plot(name='estimate', coordinates=coordinates))
with doc.create(Subsection('Cute kitten pictures')):
with doc.create(Figure(position='h!')) as kitten_pic:
kitten_pic.add_image(image_filename, width='120px')
kitten_pic.add_caption('Look it\'s on its back')
doc.generate_pdf('full', clean_tex=False)
| import numpy as np
from pylatex import Document, Section, Subsection, Tabular, Math, TikZ, Axis, \
Plot, Figure, Matrix, Alignat
from pylatex.utils import italic
import os
if __name__ == '__main__':
image_filename = os.path.join(os.path.dirname(__file__), 'kitten.jpg')
geometry_options = {"tmargin": "1cm", "lmargin": "10cm"}
doc = Document(geometry_options=geometry_options)
with doc.create(Section('The simple stuff')):
doc.append('Some regular text and some')
doc.append(italic('italic text. '))
doc.append('\nAlso some crazy characters: $&#{}')
with doc.create(Subsection('Math that is incorrect')):
doc.append(Math(data=['2*3', '=', 9]))
with doc.create(Subsection('Table of something')):
with doc.create(Tabular('rc|cl')) as table:
table.add_hline()
table.add_row((1, 2, 3, 4))
table.add_hline(1, 2)
table.add_empty_row()
table.add_row((4, 5, 6, 7))
a = np.array([[100, 10, 20]]).T
M = np.matrix([[2, 3, 4],
[0, 0, 1],
[0, 0, 2]])
with doc.create(Section('The fancy stuff')):
with doc.create(Subsection('Correct matrix equations')):
doc.append(Math(data=[Matrix(M), Matrix(a), '=', Matrix(M * a)]))
with doc.create(Subsection('Alignat math environment')):
with doc.create(Alignat(numbering=False, escape=False)) as agn:
agn.append(r'\frac{a}{b} &= 0 \\')
agn.append(r'\frac{a}{b} &= 0 \\')
agn.extend([Matrix(M), Matrix(a), '&=', Matrix(M * a)])
with doc.create(Subsection('Beautiful graphs')):
with doc.create(TikZ()):
plot_options = 'height=4cm, width=6cm, grid=major'
with doc.create(Axis(options=plot_options)) as plot:
plot.append(Plot(name='model', func='-x^5 - 242'))
coordinates = [
(-4.77778, 2027.60977),
(-3.55556, 347.84069),
(-2.33333, 22.58953),
(-1.11111, -493.50066),
(0.11111, 46.66082),
(1.33333, -205.56286),
(2.55556, -341.40638),
(3.77778, -1169.24780),
(5.00000, -3269.56775),
]
plot.append(Plot(name='estimate', coordinates=coordinates))
with doc.create(Subsection('Cute kitten pictures')):
with doc.create(Figure(position='h!')) as kitten_pic:
kitten_pic.add_image(image_filename, width='120px')
kitten_pic.add_caption('Look it\'s on its back')
doc.generate_pdf('full', clean_tex=False)
| none | 1 | 2.750823 | 3 |
|
app/main/models/movies.py | NiHighlism/Minerva | 4 | 6632166 | <reponame>NiHighlism/Minerva
"""
DB Model for Movies table and
relevant junction tables
"""
import datetime
import json
from sqlalchemy.sql import and_, select
from app.main import db, login_manager
from app.main.models.movieSearches import SearchableMixin
class Movie(SearchableMixin, db.Model):
"""
    Description of the Movie model.
    Columns
    -----------
    :id: int [pk]
    :parent_user_id: int [Foreign Key -> User.id]
:imdb_ID: varchar(128) [not NULL]
:title: Text [not NULL]
:year: int
:release_date: DateTime
:runtime: int
:genre: JSON
:director: JSON
:writer: JSON
:actors: JSON
:plot: Text
:language: JSON
:country: JSON
:awards: Text
:ratings: JSON
:imdb_rating: Float
:rotten_tomatoes: int
:metascore: int
:poster_URL: varchar(255)
:box_office: varchar(255)
:added_to_db: DateTime
"""
# Columns
id = db.Column(db.Integer, primary_key=True)
parent_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
imdb_ID = db.Column(db.String(128), unique=True, nullable=False)
title = db.Column(db.Text, nullable=False)
year = db.Column(db.Integer)
release_date = db.Column(db.String(128))
runtime = db.Column(db.String(128))
plot = db.Column(db.Text)
genre = db.Column(db.JSON)
director = db.Column(db.JSON)
writer = db.Column(db.JSON)
actors = db.Column(db.JSON)
language = db.Column(db.JSON)
country = db.Column(db.JSON)
awards = db.Column(db.Text)
imdb_rating = db.Column(db.String(128))
rotten_tomatoes = db.Column(db.String(128))
metascore = db.Column(db.String(128))
poster_url = db.Column(db.String(255))
box_office = db.Column(db.String(128))
added_time = db.Column(db.DateTime)
__searchable__ = ['title', 'year', 'genre',
'director', 'actors', 'language', 'country']
def __init__(self, imdb_ID, title, year, release_date, runtime, genre, director,
writer, actors, plot, language, country, awards,
imdb_rating, rotten_tomatoes, metascore, poster_url, box_office):
self.imdb_ID = imdb_ID
self.title = title
self.year = year
self.release_date = release_date
self.runtime = runtime
self.genre = genre
self.director = director
self.writer = writer
self.actors = actors
self.plot = plot
self.language = language
self.country = country
self.awards = awards
self.imdb_rating = imdb_rating
self.rotten_tomatoes = rotten_tomatoes
self.metascore = metascore
self.poster_url = poster_url
self.box_office = box_office
self.added_time = datetime.datetime.now()
db.session.add(self)
db.session.commit()
def update_col(self, key, value):
setattr(self, key, value)
db.session.commit()
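# Illustrative helper (the name and query pattern are assumptions): if `db` is a
# Flask-SQLAlchemy instance bound to an app context, Movie.query can fetch a stored
# row and update_col can patch a single column without rebuilding the record.
def _update_rating_sketch(imdb_id, new_rating):
    movie = Movie.query.filter_by(imdb_ID=imdb_id).first()
    if movie is not None:
        movie.update_col("imdb_rating", new_rating)
    return movie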
| """
DB Model for Movies table and
relevant junction tables
"""
import datetime
import json
from sqlalchemy.sql import and_, select
from app.main import db, login_manager
from app.main.models.movieSearches import SearchableMixin
class Movie(SearchableMixin, db.Model):
"""
    Description of the Movie model.
    Columns
    -----------
    :id: int [pk]
    :parent_user_id: int [Foreign Key -> User.id]
:imdb_ID: varchar(128) [not NULL]
:title: Text [not NULL]
:year: int
:release_date: DateTime
:runtime: int
:genre: JSON
:director: JSON
:writer: JSON
:actors: JSON
:plot: Text
:language: JSON
:country: JSON
:awards: Text
:ratings: JSON
:imdb_rating: Float
:rotten_tomatoes: int
:metascore: int
:poster_URL: varchar(255)
:box_office: varchar(255)
:added_to_db: DateTime
"""
# Columns
id = db.Column(db.Integer, primary_key=True)
parent_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
imdb_ID = db.Column(db.String(128), unique=True, nullable=False)
title = db.Column(db.Text, nullable=False)
year = db.Column(db.Integer)
release_date = db.Column(db.String(128))
runtime = db.Column(db.String(128))
plot = db.Column(db.Text)
genre = db.Column(db.JSON)
director = db.Column(db.JSON)
writer = db.Column(db.JSON)
actors = db.Column(db.JSON)
language = db.Column(db.JSON)
country = db.Column(db.JSON)
awards = db.Column(db.Text)
imdb_rating = db.Column(db.String(128))
rotten_tomatoes = db.Column(db.String(128))
metascore = db.Column(db.String(128))
poster_url = db.Column(db.String(255))
box_office = db.Column(db.String(128))
added_time = db.Column(db.DateTime)
__searchable__ = ['title', 'year', 'genre',
'director', 'actors', 'language', 'country']
def __init__(self, imdb_ID, title, year, release_date, runtime, genre, director,
writer, actors, plot, language, country, awards,
imdb_rating, rotten_tomatoes, metascore, poster_url, box_office):
self.imdb_ID = imdb_ID
self.title = title
self.year = year
self.release_date = release_date
self.runtime = runtime
self.genre = genre
self.director = director
self.writer = writer
self.actors = actors
self.plot = plot
self.language = language
self.country = country
self.awards = awards
self.imdb_rating = imdb_rating
self.rotten_tomatoes = rotten_tomatoes
self.metascore = metascore
self.poster_url = poster_url
self.box_office = box_office
self.added_time = datetime.datetime.now()
db.session.add(self)
db.session.commit()
def update_col(self, key, value):
setattr(self, key, value)
db.session.commit() | en | 0.481977 | DB Model for Movies table and relevant junction tables Description of User model. Columns ----------- :id: int [pk] :user_id: int [Foreign Key -> User.id] :imdb_ID: varchar(128) [not NULL] :title: Text [not NULL] :year: int :release_date: DateTime :runtime: int :genre: JSON :director: JSON :writer: JSON :actors: JSON :plot: Text :language: JSON :country: JSON :awards: Text :ratings: JSON :imdb_rating: Float :rotten_tomatoes: int :metascore: int :poster_URL: varchar(255) :box_office: varchar(255) :added_to_db: DateTime # Columns | 2.994666 | 3 |
usr/share/pyshared/ajenti/plugins/network/api.py | lupyuen/RaspberryPiImage | 7 | 6632167 | import psutil
from ajenti.api import *
from ajenti.ui import *
@plugin
class NetworkManager (BasePlugin):
def get_devices(self):
return psutil.net_io_counters(pernic=True).keys()
@interface
class INetworkConfig (object):
interfaces = {}
@property
def interface_list(self):
return self.interfaces.values()
def rescan(self):
pass
def save(self):
pass
@interface
class INetworkConfigBit (object):
def apply(self):
pass
@plugin
class NetworkConfigBit (UIElement, INetworkConfigBit):
cls = 'unknown'
iface = None
title = 'Unknown'
typeid = 'box'
class NetworkInterface(object):
def __init__(self):
self.up = False
self.auto = False
self.name = ''
self.devclass = ''
self.addressing = 'static'
self.bits = []
self.params = {'address': '0.0.0.0'}
self.type = ''
self.editable = True
def __getitem__(self, idx):
if idx in self.params:
return self.params[idx]
else:
return ''
def __setitem__(self, idx, val):
self.params[idx] = val
def add_bits(self, ui):
for cls in INetworkConfigBit.get_classes():
if cls.cls in self.bit_classes:
b = cls.new(ui)
b.iface = self
b.refresh()
self.bits.append(b)
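# Illustrative helper (name is made up): NetworkInterface exposes its params dict
# through __getitem__/__setitem__, so options read like a mapping that falls back
# to '' for keys that were never set.
def _iface_params_sketch():
    iface = NetworkInterface()
    iface['address'] = '192.168.1.10'
    iface['netmask'] = '255.255.255.0'
    return iface['address'], iface['gateway']  # the second value is '' (unset key)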
| import psutil
from ajenti.api import *
from ajenti.ui import *
@plugin
class NetworkManager (BasePlugin):
def get_devices(self):
return psutil.net_io_counters(pernic=True).keys()
@interface
class INetworkConfig (object):
interfaces = {}
@property
def interface_list(self):
return self.interfaces.values()
def rescan(self):
pass
def save(self):
pass
@interface
class INetworkConfigBit (object):
def apply(self):
pass
@plugin
class NetworkConfigBit (UIElement, INetworkConfigBit):
cls = 'unknown'
iface = None
title = 'Unknown'
typeid = 'box'
class NetworkInterface(object):
def __init__(self):
self.up = False
self.auto = False
self.name = ''
self.devclass = ''
self.addressing = 'static'
self.bits = []
self.params = {'address': '0.0.0.0'}
self.type = ''
self.editable = True
def __getitem__(self, idx):
if idx in self.params:
return self.params[idx]
else:
return ''
def __setitem__(self, idx, val):
self.params[idx] = val
def add_bits(self, ui):
for cls in INetworkConfigBit.get_classes():
if cls.cls in self.bit_classes:
b = cls.new(ui)
b.iface = self
b.refresh()
self.bits.append(b)
| none | 1 | 1.979867 | 2 |
|
qdmr_parsing/model/rule_based/rule_based_model.py | justeuer/Break | 38 | 6632168 | <filename>qdmr_parsing/model/rule_based/rule_based_model.py
import re
from model.rule_based.decompose_rules import *
from model.model_base import ModelBase
class RuleBasedModel(ModelBase):
def __init__(self):
super(RuleBasedModel, self).__init__()
self.prefixes_to_remove = ["what is", "what are", "who is", "who are", "who was", "who were",
"in what", "in which",
"show me", "return me", "give me",
"can you list", "i'd like to see", "i would like", "do you have",
"what", "which", "list", "show", "return", "find"]
# TODO: consider keeping question marks, which improve tagger accuracy
# (e.g. "Where did the illustrator of \"De Divina Proportione\" die?").
self.suffixes_to_remove = ["?", "?\"", ".", "please"]
self.infixes_to_substitute = [
("are there", "(.*are) there(.*)", lambda x: x.group(1) + x.group(2)),
("were there", "(.*were) there(.*)", lambda x: x.group(1) + x.group(2))
]
self.rules = {"single_prep": DecomposeRuleSinglePrep(),
"superlative": DecomposeRuleSuperlative(),
"conjunction": DecomposeRuleConjunction(),
"subj_do_have": DecomposeRuleSubjDoHave(),
"do_subj": DecomposeRuleDoSubj(),
"relcl": DecomposeRuleRelcl(),
"how_many": DecomposeRuleHowMany(),
"be_auxpass": DecomposeRuleBeAuxpass(),
"be_root": DecomposeRuleBeRoot(),
"multi_prep": DecomposeRuleMultiPrep(),
"acl_verb": DecomposeRuleAclVerb()}
def _clean(self, question):
"""Simplify the question by removing extra unnecessary parts of it, if exist."""
for prefix in self.prefixes_to_remove:
if question.lower().startswith(prefix):
question = question[len(prefix):].strip()
break
for suffix in self.suffixes_to_remove:
if question.lower().endswith(suffix):
question = question[:-len(suffix)].strip()
break
for infix in self.infixes_to_substitute:
if infix[0] in question.lower():
question = re.sub(infix[1], infix[2], question).strip()
break
return question
def _get_num_sentences(self, question):
parsed = self.parser(question)
return len([sent for sent in parsed.sents])
def _get_multi_sent_decomposition(self, question):
parsed = self.parser(question)
sents = [sent.text for sent in parsed.sents]
# extract mention-reference sentence pairs
replace_pairs = []
if parsed._.coref_clusters is not None:
for cluster in parsed._.coref_clusters:
main_sent = cluster.main.sent.text
main_sent_index = sents.index(main_sent)
for mention in cluster.mentions:
if mention != cluster.main:
mention_sent = mention.sent.text
mention_sent_index = sents.index(mention_sent)
if main_sent_index != mention_sent_index:
replace_pairs.append((mention, main_sent_index))
# in case there is no coref, treat the text as a single sentence
if not replace_pairs:
return []
# replace any mention with the corresponding reference sentence index
tokens = [token.text for token in parsed]
for (mention, ref_sent_index) in replace_pairs:
remove_indices = [x for x in range(mention.start, mention.end)][::-1]
for index in remove_indices:
tokens.pop(index)
tokens.insert(mention.start, "@@{}@@".format(ref_sent_index+1))
# parse each sentence separately
question_coref = self.parser(' '.join(tokens))
sents_coref = [sent.text for sent in question_coref.sents]
return [self._parse(self._clean(sent)) for sent in sents_coref]
def _update_decomposition(self, decomposition, i, decomposed):
# remove non-decomposed part
decomposition_previous_len = len(decomposition)
decomposition.remove(decomposition[i])
# fix references in text following the decomposed part, by increasing it by len(decomposed) - 1.
# this way, if the reference was to the i'th position (which is now decomposed), it will be updated
# to the last part of the decomposition.
for j in range(i+1, decomposition_previous_len):
# -1 because we removed one element
updated_part = re.sub("@@(\d\d)?@@",
lambda x: "@@" + str(int(x.group(1)) + len(decomposed) - 1) + "@@",
decomposition[j-1].text)
decomposition[j-1] = self._parse(updated_part)
# insert decomposed parts one by one
for part in decomposed[::-1]:
decomposition.insert(i, self._parse(part))
return decomposition
def _decompose(self, question, verbose):
trace = []
num_sentences = self._get_num_sentences(question)
if num_sentences > 1:
decomposition = self._get_multi_sent_decomposition(question)
if not decomposition:
decomposition = [self._parse(self._clean(question))]
else:
trace.append("multi_sent_coref")
else:
decomposition = [self._parse(self._clean(question))]
if verbose:
print(decomposition)
while True:
is_decomposed = False
for i, part in enumerate(decomposition):
decomposed = None
if self.rules["be_root"].check(part, i + 1):
decomposed = self.rules["be_root"].decompose()
trace.append("be_root")
elif self.rules["be_auxpass"].check(part, i + 1):
decomposed = self.rules["be_auxpass"].decompose()
trace.append("be_auxpass")
elif self.rules["do_subj"].check(part, i + 1):
decomposed = self.rules["do_subj"].decompose()
trace.append("do_subj")
elif self.rules["subj_do_have"].check(part, i+1):
decomposed = self.rules["subj_do_have"].decompose()
trace.append("subj_do_have")
elif self.rules["conjunction"].check(part, i+1):
decomposed = self.rules["conjunction"].decompose()
trace.append("conjunction")
elif self.rules["how_many"].check(part, i+1):
decomposed = self.rules["how_many"].decompose()
trace.append("how_many")
elif self.rules["multi_prep"].check(part, i+1):
decomposed = self.rules["multi_prep"].decompose()
trace.append("multi_prep")
elif self.rules["single_prep"].check(part, i+1):
# check that "relcl" should not be applied
single_prep_index = self.rules["single_prep"].prep_index
if self.rules["relcl"].check(part, i + 1) and \
self.rules["relcl"].check_relcl_contains_index(single_prep_index):
decomposed = self.rules["relcl"].decompose()
trace.append("relcl")
else:
decomposed = self.rules["single_prep"].decompose()
trace.append("single_prep")
elif self.rules["relcl"].check(part, i+1):
decomposed = self.rules["relcl"].decompose()
trace.append("relcl")
elif self.rules["superlative"].check(part, i+1):
decomposed = self.rules["superlative"].decompose()
trace.append("superlative")
elif self.rules["acl_verb"].check(part, i+1):
decomposed = self.rules["acl_verb"].decompose()
trace.append("acl_verb")
if decomposed:
decomposition = self._update_decomposition(decomposition, i, decomposed)
is_decomposed = True
if verbose:
print(decomposition)
if not is_decomposed:
break
return decomposition, trace
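# Self-contained sketch of the reference renumbering done in _update_decomposition:
# every @@n@@ placeholder in the parts that follow a decomposed part is shifted by
# len(decomposed) - 1 (2 in this made-up example).
def _shift_refs_sketch():
    part = "return @@3@@ sorted by @@10@@"
    return re.sub(r"@@(\d+)@@",
                  lambda m: "@@" + str(int(m.group(1)) + 2) + "@@",
                  part)  # -> "return @@5@@ sorted by @@12@@"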
| <filename>qdmr_parsing/model/rule_based/rule_based_model.py
import re
from model.rule_based.decompose_rules import *
from model.model_base import ModelBase
class RuleBasedModel(ModelBase):
def __init__(self):
super(RuleBasedModel, self).__init__()
self.prefixes_to_remove = ["what is", "what are", "who is", "who are", "who was", "who were",
"in what", "in which",
"show me", "return me", "give me",
"can you list", "i'd like to see", "i would like", "do you have",
"what", "which", "list", "show", "return", "find"]
# TODO: consider keeping question marks, which improve tagger accuracy
# (e.g. "Where did the illustrator of \"De Divina Proportione\" die?").
self.suffixes_to_remove = ["?", "?\"", ".", "please"]
self.infixes_to_substitute = [
("are there", "(.*are) there(.*)", lambda x: x.group(1) + x.group(2)),
("were there", "(.*were) there(.*)", lambda x: x.group(1) + x.group(2))
]
self.rules = {"single_prep": DecomposeRuleSinglePrep(),
"superlative": DecomposeRuleSuperlative(),
"conjunction": DecomposeRuleConjunction(),
"subj_do_have": DecomposeRuleSubjDoHave(),
"do_subj": DecomposeRuleDoSubj(),
"relcl": DecomposeRuleRelcl(),
"how_many": DecomposeRuleHowMany(),
"be_auxpass": DecomposeRuleBeAuxpass(),
"be_root": DecomposeRuleBeRoot(),
"multi_prep": DecomposeRuleMultiPrep(),
"acl_verb": DecomposeRuleAclVerb()}
def _clean(self, question):
"""Simplify the question by removing extra unnecessary parts of it, if exist."""
for prefix in self.prefixes_to_remove:
if question.lower().startswith(prefix):
question = question[len(prefix):].strip()
break
for suffix in self.suffixes_to_remove:
if question.lower().endswith(suffix):
question = question[:-len(suffix)].strip()
break
for infix in self.infixes_to_substitute:
if infix[0] in question.lower():
question = re.sub(infix[1], infix[2], question).strip()
break
return question
def _get_num_sentences(self, question):
parsed = self.parser(question)
return len([sent for sent in parsed.sents])
def _get_multi_sent_decomposition(self, question):
parsed = self.parser(question)
sents = [sent.text for sent in parsed.sents]
# extract mention-reference sentence pairs
replace_pairs = []
if parsed._.coref_clusters is not None:
for cluster in parsed._.coref_clusters:
main_sent = cluster.main.sent.text
main_sent_index = sents.index(main_sent)
for mention in cluster.mentions:
if mention != cluster.main:
mention_sent = mention.sent.text
mention_sent_index = sents.index(mention_sent)
if main_sent_index != mention_sent_index:
replace_pairs.append((mention, main_sent_index))
# in case there is no coref, treat the text as a single sentence
if not replace_pairs:
return []
# replace any mention with the corresponding reference sentence index
tokens = [token.text for token in parsed]
for (mention, ref_sent_index) in replace_pairs:
remove_indices = [x for x in range(mention.start, mention.end)][::-1]
for index in remove_indices:
tokens.pop(index)
tokens.insert(mention.start, "@@{}@@".format(ref_sent_index+1))
# parse each sentence separately
question_coref = self.parser(' '.join(tokens))
sents_coref = [sent.text for sent in question_coref.sents]
return [self._parse(self._clean(sent)) for sent in sents_coref]
def _update_decomposition(self, decomposition, i, decomposed):
# remove non-decomposed part
decomposition_previous_len = len(decomposition)
decomposition.remove(decomposition[i])
# fix references in text following the decomposed part, by increasing it by len(decomposed) - 1.
# this way, if the reference was to the i'th position (which is now decomposed), it will be updated
# to the last part of the decomposition.
for j in range(i+1, decomposition_previous_len):
# -1 because we removed one element
updated_part = re.sub("@@(\d\d)?@@",
lambda x: "@@" + str(int(x.group(1)) + len(decomposed) - 1) + "@@",
decomposition[j-1].text)
decomposition[j-1] = self._parse(updated_part)
# insert decomposed parts one by one
for part in decomposed[::-1]:
decomposition.insert(i, self._parse(part))
return decomposition
def _decompose(self, question, verbose):
trace = []
num_sentences = self._get_num_sentences(question)
if num_sentences > 1:
decomposition = self._get_multi_sent_decomposition(question)
if not decomposition:
decomposition = [self._parse(self._clean(question))]
else:
trace.append("multi_sent_coref")
else:
decomposition = [self._parse(self._clean(question))]
if verbose:
print(decomposition)
while True:
is_decomposed = False
for i, part in enumerate(decomposition):
decomposed = None
if self.rules["be_root"].check(part, i + 1):
decomposed = self.rules["be_root"].decompose()
trace.append("be_root")
elif self.rules["be_auxpass"].check(part, i + 1):
decomposed = self.rules["be_auxpass"].decompose()
trace.append("be_auxpass")
elif self.rules["do_subj"].check(part, i + 1):
decomposed = self.rules["do_subj"].decompose()
trace.append("do_subj")
elif self.rules["subj_do_have"].check(part, i+1):
decomposed = self.rules["subj_do_have"].decompose()
trace.append("subj_do_have")
elif self.rules["conjunction"].check(part, i+1):
decomposed = self.rules["conjunction"].decompose()
trace.append("conjunction")
elif self.rules["how_many"].check(part, i+1):
decomposed = self.rules["how_many"].decompose()
trace.append("how_many")
elif self.rules["multi_prep"].check(part, i+1):
decomposed = self.rules["multi_prep"].decompose()
trace.append("multi_prep")
elif self.rules["single_prep"].check(part, i+1):
# check that "relcl" should not be applied
single_prep_index = self.rules["single_prep"].prep_index
if self.rules["relcl"].check(part, i + 1) and \
self.rules["relcl"].check_relcl_contains_index(single_prep_index):
decomposed = self.rules["relcl"].decompose()
trace.append("relcl")
else:
decomposed = self.rules["single_prep"].decompose()
trace.append("single_prep")
elif self.rules["relcl"].check(part, i+1):
decomposed = self.rules["relcl"].decompose()
trace.append("relcl")
elif self.rules["superlative"].check(part, i+1):
decomposed = self.rules["superlative"].decompose()
trace.append("superlative")
elif self.rules["acl_verb"].check(part, i+1):
decomposed = self.rules["acl_verb"].decompose()
trace.append("acl_verb")
if decomposed:
decomposition = self._update_decomposition(decomposition, i, decomposed)
is_decomposed = True
if verbose:
print(decomposition)
if not is_decomposed:
break
return decomposition, trace
| en | 0.911877 | # TODO: consider keeping question marks, which improve tagger accuracy # (e.g. "Where did the illustrator of \"De Divina Proportione\" die?"). Simplify the question by removing extra unnecessary parts of it, if exist. # extract mention-reference sentence pairs # in case there is no coref, treat the text as a single sentence # replace any mention with the corresponding reference sentence index # parse each sentence separately # remove non-decomposed part # fix references in text following the decomposed part, by increasing it by len(decomposed) - 1. # this way, if the reference was to the i'th position (which is now decomposed), it will be updated # to the last part of the decomposition. # -1 because we removed one element # insert decomposed parts one by one # check that "relcl" should not be applied | 2.713692 | 3 |
venv/Lib/site-packages/pygame/tests/image__save_gl_surface_test.py | ZenithEmber/COMP120-Assignment-1-contract | 46 | 6632169 | import os
import unittest
from pygame.tests import test_utils
import pygame
from pygame.locals import *
@unittest.skipIf(os.environ.get('SDL_VIDEODRIVER') == 'dummy',
'OpenGL requires a non-"dummy" SDL_VIDEODRIVER')
class GL_ImageSave(unittest.TestCase):
def test_image_save_works_with_opengl_surfaces(self):
"""
|tags:display,slow,opengl|
"""
pygame.display.init()
screen = pygame.display.set_mode((640,480), OPENGL|DOUBLEBUF)
pygame.display.flip()
tmp_dir = test_utils.get_tmp_dir()
# Try the imageext module.
tmp_file = os.path.join(tmp_dir, "opengl_save_surface_test.png")
pygame.image.save(screen, tmp_file)
self.assertTrue(os.path.exists(tmp_file))
os.remove(tmp_file)
# Only test the image module.
tmp_file = os.path.join(tmp_dir, "opengl_save_surface_test.bmp")
pygame.image.save(screen, tmp_file)
self.assertTrue(os.path.exists(tmp_file))
os.remove(tmp_file)
# stops tonnes of tmp dirs building up in trunk dir
os.rmdir(tmp_dir)
pygame.display.quit()
if __name__ == '__main__':
unittest.main()
| import os
import unittest
from pygame.tests import test_utils
import pygame
from pygame.locals import *
@unittest.skipIf(os.environ.get('SDL_VIDEODRIVER') == 'dummy',
'OpenGL requires a non-"dummy" SDL_VIDEODRIVER')
class GL_ImageSave(unittest.TestCase):
def test_image_save_works_with_opengl_surfaces(self):
"""
|tags:display,slow,opengl|
"""
pygame.display.init()
screen = pygame.display.set_mode((640,480), OPENGL|DOUBLEBUF)
pygame.display.flip()
tmp_dir = test_utils.get_tmp_dir()
# Try the imageext module.
tmp_file = os.path.join(tmp_dir, "opengl_save_surface_test.png")
pygame.image.save(screen, tmp_file)
self.assertTrue(os.path.exists(tmp_file))
os.remove(tmp_file)
# Only test the image module.
tmp_file = os.path.join(tmp_dir, "opengl_save_surface_test.bmp")
pygame.image.save(screen, tmp_file)
self.assertTrue(os.path.exists(tmp_file))
os.remove(tmp_file)
# stops tonnes of tmp dirs building up in trunk dir
os.rmdir(tmp_dir)
pygame.display.quit()
if __name__ == '__main__':
unittest.main()
| en | 0.341557 | |tags:display,slow,opengl| # Try the imageext module. # Only test the image module. # stops tonnes of tmp dirs building up in trunk dir | 2.451428 | 2 |
plugin/references.py | kaste/LSP | 0 | 6632170 | import os
import sublime
import linecache
from .core.panels import ensure_panel
from .core.protocol import Request, Point
from .core.registry import get_position
from .core.registry import LspTextCommand
from .core.registry import windows
from .core.settings import PLUGIN_NAME
from .core.settings import userprefs
from .core.typing import List, Dict, Optional, Tuple, TypedDict
from .core.url import uri_to_filename
from .core.views import get_line, text_document_position_params
from .documents import is_at_word
ReferenceDict = TypedDict('ReferenceDict', {'uri': str, 'range': dict})
def ensure_references_panel(window: sublime.Window) -> 'Optional[sublime.View]':
return ensure_panel(window, "references", r"^\s*\S\s+(\S.*):$", r"^\s+([0-9]+):?([0-9]+).*$",
"Packages/" + PLUGIN_NAME + "/Syntaxes/References.sublime-syntax")
class LspSymbolReferencesCommand(LspTextCommand):
capability = 'referencesProvider'
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self.reflist = [] # type: List[List[str]]
self.word_region = None # type: Optional[sublime.Region]
self.word = ""
self.base_dir = None # type: Optional[str]
def is_enabled(self, event: Optional[dict] = None) -> bool:
return super().is_enabled(event) and is_at_word(self.view, event)
def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:
session = self.best_session(self.capability)
file_path = self.view.file_name()
if session and file_path:
pos = get_position(self.view, event)
window = self.view.window()
if not window:
return
self.word_region = self.view.word(pos)
self.word = self.view.substr(self.word_region)
# use relative paths if file on the same root.
base_dir = windows.lookup(window).get_project_path(file_path)
if base_dir:
if os.path.commonprefix([base_dir, file_path]):
self.base_dir = base_dir
document_position = text_document_position_params(self.view, pos)
document_position['context'] = {"includeDeclaration": False}
request = Request.references(document_position)
session.send_request(request, lambda response: self.handle_response(response, pos))
def handle_response(self, response: Optional[List[ReferenceDict]], pos: int) -> None:
window = self.view.window()
if response is None:
response = []
if window:
references_count = len(response)
# return if there are no references
if references_count < 1:
window.run_command("hide_panel", {"panel": "output.references"})
window.status_message("No references found")
return
references_by_file = self._group_references_by_file(response)
if userprefs().show_references_in_quick_panel:
self.show_quick_panel(references_by_file)
else:
self.show_references_panel(references_by_file)
def show_quick_panel(self, references_by_file: Dict[str, List[Tuple[Point, str]]]) -> None:
selected_index = -1
current_file_path = self.view.file_name()
self.reflist.clear()
for file_path, references in references_by_file.items():
for reference in references:
point, line = reference
item = ['{}:{}:{}'.format(self.get_relative_path(file_path), point.row + 1, point.col + 1), line]
self.reflist.append(item)
# pre-select a reference in the current file.
if current_file_path == file_path and selected_index == -1:
selected_index = len(self.reflist) - 1
flags = sublime.KEEP_OPEN_ON_FOCUS_LOST
window = self.view.window()
if window:
window.show_quick_panel(
self.reflist,
self.on_ref_choice,
flags,
selected_index,
self.on_ref_highlight
)
def on_ref_choice(self, index: int) -> None:
self.open_ref_index(index)
def on_ref_highlight(self, index: int) -> None:
self.open_ref_index(index, transient=True)
def open_ref_index(self, index: int, transient: bool = False) -> None:
if index != -1:
flags = sublime.ENCODED_POSITION | sublime.TRANSIENT if transient else sublime.ENCODED_POSITION
window = self.view.window()
if window:
window.open_file(self.get_selected_file_path(index), flags)
def show_references_panel(self, references_by_file: Dict[str, List[Tuple[Point, str]]]) -> None:
window = self.view.window()
if window:
panel = ensure_references_panel(window)
if not panel:
return
text = ''
references_count = 0
for file, references in references_by_file.items():
text += '◌ {}:\n'.format(self.get_relative_path(file))
for reference in references:
references_count += 1
point, line = reference
text += '\t{:>8}:{:<4} {}\n'.format(point.row + 1, point.col + 1, line)
# append a new line after each file name
text += '\n'
base_dir = windows.lookup(window).get_project_path(self.view.file_name() or "")
panel.settings().set("result_base_dir", base_dir)
panel.run_command("lsp_clear_panel")
window.run_command("show_panel", {"panel": "output.references"})
panel.run_command('append', {
'characters': "{} references for '{}'\n\n{}".format(references_count, self.word, text),
'force': True,
'scroll_to_end': False
})
# highlight all word occurrences
regions = panel.find_all(r"\b{}\b".format(self.word))
panel.add_regions('ReferenceHighlight', regions, 'comment', flags=sublime.DRAW_OUTLINED)
def get_selected_file_path(self, index: int) -> str:
return self.get_full_path(self.reflist[index][0])
def get_relative_path(self, file_path: str) -> str:
if self.base_dir:
return os.path.relpath(file_path, self.base_dir)
else:
return file_path
def get_full_path(self, file_path: str) -> str:
if self.base_dir:
return os.path.join(self.base_dir, file_path)
return file_path
def _group_references_by_file(self, references: List[ReferenceDict]
) -> Dict[str, List[Tuple[Point, str]]]:
""" Return a dictionary that groups references by the file it belongs. """
grouped_references = {} # type: Dict[str, List[Tuple[Point, str]]]
for reference in references:
file_path = uri_to_filename(reference["uri"])
point = Point.from_lsp(reference['range']['start'])
# get line of the reference, to showcase its use
reference_line = get_line(self.view.window(), file_path, point.row)
if grouped_references.get(file_path) is None:
grouped_references[file_path] = []
grouped_references[file_path].append((point, reference_line))
# we don't want to cache the line, we always want to get fresh data
linecache.clearcache()
return grouped_references
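# Plain-Python sketch of the grouping idea in _group_references_by_file, independent
# of the LSP/sublime types: rows of (file, point, line) collapse into a
# {file: [(point, line), ...]} mapping. The helper name and row shape are illustrative.
def _group_rows_sketch(rows):
    grouped = {}  # type: Dict[str, List[Tuple[int, str]]]
    for file_path, point, line in rows:
        grouped.setdefault(file_path, []).append((point, line))
    return grouped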
| import os
import sublime
import linecache
from .core.panels import ensure_panel
from .core.protocol import Request, Point
from .core.registry import get_position
from .core.registry import LspTextCommand
from .core.registry import windows
from .core.settings import PLUGIN_NAME
from .core.settings import userprefs
from .core.typing import List, Dict, Optional, Tuple, TypedDict
from .core.url import uri_to_filename
from .core.views import get_line, text_document_position_params
from .documents import is_at_word
ReferenceDict = TypedDict('ReferenceDict', {'uri': str, 'range': dict})
def ensure_references_panel(window: sublime.Window) -> 'Optional[sublime.View]':
return ensure_panel(window, "references", r"^\s*\S\s+(\S.*):$", r"^\s+([0-9]+):?([0-9]+).*$",
"Packages/" + PLUGIN_NAME + "/Syntaxes/References.sublime-syntax")
class LspSymbolReferencesCommand(LspTextCommand):
capability = 'referencesProvider'
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self.reflist = [] # type: List[List[str]]
self.word_region = None # type: Optional[sublime.Region]
self.word = ""
self.base_dir = None # type: Optional[str]
def is_enabled(self, event: Optional[dict] = None) -> bool:
return super().is_enabled(event) and is_at_word(self.view, event)
def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:
session = self.best_session(self.capability)
file_path = self.view.file_name()
if session and file_path:
pos = get_position(self.view, event)
window = self.view.window()
if not window:
return
self.word_region = self.view.word(pos)
self.word = self.view.substr(self.word_region)
# use relative paths if file on the same root.
base_dir = windows.lookup(window).get_project_path(file_path)
if base_dir:
if os.path.commonprefix([base_dir, file_path]):
self.base_dir = base_dir
document_position = text_document_position_params(self.view, pos)
document_position['context'] = {"includeDeclaration": False}
request = Request.references(document_position)
session.send_request(request, lambda response: self.handle_response(response, pos))
def handle_response(self, response: Optional[List[ReferenceDict]], pos: int) -> None:
window = self.view.window()
if response is None:
response = []
if window:
references_count = len(response)
# return if there are no references
if references_count < 1:
window.run_command("hide_panel", {"panel": "output.references"})
window.status_message("No references found")
return
references_by_file = self._group_references_by_file(response)
if userprefs().show_references_in_quick_panel:
self.show_quick_panel(references_by_file)
else:
self.show_references_panel(references_by_file)
def show_quick_panel(self, references_by_file: Dict[str, List[Tuple[Point, str]]]) -> None:
selected_index = -1
current_file_path = self.view.file_name()
self.reflist.clear()
for file_path, references in references_by_file.items():
for reference in references:
point, line = reference
item = ['{}:{}:{}'.format(self.get_relative_path(file_path), point.row + 1, point.col + 1), line]
self.reflist.append(item)
# pre-select a reference in the current file.
if current_file_path == file_path and selected_index == -1:
selected_index = len(self.reflist) - 1
flags = sublime.KEEP_OPEN_ON_FOCUS_LOST
window = self.view.window()
if window:
window.show_quick_panel(
self.reflist,
self.on_ref_choice,
flags,
selected_index,
self.on_ref_highlight
)
def on_ref_choice(self, index: int) -> None:
self.open_ref_index(index)
def on_ref_highlight(self, index: int) -> None:
self.open_ref_index(index, transient=True)
def open_ref_index(self, index: int, transient: bool = False) -> None:
if index != -1:
flags = sublime.ENCODED_POSITION | sublime.TRANSIENT if transient else sublime.ENCODED_POSITION
window = self.view.window()
if window:
window.open_file(self.get_selected_file_path(index), flags)
def show_references_panel(self, references_by_file: Dict[str, List[Tuple[Point, str]]]) -> None:
window = self.view.window()
if window:
panel = ensure_references_panel(window)
if not panel:
return
text = ''
references_count = 0
for file, references in references_by_file.items():
text += '◌ {}:\n'.format(self.get_relative_path(file))
for reference in references:
references_count += 1
point, line = reference
text += '\t{:>8}:{:<4} {}\n'.format(point.row + 1, point.col + 1, line)
# append a new line after each file name
text += '\n'
base_dir = windows.lookup(window).get_project_path(self.view.file_name() or "")
panel.settings().set("result_base_dir", base_dir)
panel.run_command("lsp_clear_panel")
window.run_command("show_panel", {"panel": "output.references"})
panel.run_command('append', {
'characters': "{} references for '{}'\n\n{}".format(references_count, self.word, text),
'force': True,
'scroll_to_end': False
})
# highlight all word occurrences
regions = panel.find_all(r"\b{}\b".format(self.word))
panel.add_regions('ReferenceHighlight', regions, 'comment', flags=sublime.DRAW_OUTLINED)
def get_selected_file_path(self, index: int) -> str:
return self.get_full_path(self.reflist[index][0])
def get_relative_path(self, file_path: str) -> str:
if self.base_dir:
return os.path.relpath(file_path, self.base_dir)
else:
return file_path
def get_full_path(self, file_path: str) -> str:
if self.base_dir:
return os.path.join(self.base_dir, file_path)
return file_path
def _group_references_by_file(self, references: List[ReferenceDict]
) -> Dict[str, List[Tuple[Point, str]]]:
""" Return a dictionary that groups references by the file it belongs. """
grouped_references = {} # type: Dict[str, List[Tuple[Point, str]]]
for reference in references:
file_path = uri_to_filename(reference["uri"])
point = Point.from_lsp(reference['range']['start'])
# get line of the reference, to showcase its use
reference_line = get_line(self.view.window(), file_path, point.row)
if grouped_references.get(file_path) is None:
grouped_references[file_path] = []
grouped_references[file_path].append((point, reference_line))
# we don't want to cache the line, we always want to get fresh data
linecache.clearcache()
return grouped_references
| en | 0.800215 | # type: List[List[str]] # type: Optional[sublime.Region] # type: Optional[str] # use relative paths if file on the same root. # return if there are no references # pre-select a reference in the current file. # append a new line after each file name # highlight all word occurrences Return a dictionary that groups references by the file it belongs. # type: Dict[str, List[Tuple[Point, str]]] # get line of the reference, to showcase its use # we don't want to cache the line, we always want to get fresh data | 1.95289 | 2 |
tensorimage/util/system/mkdir.py | Nesac128/tensorimage | 19 | 6632171 | <gh_stars>10-100
import os
def mkdir(dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
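# Illustrative usage sketch (the path below is a made-up example):
#   mkdir("output/run_01")   # creates the directory only if it does not already exist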
| import os
def mkdir(dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path) | none | 1 | 2.580427 | 3 |
|
VolumeToMesh/VolumeToMesh.py | lassoan/SlicerMorph | 0 | 6632172 | <reponame>lassoan/SlicerMorph
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import fnmatch
import numpy as np
import random
import math
#
# VolumeToMesh
#
class VolumeToMesh(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "VolumeToMesh" # TODO make this more human readable by adding spaces
self.parent.categories = ["SlicerMorph.SlicerMorph Labs"]
self.parent.dependencies = []
self.parent.contributors = ["<NAME> (UW), <NAME> (UW)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
This module takes a directory of volumes and segments them using a user-supplied threshold value. The output segments are converted to models and saved in the
output directory.
"""
self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = """
This module was developed by <NAME> and <NAME>, through a NSF ABI Development grant, "An Integrated Platform for Retrieval, Visualization and Analysis of
3D Morphology From Digital Biological Collections" (Award Numbers: 1759883 (Murat Maga), 1759637 (<NAME>), 1759839 (Douglas Boyer)).
https://nsf.gov/awardsearch/showAward?AWD_ID=1759883&HistoricalAwards=false
""" # replace with organization, grant and thanks.
#
# VolumeToMeshWidget
#
class VolumeToMeshWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def onSelectInput(self):
self.applyButton.enabled = bool (self.inputDirectory.currentPath and self.outputDirectory.currentPath)
def onSelectOutput(self):
self.applyButton.enabled = bool (self.inputDirectory.currentPath and self.outputDirectory.currentPath)
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Parameters"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
self.inputDirectory=ctk.ctkPathLineEdit()
self.inputDirectory.filters = ctk.ctkPathLineEdit.Dirs
self.inputDirectory.setToolTip( "Select directory containing volumes" )
parametersFormLayout.addRow("Input directory: ", self.inputDirectory)
# Select output directory
self.outputDirectory=ctk.ctkPathLineEdit()
self.outputDirectory.filters = ctk.ctkPathLineEdit.Dirs
self.outputDirectory.setToolTip( "Select directory for output models: " )
parametersFormLayout.addRow("Output directory: ", self.outputDirectory)
#
# Select the extension type
#
self.extensionOptionGZ = qt.QRadioButton(".nii.gz")
self.extensionOptionGZ.setChecked(True)
parametersFormLayout.addRow("Select extension type: ", self.extensionOptionGZ)
#
# set threshold value
#
self.threshold = ctk.ctkDoubleSpinBox()
self.threshold.singleStep = 1
self.threshold.minimum = 0
self.threshold.maximum = 100000
self.threshold.setDecimals(0)
self.threshold.value = 500
self.threshold.setToolTip("Select threshold for segmentation of volume")
parametersFormLayout.addRow("Threshold for segmentation:", self.threshold)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Apply")
    self.applyButton.toolTip = "Generate meshes from the input volumes."
self.applyButton.enabled = False
parametersFormLayout.addRow(self.applyButton)
#
# check box to trigger taking screen shots for later use in tutorials
#
self.enableScreenshotsFlagCheckBox = qt.QCheckBox()
self.enableScreenshotsFlagCheckBox.checked = 0
self.enableScreenshotsFlagCheckBox.setToolTip("If checked, take screen shots for tutorials. Use Save Data to write them to disk.")
parametersFormLayout.addRow("Enable Screenshots", self.enableScreenshotsFlagCheckBox)
# connections
self.inputDirectory.connect('validInputChanged(bool)', self.onSelectInput)
self.outputDirectory.connect('validInputChanged(bool)', self.onSelectOutput)
self.applyButton.connect('clicked(bool)', self.onApplyButton)
# Add vertical spacer
self.layout.addStretch(1)
def cleanup(self):
pass
def onApplyButton(self):
logic = VolumeToMeshLogic()
enableScreenshotsFlag = self.enableScreenshotsFlagCheckBox.checked
extension =""
if self.extensionOptionGZ.checked:
extension = ".nii.gz"
logic.run(self.inputDirectory.currentPath, self.outputDirectory.currentPath, extension, int(self.threshold.value))
#
# VolumeToMeshLogic
#
class VolumeToMeshLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def run(self, inputDirectory, outputDirectory, extension, stepThreshold):
renderLogic = slicer.modules.volumerendering.logic()
for file in os.listdir(inputDirectory):
if file.endswith(extension):
inputFile = os.path.join(inputDirectory, file)
volumeNode =slicer.util.loadVolume(inputFile)
labelVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
slicer.vtkSlicerVolumesLogic().CreateLabelVolumeFromVolume(slicer.mrmlScene, labelVolumeNode, volumeNode)
voxelArray = slicer.util.arrayFromVolume(volumeNode)
labelVoxelArray = slicer.util.arrayFromVolume(labelVolumeNode)
labelVoxelArray[voxelArray >= stepThreshold] = 100
labelVoxelArray[voxelArray < stepThreshold] = 0
slicer.util.arrayFromVolumeModified(labelVolumeNode)
imageName = volumeNode.GetName()
segmentationNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLSegmentationNode', imageName)
slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(labelVolumeNode, segmentationNode)
segmentID = segmentationNode.GetSegmentation().GetNthSegmentID(0)
polydata=vtk.vtkPolyData()
slicer.modules.segmentations.logic().GetSegmentClosedSurfaceRepresentation(segmentationNode, segmentID, polydata,1)
normalFilter = vtk.vtkPolyDataNormals()
normalFilter.SetInputData(polydata)
normalFilter.SetFlipNormals(1)
normalFilter.Update()
polydataFlip = normalFilter.GetOutput()
modelNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLModelNode',imageName)
modelNode.CreateDefaultDisplayNodes()
modelNode.SetAndObservePolyData(polydataFlip)
outputFilename = os.path.join(outputDirectory, imageName + '.ply')
slicer.util.saveNode(modelNode, outputFilename)
slicer.mrmlScene.RemoveNode(labelVolumeNode)
slicer.mrmlScene.RemoveNode(volumeNode)
slicer.mrmlScene.RemoveNode(segmentationNode)
slicer.mrmlScene.RemoveNode(modelNode)
def takeScreenshot(self,name,description,type=-1):
# show the message even if not taking a screen shot
slicer.util.delayDisplay('Take screenshot: '+description+'.\nResult is available in the Annotations module.', 3000)
lm = slicer.app.layoutManager()
# switch on the type to get the requested window
widget = 0
if type == slicer.qMRMLScreenShotDialog.FullLayout:
# full layout
widget = lm.viewport()
elif type == slicer.qMRMLScreenShotDialog.ThreeD:
# just the 3D window
widget = lm.threeDWidget(0).threeDView()
elif type == slicer.qMRMLScreenShotDialog.Red:
# red slice window
widget = lm.sliceWidget("Red")
elif type == slicer.qMRMLScreenShotDialog.Yellow:
# yellow slice window
widget = lm.sliceWidget("Yellow")
elif type == slicer.qMRMLScreenShotDialog.Green:
# green slice window
widget = lm.sliceWidget("Green")
else:
# default to using the full window
widget = slicer.util.mainWindow()
# reset the type so that the node is set correctly
type = slicer.qMRMLScreenShotDialog.FullLayout
# grab and convert to vtk image data
qimage = ctk.ctkWidgetsUtils.grabWidget(widget)
imageData = vtk.vtkImageData()
slicer.qMRMLUtils().qImageToVtkImageData(qimage,imageData)
annotationLogic = slicer.modules.annotations.logic()
annotationLogic.CreateSnapShot(name, description, type, 1, imageData)
class VolumeToMeshTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_VolumeToMesh1()
def test_VolumeToMesh1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
#
# first, get some data
#
import SampleData
SampleData.downloadFromURL(
nodeNames='FA',
fileNames='FA.nrrd',
uris='http://slicer.kitware.com/midas3/download?items=5767',
checksums='SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560')
self.delayDisplay('Finished with download and loading')
volumeNode = slicer.util.getNode(pattern="FA")
logic = VolumeToMeshLogic()
self.assertIsNotNone( logic.hasImageData(volumeNode) )
self.delayDisplay('Test passed!')
| import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import fnmatch
import numpy as np
import random
import math
#
# VolumeToMesh
#
class VolumeToMesh(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "VolumeToMesh" # TODO make this more human readable by adding spaces
self.parent.categories = ["SlicerMorph.SlicerMorph Labs"]
self.parent.dependencies = []
self.parent.contributors = ["<NAME> (UW), <NAME> (UW)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
This module takes a directory of volumes and segments them using a user-supplied threshold value. The output segments are converted to models and saved in the
output directory.
"""
self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = """
This module was developed by <NAME> and <NAME>, through a NSF ABI Development grant, "An Integrated Platform for Retrieval, Visualization and Analysis of
3D Morphology From Digital Biological Collections" (Award Numbers: 1759883 (Murat Maga), 1759637 (<NAME>), 1759839 (Douglas Boyer)).
https://nsf.gov/awardsearch/showAward?AWD_ID=1759883&HistoricalAwards=false
""" # replace with organization, grant and thanks.
#
# VolumeToMeshWidget
#
class VolumeToMeshWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def onSelectInput(self):
self.applyButton.enabled = bool (self.inputDirectory.currentPath and self.outputDirectory.currentPath)
def onSelectOutput(self):
self.applyButton.enabled = bool (self.inputDirectory.currentPath and self.outputDirectory.currentPath)
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Parameters"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
self.inputDirectory=ctk.ctkPathLineEdit()
self.inputDirectory.filters = ctk.ctkPathLineEdit.Dirs
self.inputDirectory.setToolTip( "Select directory containing volumes" )
parametersFormLayout.addRow("Input directory: ", self.inputDirectory)
# Select output directory
self.outputDirectory=ctk.ctkPathLineEdit()
self.outputDirectory.filters = ctk.ctkPathLineEdit.Dirs
self.outputDirectory.setToolTip( "Select directory for output models: " )
parametersFormLayout.addRow("Output directory: ", self.outputDirectory)
#
# Select the extension type
#
self.extensionOptionGZ = qt.QRadioButton(".nii.gz")
self.extensionOptionGZ.setChecked(True)
parametersFormLayout.addRow("Select extension type: ", self.extensionOptionGZ)
#
# set threshold value
#
self.threshold = ctk.ctkDoubleSpinBox()
self.threshold.singleStep = 1
self.threshold.minimum = 0
self.threshold.maximum = 100000
self.threshold.setDecimals(0)
self.threshold.value = 500
self.threshold.setToolTip("Select threshold for segmentation of volume")
parametersFormLayout.addRow("Threshold for segmentation:", self.threshold)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Apply")
    self.applyButton.toolTip = "Generate meshes from the input volumes."
self.applyButton.enabled = False
parametersFormLayout.addRow(self.applyButton)
#
# check box to trigger taking screen shots for later use in tutorials
#
self.enableScreenshotsFlagCheckBox = qt.QCheckBox()
self.enableScreenshotsFlagCheckBox.checked = 0
self.enableScreenshotsFlagCheckBox.setToolTip("If checked, take screen shots for tutorials. Use Save Data to write them to disk.")
parametersFormLayout.addRow("Enable Screenshots", self.enableScreenshotsFlagCheckBox)
# connections
self.inputDirectory.connect('validInputChanged(bool)', self.onSelectInput)
self.outputDirectory.connect('validInputChanged(bool)', self.onSelectOutput)
self.applyButton.connect('clicked(bool)', self.onApplyButton)
# Add vertical spacer
self.layout.addStretch(1)
def cleanup(self):
pass
def onApplyButton(self):
logic = VolumeToMeshLogic()
enableScreenshotsFlag = self.enableScreenshotsFlagCheckBox.checked
extension =""
if self.extensionOptionGZ.checked:
extension = ".nii.gz"
logic.run(self.inputDirectory.currentPath, self.outputDirectory.currentPath, extension, int(self.threshold.value))
#
# VolumeToMeshLogic
#
class VolumeToMeshLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def run(self, inputDirectory, outputDirectory, extension, stepThreshold):
renderLogic = slicer.modules.volumerendering.logic()
for file in os.listdir(inputDirectory):
if file.endswith(extension):
inputFile = os.path.join(inputDirectory, file)
volumeNode =slicer.util.loadVolume(inputFile)
labelVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
slicer.vtkSlicerVolumesLogic().CreateLabelVolumeFromVolume(slicer.mrmlScene, labelVolumeNode, volumeNode)
voxelArray = slicer.util.arrayFromVolume(volumeNode)
labelVoxelArray = slicer.util.arrayFromVolume(labelVolumeNode)
labelVoxelArray[voxelArray >= stepThreshold] = 100
labelVoxelArray[voxelArray < stepThreshold] = 0
slicer.util.arrayFromVolumeModified(labelVolumeNode)
imageName = volumeNode.GetName()
segmentationNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLSegmentationNode', imageName)
slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(labelVolumeNode, segmentationNode)
segmentID = segmentationNode.GetSegmentation().GetNthSegmentID(0)
polydata=vtk.vtkPolyData()
slicer.modules.segmentations.logic().GetSegmentClosedSurfaceRepresentation(segmentationNode, segmentID, polydata,1)
normalFilter = vtk.vtkPolyDataNormals()
normalFilter.SetInputData(polydata)
normalFilter.SetFlipNormals(1)
normalFilter.Update()
polydataFlip = normalFilter.GetOutput()
modelNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLModelNode',imageName)
modelNode.CreateDefaultDisplayNodes()
modelNode.SetAndObservePolyData(polydataFlip)
outputFilename = os.path.join(outputDirectory, imageName + '.ply')
slicer.util.saveNode(modelNode, outputFilename)
slicer.mrmlScene.RemoveNode(labelVolumeNode)
slicer.mrmlScene.RemoveNode(volumeNode)
slicer.mrmlScene.RemoveNode(segmentationNode)
slicer.mrmlScene.RemoveNode(modelNode)
def takeScreenshot(self,name,description,type=-1):
# show the message even if not taking a screen shot
slicer.util.delayDisplay('Take screenshot: '+description+'.\nResult is available in the Annotations module.', 3000)
lm = slicer.app.layoutManager()
# switch on the type to get the requested window
widget = 0
if type == slicer.qMRMLScreenShotDialog.FullLayout:
# full layout
widget = lm.viewport()
elif type == slicer.qMRMLScreenShotDialog.ThreeD:
# just the 3D window
widget = lm.threeDWidget(0).threeDView()
elif type == slicer.qMRMLScreenShotDialog.Red:
# red slice window
widget = lm.sliceWidget("Red")
elif type == slicer.qMRMLScreenShotDialog.Yellow:
# yellow slice window
widget = lm.sliceWidget("Yellow")
elif type == slicer.qMRMLScreenShotDialog.Green:
# green slice window
widget = lm.sliceWidget("Green")
else:
# default to using the full window
widget = slicer.util.mainWindow()
# reset the type so that the node is set correctly
type = slicer.qMRMLScreenShotDialog.FullLayout
# grab and convert to vtk image data
qimage = ctk.ctkWidgetsUtils.grabWidget(widget)
imageData = vtk.vtkImageData()
slicer.qMRMLUtils().qImageToVtkImageData(qimage,imageData)
annotationLogic = slicer.modules.annotations.logic()
annotationLogic.CreateSnapShot(name, description, type, 1, imageData)
class VolumeToMeshTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_VolumeToMesh1()
def test_VolumeToMesh1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
#
# first, get some data
#
import SampleData
SampleData.downloadFromURL(
nodeNames='FA',
fileNames='FA.nrrd',
uris='http://slicer.kitware.com/midas3/download?items=5767',
checksums='SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560')
self.delayDisplay('Finished with download and loading')
volumeNode = slicer.util.getNode(pattern="FA")
logic = VolumeToMeshLogic()
self.assertIsNotNone( logic.hasImageData(volumeNode) )
self.delayDisplay('Test passed!') | en | 0.780295 | # # VolumeToMesh # Uses ScriptedLoadableModule base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py # TODO make this more human readable by adding spaces # replace with "Firstname Lastname (Organization)" This module takes a directory of volumes and segments them using a user-supplied threshold value. The output segments are converted to models and saved in the output directory. This module was developed by <NAME> and <NAME>, through a NSF ABI Development grant, "An Integrated Platform for Retrieval, Visualization and Analysis of 3D Morphology From Digital Biological Collections" (Award Numbers: 1759883 (Murat Maga), 1759637 (<NAME>), 1759839 (Douglas Boyer)). https://nsf.gov/awardsearch/showAward?AWD_ID=1759883&HistoricalAwards=false # replace with organization, grant and thanks. # # VolumeToMeshWidget # Uses ScriptedLoadableModuleWidget base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py # Instantiate and connect widgets ... # # Parameters Area # # Layout within the dummy collapsible button # Select output directory # # Select the extension type # # # set threshold value # # # Apply Button # # # check box to trigger taking screen shots for later use in tutorials # # connections # Add vertical spacer # # VolumeToMeshLogic # This class should implement all the actual computation done by your module. The interface should be such that other python code can import this class and make use of the functionality without requiring an instance of the Widget. Uses ScriptedLoadableModuleLogic base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py # show the message even if not taking a screen shot # switch on the type to get the requested window # full layout # just the 3D window # red slice window # yellow slice window # green slice window # default to using the full window # reset the type so that the node is set correctly # grab and convert to vtk image data This is the test case for your scripted module. Uses ScriptedLoadableModuleTest base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py Do whatever is needed to reset the state - typically a scene clear will be enough. Run as few or as many tests as needed here. Ideally you should have several levels of tests. At the lowest level tests should exercise the functionality of the logic with different inputs (both valid and invalid). At higher levels your tests should emulate the way the user would interact with your code and confirm that it still works the way you intended. One of the most important features of the tests is that it should alert other developers when their changes will have an impact on the behavior of your module. For example, if a developer removes a feature that you depend on, your test should break so they know that the feature is needed. # # first, get some data # | 2.240111 | 2 |
week6-Evasion/bot/random_player.py | chirag1992m/heuristicProblemSolvingFall17 | 0 | 6632173 | import argparse
from .base_bot import BaseBot
import random
class RandomBot(BaseBot):
def __init__(self, host, port, name, visualize=False, seed=42):
super().__init__(host=host, port=port, name=name, visualize=visualize)
random.seed(seed)
def move_hunter(self):
wall = random.randint(0, 4)
to_delete = []
for wall_idx in range(len(self.game.walls)):
if random.random() < .1:
to_delete.append(wall_idx)
return wall, to_delete
def move_prey(self):
x = random.randint(-1, 1)
y = random.randint(-1, 1)
return x, y
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ip', default='127.0.0.1', type=str)
parser.add_argument('--port', default=9001, type=int)
parser.add_argument('--name', default='chirag-ojas', type=str)
parser.add_argument('--viz', default=False, action='store_true')
parser.add_argument('--seed', default=42, type=int)
args = parser.parse_args()
client = RandomBot(args.ip, args.port, args.name, visualize=args.viz, seed=args.seed)
client.start()
client.done()
| import argparse
from .base_bot import BaseBot
import random
class RandomBot(BaseBot):
def __init__(self, host, port, name, visualize=False, seed=42):
super().__init__(host=host, port=port, name=name, visualize=visualize)
random.seed(seed)
def move_hunter(self):
wall = random.randint(0, 4)
to_delete = []
for wall_idx in range(len(self.game.walls)):
if random.random() < .1:
to_delete.append(wall_idx)
return wall, to_delete
def move_prey(self):
x = random.randint(-1, 1)
y = random.randint(-1, 1)
return x, y
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ip', default='127.0.0.1', type=str)
parser.add_argument('--port', default=9001, type=int)
parser.add_argument('--name', default='chirag-ojas', type=str)
parser.add_argument('--viz', default=False, action='store_true')
parser.add_argument('--seed', default=42, type=int)
args = parser.parse_args()
client = RandomBot(args.ip, args.port, args.name, visualize=args.viz, seed=args.seed)
client.start()
client.done()
| none | 1 | 2.863017 | 3 |
|
aumento de salario .py | Danielporcela/Meus-exercicios-phyton | 2 | 6632174 | <filename>aumento de salario .py<gh_stars>1-10
# (Exercise in Portuguese) Read an employee's salary and print the new salary after a 15% raise.
salario = float(input('Qual é o salario do funcionario ? R$ '))
aumento = salario + (salario *15 / 100)
print(' Um funcionario que ganhava R$ {:.2f}, com aumento de 15 % passa a receber R$ {:.2f} '.format(salario,aumento ))
| <filename>aumento de salario .py<gh_stars>1-10
# (Exercise in Portuguese) Read an employee's salary and print the new salary after a 15% raise.
salario = float(input('Qual é o salario do funcionario ? R$ '))
aumento = salario + (salario *15 / 100)
print(' Um funcionario que ganhava R$ {:.2f}, com aumento de 15 % passa a receber R$ {:.2f} '.format(salario,aumento ))
| none | 1 | 3.587743 | 4 |
|
examples/misc/h2o2_ilt.py | amatsugi/me2d | 0 | 6632175 | #! /usr/bin/env python3
"""
ILT (inverse laplace transform) calculation of k(E) for H2O2 => OH + OH
output file: h2o2_iltE_dE10.dat
"""
from me2d import RoVib
from me2d import ilt
nsym = 2
rotA = 10.3560
rotB2D = 0.84680
freq = [877, 1266, 1402, 3599, 3608]
# HO-OH rotational energy levels
levels = [0.0, 13.28417, 251.96034, 375.2889, 576.38451, 786.65549, 1014.27698,
1251.27288, 1492.82778, 1738.07658, 1965.91838, 2224.64158, 2354.85548,
2728.62328, 2750.95608, 3300.27678, 3301.84558, 3960.96818, 3961.04278,
4709.42418, 4709.42688, 5542.65208, 5542.65218, 6458.95218, 6458.95218,
7457.38088, 7457.38088, 8537.37598, 8537.37598, 9698.58218, 9698.58218,
10940.76388, 10940.76388, 12263.76088, 12263.76088, 13667.45788, 13667.45788,
15151.77188, 15151.77188, 16716.64188, 16716.64188, 18362.02188, 18362.02188,
20087.87688, 20087.87688, 21894.17788, 21894.17788, 23780.90488, 23780.90488,
25748.03888, 25748.03888, 27795.56688, 27795.56688, 29923.47788, 29923.47788,
32131.76188, 32131.76188, 34420.41188, 34420.41188, 36789.42088, 36789.42088,
39238.78388, 39238.78388, 41768.49588, 41768.49588, 44378.55388, 44378.55388,
47068.95488, 47068.95488, 49839.69488, 49839.69488, 52690.77188, 52690.77188,
55622.18388, 55622.18388, 58633.92988, 58633.92988, 61726.00688, 61726.00688,
64898.41488, 64898.41488]
states = [(1, x) for x in levels]
rovib = RoVib(nsym, rotA, rotB2D, freq, states=states)
# ILT [k(T) = A*exp(-E/RT)]
ilt_A = 6.7e14 # s^-1
ilt_E = 17236.884 # cm^-1
maxE = 65000
dE = 10
ilt(maxE, dE, rovib, ilt_A, ilt_E, outf="h2o2_iltE_dE%d.dat" % (dE))
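# --- Illustrative sanity check (added example; not part of the original script) ---
# The ILT parameters correspond to the high-pressure-limit rate k(T) = A*exp(-E/(kB*T)).
# The Boltzmann constant expressed in cm^-1/K (~0.695035) and the temperature of
# 1000 K used below are assumptions of this example only.
import math
kB_cm = 0.695035                   # Boltzmann constant, cm^-1 per K (approximate)
T_example = 1000.0                 # K, arbitrary example temperature
k_T_example = ilt_A * math.exp(-ilt_E / (kB_cm * T_example))   # s^-1
print("illustrative k(T=1000 K) from the ILT parameters: %.3e s^-1" % k_T_example)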
| #! /usr/bin/env python3
"""
ILT (inverse laplace transform) calculation of k(E) for H2O2 => OH + OH
output file: h2o2_iltE_dE10.dat
"""
from me2d import RoVib
from me2d import ilt
nsym = 2
rotA = 10.3560
rotB2D = 0.84680
freq = [877, 1266, 1402, 3599, 3608]
# HO-OH rotational energy levels
levels = [0.0, 13.28417, 251.96034, 375.2889, 576.38451, 786.65549, 1014.27698,
1251.27288, 1492.82778, 1738.07658, 1965.91838, 2224.64158, 2354.85548,
2728.62328, 2750.95608, 3300.27678, 3301.84558, 3960.96818, 3961.04278,
4709.42418, 4709.42688, 5542.65208, 5542.65218, 6458.95218, 6458.95218,
7457.38088, 7457.38088, 8537.37598, 8537.37598, 9698.58218, 9698.58218,
10940.76388, 10940.76388, 12263.76088, 12263.76088, 13667.45788, 13667.45788,
15151.77188, 15151.77188, 16716.64188, 16716.64188, 18362.02188, 18362.02188,
20087.87688, 20087.87688, 21894.17788, 21894.17788, 23780.90488, 23780.90488,
25748.03888, 25748.03888, 27795.56688, 27795.56688, 29923.47788, 29923.47788,
32131.76188, 32131.76188, 34420.41188, 34420.41188, 36789.42088, 36789.42088,
39238.78388, 39238.78388, 41768.49588, 41768.49588, 44378.55388, 44378.55388,
47068.95488, 47068.95488, 49839.69488, 49839.69488, 52690.77188, 52690.77188,
55622.18388, 55622.18388, 58633.92988, 58633.92988, 61726.00688, 61726.00688,
64898.41488, 64898.41488]
states = [(1, x) for x in levels]
rovib = RoVib(nsym, rotA, rotB2D, freq, states=states)
# ILT [k(T) = A*exp(-E/RT)]
ilt_A = 6.7e14 # s^-1
ilt_E = 17236.884 # cm^-1
maxE = 65000
dE = 10
ilt(maxE, dE, rovib, ilt_A, ilt_E, outf="h2o2_iltE_dE%d.dat" % (dE))
| en | 0.506519 | #! /usr/bin/env python3 ILT (inverse laplace transform) calculation of k(E) for H2O2 => OH + OH output file: h2o2_iltE_dE10.dat # HO-OH rotational energy levels # ILT [k(T) = A*exp(-E/RT)] # s^-1 # cm^-1 | 2.407072 | 2 |
src/selena/stats/migrations/0002_auto__add_field_incident_incident_type.py | deejay1/selena | 23 | 6632176 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Incident.incident_type'
db.add_column(u'stats_incident', 'incident_type',
self.gf('django.db.models.fields.CharField')(default=u'failure', max_length=15),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Incident.incident_type'
db.delete_column(u'stats_incident', 'incident_type')
models = {
u'services.agent': {
'Meta': {'object_name': 'Agent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.Queue']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
u'services.queue': {
'Meta': {'object_name': 'Queue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
u'services.service': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Service'},
'additional_agents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['services.Agent']", 'null': 'True', 'blank': 'True'}),
'auth_method': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'auth_pass': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'auth_user': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'base_referer': ('django.db.models.fields.URLField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'base_useragent': ('django.db.models.fields.CharField', [], {'default': "u'Mozilla/5.0 (X11; U; Linux x86_64; pl-PL; rv:1.9.2.3) Gecko/20100423 Ubuntu/10.04 (lucid) Firefox/3.6.3'", 'max_length': '250'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'connection_timeout': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '30'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hosting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_core_service': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_technical_break': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'performance_issues_min_probes_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'performance_issues_time': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '15'}),
'response_code': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '200'}),
'sensitivity': ('django.db.models.fields.DecimalField', [], {'default': "'0.5'", 'max_digits': '3', 'decimal_places': '2'}),
'service_not_working_min_probes_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'service_working_min_probes_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
'time_delta': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'stats.incident': {
'Meta': {'object_name': 'Incident'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incident_type': ('django.db.models.fields.CharField', [], {'default': "u'failure'", 'max_length': '15'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.Service']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['stats'] | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Incident.incident_type'
db.add_column(u'stats_incident', 'incident_type',
self.gf('django.db.models.fields.CharField')(default=u'failure', max_length=15),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Incident.incident_type'
db.delete_column(u'stats_incident', 'incident_type')
models = {
u'services.agent': {
'Meta': {'object_name': 'Agent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.Queue']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
u'services.queue': {
'Meta': {'object_name': 'Queue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
u'services.service': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Service'},
'additional_agents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['services.Agent']", 'null': 'True', 'blank': 'True'}),
'auth_method': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'auth_pass': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'auth_user': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'base_referer': ('django.db.models.fields.URLField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'base_useragent': ('django.db.models.fields.CharField', [], {'default': "u'Mozilla/5.0 (X11; U; Linux x86_64; pl-PL; rv:1.9.2.3) Gecko/20100423 Ubuntu/10.04 (lucid) Firefox/3.6.3'", 'max_length': '250'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'connection_timeout': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '30'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hosting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_core_service': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_technical_break': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'performance_issues_min_probes_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'performance_issues_time': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '15'}),
'response_code': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '200'}),
'sensitivity': ('django.db.models.fields.DecimalField', [], {'default': "'0.5'", 'max_digits': '3', 'decimal_places': '2'}),
'service_not_working_min_probes_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'service_working_min_probes_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
'time_delta': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'stats.incident': {
'Meta': {'object_name': 'Incident'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incident_type': ('django.db.models.fields.CharField', [], {'default': "u'failure'", 'max_length': '15'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.Service']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['stats'] | en | 0.530649 | # -*- coding: utf-8 -*- # Adding field 'Incident.incident_type' # Deleting field 'Incident.incident_type' | 2.176739 | 2 |
Club_Performance_BenjaminMeco_Final.py | jesperiksson/SoccermaticsForPython | 0 | 6632177 | <reponame>jesperiksson/SoccermaticsForPython<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 25 10:35:12 2020
@author: BenjaminMeco
"""
import matplotlib.pyplot as plt
import numpy as np
import json
import pandas as pd
from pandas import json_normalize
from FCPython import createPitch
import statsmodels.formula.api as smf
def factorial(n):
if(n == 0):
return 1
else:
return n*factorial(n-1)
def pois(l,k):
return (l**k)*np.exp(-l)/factorial(k)
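# Quick illustrative check of pois: pois(2.0, 3) = 2**3 * exp(-2) / 3!, which is
# roughly 0.180.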
# this is just a help for getting the data
def indexOf(team_name,team_list):
index = -1
for element in team_list:
index = index + 1
if(element[0] == team_name):
return index
return -1
# for getting the distributions:
def getWeights(arr,size):
weights = np.zeros(size)
W = 0
for k in range(0,size):
W = W + arr[k]
for k in range(0,size):
weights[k] = arr[k]/W
return weights
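# Illustrative example (made-up numbers): getWeights([2, 1, 1], 3) returns
# array([0.5, 0.25, 0.25]), i.e. the raw counts normalised so they sum to one.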
def outcomeWeights(r,probabilities):
    # return the first index whose cumulative probability exceeds r
    # (i.e. sample an outcome index according to the given weights)
    s = 0
    count = 0
    for p in probabilities:
        s = s + p
        if(s > r):
            return count
        count = count + 1
    return count
# this makes a simulation using weights as the probability distribution
def simulate(team_list):
points = np.zeros(len(team_list))
for i in range(0,len(team_list)):
for j in range(1,len(team_list)):
t_1 = team_list[i]
t_2 = team_list[(i+j)%len(team_list)]
lambda_1 = (t_1[1] + t_2[2])/2
lambda_2 = (t_1[2] + t_2[1])/2
g_1 = int(np.random.poisson(lambda_1))
g_2 = int(np.random.poisson(lambda_2))
if(g_1 > g_2):
points[i] = points[i] + 3
elif(g_1 < g_2):
points[(i+j)%len(team_list)] = points[(i+j)%len(team_list)] + 3
else:
points[i] = points[i] + 1
points[(i+j)%len(team_list)] = points[(i+j)%len(team_list)] + 1
result = []
for i in range(0,len(team_list)):
result = result + [[points[i],team_list[i][0]]]
return result
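# Illustrative sketch of a single simulated fixture (team names and rates are made
# up): each side's goals are drawn from a Poisson whose mean blends its own scoring
# rate with the opponent's conceding rate, exactly as in the loop above.
#
#     example_teams = [["Home FC", 2.0, 1.0], ["Away FC", 1.2, 1.5]]
#     lam_home = (example_teams[0][1] + example_teams[1][2]) / 2   # = 1.75
#     lam_away = (example_teams[1][1] + example_teams[0][2]) / 2   # = 1.1
#     goals_home = np.random.poisson(lam_home)
#     goals_away = np.random.poisson(lam_away)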
def simulMany(team_list,N):
team_placements = []
for t in team_list:
team_placements = team_placements + [[t[0],np.zeros(21)]]
for n in range(N):
# do a simulation:
s = sorted(simulate(team_list))
# get the placements:
for i in range(0,len(s)):
e = s[i]
index = indexOf(e[1],team_list)
team_placements[index][1][20-i] = team_placements[index][1][20-i] + 1
for t in team_placements:
t[1] = getWeights(t[1],21)[1:]
return team_placements
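# Note on the returned structure (added commentary; the interpretation follows the
# indexing above): each entry is [team_name, probs] where probs[0] estimates the
# probability of finishing 1st, probs[1] of finishing 2nd, and so on, over the N
# simulated seasons.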
#Load the data
with open('Wyscout/matches/matches_England.json') as data_file:
data = json.load(data_file)
df = json_normalize(data, sep = "_")
# first we extract the relevant bits of the matches:
matches = []
for i,game in df.iterrows():
label = game['label']
dash = label.find(" -")
comma = label.find(",")
team_1 = label[0:dash]
team_2 = label[dash+3:comma]
score_1 = label[comma+2:comma+3]
score_2 = label[comma+6:]
matches = matches + [[team_1,score_1,team_2,score_2]]
# now we make the distributions for each team:
teamList = []
for m in matches:
index_1 = indexOf(m[0],teamList)
index_2 = indexOf(m[2],teamList)
# update the data for the first team
if(index_1 == -1):
new_team = [m[0],0,0]
new_team[1] = int(m[1])
new_team[2] = int(m[3])
teamList = teamList + [new_team]
else:
teamList[index_1][1] = teamList[index_1][1] + int(m[1])
teamList[index_1][2] = teamList[index_1][2] + int(m[3])
# update the data for the second team
if(index_2 == -1):
new_team = [m[2],0,0]
new_team[1] = int(m[3])
new_team[2] = int(m[1])
teamList = teamList + [new_team]
else:
teamList[index_2][1] = teamList[index_2][1] + int(m[3])
teamList[index_2][2] = teamList[index_2][2] + int(m[1])
teamList.sort()
# now we get the desired data for the weights and the poisson distributions:
teamPoisson = []
for t in teamList:
teamPoisson = teamPoisson + [[t[0],t[1]/38,t[2]/38]]
# finally some simulations, first with the Poisson distributions:
N = 10000
alph = 0.8
W = 0.25
team_placements = simulMany(teamPoisson,N)
col = (1,0,0)
c = -1
plt.figure(dpi = 160)
for t in team_placements:
if(t[0] == "Liverpool" or t[0] == "Manchester City" or t[0] == "Manchester United"):
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = col, label = t[0],alpha = 0.9,width = W)
c = c+1
if(col == (1,0,0)):
col = (0,1,0)
else:
col = (0,0,1)
plt.xlabel("Placement")
plt.ylabel("Probability of placement")
plt.xticks(range(1,21))
plt.xlim(0,8)
plt.legend()
plt.show()
# next we look at how the performance of liverpool changes when they
# improve the offence/defence or both. We do this by changing their parameters in the
# poisson distribution.
lambda_off = teamPoisson[indexOf("Liverpool",teamPoisson)][1]
lambda_def = teamPoisson[indexOf("Liverpool",teamPoisson)][2]
# first we look at improving offence:
plt.figure(dpi = 160)
c = -1
for d in np.linspace(20,10,2):
print(str(d))
# make the modifications:
teamPoisson[indexOf("Liverpool",teamPoisson)][1] = lambda_off + d/38
# simulate and plot the distributions of Liverpool:
T = simulMany(teamPoisson,N)
t = T[indexOf("Liverpool",T)]
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = (0.5-c*0.5,0,0),width = W, label = "Scoring " +str(d) + " more goals", alpha = alph)
c = c+1
plt.bar([k+c*(W + 0.03) for k in range(1,21)],team_placements[indexOf("Liverpool",team_placements)][1],color = "black", width = W, label = "Baseline")
plt.xlabel("Placement")
plt.ylabel("Probability of placement for Liverpool\n with improved offence")
plt.xticks(range(1,21))
plt.xlim(0,10)
plt.legend()
plt.show()
plt.figure(dpi = 160)
# secondly we look at improving defence:
c = -1
for d in np.linspace(20,10,2):
print(str(d))
    # make the modifications (note: Liverpool's offence parameter still carries the
    # last boost from the offence loop above, so these runs combine both changes):
teamPoisson[indexOf("Liverpool",teamPoisson)][2] = lambda_def-d/38
# simulate and plot the distributions of Liverpool:
T = simulMany(teamPoisson,N)
t = T[indexOf("Liverpool",T)]
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = (0,0,0.5-c*0.5),width = W, label = "Conceding " +str(d) + " fewer goals",alpha = alph)
c = c+1
plt.bar([k+c*(W + 0.03) for k in range(1,21)],team_placements[indexOf("Liverpool",team_placements)][1],color = "black",width = W, label = "Baseline")
plt.xlabel("Placement")
plt.ylabel("Probability of placement for Liverpool\n with defence")
plt.xticks(range(1,21))
plt.xlim(0,10)
plt.legend()
plt.show()
c = -1
col = (1,0,0)
plt.figure(dpi = 160)
for t in T:
if(t[0] == "Liverpool" or t[0] == "Manchester City" or t[0] == "Manchester United"):
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = col, label = t[0],alpha = 0.9,width = W)
c = c+1
if(col == (1,0,0)):
col = (0,1,0)
else:
col = (0,0,1)
plt.xlabel("Placement")
plt.ylabel("Probability of placement")
plt.xticks(range(1,21))
plt.xlim(0,8)
plt.legend()
plt.show()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 25 10:35:12 2020
@author: BenjaminMeco
"""
import matplotlib.pyplot as plt
import numpy as np
import json
import pandas as pd
from pandas import json_normalize
from FCPython import createPitch
import statsmodels.formula.api as smf
def factorial(n):
if(n == 0):
return 1
else:
return n*factorial(n-1)
def pois(l,k):
return (l**k)*np.exp(-l)/factorial(k)
# this is just a help for getting the data
def indexOf(team_name,team_list):
index = -1
for element in team_list:
index = index + 1
if(element[0] == team_name):
return index
return -1
# for getting the distributions:
def getWeights(arr,size):
weights = np.zeros(size)
W = 0
for k in range(0,size):
W = W + arr[k]
for k in range(0,size):
weights[k] = arr[k]/W
return weights
def outcomeWeights(r,probabilities):
    # return the first index whose cumulative probability exceeds r
    # (i.e. sample an outcome index according to the given weights)
    s = 0
    count = 0
    for p in probabilities:
        s = s + p
        if(s > r):
            return count
        count = count + 1
    return count
# this makes a simulation using weights as the probability distribution
def simulate(team_list):
points = np.zeros(len(team_list))
for i in range(0,len(team_list)):
for j in range(1,len(team_list)):
t_1 = team_list[i]
t_2 = team_list[(i+j)%len(team_list)]
lambda_1 = (t_1[1] + t_2[2])/2
lambda_2 = (t_1[2] + t_2[1])/2
g_1 = int(np.random.poisson(lambda_1))
g_2 = int(np.random.poisson(lambda_2))
if(g_1 > g_2):
points[i] = points[i] + 3
elif(g_1 < g_2):
points[(i+j)%len(team_list)] = points[(i+j)%len(team_list)] + 3
else:
points[i] = points[i] + 1
points[(i+j)%len(team_list)] = points[(i+j)%len(team_list)] + 1
result = []
for i in range(0,len(team_list)):
result = result + [[points[i],team_list[i][0]]]
return result
def simulMany(team_list,N):
team_placements = []
for t in team_list:
team_placements = team_placements + [[t[0],np.zeros(21)]]
for n in range(N):
# do a simulation:
s = sorted(simulate(team_list))
# get the placements:
for i in range(0,len(s)):
e = s[i]
index = indexOf(e[1],team_list)
team_placements[index][1][20-i] = team_placements[index][1][20-i] + 1
for t in team_placements:
t[1] = getWeights(t[1],21)[1:]
return team_placements
#Load the data
with open('Wyscout/matches/matches_England.json') as data_file:
data = json.load(data_file)
df = json_normalize(data, sep = "_")
# first we extract the relevant bits of the matches:
matches = []
for i,game in df.iterrows():
label = game['label']
dash = label.find(" -")
comma = label.find(",")
team_1 = label[0:dash]
team_2 = label[dash+3:comma]
score_1 = label[comma+2:comma+3]
score_2 = label[comma+6:]
matches = matches + [[team_1,score_1,team_2,score_2]]
# now we make the distributions for each team:
teamList = []
for m in matches:
index_1 = indexOf(m[0],teamList)
index_2 = indexOf(m[2],teamList)
# update the data for the first team
if(index_1 == -1):
new_team = [m[0],0,0]
new_team[1] = int(m[1])
new_team[2] = int(m[3])
teamList = teamList + [new_team]
else:
teamList[index_1][1] = teamList[index_1][1] + int(m[1])
teamList[index_1][2] = teamList[index_1][2] + int(m[3])
# update the data for the second team
if(index_2 == -1):
new_team = [m[2],0,0]
new_team[1] = int(m[3])
new_team[2] = int(m[1])
teamList = teamList + [new_team]
else:
teamList[index_2][1] = teamList[index_2][1] + int(m[3])
teamList[index_2][2] = teamList[index_2][2] + int(m[1])
teamList.sort()
# now we get the desired data for the weights and the poisson distributions:
teamPoisson = []
for t in teamList:
teamPoisson = teamPoisson + [[t[0],t[1]/38,t[2]/38]]
# finally some simulations, first with the Poisson distributions:
N = 10000
alph = 0.8
W = 0.25
team_placements = simulMany(teamPoisson,N)
col = (1,0,0)
c = -1
plt.figure(dpi = 160)
for t in team_placements:
if(t[0] == "Liverpool" or t[0] == "Manchester City" or t[0] == "Manchester United"):
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = col, label = t[0],alpha = 0.9,width = W)
c = c+1
if(col == (1,0,0)):
col = (0,1,0)
else:
col = (0,0,1)
plt.xlabel("Placement")
plt.ylabel("Probability of placement")
plt.xticks(range(1,21))
plt.xlim(0,8)
plt.legend()
plt.show()
# next we look at how the performance of liverpool changes when they
# improve the offence/defence or both. We do this by changing their parameters in the
# poisson distribution.
lambda_off = teamPoisson[indexOf("Liverpool",teamPoisson)][1]
lambda_def = teamPoisson[indexOf("Liverpool",teamPoisson)][2]
# first we look at improving offence:
plt.figure(dpi = 160)
c = -1
for d in np.linspace(20,10,2):
print(str(d))
# make the modifications:
teamPoisson[indexOf("Liverpool",teamPoisson)][1] = lambda_off + d/38
# simulate and plot the distributions of Liverpool:
T = simulMany(teamPoisson,N)
t = T[indexOf("Liverpool",T)]
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = (0.5-c*0.5,0,0),width = W, label = "Scoring " +str(d) + " more goals", alpha = alph)
c = c+1
plt.bar([k+c*(W + 0.03) for k in range(1,21)],team_placements[indexOf("Liverpool",team_placements)][1],color = "black", width = W, label = "Baseline")
plt.xlabel("Placement")
plt.ylabel("Probability of placement for Liverpool\n with improved offence")
plt.xticks(range(1,21))
plt.xlim(0,10)
plt.legend()
plt.show()
plt.figure(dpi = 160)
# secondly we look at improving defence:
c = -1
for d in np.linspace(20,10,2):
print(str(d))
    # make the modifications (note: Liverpool's offence parameter still carries the
    # last boost from the offence loop above, so these runs combine both changes):
teamPoisson[indexOf("Liverpool",teamPoisson)][2] = lambda_def-d/38
# simulate and plot the distributions of Liverpool:
T = simulMany(teamPoisson,N)
t = T[indexOf("Liverpool",T)]
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = (0,0,0.5-c*0.5),width = W, label = "Conceding " +str(d) + " fewer goals",alpha = alph)
c = c+1
plt.bar([k+c*(W + 0.03) for k in range(1,21)],team_placements[indexOf("Liverpool",team_placements)][1],color = "black",width = W, label = "Baseline")
plt.xlabel("Placement")
plt.ylabel("Probability of placement for Liverpool\n with defence")
plt.xticks(range(1,21))
plt.xlim(0,10)
plt.legend()
plt.show()
c = -1
col = (1,0,0)
plt.figure(dpi = 160)
for t in T:
if(t[0] == "Liverpool" or t[0] == "Manchester City" or t[0] == "Manchester United"):
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = col, label = t[0],alpha = 0.9,width = W)
c = c+1
if(col == (1,0,0)):
col = (0,1,0)
else:
col = (0,0,1)
plt.xlabel("Placement")
plt.ylabel("Probability of placement")
plt.xticks(range(1,21))
plt.xlim(0,8)
plt.legend()
plt.show() | en | 0.863498 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Sun Oct 25 10:35:12 2020 @author: BenjaminMeco # this is just a help for getting the data # for getting the distributions: # this makes a simulation using weights as the probability distribution # do a simulation: # get the placements: #Load the data # first we extract the relevant bits of the matches: # now we make the distributions for each team: # update the data for the first team # update the data for the second team # now we get the desired data for the weights and the poisson distributions: # finally some simulations, first with the Poisson distributions: # next we look at how the performance of liverpool changes when they # improve the offence/defence or both. We do this by changing their parameters in the # poisson distribution. # first we look at improving offence: # make the modifications: # simulate and plot the distributions of Liverpool: # secondly we look at improving defence: # make the modifications: # simulate and plot the distributions of Liverpool: | 3.273476 | 3 |
Py57/main.py | xhexe/Py8R | 0 | 6632178 |
def longest_word(file):
    with open(file, "r") as f:
        words = f.read().split()
    max_len = len(max(words, key=len))  # length of the longest word in the file
    return [word for word in words if len(word) == max_len]

print(longest_word("/home/xhexe/Py/Py8R/files/text.txt"))
turb2d/cip.py | narusehajime/turb2d | 0 | 6632179 | import numpy as np
def cip_2d_M_advection(
f,
dfdx,
dfdy,
u,
v,
core,
h_up,
v_up,
dx,
dt,
out_f=None,
out_dfdx=None,
out_dfdy=None,
):
"""Calculate one time step using M-type 2D cip method
"""
# First, the variables out and temp are allocated to
# store the calculation results
if out_f is None:
out_f = np.empty(f.shape)
if out_dfdx is None:
out_dfdx = np.empty(dfdx.shape)
if out_dfdy is None:
out_dfdy = np.empty(dfdy.shape)
# 1st step for horizontal advection
D_x = -np.where(u > 0.0, 1.0, -1.0) * dx
xi_x = -u * dt
cip_1d_advection(f, dfdx, u, core, h_up, dx, dt,
out_f=out_f, out_dfdx=out_dfdx)
out_dfdy[core] = dfdy[core] - xi_x[core] / D_x[core] * (
dfdy[core] - dfdy[h_up[core]]
)
# 2nd step for vertical advection
D_y = -np.where(v > 0.0, 1.0, -1.0) * dx
xi_y = -v * dt
cip_1d_advection(out_f, dfdy, v, core, v_up, dx,
dt, out_f=out_f, out_dfdx=out_dfdy)
out_dfdx[core] = out_dfdx[core] - xi_y[core] / D_y[core] * (
out_dfdx[core] - out_dfdx[v_up[core]]
)
return out_f, out_dfdx, out_dfdy
def cip_1d_advection(
f, dfdx, u, core, up_id, dx, dt, out_f=None, out_dfdx=None,
):
"""Calculate one time step using M-type 2D cip method
"""
# First, the variables out and temp are allocated to
# store the calculation results
if out_f is None:
out_f = np.empty(f.shape)
if out_dfdx is None:
out_dfdx = np.empty(dfdx.shape)
up = up_id[core]
# 1st step for horizontal advection
D_x = -np.where(u > 0.0, 1.0, -1.0) * dx
xi_x = -u * dt
a = (dfdx[core] + dfdx[up]) / (D_x[core] ** 2) + 2.0 * (f[core] - f[up]) / (
D_x[core] ** 3
)
b = (
3.0 * (f[up] - f[core]) / (D_x[core] ** 2)
- (2.0 * dfdx[core] + dfdx[up]) / D_x[core]
)
out_f[core] = (
a * (xi_x[core] ** 3)
+ b * (xi_x[core] ** 2)
+ dfdx[core] * xi_x[core]
+ f[core]
)
out_dfdx[core] = 3.0 * a * (xi_x[core] ** 2) + \
2.0 * b * xi_x[core] + dfdx[core]
return out_f, out_dfdx
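# Usage note (a sketch, not from the original module): cip_1d_advection only
# writes the entries listed in `core`; when the default out_f/out_dfdx
# (np.empty) are used, boundary entries stay uninitialized and must be set by
# the caller. `up_id` is expected to give, for every grid, the index of its
# upstream neighbour for the local sign of `u`, e.g. for a uniform rightward
# flow on a 1-D grid of n points:
#
#   up_id = np.arange(n) - 1
#   up_id[0] = 0
#   f_new, dfdx_new = cip_1d_advection(f, dfdx, u, core, up_id, dx, dt,
#                                      out_f=f.copy(), out_dfdx=dfdx.copy())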
def cip_2d_nonadvection(
f,
dfdx,
dfdy,
G,
u,
v,
core,
h_up,
h_down,
v_up,
v_down,
dx,
dt,
out_f=None,
out_dfdx=None,
out_dfdy=None,
):
if out_f is None:
out_f = np.zeros(f.shape)
if out_dfdx is None:
out_dfdx = np.zeros(dfdx.shape)
if out_dfdy is None:
out_dfdy = np.zeros(dfdy.shape)
D_x = -np.where(u > 0.0, 1.0, -1.0) * dx
xi_x = -u * dt
D_y = -np.where(v > 0.0, 1.0, -1.0) * dx
xi_y = -v * dt
# non-advection term
out_f[core] = f[core] + G[core] * dt
out_dfdx[core] = (
dfdx[core]
+ ((out_f[h_down] - f[h_down]) -
(out_f[h_up] - f[h_up])) / (-2 * D_x[core])
- dfdx[core] * (xi_x[h_down] - xi_x[h_up]) / (2 * D_x[core])
)
    out_dfdy[core] = (
        dfdy[core]
        + ((out_f[v_down] - f[v_down]) -
           (out_f[v_up] - f[v_up])) / (-2 * D_y[core])
        - dfdy[core] * (xi_y[v_down] - xi_y[v_up]) / (2 * D_y[core])
    )
return out_f, out_dfdx, out_dfdy
def cip_2d_diffusion(
u,
v,
nu_t,
h_active,
v_active,
north,
south,
east,
west,
dx,
dt,
out_u=None,
out_v=None,
):
"""Caclulate horizontal and vertical diffusion of velocities u and v
"""
if out_u is None:
out_u = np.zeros(u.shape)
if out_v is None:
out_v = np.zeros(v.shape)
out_u[h_active] = (
u[h_active]
+ nu_t[h_active]
* dt
* (
(u[east][h_active] - 2 * u[h_active] + u[west][h_active])
+ (u[north][h_active] - 2 * u[h_active] + u[south][h_active])
)
/ dx ** 2
)
out_v[v_active] = (
v[v_active]
+ nu_t[v_active]
* dt
* (
(v[east][v_active] - 2 * v[v_active] + v[west][v_active])
+ (v[north][v_active] - 2 * v[v_active] + v[south][v_active])
)
/ dx ** 2
)
return out_u, out_v
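# Stability note (an assumption, not stated in the original code):
# cip_2d_diffusion is an explicit (FTCS-type) update, so the time step should
# roughly satisfy dt <= dx ** 2 / (4 * nu_t.max()); a quick guard could be
#
#   assert dt <= dx ** 2 / (4.0 * np.max(nu_t)), "diffusion step too large"
#
# before calling the function.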
def rcip_1d_advection(f, dfdx, u, core, up_id, dx, dt, out_f=None, out_dfdx=None):
""" calculate 1 step of advection phase by rational function
CIP method.
Parameters
----------------
f : ndarray
variable to be calculated
dfdx : ndarray
spatial gradient of the parameter f
u : ndarray
advection velocity of f
    core : ndarray
        indices of core grids
    up_id : ndarray
        indices of grids that locate upstream
dx : float
spatial grid spacing
dt : float
time step length
out_f : ndarray
resultant value of f
out_dfdx : ndarray
resultant value of dfdx
Returns
--------------------
out_f : ndarray
output value of f
out_dfdx : ndarray
output value of dfdx
"""
if out_f is None:
out_f = np.zeros(f.shape)
if out_dfdx is None:
out_dfdx = np.zeros(f.shape)
up = up_id[core]
# advection phase
D = -np.where(u[core] > 0.0, 1.0, -1.0) * dx
xi = -u * dt
BB = np.zeros(D.shape)
S = (f[up] - f[core]) / D
dz_index = (S - dfdx[core]) * (dfdx[up] - S) > 0.0
BB[dz_index] = (
np.abs(
(S[dz_index] - dfdx[core][dz_index]) /
(dfdx[up][dz_index] - S[dz_index])
)
- 1.0
) / D[dz_index]
a = (dfdx[core] - S + (dfdx[up] - S) * (1.0 + BB * D)) / (D * D)
b = S * BB + (S - dfdx[core]) / D - a * D
c = dfdx[core] + f[core] * BB
out_f[core] = (((a * xi[core] + b) * xi[core] + c) * xi[core] + f[core]) / (
1.0 + BB * xi[core]
)
out_dfdx[core] = ((3.0 * a * xi[core] + 2.0 * b) * xi[core] + c) / (
1.0 + BB * xi[core]
) - out_f[core] * BB / (1.0 + BB * xi[core])
return out_f, out_dfdx
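# Note on the rational (R-CIP) variant above: BB stays zero wherever the switch
# condition (S - dfdx[core]) * (dfdx[up] - S) > 0.0 is not met, and in that case
# the coefficients reduce exactly to the standard cubic CIP interpolation used
# in cip_1d_advection; the rational correction is intended to suppress the
# over- and undershoots the plain cubic can produce near steep fronts.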
def rcip_2d_M_advection(
f,
dfdx,
dfdy,
u,
v,
core,
h_up,
v_up,
dx,
dt,
out_f=None,
out_dfdx=None,
out_dfdy=None,
):
"""Calculate one time step using M-type 2D cip method
"""
# First, the variables out and temp are allocated to
# store the calculation results
if out_f is None:
out_f = np.empty(f.shape)
if out_dfdx is None:
out_dfdx = np.empty(dfdx.shape)
if out_dfdy is None:
out_dfdy = np.empty(dfdy.shape)
# 1st step for horizontal advection
rcip_1d_advection(f, dfdx, u, core, h_up, dx, dt,
out_f=out_f, out_dfdx=out_dfdx)
D_x = -np.where(u > 0.0, 1.0, -1.0) * dx
xi_x = -u * dt
out_dfdy[core] = dfdy[core] - xi_x[core] / D_x[core] * (
dfdy[core] - dfdy[h_up[core]]
)
# 2nd step for vertical advection
rcip_1d_advection(
out_f, dfdy, v, core, v_up, dx, dt, out_f=out_f, out_dfdx=out_dfdy
)
D_y = -np.where(v > 0.0, 1.0, -1.0) * dx
xi_y = -v * dt
out_dfdx[core] = out_dfdx[core] - xi_y[core] / D_y[core] * (
out_dfdx[core] - out_dfdx[v_up[core]]
)
return out_f, out_dfdx, out_dfdy
def shock_dissipation(
f, p, core, north_id, south_id, east_id, west_id, dt, kappa2, kappa4, out=None,
):
""" adding artificial viscosity for numerical stability
Parameters
------------------
f : ndarray, float
parameter for which the artificial viscosity is applied
    p : ndarray, float
        variable used to detect shocks (typically pressure or flow depth)
    core : ndarray, int
        indices of core nodes or links
    north_id : ndarray, int
        indices of nodes or links that locate north of core
    south_id : ndarray, int
        indices of nodes or links that locate south of core
    east_id : ndarray, int
        indices of nodes or links that locate east of core
    west_id : ndarray, int
        indices of nodes or links that locate west of core
    dt : float
        time step length
    kappa2 : float
        coefficient of the second-order artificial viscosity
    kappa4 : float
        coefficient of the fourth-order terms (currently disabled in the code)
"""
n = f.shape[0]
if out is None:
out = np.zeros(n)
    nu_i = np.zeros(n, dtype=float)
    nu_j = np.zeros(n, dtype=float)
    eps_i_half2 = np.zeros(n, dtype=float)
    # eps_i_half4 = np.zeros(n, dtype=float)
    eps_j_half2 = np.zeros(n, dtype=float)
    # eps_j_half4 = np.zeros(n, dtype=float)
    d_i_half = np.zeros(n, dtype=float)
    d_j_half = np.zeros(n, dtype=float)
north = north_id[core]
south = south_id[core]
east = east_id[core]
west = west_id[core]
# easteast = east_id[east]
# northnorth = north_id[north]
# First, artificial diffusion is applied to east-west direction
nu_i[core] = np.abs(p[east] - 2 * p[core] + p[west]) / (
np.abs(p[east]) + 2 * np.abs(p[core]) + np.abs(p[west]) + 10 ** -20
)
eps_i_half2[core] = kappa2 * np.max([nu_i[east], nu_i[core]], axis=0)
# eps_i_half4[core] = np.max(
# [np.zeros_like(core), kappa4 - eps_i_half2[core]], axis=0)
# d_i_half[core] = eps_i_half2[core] * (
# f[east] - f[core]) - eps_i_half4[core] * (f[easteast] - 3.0 * f[east] +
# 3.0 * f[core] - f[west])
d_i_half[core] = eps_i_half2[core] * (f[east] - f[core])
# Next, artificial diffusion is applied to north-south direction
nu_j[core] = np.abs(p[north] - 2 * p[core] + p[south]) / (
np.abs(p[north]) + 2 * np.abs(p[core]) + np.abs(p[south]) + 10 ** -20
)
eps_j_half2[core] = kappa2 * np.max([nu_j[north], nu_j[core]], axis=0)
# eps_j_half4[core] = np.max(
# [np.zeros_like(core), kappa4 - eps_j_half2[core]], axis=0)
# d_j_half[core] = eps_j_half2[core] * (f[north] - f[core]) - eps_j_half4[
# core] * (f[northnorth] - 3.0 * f[north] + 3.0 * f[core] - f[south])
d_j_half[core] = eps_j_half2[core] * (f[north] - f[core])
# apply artificial diffusion
out[core] = (
f[core] + d_i_half[core] - d_i_half[west] +
d_j_half[core] - d_j_half[south]
)
return out
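# Usage note (inferred from the code above, not from original documentation):
# only the second-order smoothing controlled by kappa2 is active; the
# fourth-order terms that would use kappa4 are commented out, so kappa4 is
# currently accepted but ignored. A typical call therefore looks like
#
#   h_smoothed = shock_dissipation(h, p, core, north, south, east, west,
#                                  dt, kappa2=0.2, kappa4=0.001)
#
# where the variable names and kappa values are illustrative only.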
def update_gradient(
f,
f_new,
dfdx,
dfdy,
core,
north,
south,
east,
west,
dx,
dt,
out_dfdx=None,
out_dfdy=None,
):
"""Update gradients when main variables are updated
"""
    if out_dfdx is None:
        out_dfdx = np.zeros(dfdx.shape[0], dtype=float)
    if out_dfdy is None:
        out_dfdy = np.zeros(dfdy.shape[0], dtype=float)
# non-advection term
out_dfdx[core] = dfdx[core] + (
(f_new[east] - f[east]) - (f_new[west] - f[west])
) / (2 * dx)
    out_dfdy[core] = dfdy[core] + (
        (f_new[north] - f[north]) - (f_new[south] - f[south])
    ) / (2 * dx)
    return out_dfdx, out_dfdy
def update_gradient2(
f,
dfdx,
dfdy,
u,
v,
core,
north,
south,
east,
west,
dx,
dt,
out_dfdx=None,
out_dfdy=None,
):
"""Update gradients when main variables are updated
"""
    if out_dfdx is None:
        out_dfdx = np.zeros(dfdx.shape[0], dtype=float)
    if out_dfdy is None:
        out_dfdy = np.zeros(dfdy.shape[0], dtype=float)
# non-advection term
out_dfdx[core] = (
dfdx[core]
- (
(f[east] - f[west]) * (u[east] - u[west]) / (2 * dx) ** 2
+ (f[north] - f[south]) * (v[east] - v[west]) / (2 * dx) ** 2
)
* dt
)
out_dfdy[core] = (
dfdy[core]
- (
(f[east] - f[west]) * (u[north] - u[south]) / (2 * dx) ** 2
+ (f[north] - f[south]) * (v[north] - v[south]) / (2 * dx) ** 2
)
* dt
    )
    return out_dfdx, out_dfdy
class CIP2D:
""" CIP Direct 2D scheme
parameters
---------------------
max_number_of_grids : int
maximum number of grids that may be used in this solver
"""
def __init__(self, max_number_of_grids):
self.XX = np.empty(max_number_of_grids, dtype=float)
self.YY = np.empty(max_number_of_grids, dtype=float)
self.Ddx = np.empty(max_number_of_grids, dtype=float)
self.Ddy = np.empty(max_number_of_grids, dtype=float)
self.xup = np.empty(max_number_of_grids, dtype=int)
self.yup = np.empty(max_number_of_grids, dtype=int)
self.xyup = np.empty(max_number_of_grids, dtype=int)
self.C30 = np.empty(max_number_of_grids, dtype=float)
self.C20 = np.empty(max_number_of_grids, dtype=float)
self.C03 = np.empty(max_number_of_grids, dtype=float)
self.C02 = np.empty(max_number_of_grids, dtype=float)
self.tmp = np.empty(max_number_of_grids, dtype=float)
self.tmq = np.empty(max_number_of_grids, dtype=float)
self.C12 = np.empty(max_number_of_grids, dtype=float)
self.C21 = np.empty(max_number_of_grids, dtype=float)
self.C11 = np.empty(max_number_of_grids, dtype=float)
def run(
self,
f,
dfdx,
dfdy,
u,
v,
core,
h_up,
v_up,
dx,
dt,
out_f=None,
out_dfdx=None,
out_dfdy=None,
):
"""run this solver to calculate advection transport of a variable f
parameters
----------------------
f : ndarray, float
A variable to calculate
"""
if out_f is None:
out_f = np.zeros_like(f)
if out_dfdx is None:
out_dfdx = np.zeros_like(f)
if out_dfdy is None:
out_dfdy = np.zeros_like(f)
XX = self.XX
YY = self.YY
Ddx = self.Ddx
Ddy = self.Ddy
xup = self.xup
yup = self.yup
xyup = self.xyup
XX[core] = -u[core] * dt
YY[core] = -v[core] * dt
Ddx[core] = np.where(u[core] > 0.0, 1.0, -1.0) * dx
Ddy[core] = np.where(v[core] > 0.0, 1.0, -1.0) * dx
xup[core] = h_up[core]
yup[core] = v_up[core]
xyup[core] = v_up[h_up[core]]
tmp = self.tmp
tmq = self.tmq
self.C30[core] = (
(dfdx[xup[core]] + dfdx[core]) *
Ddx[core] - 2.0 * (f[core] - f[xup[core]])
) / (Ddx[core] * Ddx[core] * Ddx[core])
self.C20[core] = (
3.0 * (f[xup[core]] - f[core])
+ (dfdx[xup[core]] + 2.0 * dfdx[core]) * Ddx[core]
) / (Ddx[core] * Ddx[core])
self.C03[core] = (
(dfdy[yup[core]] + dfdy[core]) *
Ddy[core] - 2.0 * (f[core] - f[yup[core]])
) / (Ddy[core] * Ddy[core] * Ddy[core])
self.C02[core] = (
3.0 * (f[yup[core]] - f[core])
+ (dfdy[yup[core]] + 2.0 * dfdy[core]) * Ddy[core]
) / (Ddy[core] * Ddy[core])
self.tmp[core] = f[core] - f[yup[core]] - f[xup[core]] + f[xyup[core]]
self.tmq[core] = dfdy[xup[core]] - dfdy[core]
self.C12[core] = (-tmp[core] - tmq[core] * Ddy[core]) / (
Ddx[core] * Ddy[core] * Ddy[core]
)
self.C21[core] = (-tmp[core] - (dfdx[yup[core]] - dfdx[core]) * Ddx[core]) / (
Ddx[core] * Ddx[core] * Ddy[core]
)
self.C11[core] = (-tmq[core] + self.C21[core] * Ddx[core] * Ddx[core]) / (
Ddx[core]
)
out_f[core] = (
(
(self.C30[core] * XX[core] + self.C21[core]
* YY[core] + self.C20[core])
* XX[core]
+ self.C11[core] * YY[core]
+ dfdx[core]
)
* XX[core]
+ (
(self.C03[core] * YY[core] + self.C12[core]
* XX[core] + self.C02[core])
* YY[core]
+ dfdy[core]
)
* YY[core]
+ f[core]
)
out_dfdx[core] = (
(
3.0 * self.C30[core] * XX[core]
+ 2.0 * (self.C21[core] * YY[core] + self.C20[core])
)
* XX[core]
+ (self.C12[core] * YY[core] + self.C11[core]) * YY[core]
+ dfdx[core]
)
out_dfdy[core] = (
(
3.0 * self.C03[core] * YY[core]
+ 2.0 * (self.C12[core] * XX[core] + self.C02[core])
)
* YY[core]
+ (self.C21[core] * XX[core] + self.C11[core]) * XX[core]
+ dfdy[core]
)
return out_f, out_dfdx, out_dfdy
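# Possible usage of CIP2D (a sketch; the original package wires this up
# elsewhere): the work arrays are allocated once, so a single instance can be
# reused every time step,
#
#   solver = CIP2D(max_number_of_grids=f.size)
#   f, dfdx, dfdy = solver.run(f, dfdx, dfdy, u, v, core, h_up, v_up, dx, dt,
#                              out_f=f.copy(), out_dfdx=dfdx.copy(),
#                              out_dfdy=dfdy.copy())
#
# Passing copies as the out_* arrays keeps boundary values, since run() only
# updates the entries listed in core.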
def rcip_2d_advection(
f,
dfdx,
dfdy,
u,
v,
core,
h_up,
v_up,
dx,
dt,
out_f=None,
out_dfdx=None,
out_dfdy=None,
):
"""Direct 2D calculation of advection by R-CIP method
"""
if out_f is None:
out_f = np.zeros(f.shape[0])
if out_dfdx is None:
out_dfdx = np.zeros(f.shape[0])
if out_dfdy is None:
out_dfdy = np.zeros(f.shape[0])
XX = -u[core] * dt
YY = -v[core] * dt
Ddx = np.where(u[core] > 0.0, -1.0, 1.0) * dx
Ddy = np.where(v[core] > 0.0, -1.0, 1.0) * dx
xup = h_up[core]
yup = v_up[core]
xyup = (v_up[h_up])[core]
a01 = np.zeros(core.shape[0])
a10 = np.zeros(core.shape[0])
b01 = np.zeros(core.shape[0])
b10 = np.zeros(core.shape[0])
Sx = (f[xup] - f[core]) / Ddx
Sy = (f[yup] - f[core]) / Ddy
a10 = np.where(dfdx[core] * dfdx[xup] < 0, 1.0, 0.0)
a01 = np.where(dfdy[core] * dfdy[yup] < 0, 1.0, 0.0)
b10 = (np.abs((Sx - dfdx[core]) /
(dfdx[xup] - Sx + 1.0 * 10 ** -10)) - 1) / Ddx
b01 = (np.abs((Sy - dfdy[core]) /
(dfdy[yup] - Sy + 1.0 * 10 ** -10)) - 1) / Ddy
C00 = f[core]
C10 = dfdx[core] + a10 * b10 * C00
C01 = dfdy[core] + a01 * b01 * C00
# C30 = ((dfdx[xup] + dfdx[core]) * Ddx - 2.0 *
# (f[core] - f[xup])) / (Ddx * Ddx * Ddx)
C30 = ((1 + a10 * b10 * Ddx) *
(dfdx[xup] - Sx) + dfdx[core] - Sx) / (Ddx * Ddx)
# C20 = (3.0 * (f[xup] - f[core]) +
# (dfdx[xup] + 2.0 * dfdx[core]) * Ddx) / (Ddx * Ddx)
C20 = ((1 + a10 * b10 * Ddx) * f[xup] -
C00 - C10 * Ddx) / (Ddx * Ddx) - C30 * Ddx
# C03 = ((dfdy[yup] + dfdy[core]) * Ddy - 2.0 *
# (f[core] - f[yup])) / (Ddy * Ddy * Ddy)
C03 = ((1 + a01 * b01 * Ddy) *
(dfdy[yup] - Sy) + dfdy[core] - Sy) / (Ddy * Ddy)
# C02 = (3.0 * (f[yup] - f[core]) +
# (dfdy[yup] + 2.0 * dfdy[core]) * Ddy) / (Ddy * Ddy)
C02 = ((1 + a01 * b01 * Ddy) * f[yup] -
C00 - C01 * Ddy) / (Ddy * Ddy) - C03 * Ddy
# tmp = f[core] - f[yup] - f[xup] + f[xyup]
# tmq = dfdy[xup] - dfdy[core]
C11 = (
(a01 * b01 * f[xup] + (1.0 + a10 * b10 * Ddx) * dfdy[xup]) / Ddx
+ (a10 * b10 * f[yup] + (1.0 + a01 * b01 * Ddy) * dfdx[yup]) / Ddy
+ (C00 - (1.0 + a10 * b10 * Ddx + a01 * b01 * Ddy)
* f[xyup]) / Ddx / Ddy
+ C30 * Ddx * Ddx / Ddy
+ C03 * Ddy * Ddy / Ddx
+ C20 * Ddx / Ddy
+ C02 * Ddy / Ddx
)
# C12 = (-tmp - tmq * Ddy) / (Ddx * Ddy * Ddy)
C12 = (a10 * b10 * f[yup] + (1 + a01 * b01 * Ddy) * dfdx[yup] - C10) / (
Ddy * Ddy
) - C11 / Ddy
# C21 = (-tmp - (dfdx[yup] - dfdx[core]) * Ddx) / (Ddx * Ddx * Ddy)
C21 = (a01 * b01 * f[xup] + (1 + a10 * b10 * Ddx) * dfdy[xup] - C01) / (
Ddx * Ddx
) - C11 / Ddx
# C11 = (-tmq + C21 * Ddx * Ddx) / (Ddx)
out_f[core] = (
((C30 * XX + C21 * YY + C20) * XX + C11 * YY + C10) * XX
+ ((C03 * YY + C12 * XX + C02) * YY + C01) * YY
+ C00
) / (1 + a10 * b10 * XX + a01 * b01 * YY)
out_dfdx[core] = (
(3.0 * C30 * XX + 2.0 * (C21 * YY + C20)) * XX
+ (C12 * YY + C11) * YY
+ C10
- a10 * b10 * out_f[core]
) / (1 + a10 * b10 * XX + a01 * b01 * YY)
out_dfdy[core] = (
(3.0 * C03 * YY + 2.0 * (C12 * XX + C02)) * YY
+ (C21 * XX + C11) * XX
+ C01
- a01 * b01 * out_f[core]
) / (1 + a10 * b10 * XX + a01 * b01 * YY)
return out_f, out_dfdx, out_dfdy
def cubic_interp_1d(f, dfdx, core, iplus, iminus, dx, out=None):
"""interpolate values to links or nodes by cubic function
    Interpolated values at the grid between "iplus" and "iminus" are returned.
Parameters
--------------------------
f : ndarray, float
values to be interpolated
dfdx : ndarray, float
spatial gradient of f
iplus : ndarray, int
grid id of (i + dx / 2)
iminus : ndarray, int
grid id of (i - dx / 2)
dx : ndarray, float
grid spacing
out : ndarray, float
interpolated values between grids of iplus and iminus
"""
if out is None:
out = np.empty(iplus.shape)
    # interpolation by cubic function
D_x = -dx
xi_x = -dx / 2.0
a = (dfdx[iplus] + dfdx[iminus]) / (D_x ** 2) + 2.0 * (f[iplus] - f[iminus]) / (
D_x ** 3
)
b = (
3.0 * (f[iminus] - f[iplus]) / (D_x ** 2)
- (2.0 * dfdx[iplus] + dfdx[iminus]) / D_x
)
out[core] = a * (xi_x ** 3) + b * (xi_x ** 2) + \
dfdx[iplus] * xi_x + f[iplus]
return out
def rcubic_interp_1d(f, dfdx, core, iplus, iminus, dx, out=None):
"""interpolate values to links or nodes by cubic function
    Interpolated values at the grid between "iplus" and "iminus" are returned.
Parameters
--------------------------
f : ndarray, float
values to be interpolated
dfdx : ndarray, float
spatial gradient of f
iplus : ndarray, int
grid id of (i + dx / 2)
iminus : ndarray, int
grid id of (i - dx / 2)
dx : ndarray, float
grid spacing
out : ndarray, float
interpolated values between grids of iplus and iminus
"""
if out is None:
out = np.zeros(f.shape)
# advection phase
D = -dx
xi = -dx / 2.0
BB = np.zeros(core.shape, dtype=float)
S = (f[iminus] - f[iplus]) / D
dz_index = (S - dfdx[iplus]) * (dfdx[iminus] - S) > 0.0
BB[dz_index] = (
np.abs(
(S[dz_index] - dfdx[iplus][dz_index])
/ (dfdx[iminus][dz_index] - S[dz_index])
)
- 1.0
) / D
a = (dfdx[iplus] - S + (dfdx[iminus] - S) * (1.0 + BB * D)) / (D ** 2)
b = S * BB + (S - dfdx[iplus]) / D - a * D
c = dfdx[iplus] + f[iplus] * BB
out[core] = (((a * xi + b) * xi + c) * xi + f[iplus]) / (1.0 + BB * xi)
# adjust negative values
negative_value = out[core] < 0
out[core][negative_value] = (
f[iplus][negative_value] + f[iminus][negative_value]
) / 2.0
return out
def forester_filter(
f, core, east_id, west_id, north_id, south_id, nu_f=0.1, out_f=None,
):
""" Forester filter for removing negative values from Concentration and
Flow depth
"""
if out_f is None:
out_f = np.zeros_like(f)
out_f[:] = f[:]
east = east_id[core]
west = west_id[core]
north = north_id[core]
south = south_id[core]
out_f[core] += (
nu_f * (f[east] + f[west] + f[north] + f[south] - 4.0 * f[core]) / 4.0
)
out_f[east] -= nu_f * (f[east] - f[core]) / 4.0
out_f[west] -= nu_f * (f[west] - f[core]) / 4.0
out_f[north] -= nu_f * (f[north] - f[core]) / 4.0
out_f[south] -= nu_f * (f[south] - f[core]) / 4.0
return out_f
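# Note (derived from the update above, not original documentation): the amount
# added at a core node equals the total removed from its four neighbours, so
# the filter redistributes f locally without changing the overall sum (up to
# overlapping indices); nu_f controls how aggressively negative spikes are
# smeared out, e.g.
#
#   C = forester_filter(C, core, east, west, north, south, nu_f=0.1)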
class Jameson:
""" Jameson filter for smoothing the variables
Parameters
-------------------
    tc: TurbidityCurrent2D
Instance of TurbidityCurrent2D to obtain necessary parameters
"""
def __init__(self, tc):
self.number_of_nodes = tc.grid.number_of_nodes
self.number_of_links = tc.grid.number_of_links
self.node_east = tc.node_east
self.node_west = tc.node_west
self.node_north = tc.node_north
self.node_south = tc.node_south
self.link_east = tc.link_east
self.link_west = tc.link_west
self.link_north = tc.link_north
self.link_south = tc.link_south
self.link_horiz = tc.grid.horizontal_links
self.link_vert = tc.grid.vertical_links
self.east_node_at_horizontal_link = tc.east_node_at_horizontal_link
self.west_node_at_horizontal_link = tc.west_node_at_horizontal_link
self.north_node_at_vertical_link = tc.north_node_at_vertical_link
self.south_node_at_vertical_link = tc.south_node_at_vertical_link
self.east_link_at_node = tc.east_link_at_node
self.west_link_at_node = tc.west_link_at_node
self.north_link_at_node = tc.north_link_at_node
self.south_link_at_node = tc.south_link_at_node
        self.kappa = tc.kappa
self.nu_x = np.zeros(self.number_of_nodes)
self.nu_y = np.zeros(self.number_of_nodes)
self.nu_x_link = np.zeros(self.number_of_links)
self.nu_y_link = np.zeros(self.number_of_links)
self.eps_link = np.zeros(self.number_of_links)
self.eps_node_horiz = np.zeros(self.number_of_nodes)
self.eps_node_vert = np.zeros(self.number_of_nodes)
def update_artificial_viscosity(self, p, p_link):
""" update artificial viscosity at nodes (nu) and links (eps)
        parameters
-------------------
p : ndarray, float
pressure at nodes
p_link : ndarray, float
pressure at links
"""
# artificial viscosity coefficient at links
self.nu_x_link[self.link_horiz] = np.abs(
p_link[self.link_east[self.link_horiz]]
- 2 * p_link[self.link_horiz]
+ p_link[self.link_west[self.link_horiz]]
) / (
p_link[self.link_east[self.link_horiz]]
+ 2 * p_link[self.link_horiz]
+ p_link[self.link_west[self.link_horiz]]
+ 10 ** -20
)
self.nu_y_link[self.link_vert] = np.abs(
p_link[self.link_north[self.link_vert]]
- 2 * p_link[self.link_vert]
+ p_link[self.link_south[self.link_vert]]
) / (
p_link[self.link_north[self.link_vert]]
+ 2 * p_link[self.link_vert]
+ p_link[self.link_south[self.link_vert]]
+ 10 ** -20
)
# artificial viscosity coefficient at nodes
self.nu_x[:] = np.abs(
p[self.node_east]
+ p[self.node_north]
+ p[self.node_south]
+ p[self.node_west]
- 4 * p
) / (
p[self.node_east]
+ p[self.node_west]
+ p[self.node_north]
+ p[self.node_south]
+ 4 * p
+ 10 ** -20
)
self.nu_y[:] = self.nu_x[:]
# maximum artificial viscosity coefficient at links
self.eps_link[self.link_horiz] = self.kappa * np.max(
[
self.nu_x[self.east_node_at_horizontal_link[self.link_horiz]],
self.nu_x[self.west_node_at_horizontal_link[self.link_horiz]],
],
axis=0,
)
self.eps_link[self.link_vert] = self.kappa * np.max(
[
self.nu_y[self.north_node_at_vertical_link[self.link_vert]],
self.nu_y[self.south_node_at_vertical_link[self.link_vert]],
],
axis=0,
)
# maximum artificial viscosity coefficient at nodes
self.eps_node_horiz[:] = (
0.01
* self.kappa
* np.max(
[
self.nu_x_link[self.east_link_at_node],
self.nu_x_link[self.west_link_at_node],
],
axis=0,
)
)
self.eps_node_vert[:] = (
0.01
* self.kappa
* np.max(
[
self.nu_y_link[self.north_link_at_node],
self.nu_y_link[self.south_link_at_node],
],
axis=0,
)
)
def run(self, f, core, at="node", out=None):
""" run one step of the Jameson filter
        parameters
--------------------
f : ndarray, float
variables to be filtered
core : ndarray, int
grid ids to apply the filter
at : String, optional
'node', 'hlink', or 'vlink' for values on nodes,
horizontal links or vertical links
out : ndarray, float
output
returns
--------------------
out : ndarray, float
filtered variables
"""
if out is None:
out = np.zeros_like(f)
if at == "node":
out[core] = (
f[core]
+ self.eps_link[self.east_link_at_node[core]]
* (f[self.node_east[core]] - f[core])
- self.eps_link[self.west_link_at_node[core]]
* (f[core] - f[self.node_west[core]])
+ self.eps_link[self.north_link_at_node[core]]
* (f[self.node_north[core]] - f[core])
- self.eps_link[self.south_link_at_node[core]]
* (f[core] - f[self.node_south[core]])
)
if at == "hlink":
out[core] = (
f[core]
+ self.eps_node_horiz[self.east_node_at_horizontal_link[core]]
* (f[self.link_east[core]] - f[core])
                - self.eps_node_horiz[self.west_node_at_horizontal_link[core]]
                * (f[core] - f[self.link_west[core]])
)
if at == "vlink":
out[core] = (
f[core]
+ self.eps_node_vert[self.north_node_at_vertical_link[core]]
* (f[self.link_north[core]] - f[core])
                - self.eps_node_vert[self.north_node_at_vertical_link[core]]
                * (f[core] - f[self.link_south[core]])
)
return out
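# Expected calling order (an assumption based on the method signatures):
# update_artificial_viscosity(p, p_link) refreshes eps_link and the
# eps_node_* arrays from the current pressure fields, and run() then applies
# the smoothing, e.g. once per time step:
#
#   jameson.update_artificial_viscosity(p, p_link)
#   h = jameson.run(h, core_nodes, at="node")
#   u = jameson.run(u, horizontal_active_links, at="hlink")
#   v = jameson.run(v, vertical_active_links, at="vlink")
#
# where jameson, p, p_link, h, u, v and the index arrays are illustrative names.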
class SOR:
"""SOR method to solve inverse matrix
"""
def __init__(
self,
number_of_nodes,
node_east,
node_west,
node_north,
node_south,
implicit_threshold,
max_loop,
alpha,
update_boundary_conditions,
):
self.implicit_threshold = implicit_threshold
self.max_loop = max_loop
self.alpha = alpha
self.update_boundary_conditions = update_boundary_conditions
self.node_east = node_east
self.node_west = node_west
self.node_north = node_north
self.node_south = node_south
self.a = np.empty(number_of_nodes)
self.b = np.empty(number_of_nodes)
self.c = np.empty(number_of_nodes)
self.d = np.empty(number_of_nodes)
self.e = np.empty(number_of_nodes)
self.g = np.empty(number_of_nodes)
self.w = np.empty(number_of_nodes)
def run(self, p, core, out=None):
if out is None:
out = np.zeros_like(p)
out[:] = p[:]
err = 100.0
count = 0
core_size = core.shape[0]
while err > self.implicit_threshold:
self.w[core] = (
self.g[core]
- self.b[core] * out[self.node_east[core]]
- self.c[core] * out[self.node_west[core]]
- self.d[core] * out[self.node_north[core]]
- self.e[core] * out[self.node_south[core]]
) / self.a[core]
err = np.linalg.norm(self.w[core] - out[core]) / (
core_size + 1.0 * 10 ** -20
)
out[core] = out[core] * (1 - self.alpha) + \
self.alpha * self.w[core]
self.update_boundary_conditions(p=out)
count += 1
if count == self.max_loop:
print("Implicit calculation did not converge")
return out
return out
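if __name__ == "__main__":
    # Minimal self-check of cip_1d_advection (an illustrative sketch, not part
    # of the original turb2d module): advect a Gaussian bump with a uniform
    # rightward velocity and report where the peak ends up. All values below
    # (grid size, dt, number of steps) are arbitrary choices for the demo.
    n = 100
    dx = 1.0
    dt = 0.2
    x = np.arange(n) * dx
    f = np.exp(-((x - 30.0) ** 2) / 50.0)
    dfdx = np.gradient(f, dx)
    u = np.ones(n)
    core = np.arange(1, n - 1)
    up_id = np.arange(n) - 1  # upstream neighbour for u > 0
    up_id[0] = 0
    for _ in range(100):
        f, dfdx = cip_1d_advection(f, dfdx, u, core, up_id, dx, dt,
                                   out_f=f.copy(), out_dfdx=dfdx.copy())
    # after 100 steps the peak should sit near x = 30 + 100 * u * dt = 50
    print("peak location:", x[np.argmax(f)])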
cortex/secondary/entropy.py | dcurrey88/LAMP-cortex | 0 | 6632180 | <gh_stars>0
import math

from ..feature_types import secondary_feature, log
from ..primary.significant_locations import significant_locations
MS_IN_A_DAY = 86400000
@secondary_feature(
name='cortex.feature.entropy',
dependencies=[significant_locations]
)
def entropy(resolution=MS_IN_A_DAY, **kwargs):
"""
Calculate entropy
"""
#log.info(f'Loading significant locations data...')
_significant_locations = significant_locations(id=kwargs['id'], start=kwargs['start'], end=kwargs['end'])
    if len(_significant_locations['data']) == 0:
        _entropy = None
    else:
        #log.info(f'Computing entropy...')
        _entropy = sum([loc['proportion'] * math.log(loc['proportion'])
                        for loc in _significant_locations['data']])
        if _entropy == 0: #no sig locs
            _entropy = None
return {'timetamp':kwargs['start'], 'entropy': _entropy}
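# Worked example (illustrative, not from the original module): with two
# significant locations visited 80% and 20% of the time, the value computed
# above is 0.8*ln(0.8) + 0.2*ln(0.2), which is about -0.50. As written, the
# sum p*ln(p) is always <= 0 (the negative of the Shannon entropy), and it is
# exactly 0 only when a single location accounts for everything, which is why
# a value of 0 is mapped to None.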
| from ..feature_types import secondary_feature, log
from ..primary.significant_locations import significant_locations
MS_IN_A_DAY = 86400000
@secondary_feature(
name='cortex.feature.entropy',
dependencies=[significant_locations]
)
def entropy(resolution=MS_IN_A_DAY, **kwargs):
"""
Calculate entropy
"""
#log.info(f'Loading significant locations data...')
_significant_locations = significant_locations(id=kwargs['id'], start=kwargs['start'], end=kwargs['end'])
if len(_significant_locations['data']) == 0: #no sig locs
return {'timestamp': kwargs['start'], 'entropy': None}
#log.info(f'Computing entropy...')
_entropy = sum([loc['proportion'] * math.log(loc['proportion']) for loc in _significant_locations['data']])
if _entropy == 0: #no sig locs
_entropy = None
return {'timestamp': kwargs['start'], 'entropy': _entropy} | en | 0.340194 | Calculate entropy #log.info(f'Loading significant locations data...') #log.info(f'Computing entropy...') #no sig locs | 2.570448 | 3 |
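The feature above boils down to a Shannon-style entropy over the fraction of time spent at each significant location. A minimal standalone sketch of that idea, assuming each location dict carries a `proportion` field summing to 1 (illustrative names, not the library's API):

```python
import math

def location_entropy(locations):
    """Entropy of time spent across significant locations; None if there are none."""
    proportions = [loc["proportion"] for loc in locations if loc["proportion"] > 0]
    if not proportions:
        return None
    return sum(p * math.log(p) for p in proportions)

print(location_entropy([{"proportion": 0.7}, {"proportion": 0.3}]))  # ~ -0.61
```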
mne/io/artemis123/tests/test_artemis123.py | slew/mne-python | 0 | 6632181 |
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest
from mne.io import read_raw_artemis123
from mne.io.tests.test_raw import _test_raw_reader
from mne.datasets import testing
from mne.io.artemis123.utils import _generate_mne_locs_file, _load_mne_locs
from mne import pick_types
from mne.transforms import rot_to_quat, _angle_between_quats
from mne.io.constants import FIFF
artemis123_dir = op.join(testing.data_path(download=False), 'ARTEMIS123')
short_HPI_dip_fname = op.join(artemis123_dir,
'Artemis_Data_2017-04-04-15h-44m-' +
'22s_Motion_Translation-z.bin')
dig_fname = op.join(artemis123_dir, 'Phantom_040417_dig.pos')
short_hpi_1kz_fname = op.join(artemis123_dir, 'Artemis_Data_2017-04-14-10h' +
'-38m-59s_Phantom_1k_HPI_1s.bin')
# XXX this tol is way too high, but it's not clear which is correct
# (old or new)
def _assert_trans(actual, desired, dist_tol=0.017, angle_tol=5.):
__tracebackhide__ = True
trans_est = actual[0:3, 3]
quat_est = rot_to_quat(actual[0:3, 0:3])
trans = desired[0:3, 3]
quat = rot_to_quat(desired[0:3, 0:3])
angle = np.rad2deg(_angle_between_quats(quat_est, quat))
dist = np.linalg.norm(trans - trans_est)
assert dist <= dist_tol, \
'%0.3f > %0.3f mm translation' % (1000 * dist, 1000 * dist_tol)
assert angle <= angle_tol, \
'%0.3f > %0.3f° rotation' % (angle, angle_tol)
@pytest.mark.timeout(60) # ~25 sec on Travis Linux OpenBLAS
@testing.requires_testing_data
def test_artemis_reader():
"""Test reading raw Artemis123 files."""
_test_raw_reader(read_raw_artemis123, input_fname=short_hpi_1kz_fname,
pos_fname=dig_fname, verbose='error')
@pytest.mark.timeout(60)
def test_dev_head_t():
"""Test dev_head_t computation for Artemis123."""
# test a random selected point
raw = read_raw_artemis123(short_hpi_1kz_fname, preload=True,
add_head_trans=False)
meg_picks = pick_types(raw.info, meg=True, eeg=False)
# checked against matlab reader.
assert_allclose(raw[meg_picks[12]][0][0][123], 1.08239606023e-11)
dev_head_t_1 = np.array([[9.713e-01, 2.340e-01, -4.164e-02, 1.302e-04],
[-2.371e-01, 9.664e-01, -9.890e-02, 1.977e-03],
[1.710e-02, 1.059e-01, 9.942e-01, -8.159e-03],
[0.0, 0.0, 0.0, 1.0]])
dev_head_t_2 = np.array([[9.890e-01, 1.475e-01, -8.090e-03, 4.997e-04],
[-1.476e-01, 9.846e-01, -9.389e-02, 1.962e-03],
[-5.888e-03, 9.406e-02, 9.955e-01, -1.610e-02],
[0.0, 0.0, 0.0, 1.0]])
expected_dev_hpi_rr = np.array([[-0.01579644, 0.06527367, 0.00152648],
[0.06666813, 0.0148956, 0.00545488],
[-0.06699212, -0.01732376, 0.0112027]])
# test with head loc no digitization
raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1)
assert_equal(raw.info['sfreq'], 5000.0)
# test with head loc and digitization
with pytest.warns(RuntimeWarning, match='Large difference'):
raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True,
pos_fname=dig_fname)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1)
# test cHPI localization..
dev_hpi_rr = np.array([p['r'] for p in raw.info['dig']
if p['coord_frame'] == FIFF.FIFFV_COORD_DEVICE])
# points should be within 0.1 mm (1e-4m) and within 1%
assert_allclose(dev_hpi_rr, expected_dev_hpi_rr, atol=1e-4, rtol=0.01)
# test 1kz hpi head loc (different freq)
raw = read_raw_artemis123(short_hpi_1kz_fname, add_head_trans=True)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_2)
assert_equal(raw.info['sfreq'], 1000.0)
def test_utils(tmpdir):
"""Test artemis123 utils."""
# make a tempfile
tmp_dir = str(tmpdir)
tmp_fname = op.join(tmp_dir, 'test_gen_mne_locs.csv')
_generate_mne_locs_file(tmp_fname)
installed_locs = _load_mne_locs()
generated_locs = _load_mne_locs(tmp_fname)
assert_equal(set(installed_locs.keys()), set(generated_locs.keys()))
for key in installed_locs.keys():
assert_allclose(installed_locs[key], generated_locs[key], atol=1e-7)
|
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest
from mne.io import read_raw_artemis123
from mne.io.tests.test_raw import _test_raw_reader
from mne.datasets import testing
from mne.io.artemis123.utils import _generate_mne_locs_file, _load_mne_locs
from mne import pick_types
from mne.transforms import rot_to_quat, _angle_between_quats
from mne.io.constants import FIFF
artemis123_dir = op.join(testing.data_path(download=False), 'ARTEMIS123')
short_HPI_dip_fname = op.join(artemis123_dir,
'Artemis_Data_2017-04-04-15h-44m-' +
'22s_Motion_Translation-z.bin')
dig_fname = op.join(artemis123_dir, 'Phantom_040417_dig.pos')
short_hpi_1kz_fname = op.join(artemis123_dir, 'Artemis_Data_2017-04-14-10h' +
'-38m-59s_Phantom_1k_HPI_1s.bin')
# XXX this tol is way too high, but it's not clear which is correct
# (old or new)
def _assert_trans(actual, desired, dist_tol=0.017, angle_tol=5.):
__tracebackhide__ = True
trans_est = actual[0:3, 3]
quat_est = rot_to_quat(actual[0:3, 0:3])
trans = desired[0:3, 3]
quat = rot_to_quat(desired[0:3, 0:3])
angle = np.rad2deg(_angle_between_quats(quat_est, quat))
dist = np.linalg.norm(trans - trans_est)
assert dist <= dist_tol, \
'%0.3f > %0.3f mm translation' % (1000 * dist, 1000 * dist_tol)
assert angle <= angle_tol, \
'%0.3f > %0.3f° rotation' % (angle, angle_tol)
@pytest.mark.timeout(60) # ~25 sec on Travis Linux OpenBLAS
@testing.requires_testing_data
def test_artemis_reader():
"""Test reading raw Artemis123 files."""
_test_raw_reader(read_raw_artemis123, input_fname=short_hpi_1kz_fname,
pos_fname=dig_fname, verbose='error')
@pytest.mark.timeout(60)
def test_dev_head_t():
"""Test dev_head_t computation for Artemis123."""
# test a random selected point
raw = read_raw_artemis123(short_hpi_1kz_fname, preload=True,
add_head_trans=False)
meg_picks = pick_types(raw.info, meg=True, eeg=False)
# checked against matlab reader.
assert_allclose(raw[meg_picks[12]][0][0][123], 1.08239606023e-11)
dev_head_t_1 = np.array([[9.713e-01, 2.340e-01, -4.164e-02, 1.302e-04],
[-2.371e-01, 9.664e-01, -9.890e-02, 1.977e-03],
[1.710e-02, 1.059e-01, 9.942e-01, -8.159e-03],
[0.0, 0.0, 0.0, 1.0]])
dev_head_t_2 = np.array([[9.890e-01, 1.475e-01, -8.090e-03, 4.997e-04],
[-1.476e-01, 9.846e-01, -9.389e-02, 1.962e-03],
[-5.888e-03, 9.406e-02, 9.955e-01, -1.610e-02],
[0.0, 0.0, 0.0, 1.0]])
expected_dev_hpi_rr = np.array([[-0.01579644, 0.06527367, 0.00152648],
[0.06666813, 0.0148956, 0.00545488],
[-0.06699212, -0.01732376, 0.0112027]])
# test with head loc no digitization
raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1)
assert_equal(raw.info['sfreq'], 5000.0)
# test with head loc and digitization
with pytest.warns(RuntimeWarning, match='Large difference'):
raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True,
pos_fname=dig_fname)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1)
# test cHPI localization..
dev_hpi_rr = np.array([p['r'] for p in raw.info['dig']
if p['coord_frame'] == FIFF.FIFFV_COORD_DEVICE])
# points should be within 0.1 mm (1e-4m) and within 1%
assert_allclose(dev_hpi_rr, expected_dev_hpi_rr, atol=1e-4, rtol=0.01)
# test 1kz hpi head loc (different freq)
raw = read_raw_artemis123(short_hpi_1kz_fname, add_head_trans=True)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_2)
assert_equal(raw.info['sfreq'], 1000.0)
def test_utils(tmpdir):
"""Test artemis123 utils."""
# make a tempfile
tmp_dir = str(tmpdir)
tmp_fname = op.join(tmp_dir, 'test_gen_mne_locs.csv')
_generate_mne_locs_file(tmp_fname)
installed_locs = _load_mne_locs()
generated_locs = _load_mne_locs(tmp_fname)
assert_equal(set(installed_locs.keys()), set(generated_locs.keys()))
for key in installed_locs.keys():
assert_allclose(installed_locs[key], generated_locs[key], atol=1e-7)
| en | 0.813608 | # Author: <NAME> <<EMAIL>> # # License: BSD (3-clause) # XXX this tol is way too high, but it's not clear which is correct # (old or new) # ~25 sec on Travis Linux OpenBLAS Test reading raw Artemis123 files. Test dev_head_t computation for Artemis123. # test a random selected point # checked against matlab reader. # test with head loc no digitization # test with head loc and digitization # test cHPI localization.. # points should be within 0.1 mm (1e-4m) and within 1% # test 1kz hpi head loc (different freq) Test artemis123 utils. # make a tempfile | 1.766914 | 2 |
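The `_assert_trans` helper above compares a device-to-head transform against an expected one by translation distance and residual rotation angle. A rough standalone equivalent using SciPy's rotation utilities instead of MNE's quaternion helpers (a sketch under that assumption, not the project's code):

```python
import numpy as np
from scipy.spatial.transform import Rotation

def trans_close(actual, desired, dist_tol=0.017, angle_tol=5.0):
    """True if two 4x4 transforms agree within the given tolerances."""
    dist = np.linalg.norm(actual[:3, 3] - desired[:3, 3])  # metres
    rel = Rotation.from_matrix(actual[:3, :3]) * Rotation.from_matrix(desired[:3, :3]).inv()
    angle = np.rad2deg(rel.magnitude())  # residual rotation in degrees
    return dist <= dist_tol and angle <= angle_tol

print(trans_close(np.eye(4), np.eye(4)))  # True
```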
server/athenian/api/controllers/datetime_utils.py | athenianco/athenian-api | 9 | 6632182 | from datetime import date, datetime, timedelta, timezone
from typing import List, Optional, Tuple, Union
from athenian.api.models.web import Granularity, InvalidRequestError
from athenian.api.response import ResponseError
def coarsen_time_interval(time_from: datetime, time_to: datetime) -> Tuple[date, date]:
"""Extend the time interval to align at the date boarders."""
assert time_to > time_from
zerotd = timedelta(0)
assert isinstance(time_from, datetime) and time_from.tzinfo.utcoffset(time_from) == zerotd
assert isinstance(time_to, datetime) and time_to.tzinfo.utcoffset(time_to) == zerotd
date_from = time_from.date()
date_to = time_to.date()
if time_to.time() != datetime.min.time():
date_to += timedelta(days=1)
return date_from, date_to
def split_to_time_intervals(date_from: date,
date_to: date,
granularities: Union[str, List[str]],
tzoffset: Optional[int],
) -> Tuple[Union[List[datetime], List[List[datetime]]], timedelta]:
"""Produce time interval boundaries from the min and the max dates and the interval lengths \
(granularities).
:param tzoffset: Time zone offset in minutes. We ignore DST for now.
:return: tuple with the time intervals and the timezone offset converted to timedelta. \
If `granularities` is a scalar, then return a list of boundaries, otherwise, return \
a list of lists.
"""
if date_to < date_from:
raise ResponseError(InvalidRequestError(
detail="date_from may not be greater than date_to",
pointer=".date_from",
))
tzoffset = timedelta(minutes=-tzoffset) if tzoffset is not None else timedelta(0)
def split(granularity: str, ptr: str) -> List[datetime]:
try:
intervals = Granularity.split(granularity, date_from, date_to)
except ValueError:
raise ResponseError(InvalidRequestError(
detail='granularity "%s" does not match /%s/' % (
granularity, Granularity.format.pattern),
pointer=ptr,
))
return [datetime.combine(i, datetime.min.time(), tzinfo=timezone.utc) + tzoffset
for i in intervals]
if isinstance(granularities, str):
return split(granularities, ".granularity"), tzoffset
return [split(g, ".granularities[%d]" % i) for i, g in enumerate(granularities)], tzoffset
| from datetime import date, datetime, timedelta, timezone
from typing import List, Optional, Tuple, Union
from athenian.api.models.web import Granularity, InvalidRequestError
from athenian.api.response import ResponseError
def coarsen_time_interval(time_from: datetime, time_to: datetime) -> Tuple[date, date]:
"""Extend the time interval to align at the date boarders."""
assert time_to > time_from
zerotd = timedelta(0)
assert isinstance(time_from, datetime) and time_from.tzinfo.utcoffset(time_from) == zerotd
assert isinstance(time_to, datetime) and time_to.tzinfo.utcoffset(time_to) == zerotd
date_from = time_from.date()
date_to = time_to.date()
if time_to.time() != datetime.min.time():
date_to += timedelta(days=1)
return date_from, date_to
def split_to_time_intervals(date_from: date,
date_to: date,
granularities: Union[str, List[str]],
tzoffset: Optional[int],
) -> Tuple[Union[List[datetime], List[List[datetime]]], timedelta]:
"""Produce time interval boundaries from the min and the max dates and the interval lengths \
(granularities).
:param tzoffset: Time zone offset in minutes. We ignore DST for now.
:return: tuple with the time intervals and the timezone offset converted to timedelta. \
If `granularities` is a scalar, then return a list of boundaries, otherwise, return \
a list of lists.
"""
if date_to < date_from:
raise ResponseError(InvalidRequestError(
detail="date_from may not be greater than date_to",
pointer=".date_from",
))
tzoffset = timedelta(minutes=-tzoffset) if tzoffset is not None else timedelta(0)
def split(granularity: str, ptr: str) -> List[datetime]:
try:
intervals = Granularity.split(granularity, date_from, date_to)
except ValueError:
raise ResponseError(InvalidRequestError(
detail='granularity "%s" does not match /%s/' % (
granularity, Granularity.format.pattern),
pointer=ptr,
))
return [datetime.combine(i, datetime.min.time(), tzinfo=timezone.utc) + tzoffset
for i in intervals]
if isinstance(granularities, str):
return split(granularities, ".granularity"), tzoffset
return [split(g, ".granularities[%d]" % i) for i, g in enumerate(granularities)], tzoffset
| en | 0.718588 | Extend the time interval to align at the date boarders. Produce time interval boundaries from the min and the max dates and the interval lengths \ (granularities). :param tzoffset: Time zone offset in minutes. We ignore DST for now. :return: tuple with the time intervals and the timezone offset converted to timedelta. \ If `granularities` is a scalar, then return a list of boundaries, otherwise, return \ a list of lists. | 2.696744 | 3 |
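A self-contained sketch of the date-border coarsening that `coarsen_time_interval` performs, re-implemented here purely for illustration:

```python
from datetime import datetime, timedelta, timezone

def widen_to_dates(time_from, time_to):
    """Round a UTC datetime window outwards to whole calendar dates."""
    date_from, date_to = time_from.date(), time_to.date()
    if time_to.time() != datetime.min.time():
        date_to += timedelta(days=1)  # a partial trailing day is still counted
    return date_from, date_to

print(widen_to_dates(datetime(2020, 1, 1, 10, 30, tzinfo=timezone.utc),
                     datetime(2020, 1, 3, 15, 0, tzinfo=timezone.utc)))
# (datetime.date(2020, 1, 1), datetime.date(2020, 1, 4))
```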
binreconfiguration/strategy/gauge/count.py | vialette/binreconfiguration | 0 | 6632183 | """Count gauge"""
from .gauge import Gauge
class Count(Gauge):
def __call__(self, t):
"""Return the number of items in a bin (including the requested one)."""
(_, bin) = t
return bin.count() + 1 | """Count gauge"""
from .gauge import Gauge
class Count(Gauge):
def __call__(self, t):
"""Return the number of items in a bin (including the requested one)."""
(_, bin) = t
return bin.count() + 1 | en | 0.893536 | Count gauge Return the number of items in a bin (including the requested one). | 3.035648 | 3 |
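To show how such a gauge is used, here is a hypothetical bin object exposing only the `count()` method that `Count.__call__` relies on (the bin interface is inferred from the call site, not taken from the repository):

```python
class DummyBin:
    """Stand-in bin: only the count() method the gauge needs."""
    def __init__(self, items):
        self._items = list(items)

    def count(self):
        return len(self._items)

def count_gauge(t):
    """Same rule as Count.__call__: items already in the bin plus the requested one."""
    _, bin_ = t
    return bin_.count() + 1

print(count_gauge(("new-item", DummyBin(["a", "b", "c"]))))  # 4
```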
dakara_server/library/tests/test_song_tag.py | DakaraProject/dakara-server | 4 | 6632184 | from django.urls import reverse
from rest_framework import status
from internal.tests.base_test import UserModel
from library.models import SongTag
from library.tests.base_test import LibraryAPITestCase
class SongTagListViewTestCase(LibraryAPITestCase):
url = reverse("library-songtag-list")
def setUp(self):
# create a manager
self.manager = self.create_user(
"TestUserManager", library_level=UserModel.MANAGER
)
# create a user without any rights
self.user = self.create_user("TestUser")
# create test data
self.create_test_data()
def test_get_tag_list(self):
"""Test to verify tag list."""
# Login as simple user
self.authenticate(self.user)
# Get tags list
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["count"], 2)
self.assertEqual(len(response.data["results"]), 2)
# Tags are sorted by name
self.check_tag_json(response.data["results"][0], self.tag1)
self.check_tag_json(response.data["results"][1], self.tag2)
def test_get_tag_list_forbidden(self):
"""Test to verify unauthenticated user can't get tag list."""
# Attempt to get work type list
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_tag_already_exists(self):
"""Test to create a tag when it already exists."""
# Login as simple user
self.authenticate(self.manager)
# create an existing tag
response = self.client.post(self.url, {"name": "TAG1"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class SongTagViewTestCase(LibraryAPITestCase):
def setUp(self):
# create a manager
self.manager = self.create_user(
"TestUserManager", library_level=UserModel.MANAGER
)
self.user = self.create_user("TestUser")
# create test data
self.create_test_data()
# create urls
self.url_sg1 = reverse("library-songtag", kwargs={"pk": self.tag1.id})
self.url_sg2 = reverse("library-songtag", kwargs={"pk": self.tag2.id})
def test_update_song_tag_manager(self):
"""Test manager can update tag."""
# login as manager
self.authenticate(self.manager)
# pre-assertion: the tag is enabled
tag = SongTag.objects.get(id=self.tag1.id)
self.assertFalse(tag.disabled)
# alter one tag
response = self.client.patch(self.url_sg1, {"disabled": True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# the tag should be disabled now
tag = SongTag.objects.get(id=self.tag1.id)
self.assertTrue(tag.disabled)
def test_update_song_tag_user(self):
"""Test simple user can not update tags."""
# login as user
self.authenticate(self.user)
# attempt to alter one tag
response = self.client.patch(self.url_sg1, {"disabled": True})
# user can't update tag
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| from django.urls import reverse
from rest_framework import status
from internal.tests.base_test import UserModel
from library.models import SongTag
from library.tests.base_test import LibraryAPITestCase
class SongTagListViewTestCase(LibraryAPITestCase):
url = reverse("library-songtag-list")
def setUp(self):
# create a manager
self.manager = self.create_user(
"TestUserManager", library_level=UserModel.MANAGER
)
# create a user without any rights
self.user = self.create_user("TestUser")
# create test data
self.create_test_data()
def test_get_tag_list(self):
"""Test to verify tag list."""
# Login as simple user
self.authenticate(self.user)
# Get tags list
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["count"], 2)
self.assertEqual(len(response.data["results"]), 2)
# Tags are sorted by name
self.check_tag_json(response.data["results"][0], self.tag1)
self.check_tag_json(response.data["results"][1], self.tag2)
def test_get_tag_list_forbidden(self):
"""Test to verify unauthenticated user can't get tag list."""
# Attempt to get work type list
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_tag_already_exists(self):
"""Test to create a tag when it already exists."""
# Login as simple user
self.authenticate(self.manager)
# create an existing tag
response = self.client.post(self.url, {"name": "TAG1"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class SongTagViewTestCase(LibraryAPITestCase):
def setUp(self):
# create a manager
self.manager = self.create_user(
"TestUserManager", library_level=UserModel.MANAGER
)
self.user = self.create_user("TestUser")
# create test data
self.create_test_data()
# create urls
self.url_sg1 = reverse("library-songtag", kwargs={"pk": self.tag1.id})
self.url_sg2 = reverse("library-songtag", kwargs={"pk": self.tag2.id})
def test_update_song_tag_manager(self):
"""Test manager can update tag."""
# login as manager
self.authenticate(self.manager)
# pre-assertion: the tag is enabled
tag = SongTag.objects.get(id=self.tag1.id)
self.assertFalse(tag.disabled)
# alter one tag
response = self.client.patch(self.url_sg1, {"disabled": True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# the tag should be disabled now
tag = SongTag.objects.get(id=self.tag1.id)
self.assertTrue(tag.disabled)
def test_update_song_tag_user(self):
"""Test simple user can not update tags."""
# login as user
self.authenticate(self.user)
# attempt to alter one tag
response = self.client.patch(self.url_sg1, {"disabled": True})
# user can't update tag
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| en | 0.788675 | # create a manager # create a user without any rights # create test data Test to verify tag list. # Login as simple user # Get tags list # Tags are sorted by name Test to verify unauthenticated user can't get tag list. # Attempt to get work type list Test to create a tag when it already exists. # Login as simple user # create an existing tag # create a manager # create test data # create urls Test manager can update tag. # login as manager # pre-assertion: the tag is enabled # alter one tag # the tag should be disabled now Test simple user can not update tags. # login as user # attempt to alter one tag # user can't update tag | 2.554426 | 3 |
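The two permission tests above follow the same pattern (authenticate, PATCH, assert status). A hypothetical condensed version using pytest parametrization; the `api_client`, `make_user` and `tag` fixtures and the URL path are assumptions for illustration, not part of the project:

```python
import pytest
from rest_framework import status

@pytest.mark.parametrize("level, expected", [
    ("manager", status.HTTP_200_OK),
    ("user", status.HTTP_403_FORBIDDEN),
])
def test_patch_song_tag_permissions(api_client, make_user, tag, level, expected):
    api_client.force_authenticate(make_user(level))  # DRF test client helper
    response = api_client.patch(f"/library/songtags/{tag.id}/", {"disabled": True})
    assert response.status_code == expected
```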
trace_event/etw_manifest/BUILD/message_compiler.py | chinmaygarde/flutter_base | 20 | 6632185 | <filename>trace_event/etw_manifest/BUILD/message_compiler.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Runs the Microsoft Message Compiler (mc.exe). This Python adapter is for the
# GN build, which can only run Python and not native binaries.
import subprocess
import sys
# mc writes to stderr, so this explicitly redirects to stdout and eats it.
try:
subprocess.check_output(["mc.exe"] + sys.argv[1:], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
| <filename>trace_event/etw_manifest/BUILD/message_compiler.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Runs the Microsoft Message Compiler (mc.exe). This Python adapter is for the
# GN build, which can only run Python and not native binaries.
import subprocess
import sys
# mc writes to stderr, so this explicitly redirects to stdout and eats it.
try:
subprocess.check_output(["mc.exe"] + sys.argv[1:], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
| en | 0.888763 | # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Runs the Microsoft Message Compiler (mc.exe). This Python adapter is for the # GN build, which can only run Python and not native binaries. # mc writes to stderr, so this explicily redirects to stdout and eats it. | 1.890278 | 2 |
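The same wrapper pattern in Python 3 form — run a native tool, merge its stderr into stdout, and only print the captured output when the tool fails (a sketch of the pattern, not the Chromium file itself):

```python
import subprocess
import sys

def run_and_eat_stderr(cmd):
    """Run cmd, hiding its output unless it exits non-zero."""
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output.decode(errors="replace"))
        sys.exit(e.returncode)

if __name__ == "__main__":
    run_and_eat_stderr(["mc.exe"] + sys.argv[1:])
```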
spotui/src/SearchInput.py | ssiyad/spotui | 410 | 6632186 | import curses
from spotui.src.util import truncate
from spotui.src.input import Input
from spotui.src.component import Component
class SearchInput(Component):
def __init__(self, stdscr, api, handle_search):
self.stdscr = stdscr
self.api = api
self.handle_search = handle_search
self.active = True
self.popup = True
self.title = "Search"
self.interactive = False
self.restart()
def restart(self):
scry, scrx = self.stdscr.getmaxyx()
box_height = 4
box_width = round(scrx / 3)
self.startx = round((scrx / 2) - (box_width / 2))
self.endx = self.startx + box_width
self.starty = round((scry / 2) - (box_height / 2))
self.endy = self.starty + box_height
self.component = Input(
self.stdscr,
self.starty,
self.startx,
self.endy,
self.endx,
self.handle_search,
)
def activate(self):
self.component.active = True
def deactivate(self):
self.component.active = False
| import curses
from spotui.src.util import truncate
from spotui.src.input import Input
from spotui.src.component import Component
class SearchInput(Component):
def __init__(self, stdscr, api, handle_search):
self.stdscr = stdscr
self.api = api
self.handle_search = handle_search
self.active = True
self.popup = True
self.title = "Search"
self.interactive = False
self.restart()
def restart(self):
scry, scrx = self.stdscr.getmaxyx()
box_height = 4
box_width = round(scrx / 3)
self.startx = round((scrx / 2) - (box_width / 2))
self.endx = self.startx + box_width
self.starty = round((scry / 2) - (box_height / 2))
self.endy = self.starty + box_height
self.component = Input(
self.stdscr,
self.starty,
self.startx,
self.endy,
self.endx,
self.handle_search,
)
def activate(self):
self.component.active = True
def deactivate(self):
self.component.active = False
| none | 1 | 2.488842 | 2 |
|
vollseg/spatial_image.py | Kapoorlabs-CAPED/CAPED-VollSeg | 7 | 6632187 | # -*- python -*-
#
# spatial_image: spatial nd images
#
# Copyright 2006 INRIA - CIRAD - INRA
#
# File author(s): <NAME> <<EMAIL>>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# OpenAlea WebSite : http://openalea.gforge.inria.fr
#
"""
This module create the main |SpatialImage| object
"""
__license__= "Cecill-C"
__revision__=" $Id: $ "
import numpy as np
from scipy import ndimage
import copy as cp
# -- deprecation messages --
# import warnings, exceptions
# msg = "SpatialImage.resolution is deprecated, use SpatialImage.voxelsize"
# rezexc = exceptions.PendingDeprecationWarning(msg)
class SpatialImage(np.ndarray) :
"""
Associate meta data to np.ndarray
"""
def __new__ (cls, input_array, voxelsize = None,
vdim = None, info = None, dtype = None, **kwargs) :
"""Instantiate a new |SpatialImage|
if voxelsize is None, vdim will be used to infer space size and affect
a voxelsize of 1 in each direction of space
.. warning :: `resolution` keyword is deprecated. Use `voxelsize` instead.
:Parameters:
- `cls` - internal python
- `input_array` (array) - data to put in the image
- `voxelsize` (tuple of float) - spatial extension in each direction
of space
- `vdim` (int) - size of data if vector data are used
- `info` (dict of str|any) - metainfo
"""
#if the input_array is 2D we can reshape it to 3D.
#~ if input_array.ndim == 2: # Jonathan
#~ input_array = input_array.reshape( input_array.shape+(1,) ) # Jonathan
#initialize datas. For some obscure reason, we want the data
#to be F-Contiguous in the NUMPY sense. I mean, if this is not
#respected, we will have problems when communicating with
#C-Code... yeah, that makes so much sense (fortran-contiguous
#to be c-readable...).
dtype = dtype if dtype is not None else input_array.dtype
if input_array.flags.f_contiguous :
obj = np.asarray(input_array, dtype=dtype).view(cls)
else :
obj = np.asarray(input_array, dtype=dtype, order='F').view(cls)
voxelsize = kwargs.get("resolution", voxelsize) #to manage transition
if voxelsize is None :
#~ voxelsize = (1.,) * 3
voxelsize = (1.,) * input_array.ndim # Jonathan
else :
#~ if len(voxelsize) != 3 :
if (input_array.ndim != 4) and (len(voxelsize) != input_array.ndim) : # Jonathan _ Compatibility with "champs_*.inr.gz" generated by Baloo & SuperBaloo
raise ValueError("data dimension and voxelsize mismatch")
obj.voxelsize = tuple(voxelsize)
obj.vdim = vdim if vdim else 1
#set metadata
if info is None :
obj.info = {}
else :
obj.info = dict(info)
#return
return obj
def _get_resolution(self):
# warnings.warn(rezexc)
return self.voxelsize
def _set_resolution(self, val):
# warnings.warn(rezexc)
self.voxelsize = val
resolution = property(_get_resolution, _set_resolution)
@property
def real_shape(self):
#~ return np.multiply(self.shape[:3], self.voxelsize)
return np.multiply(self.shape, self.voxelsize) # Jonathan
def invert_z_axis( self ):
"""
invert allong 'Z' axis
"""
self = self[:,:,::-1]
def __array_finalize__ (self, obj) :
if obj is None :
return
#assert resolution
res = getattr(obj, 'voxelsize', None)
if res is None :#assert vdim == 1
res = (1.,) * len(obj.shape)
self.voxelsize = tuple(res)
#metadata
self.info = dict(getattr(obj, 'info', {}) )
def clone (self, data) :
"""Clone the current image metadata
on the given data.
.. warning:: vdim is defined according to self.voxelsize and data.shape
:Parameters:
- `data` - (array)
:Returns Type: |SpatialImage|
"""
if len(data.shape) == len(self.voxelsize) :
vdim = 1
elif len(data.shape) - len(self.voxelsize) == 1 :
vdim =data.shape[-1]
else :
raise UserWarning("unable to handle such data dimension")
return SpatialImage(data,self.voxelsize,vdim,self.info)
@classmethod
def valid_array(cls, array_like):
return isinstance(array_like, (np.ndarray, cls)) and \
array_like.flags.f_contiguous
def empty_image_like(spatial_image):
array = np.zeros( spatial_image.shape, dtype=spatial_image.dtype )
return SpatialImage(array, spatial_image.voxelsize, vdim=1)
def null_vector_field_like(spatial_image):
array = np.zeros( list(spatial_image.shape)+[3], dtype=np.float32 )
return SpatialImage(array, spatial_image.voxelsize, vdim=3)
def random_vector_field_like(spatial_image, smooth=0, max_=1):
#~ if spatial_image.vdim == 1:
#~ shape = spatial_image.shape+(3,)
#~ else:
#~ shape = spatial_image.shape
shape = spatial_image.shape # Jonathan
array = np.random.uniform(-max_, max_, shape)
if smooth:
array = ndimage.gaussian_filter(array, smooth)
return SpatialImage(array, spatial_image.voxelsize, dtype=np.float32)
def checkerboard(nx=9, ny=8, nz=5, size=10, vs=(1.,1.,1.), dtype=np.uint8):
"""Creates a 3D checkerboard image with `nx` squares in width,
`ny` squares in height and `nz` squares in depth. The length of the edge in real units
of each square is `size`."""
sxv, syv, szv = np.array([size]*3) / np.array(vs)
array = np.zeros( (sxv*nx, syv*ny, szv*nz), dtype=dtype, order="F")
typeinfo = np.iinfo(dtype)
# -- wooo surely not the most beautiful implementation out here --
for k in range(nz):
kval = typeinfo.max if (k%2==0) else typeinfo.min
jval = kval
for j in range(ny):
ival = jval
for i in range(nx):
array[i*sxv:i*sxv+sxv, j*syv:j*syv+syv, k*szv:k*szv+szv] = ival
ival = typeinfo.max if (ival==typeinfo.min) else typeinfo.min
jval = typeinfo.max if (jval==typeinfo.min) else typeinfo.min
kval = typeinfo.max if (kval==typeinfo.min) else typeinfo.min
return SpatialImage(array, vs, dtype=dtype)
def is2D(image):
"""
Test if the `image` (array) is in 2D or 3D.
Return True if 2D, False if not.
"""
if len(image.shape) == 2 or image.shape[2] == 1:
return True
else:
return False
| # -*- python -*-
#
# spatial_image: spatial nd images
#
# Copyright 2006 INRIA - CIRAD - INRA
#
# File author(s): <NAME> <<EMAIL>>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# OpenAlea WebSite : http://openalea.gforge.inria.fr
#
"""
This module create the main |SpatialImage| object
"""
__license__= "Cecill-C"
__revision__=" $Id: $ "
import numpy as np
from scipy import ndimage
import copy as cp
# -- deprecation messages --
# import warnings, exceptions
# msg = "SpatialImage.resolution is deprecated, use SpatialImage.voxelsize"
# rezexc = exceptions.PendingDeprecationWarning(msg)
class SpatialImage(np.ndarray) :
"""
Associate meta data to np.ndarray
"""
def __new__ (cls, input_array, voxelsize = None,
vdim = None, info = None, dtype = None, **kwargs) :
"""Instantiate a new |SpatialImage|
if voxelsize is None, vdim will be used to infer space size and affect
a voxelsize of 1 in each direction of space
.. warning :: `resolution` keyword is deprecated. Use `voxelsize` instead.
:Parameters:
- `cls` - internal python
- `input_array` (array) - data to put in the image
- `voxelsize` (tuple of float) - spatial extension in each direction
of space
- `vdim` (int) - size of data if vector data are used
- `info` (dict of str|any) - metainfo
"""
#if the input_array is 2D we can reshape it to 3D.
#~ if input_array.ndim == 2: # Jonathan
#~ input_array = input_array.reshape( input_array.shape+(1,) ) # Jonathan
#initialize datas. For some obscure reason, we want the data
#to be F-Contiguous in the NUMPY sense. I mean, if this is not
#respected, we will have problems when communicating with
#C-Code... yeah, that makes so much sense (fortran-contiguous
#to be c-readable...).
dtype = dtype if dtype is not None else input_array.dtype
if input_array.flags.f_contiguous :
obj = np.asarray(input_array, dtype=dtype).view(cls)
else :
obj = np.asarray(input_array, dtype=dtype, order='F').view(cls)
voxelsize = kwargs.get("resolution", voxelsize) #to manage transition
if voxelsize is None :
#~ voxelsize = (1.,) * 3
voxelsize = (1.,) * input_array.ndim # Jonathan
else :
#~ if len(voxelsize) != 3 :
if (input_array.ndim != 4) and (len(voxelsize) != input_array.ndim) : # Jonathan _ Compatibility with "champs_*.inr.gz" generated by Baloo & SuperBaloo
raise ValueError("data dimension and voxelsize mismatch")
obj.voxelsize = tuple(voxelsize)
obj.vdim = vdim if vdim else 1
#set metadata
if info is None :
obj.info = {}
else :
obj.info = dict(info)
#return
return obj
def _get_resolution(self):
# warnings.warn(rezexc)
return self.voxelsize
def _set_resolution(self, val):
# warnings.warn(rezexc)
self.voxelsize = val
resolution = property(_get_resolution, _set_resolution)
@property
def real_shape(self):
#~ return np.multiply(self.shape[:3], self.voxelsize)
return np.multiply(self.shape, self.voxelsize) # Jonathan
def invert_z_axis( self ):
"""
invert allong 'Z' axis
"""
self = self[:,:,::-1]
def __array_finalize__ (self, obj) :
if obj is None :
return
#assert resolution
res = getattr(obj, 'voxelsize', None)
if res is None :#assert vdim == 1
res = (1.,) * len(obj.shape)
self.voxelsize = tuple(res)
#metadata
self.info = dict(getattr(obj, 'info', {}) )
def clone (self, data) :
"""Clone the current image metadata
on the given data.
.. warning:: vdim is defined according to self.voxelsize and data.shape
:Parameters:
- `data` - (array)
:Returns Type: |SpatialImage|
"""
if len(data.shape) == len(self.voxelsize) :
vdim = 1
elif len(data.shape) - len(self.voxelsize) == 1 :
vdim =data.shape[-1]
else :
raise UserWarning("unable to handle such data dimension")
return SpatialImage(data,self.voxelsize,vdim,self.info)
@classmethod
def valid_array(cls, array_like):
return isinstance(array_like, (np.ndarray, cls)) and \
array_like.flags.f_contiguous
def empty_image_like(spatial_image):
array = np.zeros( spatial_image.shape, dtype=spatial_image.dtype )
return SpatialImage(array, spatial_image.voxelsize, vdim=1)
def null_vector_field_like(spatial_image):
array = np.zeros( list(spatial_image.shape)+[3], dtype=np.float32 )
return SpatialImage(array, spatial_image.voxelsize, vdim=3)
def random_vector_field_like(spatial_image, smooth=0, max_=1):
#~ if spatial_image.vdim == 1:
#~ shape = spatial_image.shape+(3,)
#~ else:
#~ shape = spatial_image.shape
shape = spatial_image.shape # Jonathan
array = np.random.uniform(-max_, max_, shape)
if smooth:
array = ndimage.gaussian_filter(array, smooth)
return SpatialImage(array, spatial_image.voxelsize, dtype=np.float32)
def checkerboard(nx=9, ny=8, nz=5, size=10, vs=(1.,1.,1.), dtype=np.uint8):
"""Creates a 3D checkerboard image with `nx` squares in width,
`ny` squares in height and `nz` squares in depth. The length of the edge in real units
of each square is `size`."""
sxv, syv, szv = np.array([size]*3) / np.array(vs)
array = np.zeros( (sxv*nx, syv*ny, szv*nz), dtype=dtype, order="F")
typeinfo = np.iinfo(dtype)
# -- wooo surely not the most beautiful implementation out here --
for k in range(nz):
kval = typeinfo.max if (k%2==0) else typeinfo.min
jval = kval
for j in range(ny):
ival = jval
for i in range(nx):
array[i*sxv:i*sxv+sxv, j*syv:j*syv+syv, k*szv:k*szv+szv] = ival
ival = typeinfo.max if (ival==typeinfo.min) else typeinfo.min
jval = typeinfo.max if (jval==typeinfo.min) else typeinfo.min
kval = typeinfo.max if (kval==typeinfo.min) else typeinfo.min
return SpatialImage(array, vs, dtype=dtype)
def is2D(image):
"""
Test if the `image` (array) is in 2D or 3D.
Return True if 2D, False if not.
"""
if len(image.shape) == 2 or image.shape[2] == 1:
return True
else:
return False
| en | 0.577009 | # -*- python -*- # # spatial_image: spatial nd images # # Copyright 2006 INRIA - CIRAD - INRA # # File author(s): <NAME> <<EMAIL>> # # Distributed under the Cecill-C License. # See accompanying file LICENSE.txt or copy at # http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html # # OpenAlea WebSite : http://openalea.gforge.inria.fr # This module create the main |SpatialImage| object # -- deprecation messages -- # import warnings, exceptions # msg = "SpatialImage.resolution is deprecated, use SpatialImage.voxelsize" # rezexc = exceptions.PendingDeprecationWarning(msg) Associate meta data to np.ndarray Instantiate a new |SpatialImage| if voxelsize is None, vdim will be used to infer space size and affect a voxelsize of 1 in each direction of space .. warning :: `resolution` keyword is deprecated. Use `voxelsize` instead. :Parameters: - `cls` - internal python - `input_array` (array) - data to put in the image - `voxelsize` (tuple of float) - spatial extension in each direction of space - `vdim` (int) - size of data if vector data are used - `info` (dict of str|any) - metainfo #if the input_array is 2D we can reshape it to 3D. #~ if input_array.ndim == 2: # Jonathan #~ input_array = input_array.reshape( input_array.shape+(1,) ) # Jonathan #initialize datas. For some obscure reason, we want the data #to be F-Contiguous in the NUMPY sense. I mean, if this is not #respected, we will have problems when communicating with #C-Code... yeah, that makes so much sense (fortran-contiguous #to be c-readable...). #to manage transition #~ voxelsize = (1.,) * 3 # Jonathan #~ if len(voxelsize) != 3 : # Jonathan _ Compatibility with "champs_*.inr.gz" generated by Baloo & SuperBaloo #set metadata #return # warnings.warn(rezexc) # warnings.warn(rezexc) #~ return np.multiply(self.shape[:3], self.voxelsize) # Jonathan invert allong 'Z' axis #assert resolution #assert vdim == 1 #metadata Clone the current image metadata on the given data. .. warning:: vdim is defined according to self.voxelsize and data.shape :Parameters: - `data` - (array) :Returns Type: |SpatialImage| #~ if spatial_image.vdim == 1: #~ shape = spatial_image.shape+(3,) #~ else: #~ shape = spatial_image.shape # Jonathan Creates a 3D checkerboard image with `nx` squares in width, `ny` squares in height and `nz` squares in depth. The length of the edge in real units of each square is `size`. # -- wooo surely not the most beautiful implementation out here -- Test if the `image` (array) is in 2D or 3D. Return True if 2D, False if not. | 2.668311 | 3 |
snippets/math/digit.py | KATO-Hiro/Somen-Soupy | 1 | 6632188 | # -*- coding: utf-8 -*-
def count_digit(max_number: int) -> int:
'''
Args:
max_number: Int of number (greater than 1).
Returns:
the number of digit.
Landau notation: O(log n)
'''
if max_number == 0:
return 1
digit = 0
while max_number:
digit += 1
max_number //= 10
return digit
| # -*- coding: utf-8 -*-
def count_digit(max_number: int) -> int:
'''
Args:
max_number: Int of number (greater than 1).
Returns:
the number of digit.
Landau notation: O(log n)
'''
if max_number == 0:
return 1
digit = 0
while max_number:
digit += 1
max_number //= 10
return digit
| en | 0.659243 | # -*- coding: utf-8 -*- Args:
max_number: Int of number (greater than 1).
Returns:
the number of digit.
Landau notation: O(log n) | 3.993526 | 4 |
learning-flask/application.py | eduardoc7/edx_python_javascript | 1 | 6632189 | from flask import Flask, render_template, request, session
from flask_session import Session
app = Flask(__name__)
notes = list()
# Aplicação para receber um formulário e adicionar a uma lista de notas,
# que será exibida na tela
# Main Program
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = 'filesystem'
Session(app)
@app.route('/', methods=['GET', 'POST'])
def index():
if session.get('notes') is None:
session['notes'] = list()
if request.method == 'POST':
note = request.form.get('note')
session['notes'].append(note)
return render_template('index4.html', notes=session['notes'])
| from flask import Flask, render_template, request, session
from flask_session import Session
app = Flask(__name__)
notes = list()
# Aplicação para receber um formulário e adicionar a uma lista de notas,
# que será exibida na tela
# Main Program
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = 'filesystem'
Session(app)
@app.route('/', methods=['GET', 'POST'])
def index():
if session.get('notes') is None:
session['notes'] = list()
if request.method == 'POST':
note = request.form.get('note')
session['notes'].append(note)
return render_template('index4.html', notes=session['notes'])
| pt | 0.994828 | # Aplicação para receber um formulário e adicionar a uma lista de notas, # que será exibida na tela # Main Program | 3.311467 | 3 |
python/perspective/perspective/tests/table/test_remove.py | willium/perspective | 0 | 6632190 | # *****************************************************************************
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from perspective.table import Table
class TestRemove(object):
def test_remove_all(self):
tbl = Table([{"a": "abc", "b": 123}], {"index": "a"})
tbl.remove(["abc"])
assert tbl.view().to_records() == []
def test_remove_nonsequential(self):
tbl = Table([{"a": "abc", "b": 123}, {"a": "def", "b": 456}, {"a": "efg", "b": 789}], {"index": "a"})
tbl.remove(["abc", "efg"])
assert tbl.view().to_records() == [{"a": "def", "b": 456}]
def test_remove_multiple_single(self):
tbl = Table({"a": int, "b": str}, {"index": "a"})
for i in range(0, 10):
tbl.update([{"a": i, "b": str(i)}])
for i in range(1, 10):
tbl.remove([i])
assert tbl.view().to_records() == [{"a": 0, "b": "0"}]
| # *****************************************************************************
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from perspective.table import Table
class TestRemove(object):
def test_remove_all(self):
tbl = Table([{"a": "abc", "b": 123}], {"index": "a"})
tbl.remove(["abc"])
assert tbl.view().to_records() == []
def test_remove_nonsequential(self):
tbl = Table([{"a": "abc", "b": 123}, {"a": "def", "b": 456}, {"a": "efg", "b": 789}], {"index": "a"})
tbl.remove(["abc", "efg"])
assert tbl.view().to_records() == [{"a": "def", "b": 456}]
def test_remove_multiple_single(self):
tbl = Table({"a": int, "b": str}, {"index": "a"})
for i in range(0, 10):
tbl.update([{"a": i, "b": str(i)}])
for i in range(1, 10):
tbl.remove([i])
assert tbl.view().to_records() == [{"a": 0, "b": "0"}]
| en | 0.76268 | # ***************************************************************************** # # Copyright (c) 2019, the Perspective Authors. # # This file is part of the Perspective library, distributed under the terms of # the Apache License 2.0. The full license can be found in the LICENSE file. # | 2.415083 | 2 |
main/urls.py | dera1992/blog_tutorial | 0 | 6632191 | app_name = "main"
urlpatterns = [ ]
| app_name = "main"
urlpatterns = [ ]
| none | 1 | 1.023896 | 1 |
|
kstore/migrations/0013_auto_20160215_1028.py | KeoH/django-keoh-kstore | 0 | 6632192 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('kstore', '0012_basicconfiguration_theme'),
]
operations = [
migrations.AddField(
model_name='mailmessage',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2016, 2, 15, 9, 28, 18, 635423, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
migrations.AlterField(
model_name='mailmessage',
name='sended_at',
field=models.DateTimeField(blank=True),
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('kstore', '0012_basicconfiguration_theme'),
]
operations = [
migrations.AddField(
model_name='mailmessage',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2016, 2, 15, 9, 28, 18, 635423, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
migrations.AlterField(
model_name='mailmessage',
name='sended_at',
field=models.DateTimeField(blank=True),
),
]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.667571 | 2 |
client/commands/tests/start_test.py | aspin/pyre-check | 0 | 6632193 | <filename>client/commands/tests/start_test.py
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import fcntl
import unittest
from unittest.mock import MagicMock, call, mock_open, patch
from ... import commands, monitor, project_files_monitor # noqa
from ...filesystem import AnalysisDirectory, acquire_lock # noqa
from .command_test import mock_arguments, mock_configuration
class StartTest(unittest.TestCase):
@patch("{}.ProjectFilesMonitor".format(project_files_monitor.__name__))
@patch("fcntl.lockf")
@patch.object(commands.Reporting, "_get_directories_to_analyze", return_value=set())
@patch.object(monitor.Monitor, "daemonize")
def test_start(
self,
_daemonize,
get_directories_to_analyze,
lock_file,
_daemonize_files_monitor,
) -> None:
arguments = mock_arguments()
arguments.terminal = False
configuration = mock_configuration()
configuration.version_hash = "hash"
configuration.number_of_workers = 5
analysis_directory = AnalysisDirectory(".")
# Check start without watchman.
with patch("builtins.open", mock_open()), patch.object(
commands.Command, "_call_client"
) as call_client:
arguments.no_watchman = True
command = commands.Start(arguments, configuration, analysis_directory)
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
command.run()
call_client.assert_called_once_with(command=commands.Start.NAME)
analysis_directory = AnalysisDirectory(".")
# This magic is necessary to test, because the inner call to ping a server is
# always non-blocking.
def pass_when_blocking(file_descriptor, command):
if not pass_when_blocking.failed and (command & fcntl.LOCK_NB):
pass_when_blocking.failed = True
raise OSError(errno.EAGAIN, "Only accepting blocking calls.")
pass_when_blocking.failed = False
lock_file.side_effect = pass_when_blocking
# EAGAINs get caught.
with patch("builtins.open", mock_open()), patch.object(
commands.Command, "_call_client"
) as call_client:
arguments.no_watchman = True
command = commands.Start(arguments, configuration, analysis_directory)
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
command.run()
call_client.assert_called_once_with(command=commands.Start.NAME)
lock_file.side_effect = None
def raise_mount_error(fileno, command):
raise OSError(errno.ENOTCONN)
lock_file.side_effect = raise_mount_error
# Check that the command errors on OS errors other than EAGAIN.
with patch("builtins.open", mock_open()), patch.object(
commands.Command, "_call_client"
) as call_client:
arguments.no_watchman = True
command = commands.Start(arguments, configuration, analysis_directory)
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
with self.assertRaises(OSError):
command.run()
call_client.assert_not_called()
lock_file.side_effect = None
# Shared analysis directories are prepared when starting.
shared_analysis_directory = MagicMock()
shared_analysis_directory.get_root = lambda: "."
with patch.object(
commands.Command, "_call_client"
) as call_client, patch.object(shared_analysis_directory, "prepare") as prepare:
arguments = mock_arguments(no_watchman=True)
configuration = mock_configuration(version_hash="hash")
command = commands.Start(
arguments, configuration, shared_analysis_directory
)
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
command.run()
call_client.assert_called_once_with(command=commands.Start.NAME)
prepare.assert_called_once_with()
@patch.object(commands.Reporting, "_get_directories_to_analyze", return_value=set())
def test_start_flags(self, get_directories_to_analyze):
# Check start with watchman.
arguments = mock_arguments()
configuration = mock_configuration(version_hash="hash")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
arguments = mock_arguments(no_watchman=True, terminal=True)
configuration = mock_configuration(version_hash="hash")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-terminal",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check filter directories.
arguments = mock_arguments(no_watchman=True)
configuration = mock_configuration(version_hash="hash")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
with patch.object(command, "_get_directories_to_analyze") as get_directories:
get_directories.return_value = {"a", "b"}
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-filter-directories",
"a;b",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check configuration-file-hash.
arguments = mock_arguments(no_watchman=True)
configuration = mock_configuration(version_hash="hash", file_hash="ABCD")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
with patch.object(command, "_get_directories_to_analyze") as get_directories:
get_directories.return_value = {"a", "b"}
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-filter-directories",
"a;b",
"-configuration-file-hash",
"ABCD",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check save-initial-state-to.
arguments = mock_arguments(save_initial_state_to="/tmp")
configuration = mock_configuration(version_hash="hash")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-save-initial-state-to",
"/tmp",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check load-initial-state-from.
arguments = mock_arguments(
load_initial_state_from="/tmp/pyre_shared_memory",
changed_files_path="/tmp/changed_files",
)
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-load-state-from",
"/tmp/pyre_shared_memory",
"-changed-files-path",
"/tmp/changed_files",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Both changed-files-path and load-initial-state-from must be not-None.
arguments = mock_arguments(changed_files_path="/tmp/changed_files")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check load-initial-state-from.
arguments = mock_arguments(changed_files_path="/tmp/changed_files")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check --saved-state-project.
arguments = mock_arguments(saved_state_project="pyre/saved_state")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-saved-state-project",
"pyre/saved_state",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check --no-saved-state.
arguments = mock_arguments(
saved_state_project="pyre/saved_state", no_saved_state=True
)
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
arguments = mock_arguments(no_saved_state=True)
arguments.load_initial_state_from = "/do/not/load"
arguments.save_initial_state_to = "/do/not/save"
arguments.changed_files_path = "/do/not/change"
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
arguments = mock_arguments(store_type_check_resolution=True)
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-store-type-check-resolution",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
| <filename>client/commands/tests/start_test.py
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import fcntl
import unittest
from unittest.mock import MagicMock, call, mock_open, patch
from ... import commands, monitor, project_files_monitor # noqa
from ...filesystem import AnalysisDirectory, acquire_lock # noqa
from .command_test import mock_arguments, mock_configuration
class StartTest(unittest.TestCase):
@patch("{}.ProjectFilesMonitor".format(project_files_monitor.__name__))
@patch("fcntl.lockf")
@patch.object(commands.Reporting, "_get_directories_to_analyze", return_value=set())
@patch.object(monitor.Monitor, "daemonize")
def test_start(
self,
_daemonize,
get_directories_to_analyze,
lock_file,
_daemonize_files_monitor,
) -> None:
arguments = mock_arguments()
arguments.terminal = False
configuration = mock_configuration()
configuration.version_hash = "hash"
configuration.number_of_workers = 5
analysis_directory = AnalysisDirectory(".")
# Check start without watchman.
with patch("builtins.open", mock_open()), patch.object(
commands.Command, "_call_client"
) as call_client:
arguments.no_watchman = True
command = commands.Start(arguments, configuration, analysis_directory)
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
command.run()
call_client.assert_called_once_with(command=commands.Start.NAME)
analysis_directory = AnalysisDirectory(".")
# This magic is necessary to test, because the inner call to ping a server is
# always non-blocking.
def pass_when_blocking(file_descriptor, command):
if not pass_when_blocking.failed and (command & fcntl.LOCK_NB):
pass_when_blocking.failed = True
raise OSError(errno.EAGAIN, "Only accepting blocking calls.")
pass_when_blocking.failed = False
lock_file.side_effect = pass_when_blocking
# EAGAINs get caught.
with patch("builtins.open", mock_open()), patch.object(
commands.Command, "_call_client"
) as call_client:
arguments.no_watchman = True
command = commands.Start(arguments, configuration, analysis_directory)
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
command.run()
call_client.assert_called_once_with(command=commands.Start.NAME)
lock_file.side_effect = None
def raise_mount_error(fileno, command):
raise OSError(errno.ENOTCONN)
lock_file.side_effect = raise_mount_error
# Check that the command errors on OS errors other than EAGAIN.
with patch("builtins.open", mock_open()), patch.object(
commands.Command, "_call_client"
) as call_client:
arguments.no_watchman = True
command = commands.Start(arguments, configuration, analysis_directory)
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
with self.assertRaises(OSError):
command.run()
call_client.assert_not_called()
lock_file.side_effect = None
# Shared analysis directories are prepared when starting.
shared_analysis_directory = MagicMock()
shared_analysis_directory.get_root = lambda: "."
with patch.object(
commands.Command, "_call_client"
) as call_client, patch.object(shared_analysis_directory, "prepare") as prepare:
arguments = mock_arguments(no_watchman=True)
configuration = mock_configuration(version_hash="hash")
command = commands.Start(
arguments, configuration, shared_analysis_directory
)
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
command.run()
call_client.assert_called_once_with(command=commands.Start.NAME)
prepare.assert_called_once_with()
@patch.object(commands.Reporting, "_get_directories_to_analyze", return_value=set())
def test_start_flags(self, get_directories_to_analyze):
# Check start with watchman.
arguments = mock_arguments()
configuration = mock_configuration(version_hash="hash")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
arguments = mock_arguments(no_watchman=True, terminal=True)
configuration = mock_configuration(version_hash="hash")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-terminal",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check filter directories.
arguments = mock_arguments(no_watchman=True)
configuration = mock_configuration(version_hash="hash")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
with patch.object(command, "_get_directories_to_analyze") as get_directories:
get_directories.return_value = {"a", "b"}
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-filter-directories",
"a;b",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check configuration-file-hash.
arguments = mock_arguments(no_watchman=True)
configuration = mock_configuration(version_hash="hash", file_hash="ABCD")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
with patch.object(command, "_get_directories_to_analyze") as get_directories:
get_directories.return_value = {"a", "b"}
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-filter-directories",
"a;b",
"-configuration-file-hash",
"ABCD",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check save-initial-state-to.
arguments = mock_arguments(save_initial_state_to="/tmp")
configuration = mock_configuration(version_hash="hash")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-save-initial-state-to",
"/tmp",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check load-initial-state-from.
arguments = mock_arguments(
load_initial_state_from="/tmp/pyre_shared_memory",
changed_files_path="/tmp/changed_files",
)
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-load-state-from",
"/tmp/pyre_shared_memory",
"-changed-files-path",
"/tmp/changed_files",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Both changed-files-path and load-initial-state-from must be not-None.
arguments = mock_arguments(changed_files_path="/tmp/changed_files")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check load-initial-state-from.
arguments = mock_arguments(changed_files_path="/tmp/changed_files")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check --saved-state-project.
arguments = mock_arguments(saved_state_project="pyre/saved_state")
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-saved-state-project",
"pyre/saved_state",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
# Check --no-saved-state.
arguments = mock_arguments(
saved_state_project="pyre/saved_state", no_saved_state=True
)
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
arguments = mock_arguments(no_saved_state=True)
arguments.load_initial_state_from = "/do/not/load"
arguments.save_initial_state_to = "/do/not/save"
arguments.changed_files_path = "/do/not/change"
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
arguments = mock_arguments(store_type_check_resolution=True)
command = commands.Start(arguments, configuration, AnalysisDirectory("."))
self.assertEqual(
command._flags(),
[
"-logging-sections",
"parser",
"-project-root",
".",
"-store-type-check-resolution",
"-workers",
"5",
"-typeshed",
"stub",
"-expected-binary-version",
"hash",
"-search-path",
"path1,path2",
],
)
| en | 0.819658 | # Copyright (c) 2016-present, Facebook, Inc. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # noqa # noqa # Check start without watchman. # This magic is necessary to test, because the inner call to ping a server is # always non-blocking. # EAGAINs get caught. # Check that the command errors on OS errors other than EAGAIN. # Shared analysis directories are prepared when starting. # Check start with watchman. # Check filter directories. # Check configuration-file-hash. # Check save-initial-state-to. # Check load-initial-state-from. # Both changed-files-path and load-initial-state-from must be not-None. # Check load-initial-state-from. # Check --saved-state-project. # Check --no-saved-state. | 2.083856 | 2 |
evaluation_cyclegan/test.py | samxuxiang/mcmi | 3 | 6632194 | import torch
from options.test_options import TestOptions
from dataset import dataset_single
from saver import save_imgs
import os
import inception_utils
from inception import InceptionV3
import numpy as np
import glob
from models import create_model
import modelss
import torchvision
def compute_lpips(imgs, model_alexnet):
num_samples = len(imgs)
dists = []
for idx1 in range(0,num_samples):
idx2 = idx1+1
while idx2 < num_samples:
img0 = imgs[idx1]
img1 = imgs[idx2]
lpips_dist = model_alexnet.forward(img0,img1).item()
dists.append(lpips_dist)
idx2 += 1
lpips_score = sum(dists) / len(dists)
return lpips_score
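# Equivalent sketch of the pairwise average above (illustrative only, not used by
# this script): enumerate all unordered pairs with itertools.combinations instead
# of nested indices. Assumes the same inputs as compute_lpips.
def compute_lpips_pairs(imgs, model_alexnet):
    import itertools
    dists = [model_alexnet.forward(a, b).item() for a, b in itertools.combinations(imgs, 2)]
    return sum(dists) / len(dists)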
def main():
# parse options
opt = TestOptions().parse()
opt.num_threads = 0 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
# data loader
print('\n--- load dataset ---')
#if opt.a2b:
dataset = dataset_single(opt, 'A')
#else:
#dataset = dataset_single(opt, 'B')
loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=8)
file_name = opt.fid
data_mu = np.load(file_name)['mu']
data_sigma = np.load(file_name)['sigma']
# Load inception net
print('\n--- load inception net ---')
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
model_inception = InceptionV3([block_idx]).cuda()
model_inception.eval() # set to eval mode
## Initializing the AlexNet model
model_alexnet = modelss.PerceptualLoss(model='net-lin',net='alex',use_gpu=True)
transform = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.Resize(64),
torchvision.transforms.ToTensor()])
# Load pre-trained model
print('\n--- load model ---')
model = create_model(opt)
all_files = np.arange(200, 210, 10).tolist()
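    # Note: np.arange(200, 210, 10) yields only [200], so as written a single
    # checkpoint (epoch 200) is evaluated; widen the range to sweep more checkpoints.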
torch.manual_seed(8)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(8)
test_sample = 1
for epoch in all_files:
model.load_networks(epoch)
model.eval()
# generate images & accumulate inception activation
pool1 = []
pool2 = []
dists = []
for idx1, img1 in enumerate(loader):
img = img1.cuda()
imgs = [img]
names = ['input']
with torch.no_grad():
img = model.test_forward(img, a2b=True)
imgs = [img]
imgss = []
# accumulate inception activation for FID1
assert (torch.max(img)<=1.0 and torch.min(img)>=-1.0)
pool_val = model_inception(img)[0] # bs x 2048 x 1 x 1
assert(pool_val.size(1)==2048 and pool_val.size(2) == 1 and pool_val.size(3)==1)
pool_val = pool_val.squeeze()
pool1 += [np.asarray(pool_val.cpu())]
for i in range(1):
img = model.test_forward(img, a2b=False)
img = model.test_forward(img, a2b=True)
imgs += [img]
# accumulate inception activation for FID2
assert (torch.max(img)<=1.0 and torch.min(img)>=-1.0)
pool_val = model_inception(img)[0] # bs x 2048 x 1 x 1
assert(pool_val.size(1)==2048 and pool_val.size(2) == 1 and pool_val.size(3)==1)
pool_val = pool_val.squeeze()
pool2 += [np.asarray(pool_val.cpu())]
# test lpips on 4 (1+1+2) generated images
for i in range(2):
img = model.test_forward(img, a2b=False)
img = model.test_forward(img, a2b=True)
imgs += [img]
for i in imgs:
imgss.append(transform(i[0].cpu()).cuda())
dist = compute_lpips(imgss, model_alexnet)
dists.append(dist)
# compute fid score
lpips_score = sum(dists) / len(dists)
print('LPIPS score for epoch %d is %f ' %(epoch, lpips_score))
pool1 = np.vstack(pool1)
mu, sigma = np.mean(pool1, axis=0), np.cov(pool1, rowvar=False)
FID1 = inception_utils.numpy_calculate_frechet_distance(mu, sigma, data_mu, data_sigma)
print('FID1 score for epoch %d is %f ' %(epoch, FID1))
pool2 = np.vstack(pool2)
mu, sigma = np.mean(pool2, axis=0), np.cov(pool2, rowvar=False)
FID2 = inception_utils.numpy_calculate_frechet_distance(mu, sigma, data_mu, data_sigma)
print('FID2 score for epoch %d is %f ' %(epoch, FID2))
if __name__ == '__main__':
main()
| import torch
from options.test_options import TestOptions
from dataset import dataset_single
from saver import save_imgs
import os
import inception_utils
from inception import InceptionV3
import numpy as np
import glob
from models import create_model
import modelss
import torchvision
def compute_lpips(imgs, model_alexnet):
num_samples = len(imgs)
dists = []
for idx1 in range(0,num_samples):
idx2 = idx1+1
while idx2 < num_samples:
img0 = imgs[idx1]
img1 = imgs[idx2]
lpips_dist = model_alexnet.forward(img0,img1).item()
dists.append(lpips_dist)
idx2 += 1
lpips_score = sum(dists) / len(dists)
return lpips_score
def main():
# parse options
opt = TestOptions().parse()
opt.num_threads = 0 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
# data loader
print('\n--- load dataset ---')
#if opt.a2b:
dataset = dataset_single(opt, 'A')
#else:
#dataset = dataset_single(opt, 'B')
loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=8)
file_name = opt.fid
data_mu = np.load(file_name)['mu']
data_sigma = np.load(file_name)['sigma']
# Load inception net
print('\n--- load inception net ---')
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
model_inception = InceptionV3([block_idx]).cuda()
model_inception.eval() # set to eval mode
## Initializing the AlexNet model
model_alexnet = modelss.PerceptualLoss(model='net-lin',net='alex',use_gpu=True)
transform = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.Resize(64),
torchvision.transforms.ToTensor()])
# Load pre-trained model
print('\n--- load model ---')
model = create_model(opt)
all_files = np.arange(200, 210, 10).tolist()
torch.manual_seed(8)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(8)
test_sample = 1
for epoch in all_files:
model.load_networks(epoch)
model.eval()
# generate images & accumulate inception activation
pool1 = []
pool2 = []
dists = []
for idx1, img1 in enumerate(loader):
img = img1.cuda()
imgs = [img]
names = ['input']
with torch.no_grad():
img = model.test_forward(img, a2b=True)
imgs = [img]
imgss = []
# accumulate inception activation for FID1
assert (torch.max(img)<=1.0 and torch.min(img)>=-1.0)
pool_val = model_inception(img)[0] # bs x 2048 x 1 x 1
assert(pool_val.size(1)==2048 and pool_val.size(2) == 1 and pool_val.size(3)==1)
pool_val = pool_val.squeeze()
pool1 += [np.asarray(pool_val.cpu())]
for i in range(1):
img = model.test_forward(img, a2b=False)
img = model.test_forward(img, a2b=True)
imgs += [img]
# accumulate inception activation for FID2
assert (torch.max(img)<=1.0 and torch.min(img)>=-1.0)
pool_val = model_inception(img)[0] # bs x 2048 x 1 x 1
assert(pool_val.size(1)==2048 and pool_val.size(2) == 1 and pool_val.size(3)==1)
pool_val = pool_val.squeeze()
pool2 += [np.asarray(pool_val.cpu())]
# test lpips on 4 (1+1+2) generated images
for i in range(2):
img = model.test_forward(img, a2b=False)
img = model.test_forward(img, a2b=True)
imgs += [img]
for i in imgs:
imgss.append(transform(i[0].cpu()).cuda())
dist = compute_lpips(imgss, model_alexnet)
dists.append(dist)
# compute fid score
lpips_score = sum(dists) / len(dists)
print('LPIPS score for epoch %d is %f ' %(epoch, lpips_score))
pool1 = np.vstack(pool1)
mu, sigma = np.mean(pool1, axis=0), np.cov(pool1, rowvar=False)
FID1 = inception_utils.numpy_calculate_frechet_distance(mu, sigma, data_mu, data_sigma)
print('FID1 score for epoch %d is %f ' %(epoch, FID1))
pool2 = np.vstack(pool2)
mu, sigma = np.mean(pool2, axis=0), np.cov(pool2, rowvar=False)
FID2 = inception_utils.numpy_calculate_frechet_distance(mu, sigma, data_mu, data_sigma)
print('FID2 score for epoch %d is %f ' %(epoch, FID2))
if __name__ == '__main__':
main()
| en | 0.619141 | # parse options # test code only supports num_threads = 1 # test code only supports batch_size = 1 # disable data shuffling; comment this line if results on randomly chosen images are needed. # no flip; comment this line if results on flipped images are needed. # no visdom display; the test code saves the results to a HTML file. # data loader #if opt.a2b: #else: #dataset = dataset_single(opt, 'B') # Load inception net # set to eval mode ## Initializing the AlexNet model # Load pre-trained model # generate images & accumulate inception activation # accumulate inception activation for FID1 # bs x 2048 x 1 x 1 # accumulate inception activation for FID2 # bs x 2048 x 1 x 1 # test lpips on 4 (1+1+2) generated images # compute fid score | 2.158939 | 2 |
api/v1/app.py | Theemiss/Quick_Report | 4 | 6632195 | <gh_stars>1-10
from flask import Flask, make_response, jsonify
from datetime import timedelta
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
from flask_cors import CORS
from flask_migrate import Migrate
from flask_restful_swagger import swagger
from dotenv import dotenv_values
from flask_wkhtmltopdf import Wkhtmltopdf
"""
Global File Config And Route api instance
"""
config = dotenv_values('.env')
app = Flask(__name__)
wkhtmltopdf = Wkhtmltopdf(app)
api = swagger.docs(Api(app), apiVersion='2.8') # swagger Init
jwt = JWTManager(app)
WKHTMLTOPDF_BIN_PATH = "" #path to your wkhtmltopdf installation.
#app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:55664730@localhost:5432/quick_report'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'holbieQuickreport'
app.config['JWT_SECRET_KEY'] = 'Qucikreportadmin'
ACCESS_EXPIRES = timedelta(hours=24)
app.config["JWT_ACCESS_TOKEN_EXPIRES"] = ACCESS_EXPIRES #Token Time Auto Revoke
app.config['JWT_BLACKLIST_ENABLED'] = True # Enable Token Blocklist
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh'] #Type of Token
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class TokenBlocklist(db.Model):
"""
db table for the jwt token Blocklist
"""
id = db.Column(db.Integer, primary_key=True)
jti = db.Column(db.String(36), nullable=False)
created_at = db.Column(db.DateTime, nullable=False)
# Callback function to check if a JWT exists in the database blocklist
@jwt.token_in_blocklist_loader
def check_if_token_revoked(jwt_header,jwt_payload):
"""
    Check whether the token is in the database blocklist, i.e. has been revoked.
    jwt_header: header of the JWT being checked
    jwt_payload: payload of the JWT being checked
"""
jti = jwt_payload["jti"]
token = db.session.query(TokenBlocklist.id).filter_by(jti=jti).scalar()
return token is not None
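# Sketch of the revocation side of this check (assumption: the real logout logic
# lives in api.v1.views; this helper is illustrative and not wired to any route).
def revoke_token_example(jti):
    from datetime import datetime
    db.session.add(TokenBlocklist(jti=jti, created_at=datetime.utcnow()))
    db.session.commit()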
@app.before_first_request
def create_tables():
"""
    Runs before the first request is served; creates the database tables.
"""
db.create_all()
cors = CORS(app, resources={"/*": {"origins": "*"}})
@app.errorhandler(404)
def not_found(error):
""" 404 Error
---
responses:
404:
description: a resource was not found
"""
return make_response(jsonify({'error': "Not found"}), 404)
@app.errorhandler(500)
def special_exception_handler(error):
return make_response(jsonify({'error': "'Database connection failed'"}), 500)
from api.v1.views import company_route_all
# Public route (for now) for getting companies and creating a new one
api.add_resource(company_route_all, "/api/hidden")
from api.v1.views import Login, sign_up, Logout, TokenRefresh, ClientUserForm, CompanyAllClient, AdminUserID, CompanyAllRepport,CompanySingleRapport
# Authentication
api.add_resource(sign_up, "/api/signup") # signup
api.add_resource(Login, "/api/login") # Login
api.add_resource(Logout, "/api/logout") # Logout
api.add_resource(TokenRefresh, "/api/refresh") # Token refresher
# Get Current Logged Client or update user information
api.add_resource(ClientUserForm, "/api/client")
# Company Action Web
# Get all clients that belong to this company
api.add_resource(CompanyAllClient, "/api/company/clients")
# Get a client that belongs to this company by id
api.add_resource(AdminUserID, "/api/company/clients/<id>")
# Get all reports that belong to the current company
api.add_resource(CompanyAllRepport, '/api/company/reports')
api.add_resource(CompanySingleRapport,'/api/report/<id>')
from api.v1.views import NewCar, GetUserCar, NewInsurance, GetClientCarId
# Cars and insurance Endpoint
api.add_resource(NewInsurance, '/api/insurance/new') # Create new insurance
api.add_resource(NewCar, '/api/car/new') # Create new car
# All Car that belong to the current client
api.add_resource(GetUserCar, '/api/client/cars')
# Get a car by id that belongs to the current client
api.add_resource(GetClientCarId, '/api/client/cars/<id>')
from api.v1.views import ReportNew, Reportid, ReportPdf, Media, AllMedia,MatcherA,MatcherB
# Report creation and handling
# Create a new report or get all reports for the current client
api.add_resource(ReportNew, '/api/client/report')
# Get a report belonging to the current client
api.add_resource(Reportid, '/api/client/report/<id>')
# Create a pdf file from reports
api.add_resource(ReportPdf, '/api/client/report/pdf/<a>')
# Get File image
api.add_resource(Media, "/api/client/report/media/<path:path>")
api.add_resource(AllMedia, "/api/client/report/media") # List all file media
api.add_resource(MatcherA,'/api/reportA')
api.add_resource(MatcherB,'/api/reportB')
from api.v1.views import FetchCar,Dashboard,GetBReport,AllClientCar
api.add_resource(Dashboard,"/api/company/data")
api.add_resource(GetBReport,"/api/company/<id>")
api.add_resource(FetchCar,'/api/company/Car')
api.add_resource(AllClientCar,"/api/company/client/car") | from flask import Flask, make_response, jsonify
from datetime import timedelta
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
from flask_cors import CORS
from flask_migrate import Migrate
from flask_restful_swagger import swagger
from dotenv import dotenv_values
from flask_wkhtmltopdf import Wkhtmltopdf
"""
Global File Config And Route api instance
"""
config = dotenv_values('.env')
app = Flask(__name__)
wkhtmltopdf = Wkhtmltopdf(app)
api = swagger.docs(Api(app), apiVersion='2.8') # swagger Init
jwt = JWTManager(app)
WKHTMLTOPDF_BIN_PATH = "" #path to your wkhtmltopdf installation.
#app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:55664730@localhost:5432/quick_report'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'holbieQuickreport'
app.config['JWT_SECRET_KEY'] = 'Qucikreportadmin'
ACCESS_EXPIRES = timedelta(hours=24)
app.config["JWT_ACCESS_TOKEN_EXPIRES"] = ACCESS_EXPIRES #Token Time Auto Revoke
app.config['JWT_BLACKLIST_ENABLED'] = True # Enable Token Blocklist
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh'] #Type of Token
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class TokenBlocklist(db.Model):
"""
db table for the jwt token Blocklist
"""
id = db.Column(db.Integer, primary_key=True)
jti = db.Column(db.String(36), nullable=False)
created_at = db.Column(db.DateTime, nullable=False)
# Callback function to check if a JWT exists in the database blocklist
@jwt.token_in_blocklist_loader
def check_if_token_revoked(jwt_header,jwt_payload):
"""
    Check whether the token is in the database blocklist, i.e. has been revoked.
    jwt_header: header of the JWT being checked
    jwt_payload: payload of the JWT being checked
"""
jti = jwt_payload["jti"]
token = db.session.query(TokenBlocklist.id).filter_by(jti=jti).scalar()
return token is not None
@app.before_first_request
def create_tables():
"""
    Runs before the first request is served; creates the database tables.
"""
db.create_all()
cors = CORS(app, resources={"/*": {"origins": "*"}})
@app.errorhandler(404)
def not_found(error):
""" 404 Error
---
responses:
404:
description: a resource was not found
"""
return make_response(jsonify({'error': "Not found"}), 404)
@app.errorhandler(500)
def special_exception_handler(error):
return make_response(jsonify({'error': "'Database connection failed'"}), 500)
from api.v1.views import company_route_all
# Public route (for now) for getting companies and creating a new one
api.add_resource(company_route_all, "/api/hidden")
from api.v1.views import Login, sign_up, Logout, TokenRefresh, ClientUserForm, CompanyAllClient, AdminUserID, CompanyAllRepport,CompanySingleRapport
# Authentication
api.add_resource(sign_up, "/api/signup") # signup
api.add_resource(Login, "/api/login") # Login
api.add_resource(Logout, "/api/logout") # Logout
api.add_resource(TokenRefresh, "/api/refresh") # Token refresher
# Get Current Logged Client or update user information
api.add_resource(ClientUserForm, "/api/client")
# Company Action Web
# Get all clients that belong to this company
api.add_resource(CompanyAllClient, "/api/company/clients")
# Get a client that belongs to this company by id
api.add_resource(AdminUserID, "/api/company/clients/<id>")
# Get all reports that belong to the current company
api.add_resource(CompanyAllRepport, '/api/company/reports')
api.add_resource(CompanySingleRapport,'/api/report/<id>')
from api.v1.views import NewCar, GetUserCar, NewInsurance, GetClientCarId
# Cars and insurance Endpoint
api.add_resource(NewInsurance, '/api/insurance/new') # Create new insurance
api.add_resource(NewCar, '/api/car/new') # Create new car
# All Car that belong to the current client
api.add_resource(GetUserCar, '/api/client/cars')
# Get a car by id that belongs to the current client
api.add_resource(GetClientCarId, '/api/client/cars/<id>')
from api.v1.views import ReportNew, Reportid, ReportPdf, Media, AllMedia,MatcherA,MatcherB
# Report creation and handling
# Create a new report or get all reports for the current client
api.add_resource(ReportNew, '/api/client/report')
# Get a report belonging to the current client
api.add_resource(Reportid, '/api/client/report/<id>')
# Create a pdf file from reports
api.add_resource(ReportPdf, '/api/client/report/pdf/<a>')
# Get File image
api.add_resource(Media, "/api/client/report/media/<path:path>")
api.add_resource(AllMedia, "/api/client/report/media") # List all file media
api.add_resource(MatcherA,'/api/reportA')
api.add_resource(MatcherB,'/api/reportB')
from api.v1.views import FetchCar,Dashboard,GetBReport,AllClientCar
api.add_resource(Dashboard,"/api/company/data")
api.add_resource(GetBReport,"/api/company/<id>")
api.add_resource(FetchCar,'/api/company/Car')
api.add_resource(AllClientCar,"/api/company/client/car") | en | 0.733972 | Global File Config And Route api instance # swagger Init #path to your wkhtmltopdf installation. #app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True #Token Time Auto Revoke # Enable Token Blocklist #Type of Token db table for the jwt token Blocklist # Callback function to check if a JWT exists in the database blocklist Check if Token in Database Bloclist or Revoked jwt_header: Header of the Request to check jwt_payload: Body of the Request to Check First thing to excute in first request to the server 404 Error --- responses: 404: description: a resource was not found # Public Route for now getting comapny and creating new one # Authentication # signup # Login # Logout # Token refrecher # Get Current Logged Client or update user information # Company Action Web # Get All Client that Belong to this Comapny # Get Client that belong to this comapny by id # Get All Report that bellong to current Company # Cars and insurance Endpoint # Create new insurance # Create new car # All Car that belong to the current client # Car by id that belong current Client # Rapport Creation and Handling # Create new Repport or Get All current Client Repport # Get repport bellong to current client # Create a pdf file from reports # Get File image # List all file media | 2.015714 | 2 |
validations/argsparser.py | binary-hideout/p-dispersion-problem | 1 | 6632196 | '''
Validations for inputs from the command-line argument parsers.
'''
from argparse import ArgumentTypeError
from typing import Tuple
from .numerical import is_float, is_int
def is_valid_n(string: str) -> int:
'''
If the string parameter represents a valid value for 'n' returns its value,
otherwise raises ArgumentTypeError exception.
'''
if is_int(string):
number = int(string)
if number > 2:
return number
        msg = f'must be greater than 2: {number}'
else:
msg = f'invalid int value: {repr(string)}'
raise ArgumentTypeError(msg)
def is_percentage(string: str) -> float:
'''
If the string parameter represents a valid decimal percentage returns its value,
otherwise raises ArgumentTypeError exception.
'''
if is_float(string):
number = float(string)
if 0 <= number <= 1:
return number
msg = f'invalid decimal percentage: {number}'
else:
msg = f'invalid float value: {repr(string)}'
raise ArgumentTypeError(msg)
def is_positive_int(string: str) -> int:
'''
If the string parameter represents a positive integer (> 0) returns its value,
otherwise raises ArgumentTypeError exception.
'''
if is_int(string):
number = int(string)
if number > 0:
return number
msg = f'invalid positive int value (> 0): {number}'
else:
msg = f'invalid int value: {repr(string)}'
raise ArgumentTypeError(msg)
def is_time(string: str) -> float:
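    '''
    If the string parameter represents a non-negative time value returns its value,
    otherwise raises ArgumentTypeError exception.
    '''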
if is_float(string):
number = float(string)
if number >= 0:
return number
msg = f'invalid positive float value: {number}'
else:
msg = f'invalid float value: {repr(string)}'
raise ArgumentTypeError(msg)
def are_valid_dimensions(n: int, dimensions: Tuple[int, int]) -> bool:
'''
x*y must be greater or equal to n.
'''
x, y = dimensions
# if there is enough space in dimensions for all n points
if n <= x * y:
return True
return False
def is_valid_p(p: int) -> bool:
if p >= 2:
return True
return False
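# Illustrative wiring sketch (the argument names below are hypothetical, not taken
# from this project's real CLI): the validators above plug into argparse via `type=`.
def build_example_parser():
    from argparse import ArgumentParser
    parser = ArgumentParser(description='example parser using the validators above')
    parser.add_argument('-n', type=is_valid_n, help='number of points (> 2)')
    parser.add_argument('--alpha', type=is_percentage, help='decimal percentage in [0, 1]')
    parser.add_argument('--seed', type=is_positive_int, help='positive integer (> 0)')
    parser.add_argument('--time-limit', type=is_time, help='non-negative time value')
    return parser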
| '''
Validations for inputs from the command-line argument parsers.
'''
from argparse import ArgumentTypeError
from typing import Tuple
from .numerical import is_float, is_int
def is_valid_n(string: str) -> int:
'''
If the string parameter represents a valid value for 'n' returns its value,
otherwise raises ArgumentTypeError exception.
'''
if is_int(string):
number = int(string)
if number > 2:
return number
        msg = f'must be greater than 2: {number}'
else:
msg = f'invalid int value: {repr(string)}'
raise ArgumentTypeError(msg)
def is_percentage(string: str) -> float:
'''
If the string parameter represents a valid decimal percentage returns its value,
otherwise raises ArgumentTypeError exception.
'''
if is_float(string):
number = float(string)
if 0 <= number <= 1:
return number
msg = f'invalid decimal percentage: {number}'
else:
msg = f'invalid float value: {repr(string)}'
raise ArgumentTypeError(msg)
def is_positive_int(string: str) -> int:
'''
If the string parameter represents a positive integer (> 0) returns its value,
otherwise raises ArgumentTypeError exception.
'''
if is_int(string):
number = int(string)
if number > 0:
return number
msg = f'invalid positive int value (> 0): {number}'
else:
msg = f'invalid int value: {repr(string)}'
raise ArgumentTypeError(msg)
def is_time(string: str) -> float:
if is_float(string):
number = float(string)
if number >= 0:
return number
msg = f'invalid positive float value: {number}'
else:
msg = f'invalid float value: {repr(string)}'
raise ArgumentTypeError(msg)
def are_valid_dimensions(n: int, dimensions: Tuple[int, int]) -> bool:
'''
x*y must be greater or equal to n.
'''
x, y = dimensions
# if there is enough space in dimensions for all n points
if n <= x * y:
return True
return False
def is_valid_p(p: int) -> bool:
if p >= 2:
return True
return False
| en | 0.269757 | Validations for inputs from the command-line argument parsers. If the string parameter represents a valid value for 'n' returns its value, otherwise raises ArgumentTypeError exception. If the string parameter represents a valid decimal percentage returns its value, otherwise raises ArgumentTypeError exception. If the string parameter represents a positive integer (> 0) returns its value, otherwise raises ArgumentTypeError exception. x*y must be greater or equal to n. # if there is enough space in dimensions for all n points | 4.097856 | 4 |
client/over-the-rainbow/over-the-rainbow-client.py | GamesCreatorsClub/GCC-Rover | 3 | 6632197 | <filename>client/over-the-rainbow/over-the-rainbow-client.py
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import sys
import time
import pygame
import pyros
import pyros.gcc
import pyros.gccui
import pyros.agent
import pyros.pygamehelper
from PIL import Image
MAX_PING_TIMEOUT = 1
MAX_PICTURES = 400
pingLastTime = 0
screen_size = (1024, 800)
screen = pyros.gccui.initAll(screen_size, True)
cameraImage = Image.new("L", [80, 64])
rawImage = pygame.Surface((80, 64), 24)
rawImageBig = pygame.Surface((320, 256), 24)
arrow_image = pygame.image.load("arrow.png")
arrow_image = pygame.transform.scale(arrow_image, (50, 50))
completeRawImage = None
lastImage = None
processedImages = []
processedBigImages = []
forwardSpeed = 5
running = False
lights = False
resubscribe = time.time()
lastReceivedTime = time.time()
frameTime = ""
receivedFrameTime = ""
feedback = {
"angle": "",
"turnDistance": "",
"action": "",
"left": "",
"right": ""
}
imgNo = 0
ptr = -1
size = (320, 256)
record = False
sequence = False
continuous = False
localFPS = 0
lastProcessed = time.time()
renewContinuous = time.time()
distanceDeg1 = -1
distanceDeg2 = -1
distance1 = -1
distance2 = -1
avgDistance1 = -1
avgDistance2 = -1
gyroAngle = 0
def connected():
print("Starting agent... ", end="")
pyros.agent.init(pyros.client, "over-the-rainbow-agent.py")
print("Done.")
pyros.publish("camera/processed/fetch", "")
pyros.publish("camera/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
def toPILImage(imageBytes):
pilImage = Image.frombytes("RGB", size, imageBytes)
return pilImage
def toPyImage(pilImage):
pyImage = pygame.image.fromstring(pilImage.tobytes("raw"), size, "RGB")
return pyImage
def handleDistances(topic, message, groups):
global distanceDeg1, distanceDeg2, distance1, distance2, avgDistance1, avgDistance2
c = 0
split1 = message.split(",")
for s1 in split1:
split2 = s1.split(":")
if len(split2) == 2:
deg = int(split2[0])
split3 = split2[1].split(";")
if len(split3) == 2:
dis = float(split3[0])
avg = float(split3[1])
if c == 0:
distanceDeg1 = deg
distance1 = dis
avgDistance1 = avg
elif c == 1:
distanceDeg2 = deg
distance2 = dis
avgDistance2 = avg
c += 1
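# The distances payload parsed above is assumed to look like
# "45:23.5;24.1,135:101.0;99.7", i.e. comma-separated "<angle>:<distance>;<average>"
# entries, of which only the first two are kept.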
def handleGyro(topic, message, groups):
global gyroAngle
gyroAngle = float(message)
def handleImageDetails(topic, message, groups):
global rawImage, rawImageBig, completeRawImage
results = []
for line in message.split("\n"):
split = line.split(",")
if len(split) == 3:
result = int(split[0]), int(split[1]), split[2].lower(), 10
results.append(result)
elif len(split) >= 4:
result = int(split[0]), int(split[1]), split[2].lower(), int(split[3])
results.append(result)
print("Got details " + str(results) + " from: \n" + message)
for result in results:
if "red" == result[2]:
drawTarget(completeRawImage, result, pyros.gccui.RED, "red", result[3])
if "green" == result[2]:
drawTarget(completeRawImage, result, pyros.gccui.GREEN, "green", result[3])
if "yellow" == result[2]:
drawTarget(completeRawImage, result, pyros.gccui.YELLOW, "yellow", result[3])
if "blue" == result[2]:
drawTarget(completeRawImage, result, pyros.gccui.BLUE, "blue", result[3])
if completeRawImage is not None:
rawImage = pygame.transform.scale(completeRawImage, (80, 64))
rawImageBig = pygame.transform.scale(completeRawImage, (320, 256))
if record:
processedImages.append(rawImage)
processedBigImages.append(rawImageBig)
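# Each line of the imagedetails payload parsed above is assumed to be
# "<x>,<y>,<colour>" or "<x>,<y>,<colour>,<radius>"; the radius defaults to 10
# when it is not supplied.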
def handleCameraRaw(topic, message, groups):
global rawImage, rawImageBig, lastProcessed, localFPS, completeRawImage
handleCameraProcessed(topic, message, groups)
completeRawImage = lastImage
def handleCameraProcessed(topic, message, groups):
global rawImage, rawImageBig, lastProcessed, localFPS, lastImage
n = time.time()
delta = n - lastProcessed
lastProcessed = n
if delta < 5:
localFPS = "%.2f" % round(1 / delta, 2)
else:
localFPS = "-"
pilImage = toPILImage(message)
processedPilImage = processImage(pilImage)
image = toPyImage(processedPilImage)
lastImage = image
# if "red" in result:
# drawTarget(image, result["red"], pyros.gccui.RED, "red")
# if "green" in result:
# drawTarget(image, result["green"], pyros.gccui.GREEN, "green")
# if "yellow" in result:
# drawTarget(image, result["yellow"], pyros.gccui.YELLOW, "yellow")
# if "blue" in result:
# drawTarget(image, result["blue"], pyros.gccui.BLUE, "blue")
#
rawImage = pygame.transform.scale(lastImage, (80, 64))
rawImageBig = pygame.transform.scale(lastImage, (320, 256))
if record:
processedImages.append(rawImage)
processedBigImages.append(rawImageBig)
#
# if sequence and not continuous:
# pyros.publish("camera/raw/fetch", "")
def processImage(image):
red_pixels = []
green_pixels = []
blue_pixels = []
yellow_pixels = []
for y in range(0, 256):
for x in range(0, 320):
p = image.getpixel((x, y))
if isRed(p):
red_pixels.append((x, y))
if isGreen(p):
green_pixels.append((x, y))
if isBlue(p):
blue_pixels.append((x, y))
if isYellow(p):
yellow_pixels.append((x, y))
result = {}
if len(red_pixels) > 20:
centre = calculateCentre(red_pixels)
result["red"] = centre
drawSpot(image, centre[0], centre[1], (255, 64, 64), "red")
if len(green_pixels) > 20:
centre = calculateCentre(green_pixels)
result["green"] = centre
drawSpot(image, centre[0], centre[1], (64, 255, 64), "green")
if len(blue_pixels) > 20:
centre = calculateCentre(blue_pixels)
result["blue"] = centre
drawSpot(image, centre[0], centre[1], (64, 64, 255), "blue")
if len(yellow_pixels) > 20:
centre = calculateCentre(yellow_pixels)
result["yellow"] = centre
drawSpot(image, centre[0], centre[1], (255, 255, 64), "yellow")
processedImage = image
return processedImage
def isRed(p):
    return p[0] > 64 and distance(p[0], p[1]) > 1.2 and distance(p[0], p[2]) > 1.2 and 0.8 < distance(p[1], p[2]) < 1.2
def isGreen(p):
return p[1] > 64 and distance(p[1], p[0]) > 1.2 and distance(p[1], p[2]) > 1.2 and 0.8 < distance(p[0], p[2]) < 1.2
def isBlue(p):
return p[2] > 64 and distance(p[2], p[0]) > 1.2 and distance(p[2], p[1]) > 1.2 and 0.8 < distance(p[0], p[1]) < 1.2
def isYellow(p):
return p[0] > 64 and p[1] > 128 and 0.8 < distance(p[0], p[1]) < 1.2 and distance(p[0], p[2]) > 1.2 and distance(p[1], p[2]) > 1.2
def distance(x, y):
if y != 0:
return x / y
else:
return x / 256
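# Note on the colour tests above: distance() is a channel ratio, so e.g. isRed()
# reads as "red is at least 1.2x each of the other channels, while green and blue
# stay within 20% of each other"; a rough hue check that avoids converting to HSV.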
def calculateCentre(pixels):
cx = 0
cy = 0
for p in pixels:
cx = cx + p[0]
cy = cy + p[1]
cx = int(cx / len(pixels))
cy = int(cy / len(pixels))
return cx, cy
def drawTarget(image, centre, colour, text, radius=20):
if radius < 16:
radius = 16
x = centre[0] - radius / 4
if x < 0:
x = 0
y = centre[1] - radius / 4
if y < 0:
y = 0
w = radius - 1
h = radius - 1
tl = 13
tl1 = 12
# pygame.draw.rect(image, pyros.gccui.WHITE, pygame.Rect(x + 2, y + 2, w - 2, h - 2), 1)
pygame.draw.line(image, pyros.gccui.WHITE, (x, y), (x + tl, y))
pygame.draw.line(image, pyros.gccui.WHITE, (x, y), (x, y + tl))
pygame.draw.line(image, colour, (x + 1, y + 1), (x + 1, y + tl1))
pygame.draw.line(image, colour, (x + 1, y + 1), (x + tl1, y + 1))
pygame.draw.line(image, pyros.gccui.WHITE, (x, y + h), (x + tl, y + h))
pygame.draw.line(image, pyros.gccui.WHITE, (x, y + h), (x, y + h - tl))
pygame.draw.line(image, colour, (x + 1, y + h - 1), (x + 1, y + h - tl1))
pygame.draw.line(image, colour, (x + 1, y + h - 1), (x + tl1, y + h - 1))
pygame.draw.line(image, pyros.gccui.WHITE, (x + w, y), (x + w - tl, y))
pygame.draw.line(image, pyros.gccui.WHITE, (x + w, y), (x + w, y + tl))
pygame.draw.line(image, colour, (x + w - 1, y + 1), (x + w - 1, y + tl1))
pygame.draw.line(image, colour, (x + w - 1, y + 1), (x + w - tl1, y + 1))
pygame.draw.line(image, pyros.gccui.WHITE, (x + w, y + h), (x + w - tl, y + h))
pygame.draw.line(image, pyros.gccui.WHITE, (x + w, y + h), (x + w, y + h - tl))
pygame.draw.line(image, colour, (x + w - 1, y + h - 1), (x + w - 1, y + h - tl1))
pygame.draw.line(image, colour, (x + w - 1, y + h - 1), (x + w - tl1, y + h - 1))
tdist = 30
left = False
if x > tdist:
tx = x - tdist
lx = x - 2
left = True
elif x + w < image.get_width() - tdist:
tx = x + w + tdist
lx = x + w + 2
else:
tx = centre[0]
lx = centre[0]
if y > tdist:
ty = y - tdist
ly = y - 2
elif y + h < image.get_height() - tdist:
ty = y + h + tdist
ly = y + h + 2
else:
ty = centre[0]
ly = centre[0]
pyros.gccui.font.set_bold(True)
tw = pyros.gccui.font.size(text)[1]
pygame.draw.line(image, pyros.gccui.WHITE, (lx, ly), (tx, ty))
if left:
pygame.draw.line(image, pyros.gccui.WHITE, (tx - tw - 5, ty), (tx, ty))
image.blit(pyros.gccui.font.render(text, 1, colour), (tx - tw, ty - 25))
else:
pygame.draw.line(image, pyros.gccui.WHITE, (tx + tw + 5, ty), (tx, ty))
image.blit(pyros.gccui.font.render(text, 1, colour), (tx, ty - 25))
pyros.gccui.font.set_bold(False)
def drawSpot(image, cx, cy, colour, text):
if False:
for x in range(cx - 30, cx + 30):
if x >= 0 and x < 320:
if cy > 0:
image.putpixel((x, cy - 1), (255, 255, 255))
image.putpixel((x, cy), colour)
if cy < 256 - 1:
image.putpixel((x, cy + 1), (255, 255, 255))
for y in range(cy - 30, cy + 30):
if y >= 0 and y < 256:
if cx > 0:
image.putpixel((cx - 1, y), (255, 255, 255))
image.putpixel((cx, y), colour)
if cx < 320 - 1:
image.putpixel((cx + 1, y), (255, 255, 255))
def toggleStart():
global imgNo, processedImages, processedBigImages, running
pass
def stop():
global running
pyros.publish("camera/lift", "stop")
pyros.publish("move/stop", "")
pyros.publish("overtherainbow/command", "stop")
running = False
def clear():
global imgNo, processedImages, processedBigImages
imgNo = 0
del processedImages[:]
del processedBigImages[:]
def onKeyDown(key):
global lights, forwardSpeed, running, ptr, imgNo
global sequence, record, continuous
global processedImages, processedBigImages
global gyroAngle
if pyros.gcc.handleConnectKeyDown(key):
pass
elif key == pygame.K_f:
print(" fetching picture...")
pyros.publish("camera/raw/fetch", "")
elif key == pygame.K_s:
sequence = not sequence
elif key == pygame.K_r:
record = not record
elif key == pygame.K_c:
continuous = not continuous
if continuous:
print(" fetching continuous pictures...")
pyros.publish("camera/continuous", "")
else:
print(" stopping continuous pictures...")
pyros.publish("camera/continuous", "stop")
elif key == pygame.K_x:
clear()
elif key == pygame.K_RETURN:
toggleStart()
elif key == pygame.K_SPACE:
stop()
elif key == pygame.K_1:
pyros.publish("overtherainbow/command", "alg1")
elif key == pygame.K_2:
pyros.publish("overtherainbow/command", "alg2")
elif key == pygame.K_3:
pyros.publish("overtherainbow/command", "alg3")
elif key == pygame.K_4:
pyros.publish("overtherainbow/command", "alg4")
elif key == pygame.K_5:
pyros.publish("overtherainbow/command", "alg5")
elif key == pygame.K_6:
pyros.publish("overtherainbow/command", "alg6")
elif key == pygame.K_7:
pyros.publish("overtherainbow/command", "alg7")
elif key == pygame.K_8:
pyros.publish("overtherainbow/command", "alg8")
elif key == pygame.K_9:
pyros.publish("overtherainbow/command", "alg9")
elif key == pygame.K_0:
pyros.publish("overtherainbow/command", "alg10")
elif key == pygame.K_g:
pyros.publish("sensor/gyro/continuous", "calibrate,50")
gyroAngle = 0
elif key == pygame.K_u:
pyros.publish("camera/lift", "up")
elif key == pygame.K_d:
pyros.publish("camera/lift", "down")
elif key == pygame.K_SLASH:
pyros.publish("camera/lift", "reset")
elif key == pygame.K_LEFT:
if ptr == -1:
ptr = len(processedImages) - 2
else:
ptr -= 1
elif key == pygame.K_RIGHT:
ptr += 1
if ptr >= len(processedImages) - 1:
ptr = -1
def onKeyUp(key):
pyros.gcc.handleConnectKeyUp(key)
return
def swap(array):
v = array[0]
array[0] = array[1]
array[1] = v
pyros.subscribeBinary("camera/raw", handleCameraRaw)
pyros.subscribe("overtherainbow/distances", handleDistances)
pyros.subscribe("overtherainbow/gyro", handleGyro)
pyros.subscribe("overtherainbow/imagedetails", handleImageDetails)
pyros.subscribeBinary("overtherainbow/processed", handleCameraProcessed)
pyros.init("over-the-rainbow-#", unique=True, onConnected=connected, host=pyros.gcc.getHost(), port=pyros.gcc.getPort(), waitToConnect=False)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.VIDEORESIZE:
pyros.gccui.screenResized(event.size)
pyros.pygamehelper.processKeys(onKeyDown, onKeyUp)
pyros.loop(0.03)
pyros.agent.keepAgents()
pyros.gccui.background(True)
avgDistance1String = str(format(avgDistance1, '.2f'))
avgDistance2String = str(format(avgDistance2, '.2f'))
# noinspection PyRedeclaration
hpos = 40
hpos = pyros.gccui.drawKeyValue("Local FPS", str(localFPS), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Recording", str(record), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Sequence", str(sequence), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Continuous", str(continuous), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Selected", str(ptr) + " of " + str(len(processedImages)), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Running", str(running), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Turn dist", str(feedback["turnDistance"]), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Dist @ " + str(distanceDeg1), str(distance1) + ", avg: " + avgDistance1String, 8, hpos)
hpos = pyros.gccui.drawKeyValue("Dist @ " + str(distanceDeg2), str(distance2) + ", avg: " + avgDistance2String, 8, hpos)
hpos = pyros.gccui.drawKeyValue("Gyro", str(round(gyroAngle, 2)), 8, hpos)
# if len(historyDistances[0]):
# mn = min(historyDistances[0])
# mx = max(historyDistances[0])
# pyros.gccui.drawGraph((200, 50), (81, 65), historyDistances[0], mn, mx, 80, stick = 10)
#
# if len(historyDistances[1]):
# mn = min(historyDistances[1])
# mx = max(historyDistances[1])
# pyros.gccui.drawGraph((320, 50), (81, 65), historyDistances[1], mn, mx, 80, stick = 10)
loc = arrow_image.get_rect().center
rot_arrow_image = pygame.transform.rotate(arrow_image, -gyroAngle)
rot_arrow_image.get_rect().center = loc
screen.blit(rot_arrow_image, (530, 200))
# if len(rotSpeeds) > 0:
# gyroDegPersSecText = str(round(sum(rotSpeeds) / len(rotSpeeds), 2))
# pyros.gccui.drawBigText(gyroDegPersSecText, (440, 10))
#
# pyros.gccui.drawText("º/s", (445 + pyros.gccui.bigFont.size(gyroDegPersSecText)[0], 15))
#
# pyros.gccui.drawBigText(str(int(thisGyroAngle)), (440, 40))
pyros.gccui.drawSmallText("r-toggle record, f - fetch, s-sequence, LEFT/RIGHT-scroll, SPACE-stop, RETURN-start, l-lights, d-distances, x- clear, camera: u-up, d-down, /-reset", (8, screen.get_height() - pyros.gccui.smallFont.get_height()))
pyros.gccui.drawImage(rawImage, (500, 50), 10)
pyros.gccui.drawImage(rawImageBig, (688, 50), 10)
if ptr >= 0:
if ptr > len(processedImages) - 1:
ptr = len(processedImages) - 1
i = ptr
else:
i = len(processedImages) - 1
imgX = 1024 - 320 - 16
while i >= 0 and imgX >= 0:
pyros.gccui.drawImage(processedBigImages[i], (imgX, 420))
imgX -= 336
i -= 1
pyros.gcc.drawConnection()
pyros.gccui.frameEnd()
now = time.time()
| <filename>client/over-the-rainbow/over-the-rainbow-client.py
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import sys
import time
import pygame
import pyros
import pyros.gcc
import pyros.gccui
import pyros.agent
import pyros.pygamehelper
from PIL import Image
MAX_PING_TIMEOUT = 1
MAX_PICTURES = 400
pingLastTime = 0
screen_size = (1024, 800)
screen = pyros.gccui.initAll(screen_size, True)
cameraImage = Image.new("L", [80, 64])
rawImage = pygame.Surface((80, 64), 24)
rawImageBig = pygame.Surface((320, 256), 24)
arrow_image = pygame.image.load("arrow.png")
arrow_image = pygame.transform.scale(arrow_image, (50, 50))
completeRawImage = None
lastImage = None
processedImages = []
processedBigImages = []
forwardSpeed = 5
running = False
lights = False
resubscribe = time.time()
lastReceivedTime = time.time()
frameTime = ""
receivedFrameTime = ""
feedback = {
"angle": "",
"turnDistance": "",
"action": "",
"left": "",
"right": ""
}
imgNo = 0
ptr = -1
size = (320, 256)
record = False
sequence = False
continuous = False
localFPS = 0
lastProcessed = time.time()
renewContinuous = time.time()
distanceDeg1 = -1
distanceDeg2 = -1
distance1 = -1
distance2 = -1
avgDistance1 = -1
avgDistance2 = -1
gyroAngle = 0
def connected():
print("Starting agent... ", end="")
pyros.agent.init(pyros.client, "over-the-rainbow-agent.py")
print("Done.")
pyros.publish("camera/processed/fetch", "")
pyros.publish("camera/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
def toPILImage(imageBytes):
pilImage = Image.frombytes("RGB", size, imageBytes)
return pilImage
def toPyImage(pilImage):
pyImage = pygame.image.fromstring(pilImage.tobytes("raw"), size, "RGB")
return pyImage
def handleDistances(topic, message, groups):
global distanceDeg1, distanceDeg2, distance1, distance2, avgDistance1, avgDistance2
c = 0
split1 = message.split(",")
for s1 in split1:
split2 = s1.split(":")
if len(split2) == 2:
deg = int(split2[0])
split3 = split2[1].split(";")
if len(split3) == 2:
dis = float(split3[0])
avg = float(split3[1])
if c == 0:
distanceDeg1 = deg
distance1 = dis
avgDistance1 = avg
elif c == 1:
distanceDeg2 = deg
distance2 = dis
avgDistance2 = avg
c += 1
def handleGyro(topic, message, groups):
global gyroAngle
gyroAngle = float(message)
def handleImageDetails(topic, message, groups):
global rawImage, rawImageBig, completeRawImage
results = []
for line in message.split("\n"):
split = line.split(",")
if len(split) == 3:
result = int(split[0]), int(split[1]), split[2].lower(), 10
results.append(result)
elif len(split) >= 4:
result = int(split[0]), int(split[1]), split[2].lower(), int(split[3])
results.append(result)
print("Got details " + str(results) + " from: \n" + message)
for result in results:
if "red" == result[2]:
drawTarget(completeRawImage, result, pyros.gccui.RED, "red", result[3])
if "green" == result[2]:
drawTarget(completeRawImage, result, pyros.gccui.GREEN, "green", result[3])
if "yellow" == result[2]:
drawTarget(completeRawImage, result, pyros.gccui.YELLOW, "yellow", result[3])
if "blue" == result[2]:
drawTarget(completeRawImage, result, pyros.gccui.BLUE, "blue", result[3])
if completeRawImage is not None:
rawImage = pygame.transform.scale(completeRawImage, (80, 64))
rawImageBig = pygame.transform.scale(completeRawImage, (320, 256))
if record:
processedImages.append(rawImage)
processedBigImages.append(rawImageBig)
def handleCameraRaw(topic, message, groups):
global rawImage, rawImageBig, lastProcessed, localFPS, completeRawImage
handleCameraProcessed(topic, message, groups)
completeRawImage = lastImage
def handleCameraProcessed(topic, message, groups):
global rawImage, rawImageBig, lastProcessed, localFPS, lastImage
n = time.time()
delta = n - lastProcessed
lastProcessed = n
if delta < 5:
localFPS = "%.2f" % round(1 / delta, 2)
else:
localFPS = "-"
pilImage = toPILImage(message)
processedPilImage = processImage(pilImage)
image = toPyImage(processedPilImage)
lastImage = image
# if "red" in result:
# drawTarget(image, result["red"], pyros.gccui.RED, "red")
# if "green" in result:
# drawTarget(image, result["green"], pyros.gccui.GREEN, "green")
# if "yellow" in result:
# drawTarget(image, result["yellow"], pyros.gccui.YELLOW, "yellow")
# if "blue" in result:
# drawTarget(image, result["blue"], pyros.gccui.BLUE, "blue")
#
rawImage = pygame.transform.scale(lastImage, (80, 64))
rawImageBig = pygame.transform.scale(lastImage, (320, 256))
if record:
processedImages.append(rawImage)
processedBigImages.append(rawImageBig)
#
# if sequence and not continuous:
# pyros.publish("camera/raw/fetch", "")
def processImage(image):
red_pixels = []
green_pixels = []
blue_pixels = []
yellow_pixels = []
for y in range(0, 256):
for x in range(0, 320):
p = image.getpixel((x, y))
if isRed(p):
red_pixels.append((x, y))
if isGreen(p):
green_pixels.append((x, y))
if isBlue(p):
blue_pixels.append((x, y))
if isYellow(p):
yellow_pixels.append((x, y))
result = {}
if len(red_pixels) > 20:
centre = calculateCentre(red_pixels)
result["red"] = centre
drawSpot(image, centre[0], centre[1], (255, 64, 64), "red")
if len(green_pixels) > 20:
centre = calculateCentre(green_pixels)
result["green"] = centre
drawSpot(image, centre[0], centre[1], (64, 255, 64), "green")
if len(blue_pixels) > 20:
centre = calculateCentre(blue_pixels)
result["blue"] = centre
drawSpot(image, centre[0], centre[1], (64, 64, 255), "blue")
if len(yellow_pixels) > 20:
centre = calculateCentre(yellow_pixels)
result["yellow"] = centre
drawSpot(image, centre[0], centre[1], (255, 255, 64), "yellow")
processedImage = image
return processedImage
def isRed(p):
    return p[0] > 64 and distance(p[0], p[1]) > 1.2 and distance(p[0], p[2]) > 1.2 and 0.8 < distance(p[1], p[2]) < 1.2
def isGreen(p):
return p[1] > 64 and distance(p[1], p[0]) > 1.2 and distance(p[1], p[2]) > 1.2 and 0.8 < distance(p[0], p[2]) < 1.2
def isBlue(p):
return p[2] > 64 and distance(p[2], p[0]) > 1.2 and distance(p[2], p[1]) > 1.2 and 0.8 < distance(p[0], p[1]) < 1.2
def isYellow(p):
return p[0] > 64 and p[1] > 128 and 0.8 < distance(p[0], p[1]) < 1.2 and distance(p[0], p[2]) > 1.2 and distance(p[1], p[2]) > 1.2
def distance(x, y):
if y != 0:
return x / y
else:
return x / 256
def calculateCentre(pixels):
cx = 0
cy = 0
for p in pixels:
cx = cx + p[0]
cy = cy + p[1]
cx = int(cx / len(pixels))
cy = int(cy / len(pixels))
return cx, cy
def drawTarget(image, centre, colour, text, radius=20):
if radius < 16:
radius = 16
x = centre[0] - radius / 4
if x < 0:
x = 0
y = centre[1] - radius / 4
if y < 0:
y = 0
w = radius - 1
h = radius - 1
tl = 13
tl1 = 12
# pygame.draw.rect(image, pyros.gccui.WHITE, pygame.Rect(x + 2, y + 2, w - 2, h - 2), 1)
pygame.draw.line(image, pyros.gccui.WHITE, (x, y), (x + tl, y))
pygame.draw.line(image, pyros.gccui.WHITE, (x, y), (x, y + tl))
pygame.draw.line(image, colour, (x + 1, y + 1), (x + 1, y + tl1))
pygame.draw.line(image, colour, (x + 1, y + 1), (x + tl1, y + 1))
pygame.draw.line(image, pyros.gccui.WHITE, (x, y + h), (x + tl, y + h))
pygame.draw.line(image, pyros.gccui.WHITE, (x, y + h), (x, y + h - tl))
pygame.draw.line(image, colour, (x + 1, y + h - 1), (x + 1, y + h - tl1))
pygame.draw.line(image, colour, (x + 1, y + h - 1), (x + tl1, y + h - 1))
pygame.draw.line(image, pyros.gccui.WHITE, (x + w, y), (x + w - tl, y))
pygame.draw.line(image, pyros.gccui.WHITE, (x + w, y), (x + w, y + tl))
pygame.draw.line(image, colour, (x + w - 1, y + 1), (x + w - 1, y + tl1))
pygame.draw.line(image, colour, (x + w - 1, y + 1), (x + w - tl1, y + 1))
pygame.draw.line(image, pyros.gccui.WHITE, (x + w, y + h), (x + w - tl, y + h))
pygame.draw.line(image, pyros.gccui.WHITE, (x + w, y + h), (x + w, y + h - tl))
pygame.draw.line(image, colour, (x + w - 1, y + h - 1), (x + w - 1, y + h - tl1))
pygame.draw.line(image, colour, (x + w - 1, y + h - 1), (x + w - tl1, y + h - 1))
tdist = 30
left = False
if x > tdist:
tx = x - tdist
lx = x - 2
left = True
elif x + w < image.get_width() - tdist:
tx = x + w + tdist
lx = x + w + 2
else:
tx = centre[0]
lx = centre[0]
if y > tdist:
ty = y - tdist
ly = y - 2
elif y + h < image.get_height() - tdist:
ty = y + h + tdist
ly = y + h + 2
else:
ty = centre[0]
ly = centre[0]
pyros.gccui.font.set_bold(True)
tw = pyros.gccui.font.size(text)[1]
pygame.draw.line(image, pyros.gccui.WHITE, (lx, ly), (tx, ty))
if left:
pygame.draw.line(image, pyros.gccui.WHITE, (tx - tw - 5, ty), (tx, ty))
image.blit(pyros.gccui.font.render(text, 1, colour), (tx - tw, ty - 25))
else:
pygame.draw.line(image, pyros.gccui.WHITE, (tx + tw + 5, ty), (tx, ty))
image.blit(pyros.gccui.font.render(text, 1, colour), (tx, ty - 25))
pyros.gccui.font.set_bold(False)
def drawSpot(image, cx, cy, colour, text):
if False:
for x in range(cx - 30, cx + 30):
if x >= 0 and x < 320:
if cy > 0:
image.putpixel((x, cy - 1), (255, 255, 255))
image.putpixel((x, cy), colour)
if cy < 256 - 1:
image.putpixel((x, cy + 1), (255, 255, 255))
for y in range(cy - 30, cy + 30):
if y >= 0 and y < 256:
if cx > 0:
image.putpixel((cx - 1, y), (255, 255, 255))
image.putpixel((cx, y), colour)
if cx < 320 - 1:
image.putpixel((cx + 1, y), (255, 255, 255))
def toggleStart():
global imgNo, processedImages, processedBigImages, running
pass
def stop():
global running
pyros.publish("camera/lift", "stop")
pyros.publish("move/stop", "")
pyros.publish("overtherainbow/command", "stop")
running = False
def clear():
global imgNo, processedImages, processedBigImages
imgNo = 0
del processedImages[:]
del processedBigImages[:]
def onKeyDown(key):
global lights, forwardSpeed, running, ptr, imgNo
global sequence, record, continuous
global processedImages, processedBigImages
global gyroAngle
if pyros.gcc.handleConnectKeyDown(key):
pass
elif key == pygame.K_f:
print(" fetching picture...")
pyros.publish("camera/raw/fetch", "")
elif key == pygame.K_s:
sequence = not sequence
elif key == pygame.K_r:
record = not record
elif key == pygame.K_c:
continuous = not continuous
if continuous:
print(" fetching continuous pictures...")
pyros.publish("camera/continuous", "")
else:
print(" stopping continuous pictures...")
pyros.publish("camera/continuous", "stop")
elif key == pygame.K_x:
clear()
elif key == pygame.K_RETURN:
toggleStart()
elif key == pygame.K_SPACE:
stop()
elif key == pygame.K_1:
pyros.publish("overtherainbow/command", "alg1")
elif key == pygame.K_2:
pyros.publish("overtherainbow/command", "alg2")
elif key == pygame.K_3:
pyros.publish("overtherainbow/command", "alg3")
elif key == pygame.K_4:
pyros.publish("overtherainbow/command", "alg4")
elif key == pygame.K_5:
pyros.publish("overtherainbow/command", "alg5")
elif key == pygame.K_6:
pyros.publish("overtherainbow/command", "alg6")
elif key == pygame.K_7:
pyros.publish("overtherainbow/command", "alg7")
elif key == pygame.K_8:
pyros.publish("overtherainbow/command", "alg8")
elif key == pygame.K_9:
pyros.publish("overtherainbow/command", "alg9")
elif key == pygame.K_0:
pyros.publish("overtherainbow/command", "alg10")
elif key == pygame.K_g:
pyros.publish("sensor/gyro/continuous", "calibrate,50")
gyroAngle = 0
elif key == pygame.K_u:
pyros.publish("camera/lift", "up")
elif key == pygame.K_d:
pyros.publish("camera/lift", "down")
elif key == pygame.K_SLASH:
pyros.publish("camera/lift", "reset")
elif key == pygame.K_LEFT:
if ptr == -1:
ptr = len(processedImages) - 2
else:
ptr -= 1
elif key == pygame.K_RIGHT:
ptr += 1
if ptr >= len(processedImages) - 1:
ptr = -1
def onKeyUp(key):
pyros.gcc.handleConnectKeyUp(key)
return
def swap(array):
v = array[0]
array[0] = array[1]
array[1] = v
pyros.subscribeBinary("camera/raw", handleCameraRaw)
pyros.subscribe("overtherainbow/distances", handleDistances)
pyros.subscribe("overtherainbow/gyro", handleGyro)
pyros.subscribe("overtherainbow/imagedetails", handleImageDetails)
pyros.subscribeBinary("overtherainbow/processed", handleCameraProcessed)
pyros.init("over-the-rainbow-#", unique=True, onConnected=connected, host=pyros.gcc.getHost(), port=pyros.gcc.getPort(), waitToConnect=False)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.VIDEORESIZE:
pyros.gccui.screenResized(event.size)
pyros.pygamehelper.processKeys(onKeyDown, onKeyUp)
pyros.loop(0.03)
pyros.agent.keepAgents()
pyros.gccui.background(True)
avgDistance1String = str(format(avgDistance1, '.2f'))
avgDistance2String = str(format(avgDistance2, '.2f'))
# noinspection PyRedeclaration
hpos = 40
hpos = pyros.gccui.drawKeyValue("Local FPS", str(localFPS), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Recording", str(record), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Sequence", str(sequence), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Continuous", str(continuous), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Selected", str(ptr) + " of " + str(len(processedImages)), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Running", str(running), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Turn dist", str(feedback["turnDistance"]), 8, hpos)
hpos = pyros.gccui.drawKeyValue("Dist @ " + str(distanceDeg1), str(distance1) + ", avg: " + avgDistance1String, 8, hpos)
hpos = pyros.gccui.drawKeyValue("Dist @ " + str(distanceDeg2), str(distance2) + ", avg: " + avgDistance2String, 8, hpos)
hpos = pyros.gccui.drawKeyValue("Gyro", str(round(gyroAngle, 2)), 8, hpos)
# if len(historyDistances[0]):
# mn = min(historyDistances[0])
# mx = max(historyDistances[0])
# pyros.gccui.drawGraph((200, 50), (81, 65), historyDistances[0], mn, mx, 80, stick = 10)
#
# if len(historyDistances[1]):
# mn = min(historyDistances[1])
# mx = max(historyDistances[1])
# pyros.gccui.drawGraph((320, 50), (81, 65), historyDistances[1], mn, mx, 80, stick = 10)
loc = arrow_image.get_rect().center
rot_arrow_image = pygame.transform.rotate(arrow_image, -gyroAngle)
rot_arrow_image.get_rect().center = loc
screen.blit(rot_arrow_image, (530, 200))
# if len(rotSpeeds) > 0:
# gyroDegPersSecText = str(round(sum(rotSpeeds) / len(rotSpeeds), 2))
# pyros.gccui.drawBigText(gyroDegPersSecText, (440, 10))
#
# pyros.gccui.drawText("º/s", (445 + pyros.gccui.bigFont.size(gyroDegPersSecText)[0], 15))
#
# pyros.gccui.drawBigText(str(int(thisGyroAngle)), (440, 40))
pyros.gccui.drawSmallText("r-toggle record, f - fetch, s-sequence, LEFT/RIGHT-scroll, SPACE-stop, RETURN-start, l-lights, d-distances, x- clear, camera: u-up, d-down, /-reset", (8, screen.get_height() - pyros.gccui.smallFont.get_height()))
pyros.gccui.drawImage(rawImage, (500, 50), 10)
pyros.gccui.drawImage(rawImageBig, (688, 50), 10)
if ptr >= 0:
if ptr > len(processedImages) - 1:
ptr = len(processedImages) - 1
i = ptr
else:
i = len(processedImages) - 1
imgX = 1024 - 320 - 16
while i >= 0 and imgX >= 0:
pyros.gccui.drawImage(processedBigImages[i], (imgX, 420))
imgX -= 336
i -= 1
pyros.gcc.drawConnection()
pyros.gccui.frameEnd()
now = time.time()
| en | 0.484584 | # # Copyright 2016-2017 Games Creators Club # # MIT License # # if "red" in result: # drawTarget(image, result["red"], pyros.gccui.RED, "red") # if "green" in result: # drawTarget(image, result["green"], pyros.gccui.GREEN, "green") # if "yellow" in result: # drawTarget(image, result["yellow"], pyros.gccui.YELLOW, "yellow") # if "blue" in result: # drawTarget(image, result["blue"], pyros.gccui.BLUE, "blue") # # # if sequence and not continuous: # pyros.publish("camera/raw/fetch", "") # pygame.draw.rect(image, pyros.gccui.WHITE, pygame.Rect(x + 2, y + 2, w - 2, h - 2), 1) #", unique=True, onConnected=connected, host=pyros.gcc.getHost(), port=pyros.gcc.getPort(), waitToConnect=False) # noinspection PyRedeclaration # if len(historyDistances[0]): # mn = min(historyDistances[0]) # mx = max(historyDistances[0]) # pyros.gccui.drawGraph((200, 50), (81, 65), historyDistances[0], mn, mx, 80, stick = 10) # # if len(historyDistances[1]): # mn = min(historyDistances[1]) # mx = max(historyDistances[1]) # pyros.gccui.drawGraph((320, 50), (81, 65), historyDistances[1], mn, mx, 80, stick = 10) # if len(rotSpeeds) > 0: # gyroDegPersSecText = str(round(sum(rotSpeeds) / len(rotSpeeds), 2)) # pyros.gccui.drawBigText(gyroDegPersSecText, (440, 10)) # # pyros.gccui.drawText("º/s", (445 + pyros.gccui.bigFont.size(gyroDegPersSecText)[0], 15)) # # pyros.gccui.drawBigText(str(int(thisGyroAngle)), (440, 40)) | 2.421881 | 2 |
cpppm/conans.py | Garcia6l20/cpppm | 3 | 6632198 | import asyncio
from pathlib import Path
from conans import ConanFile as ConanConanFile
from conans import tools
from cpppm import Project, Library, root_project
import nest_asyncio
nest_asyncio.apply()
class PackageInfos:
def __init__(self, data):
self.include_dirs = set()
self.lib_dirs = set()
self.libs = set()
self.res_dirs = set()
self.bin_dirs = set()
self.build_dirs = set()
self.defines = dict()
self.name = data['name']
self.version = data['version']
self.root = Path(data['rootpath'])
self.description = data['description'] if 'description' in data else None
self.load(data)
self.header_only = self.name not in data['libs']
def load(self, comp):
if 'include_paths' in comp:
self.include_dirs.update(comp['include_paths'])
if 'lib_paths' in comp:
self.lib_dirs.update(comp['lib_paths'])
if 'libs' in comp:
self.libs.update([lib for lib in comp['libs'] if lib != self.name])
if 'system_libs' in comp:
self.libs.update(comp['system_libs'])
if 'res_paths' in comp:
self.res_dirs.update(comp['res_paths'])
if 'bin_paths' in comp:
self.bin_dirs.update(comp['bin_paths'])
if 'build_paths' in comp:
self.build_dirs.update(comp['build_paths'])
if 'defines' in comp:
for definition in comp['defines']:
tmp = definition.split('=')
self.defines[tmp[0]] = tmp[1] if len(tmp) > 1 else None
@property
def conan_ref(self):
return f'{self.name}/{self.version}@'
class PackageLibrary(Library):
def __init__(self, infos, **kwargs):
self._infos = PackageInfos(infos)
super().__init__(self._infos.name, self._infos.root, self._infos.root, **kwargs)
self.include_dirs = self._infos.include_dirs
self.link_libraries = self._infos.libs
self.compile_definitions = self._infos.defines
self.library_dirs = {self._infos.root / p for p in self._infos.lib_dirs}
def resolve_deps(self):
# for dep in self._infos.deps:
# self.link_libraries = Project._pkg_libraries[dep]
pass
@property
def conan_ref(self):
return self._infos.conan_ref
@property
def is_header_only(self):
return self._infos.header_only
async def build(self):
return False
class ConanFile(ConanConanFile):
project: Project = root_project()
name = project.package_name
# url = project.url
version = project.version
license = project.license
settings = {"os", "compiler", "build_type", "arch"}
options = project.options
requires = tuple(project.requires)
build_requires = tuple(project.build_requires)
default_options = project.default_options
no_copy_source = False
exports_sources = '*'
def deploy(self):
self.copy("*", dst="bin", src="bin")
def configure(self):
if self.settings.os == "Windows":
del self.options.fPIC
def build(self):
print(self.source_folder)
loop = asyncio.get_event_loop()
ConanFile.project.install_requirements()
loop.run_until_complete(ConanFile.project.build())
def package(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(ConanFile.project.install(self.package_folder))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.bindirs = ['bin']
def test(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(ConanFile.project.test())
| import asyncio
from pathlib import Path
from conans import ConanFile as ConanConanFile
from conans import tools
from cpppm import Project, Library, root_project
import nest_asyncio
nest_asyncio.apply()
class PackageInfos:
def __init__(self, data):
self.include_dirs = set()
self.lib_dirs = set()
self.libs = set()
self.res_dirs = set()
self.bin_dirs = set()
self.build_dirs = set()
self.defines = dict()
self.name = data['name']
self.version = data['version']
self.root = Path(data['rootpath'])
self.description = data['description'] if 'description' in data else None
self.load(data)
self.header_only = self.name not in data['libs']
def load(self, comp):
if 'include_paths' in comp:
self.include_dirs.update(comp['include_paths'])
if 'lib_paths' in comp:
self.lib_dirs.update(comp['lib_paths'])
if 'libs' in comp:
self.libs.update([lib for lib in comp['libs'] if lib != self.name])
if 'system_libs' in comp:
self.libs.update(comp['system_libs'])
if 'res_paths' in comp:
self.res_dirs.update(comp['res_paths'])
if 'bin_paths' in comp:
self.bin_dirs.update(comp['bin_paths'])
if 'build_paths' in comp:
self.build_dirs.update(comp['build_paths'])
if 'defines' in comp:
for definition in comp['defines']:
tmp = definition.split('=')
self.defines[tmp[0]] = tmp[1] if len(tmp) > 1 else None
@property
def conan_ref(self):
return f'{self.name}/{self.version}@'
class PackageLibrary(Library):
def __init__(self, infos, **kwargs):
self._infos = PackageInfos(infos)
super().__init__(self._infos.name, self._infos.root, self._infos.root, **kwargs)
self.include_dirs = self._infos.include_dirs
self.link_libraries = self._infos.libs
self.compile_definitions = self._infos.defines
self.library_dirs = {self._infos.root / p for p in self._infos.lib_dirs}
def resolve_deps(self):
# for dep in self._infos.deps:
# self.link_libraries = Project._pkg_libraries[dep]
pass
@property
def conan_ref(self):
return self._infos.conan_ref
@property
def is_header_only(self):
return self._infos.header_only
async def build(self):
return False
class ConanFile(ConanConanFile):
project: Project = root_project()
name = project.package_name
# url = project.url
version = project.version
license = project.license
settings = {"os", "compiler", "build_type", "arch"}
options = project.options
requires = tuple(project.requires)
build_requires = tuple(project.build_requires)
default_options = project.default_options
no_copy_source = False
exports_sources = '*'
def deploy(self):
self.copy("*", dst="bin", src="bin")
def configure(self):
if self.settings.os == "Windows":
del self.options.fPIC
def build(self):
print(self.source_folder)
loop = asyncio.get_event_loop()
ConanFile.project.install_requirements()
loop.run_until_complete(ConanFile.project.build())
def package(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(ConanFile.project.install(self.package_folder))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.bindirs = ['bin']
def test(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(ConanFile.project.test())
| en | 0.547831 | # for dep in self._infos.deps: # self.link_libraries = Project._pkg_libraries[dep] # url = project.url | 2.184102 | 2 |
engfrosh_site/frosh/migrations/0013_alter_userdetails_invite_email_sent.py | engfrosh/engfrosh | 1 | 6632199 | # Generated by Django 3.2.5 on 2021-09-03 01:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('frosh', '0012_userdetails_invite_email_sent'),
]
operations = [
migrations.AlterField(
model_name='userdetails',
name='invite_email_sent',
field=models.BooleanField(default=False, verbose_name='Invite Email Sent'),
),
]
| # Generated by Django 3.2.5 on 2021-09-03 01:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('frosh', '0012_userdetails_invite_email_sent'),
]
operations = [
migrations.AlterField(
model_name='userdetails',
name='invite_email_sent',
field=models.BooleanField(default=False, verbose_name='Invite Email Sent'),
),
]
| en | 0.864762 | # Generated by Django 3.2.5 on 2021-09-03 01:17 | 1.429134 | 1 |
cardclass.py | sackidude/2048-solitaire | 1 | 6632200 | <reponame>sackidude/2048-solitaire<filename>cardclass.py
"""This is all the classes. card, piles and hand"""
from random import randrange
import funcs
INVALID_INPUT = 2
class NonRenderGame:
"""This is the whole game class without any rendering."""
def __init__(self, max_cards):
self.hand = NonRenderHand()
self.piles = NonRenderPiles()
self.score = 0
self.multiplier = 1
self.trashes = 2
self.mix = True
self.max_cards = max_cards
def init_hand(self):
"""Initiate the hand with two cards"""
for i in range(2):
self.hand.add_card(NonRenderCard(randrange(6) + 1))
def place_card(self, _place):
"""
This function takes a number between 0-3 and places a card there if it can.
It return true if it placed a card.
This is only used in the msachine learning part of the program.
"""
if len(self.piles.piles[_place]) < self.max_cards:
self.piles.add_card(self.hand.cards[0], _place)
self.hand.cards.pop(0)
self.hand.add_card(NonRenderCard((randrange(6) + 1)))
answer = self.piles.update(_place)
self.score += answer[0]
if answer[1]:
self.piles.piles[_place] = []
self.multiplier += 1
self.mix = True
self.trashes = 2
return True
elif self.piles.piles[_place][self.max_cards-1].value == self.hand.cards[0].value:
self.piles.piles[_place][self.max_cards-1].value += 1
self.piles.update(_place)
self.hand.cards.pop(0)
self.hand.add_card(NonRenderCard((randrange(6) + 1)))
return True
else:
return False
def check_game_over(self):
"""Function used for checking if game is over."""
total_length = 0
for current_pile in self.piles.piles:
total_length += len(current_pile)
return bool(total_length >= 24)
def trash(self):
"""Throw away a card from the hand."""
self.trashes -= 1
self.hand.trash()
def mix_hand(self):
"""Mixes the cards in the hand."""
self.mix = False
self.hand.mix()
class MLGame(NonRenderGame):
"""
This is the normal game with machine learning related functions.
"""
def get_network_inputs(self):
"""
This function is for the machine learning.
It gives back an array of length 24 with the values(1-x not to 2048)of all of the cards.
"""
return_array = []
# Add the pile values to the array
for i in range(4):
for j in range(6):
try:
self.piles.piles[i][j]
except IndexError:
return_array.append(0)
else:
return_array.append(self.piles.piles[i][j].value/11)
# Add the hand to the array
for card in self.hand.cards:
return_array.append(card.value)
# Add the amount of trashes and if there is a mix
return_array.append(self.trashes)
if self.mix:
return_array.append(1)
else:
return_array.append(0)
return return_array
def get_action_random(self):
pass
def get_action_highest(self):
pass
class GameWithRender(NonRenderGame):
"""Game With render function."""
def __init__(self, _max_card, _height, _width):
super().__init__(_max_card)
self.hand = Hand()
self.piles = Piles()
self.height = _height
self.width = _width
def init_hand(self):
"""Initiate the hand with two cards"""
for i in range(2):
self.hand.add_card(Card((randrange(6) + 1), 70, 100))
def place_card(self, _place):
"""This function takes a number between 0-3 and places a card there if it can."""
if len(self.piles.piles[_place]) < self.max_cards:
self.piles.add_card(self.hand.cards[0], _place)
self.hand.cards.pop(0)
self.hand.add_card(Card((randrange(6) + 1), 70, 100))
answer = self.piles.update(_place)
self.score += answer[0]
if answer[1]:
self.piles.piles[_place] = []
self.multiplier += 1
self.mix = True
self.trashes = 2
elif self.piles.piles[_place][self.max_cards-1].value == self.hand.cards[0].value:
self.piles.piles[_place][self.max_cards-1].value += 1
self.piles.update(_place)
self.hand.cards.pop(0)
self.hand.add_card(Card((randrange(6) + 1), 70, 100))
def render(self, font, screen, pygame):
"""Renders hand and piles."""
self.piles.render(font, screen, pygame, self.width, self.height)
self.hand.render(font, screen, pygame, self.height)
def trash(self):
"""Throw away a card from the hand."""
if self.trashes == 0:
return INVALID_INPUT
self.trashes -= 1
self.hand.trash()
return None
class NonRenderCard():
"""Just information about card without any rendering capabilities"""
def __init__(self, _value):
self.value = _value
def get_value(self):
"""This gets the not power two value of the card"""
return 2 ** self.value
class Card(NonRenderCard):
"""This is the class for a single card with rendering capabilities."""
def __init__(self, _value, _width=70, _height=100, _canMove=False):
super().__init__(_value)
self.width = _width
self.height = _height
def get_color(self):
"""Gets the color of the card. Will maybe be based on a color theme in the future"""
translated = funcs.translate(self.value, 1, 12, 0, 255)
return (
translated,
255 - translated,
255 - translated
)
def render(self, _font, _screen, x_cord, y_cord, border, _pygame):
"""Render the card"""
# Render the outline. Large them the card by 5 pixels.
if border > 0:
_pygame.draw.rect(_screen, (255, 255, 255), _pygame.Rect(
(x_cord - border, y_cord - border),
(self.width + 2*border, self.height + 2*border)
))
# Render the actual card
_pygame.draw.rect(
_screen,
self.get_color(),
_pygame.Rect(
(x_cord, y_cord),
(self.width, self.height)
)
)
current_text = _font.render(
str(self.get_value()),
True,
(0, 0, 0)
)
_screen.blit(current_text, (x_cord, y_cord)) # render the text
class NonRenderPiles():
"""Nonrender pile class. Parent to piles"""
def __init__(self):
self.piles = [[], [], [], []]
def update(self, update_pile_index):
"""This combines all the cards that can be combined in the specific pile"""
answer = [0, False]
for i in range(len(self.piles[update_pile_index]), 0, -1):
current_card_i = i - 1
if current_card_i > 0:
current_pile = self.piles[update_pile_index]
if current_pile[current_card_i].value == current_pile[current_card_i - 1].value:
del self.piles[update_pile_index][-1]
current_pile[len(
self.piles[update_pile_index]) - 1].value += 1
new_value = current_pile[len(
self.piles[update_pile_index]) - 1].get_value()
answer[0] += new_value
if new_value == 2048:
answer[1] = True
else:
i = 1
break
return answer
def add_card(self, _card, _pile):
"""Adds a card object to one of the piles"""
self.piles[_pile].append(_card)
class Piles(NonRenderPiles):
"""The class for the four piles in this game"""
def render(self, _font, _screen, _pygame, _width, _height):
"""Render all the cards in the piles"""
for i, current_pile in enumerate(self.piles):
x_cord = funcs.translate(i, 0, 4, 30, _width)
for j, current_card in enumerate(current_pile):
y_cord = funcs.translate(j, 0, 8, 30, _height - 100)
current_card.render(_font, _screen, x_cord, y_cord, 2, _pygame)
class NonRenderHand():
"""NonRender hand parent to hand. Which has rendering functoin"""
def __init__(self):
self.cards = []
def add_card(self, _card):
"""Adds a card to the end of the hand"""
self.cards.append(_card)
def trash(self):
"""Removes the card at the front and adds one to the end"""
self.cards.pop(0)
self.add_card(NonRenderCard(randrange(6)+1))
def mix(self):
"""Switches the values of all the cards in the hand"""
for i in range(0, len(self.cards)):
self.cards[i].value = randrange(6)+1
class Hand(NonRenderHand):
"""This is the class for the two cards in the corner"""
def render(self, _font, screen, pygame, height):
"""Renders the hand"""
idx = 0
for val in reversed(self.cards):
val.render(_font, screen, 50 + 35 * idx, height - 130, 2, pygame)
idx += 1
def trash(self):
"""Removes the card at the front and adds one to the end"""
self.cards.pop(0)
self.add_card(Card(randrange(6)+1, 70, 100))
| """This is all the classes. card, piles and hand"""
from random import randrange
import funcs
INVALID_INPUT = 2
class NonRenderGame:
"""This is the whole game class without any rendering."""
def __init__(self, max_cards):
self.hand = NonRenderHand()
self.piles = NonRenderPiles()
self.score = 0
self.multiplier = 1
self.trashes = 2
self.mix = True
self.max_cards = max_cards
def init_hand(self):
"""Initiate the hand with two cards"""
for i in range(2):
self.hand.add_card(NonRenderCard(randrange(6) + 1))
def place_card(self, _place):
"""
This function takes a number between 0-3 and places a card there if it can.
It return true if it placed a card.
This is only used in the msachine learning part of the program.
"""
if len(self.piles.piles[_place]) < self.max_cards:
self.piles.add_card(self.hand.cards[0], _place)
self.hand.cards.pop(0)
self.hand.add_card(NonRenderCard((randrange(6) + 1)))
answer = self.piles.update(_place)
self.score += answer[0]
if answer[1]:
self.piles.piles[_place] = []
self.multiplier += 1
self.mix = True
self.trashes = 2
return True
elif self.piles.piles[_place][self.max_cards-1].value == self.hand.cards[0].value:
self.piles.piles[_place][self.max_cards-1].value += 1
self.piles.update(_place)
self.hand.cards.pop(0)
self.hand.add_card(NonRenderCard((randrange(6) + 1)))
return True
else:
return False
def check_game_over(self):
"""Function used for checking if game is over."""
total_length = 0
for current_pile in self.piles.piles:
total_length += len(current_pile)
return bool(total_length >= 24)
def trash(self):
"""Throw away a card from the hand."""
self.trashes -= 1
self.hand.trash()
def mix_hand(self):
"""Mixes the cards in the hand."""
self.mix = False
self.hand.mix()
class MLGame(NonRenderGame):
"""
This is the normal game with machine learning related functions.
"""
def get_network_inputs(self):
"""
This function is for the machine learning.
It gives back an array of length 24 with the values(1-x not to 2048)of all of the cards.
"""
return_array = []
# Add the pile values to the array
for i in range(4):
for j in range(6):
try:
self.piles.piles[i][j]
except IndexError:
return_array.append(0)
else:
return_array.append(self.piles.piles[i][j].value/11)
# Add the hand to the array
for card in self.hand.cards:
return_array.append(card.value)
# Add the amount of trashes and if there is a mix
return_array.append(self.trashes)
if self.mix:
return_array.append(1)
else:
return_array.append(0)
return return_array
def get_action_random(self):
pass
def get_action_highest(self):
pass
class GameWithRender(NonRenderGame):
"""Game With render function."""
def __init__(self, _max_card, _height, _width):
super().__init__(_max_card)
self.hand = Hand()
self.piles = Piles()
self.height = _height
self.width = _width
def init_hand(self):
"""Initiate the hand with two cards"""
for i in range(2):
self.hand.add_card(Card((randrange(6) + 1), 70, 100))
def place_card(self, _place):
"""This function takes a number between 0-3 and places a card there if it can."""
if len(self.piles.piles[_place]) < self.max_cards:
self.piles.add_card(self.hand.cards[0], _place)
self.hand.cards.pop(0)
self.hand.add_card(Card((randrange(6) + 1), 70, 100))
answer = self.piles.update(_place)
self.score += answer[0]
if answer[1]:
self.piles.piles[_place] = []
self.multiplier += 1
self.mix = True
self.trashes = 2
elif self.piles.piles[_place][self.max_cards-1].value == self.hand.cards[0].value:
self.piles.piles[_place][self.max_cards-1].value += 1
self.piles.update(_place)
self.hand.cards.pop(0)
self.hand.add_card(Card((randrange(6) + 1), 70, 100))
def render(self, font, screen, pygame):
"""Renders hand and piles."""
self.piles.render(font, screen, pygame, self.width, self.height)
self.hand.render(font, screen, pygame, self.height)
def trash(self):
"""Throw away a card from the hand."""
if self.trashes == 0:
return INVALID_INPUT
self.trashes -= 1
self.hand.trash()
return None
class NonRenderCard():
"""Just information about card without any rendering capabilities"""
def __init__(self, _value):
self.value = _value
def get_value(self):
"""This gets the not power two value of the card"""
return 2 ** self.value
class Card(NonRenderCard):
"""This is the class for a single card with rendering capabilities."""
def __init__(self, _value, _width=70, _height=100, _canMove=False):
super().__init__(_value)
self.width = _width
self.height = _height
def get_color(self):
"""Gets the color of the card. Will maybe be based on a color theme in the future"""
translated = funcs.translate(self.value, 1, 12, 0, 255)
return (
translated,
255 - translated,
255 - translated
)
def render(self, _font, _screen, x_cord, y_cord, border, _pygame):
"""Render the card"""
# Render the outline. Large them the card by 5 pixels.
if border > 0:
_pygame.draw.rect(_screen, (255, 255, 255), _pygame.Rect(
(x_cord - border, y_cord - border),
(self.width + 2*border, self.height + 2*border)
))
# Render the actual card
_pygame.draw.rect(
_screen,
self.get_color(),
_pygame.Rect(
(x_cord, y_cord),
(self.width, self.height)
)
)
current_text = _font.render(
str(self.get_value()),
True,
(0, 0, 0)
)
_screen.blit(current_text, (x_cord, y_cord)) # render the text
class NonRenderPiles():
"""Nonrender pile class. Parent to piles"""
def __init__(self):
self.piles = [[], [], [], []]
def update(self, update_pile_index):
"""This combines all the cards that can be combined in the specific pile"""
answer = [0, False]
for i in range(len(self.piles[update_pile_index]), 0, -1):
current_card_i = i - 1
if current_card_i > 0:
current_pile = self.piles[update_pile_index]
if current_pile[current_card_i].value == current_pile[current_card_i - 1].value:
del self.piles[update_pile_index][-1]
current_pile[len(
self.piles[update_pile_index]) - 1].value += 1
new_value = current_pile[len(
self.piles[update_pile_index]) - 1].get_value()
answer[0] += new_value
if new_value == 2048:
answer[1] = True
else:
i = 1
break
return answer
def add_card(self, _card, _pile):
"""Adds a card object to one of the piles"""
self.piles[_pile].append(_card)
class Piles(NonRenderPiles):
"""The class for the four piles in this game"""
def render(self, _font, _screen, _pygame, _width, _height):
"""Render all the cards in the piles"""
for i, current_pile in enumerate(self.piles):
x_cord = funcs.translate(i, 0, 4, 30, _width)
for j, current_card in enumerate(current_pile):
y_cord = funcs.translate(j, 0, 8, 30, _height - 100)
current_card.render(_font, _screen, x_cord, y_cord, 2, _pygame)
class NonRenderHand():
"""NonRender hand parent to hand. Which has rendering functoin"""
def __init__(self):
self.cards = []
def add_card(self, _card):
"""Adds a card to the end of the hand"""
self.cards.append(_card)
def trash(self):
"""Removes the card at the front and adds one to the end"""
self.cards.pop(0)
self.add_card(NonRenderCard(randrange(6)+1))
def mix(self):
"""Switches the values of all the cards in the hand"""
for i in range(0, len(self.cards)):
self.cards[i].value = randrange(6)+1
class Hand(NonRenderHand):
"""This is the class for the two cards in the corner"""
def render(self, _font, screen, pygame, height):
"""Renders the hand"""
idx = 0
for val in reversed(self.cards):
val.render(_font, screen, 50 + 35 * idx, height - 130, 2, pygame)
idx += 1
def trash(self):
"""Removes the card at the front and adds one to the end"""
self.cards.pop(0)
self.add_card(Card(randrange(6)+1, 70, 100)) | en | 0.884638 | This is all the classes. card, piles and hand This is the whole game class without any rendering. Initiate the hand with two cards This function takes a number between 0-3 and places a card there if it can. It return true if it placed a card. This is only used in the msachine learning part of the program. Function used for checking if game is over. Throw away a card from the hand. Mixes the cards in the hand. This is the normal game with machine learning related functions. This function is for the machine learning. It gives back an array of length 24 with the values(1-x not to 2048)of all of the cards. # Add the pile values to the array # Add the hand to the array # Add the amount of trashes and if there is a mix Game With render function. Initiate the hand with two cards This function takes a number between 0-3 and places a card there if it can. Renders hand and piles. Throw away a card from the hand. Just information about card without any rendering capabilities This gets the not power two value of the card This is the class for a single card with rendering capabilities. Gets the color of the card. Will maybe be based on a color theme in the future Render the card # Render the outline. Large them the card by 5 pixels. # Render the actual card # render the text Nonrender pile class. Parent to piles This combines all the cards that can be combined in the specific pile Adds a card object to one of the piles The class for the four piles in this game Render all the cards in the piles NonRender hand parent to hand. Which has rendering functoin Adds a card to the end of the hand Removes the card at the front and adds one to the end Switches the values of all the cards in the hand This is the class for the two cards in the corner Renders the hand Removes the card at the front and adds one to the end | 3.882328 | 4 |
LeetCode/0076_minimum_window_substring.py | KanegaeGabriel/ye-olde-interview-prep-grind | 1 | 6632201 | <gh_stars>1-10
def countChars(s):
count = [0 for _ in range(256)]
for c in s:
count[ord(c)] += 1
return count
def minWindow(s, t):
tCount = countChars(t)
i, j = 0, 0
sCount = [0 for _ in range(256)]
best = [None, i, j]
while i <= j and j < len(s):
sCount[ord(s[j])] += 1
while i <= j and all(si>=ti for si, ti in zip(sCount, tCount)):
if not best[0] or j-i < best[0]:
best = [j-i, i, j]
sCount[ord(s[i])] -= 1
i += 1
j += 1
if best[0] == None: return ""
return s[best[1]:best[2]+1]
print(minWindow("ADOBECODEBANC", "ABC")) # "BANC"
print(minWindow("a", "aa")) # "" | def countChars(s):
count = [0 for _ in range(256)]
for c in s:
count[ord(c)] += 1
return count
def minWindow(s, t):
tCount = countChars(t)
i, j = 0, 0
sCount = [0 for _ in range(256)]
best = [None, i, j]
while i <= j and j < len(s):
sCount[ord(s[j])] += 1
while i <= j and all(si>=ti for si, ti in zip(sCount, tCount)):
if not best[0] or j-i < best[0]:
best = [j-i, i, j]
sCount[ord(s[i])] -= 1
i += 1
j += 1
if best[0] == None: return ""
return s[best[1]:best[2]+1]
print(minWindow("ADOBECODEBANC", "ABC")) # "BANC"
print(minWindow("a", "aa")) # "" | en | 0.486913 | # "BANC" # "" | 3.240774 | 3 |
database_files/hr_naselja_gradovi/izvezi_hr_naselja_u_csv.py | mvrban123/PHP-Laravel-Project---Website | 0 | 6632202 | <filename>database_files/hr_naselja_gradovi/izvezi_hr_naselja_u_csv.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# **Dataset from: https://www.posta.hr/preuzimanje-podataka-o-postanskim-uredima-6543/6543**
import pandas as pd
target_cols = [
"BrojPu",
"Naselje"
]
df_mjesta = pd.read_excel("./mjestaRh.xlsx", usecols=target_cols)
# df_mjesta
df_mjesta.to_csv("hr_naselja_air_2020.csv", index=False)
| <filename>database_files/hr_naselja_gradovi/izvezi_hr_naselja_u_csv.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# **Dataset from: https://www.posta.hr/preuzimanje-podataka-o-postanskim-uredima-6543/6543**
import pandas as pd
target_cols = [
"BrojPu",
"Naselje"
]
df_mjesta = pd.read_excel("./mjestaRh.xlsx", usecols=target_cols)
# df_mjesta
df_mjesta.to_csv("hr_naselja_air_2020.csv", index=False)
| en | 0.322035 | #!/usr/bin/env python # coding: utf-8 # **Dataset from: https://www.posta.hr/preuzimanje-podataka-o-postanskim-uredima-6543/6543** # df_mjesta | 2.417385 | 2 |
test/func_test/test_ctrt/test_pay_chan_ctrt.py | josephzxy/py-vsys | 1 | 6632203 | <filename>test/func_test/test_ctrt/test_pay_chan_ctrt.py
import asyncio
import time
from typing import Tuple
import pytest
import py_vsys as pv
from test.func_test import conftest as cft
class TestPayChanCtrt:
"""
TestPayChanCtrt is the collection of functional tests of Payment Channel Contract.
"""
TOK_MAX = 100
TOK_UNIT = 1
INIT_LOAD = TOK_MAX // 2
@pytest.fixture
async def new_tok_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_tok_ctrt is the fixture that registers a new token contract without split instance.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The token contract instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, self.TOK_MAX, self.TOK_UNIT)
await cft.wait_for_block()
await tc.issue(acnt0, self.TOK_MAX)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_ctrt(
self, acnt0: pv.Account, new_tok_ctrt: pv.TokCtrtWithoutSplit
) -> pv.PayChanCtrt:
"""
new_ctrt is the fixture that registers a new Payment Channel contract.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
Returns:
pv.PayChanCtrt: The PayChanCtrt instance.
"""
tc = new_tok_ctrt
api = acnt0.api
pc = await pv.PayChanCtrt.register(acnt0, tc.tok_id.data)
await cft.wait_for_block()
resp = await tc.deposit(acnt0, pc.ctrt_id.data, self.TOK_MAX)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
return pc
@pytest.fixture
async def new_ctrt_with_chan(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> Tuple[pv.PayChanCtrt, str]:
"""
new_ctrt_with_chan is the fixture that registers a new Payment Channel
contract and creates a channel.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
Tuple[pv.PayChanCtrt, str]: The PayChanCtrt instance & channel id
"""
pc = new_ctrt
load_amount = self.INIT_LOAD
later = int(time.time()) + 60 * 10
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
chan_id = resp["id"]
return pc, chan_id
async def test_register(
self,
acnt0: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.PayChanCtrt,
) -> pv.PayChanCtrt:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
pv.PayChanCtrt: The PayChanCtrt instance.
"""
tc = new_tok_ctrt
pc = new_ctrt
assert (await pc.maker) == acnt0.addr
assert (await pc.tok_id) == tc.tok_id
ctrt_bal = await pc.get_ctrt_bal(acnt0.addr.data)
assert ctrt_bal.amount == self.TOK_MAX
return pc
async def test_create_and_load(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> str:
"""
test_create_and_load tests the method create_and_load.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
str: The channel ID.
"""
pc = new_ctrt
api = acnt0.api
load_amount = self.INIT_LOAD
later = int(time.time()) + 60 * 10
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_id = resp["id"]
chan_creator = await pc.get_chan_creator(chan_id)
assert chan_creator == acnt0.addr
chan_creator_pub_key = await pc.get_chan_creator_pub_key(chan_id)
assert chan_creator_pub_key == acnt0.key_pair.pub
chan_accum_load = await pc.get_chan_accum_load(chan_id)
assert chan_accum_load.amount == load_amount
chan_accum_pay = await pc.get_chan_accum_pay(chan_id)
assert chan_accum_pay.amount == 0
chan_exp_time = await pc.get_chan_exp_time(chan_id)
assert chan_exp_time.unix_ts == later
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is True
return chan_id
async def test_extend_exp_time(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_extend_exp_time tests the method extend_exp_time.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_exp_time_old = await pc.get_chan_exp_time(chan_id)
new_later = chan_exp_time_old.unix_ts + 300
resp = await pc.extend_exp_time(
by=acnt0,
chan_id=chan_id,
expire_at=new_later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_exp_time = await pc.get_chan_exp_time(chan_id)
assert chan_exp_time.unix_ts == new_later
async def test_load(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_load tests the method load.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_load_old = await pc.get_chan_accum_load(chan_id)
assert chan_load_old.amount == self.INIT_LOAD
more_load = self.INIT_LOAD // 2
resp = await pc.load(acnt0, chan_id, more_load)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_load = await pc.get_chan_accum_load(chan_id)
assert chan_load.amount == self.INIT_LOAD + more_load
async def test_abort(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_abort tests the method abort.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is True
resp = await pc.abort(acnt0, chan_id)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is False
async def test_unload(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> None:
"""
test_unload tests the method unload.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
"""
pc = new_ctrt
api = acnt0.api
load_amount = self.TOK_MAX // 10
later = int(time.time()) + cft.AVG_BLOCK_DELAY * 2
# create a channel
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_id = resp["id"]
bal_old = await pc.get_ctrt_bal(acnt0.addr.data)
# wait until the channel expires
await asyncio.sleep(cft.AVG_BLOCK_DELAY * 2)
resp = await pc.unload(acnt0, chan_id)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
bal = await pc.get_ctrt_bal(acnt0.addr.data)
assert bal.amount == bal_old.amount + load_amount
async def test_offchain_pay_and_collect_payment(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_offchain_pay_and_collect_payment tests the method
- offchain_pay
- collect_payment.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
sig = await pc.offchain_pay(
key_pair=acnt0.key_pair,
chan_id=chan_id,
amount=self.INIT_LOAD,
)
resp = await pc.collect_payment(
by=acnt1,
chan_id=chan_id,
amount=self.INIT_LOAD,
signature=sig,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
accum_pay = await pc.get_chan_accum_pay(chan_id)
assert accum_pay.amount == self.INIT_LOAD
acnt1_bal = await pc.get_ctrt_bal(acnt1.addr.data)
assert acnt1_bal.amount == self.INIT_LOAD
@pytest.mark.whole
async def test_as_whole(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.PayChanCtrt,
) -> None:
"""
test_as_whole tests methods of PayChanCtrt as a whole so as to reduce resource consumption.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The token contract instance.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
"""
tc = new_tok_ctrt
pc = new_ctrt
await self.test_register(acnt0, tc, pc)
chan_id = await self.test_create_and_load(acnt0, acnt1, pc)
pc_with_chan = (pc, chan_id)
await self.test_extend_exp_time(acnt0, pc_with_chan)
await self.test_load(acnt0, pc_with_chan)
await self.test_offchain_pay_and_collect_payment(acnt0, acnt1, pc_with_chan)
await self.test_abort(acnt0, pc_with_chan)
await self.test_unload(acnt0, acnt1, pc)
| <filename>test/func_test/test_ctrt/test_pay_chan_ctrt.py
import asyncio
import time
from typing import Tuple
import pytest
import py_vsys as pv
from test.func_test import conftest as cft
class TestPayChanCtrt:
"""
TestPayChanCtrt is the collection of functional tests of Payment Channel Contract.
"""
TOK_MAX = 100
TOK_UNIT = 1
INIT_LOAD = TOK_MAX // 2
@pytest.fixture
async def new_tok_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_tok_ctrt is the fixture that registers a new token contract without split instance.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The token contract instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, self.TOK_MAX, self.TOK_UNIT)
await cft.wait_for_block()
await tc.issue(acnt0, self.TOK_MAX)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_ctrt(
self, acnt0: pv.Account, new_tok_ctrt: pv.TokCtrtWithoutSplit
) -> pv.PayChanCtrt:
"""
new_ctrt is the fixture that registers a new Payment Channel contract.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
Returns:
pv.PayChanCtrt: The PayChanCtrt instance.
"""
tc = new_tok_ctrt
api = acnt0.api
pc = await pv.PayChanCtrt.register(acnt0, tc.tok_id.data)
await cft.wait_for_block()
resp = await tc.deposit(acnt0, pc.ctrt_id.data, self.TOK_MAX)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
return pc
@pytest.fixture
async def new_ctrt_with_chan(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> Tuple[pv.PayChanCtrt, str]:
"""
new_ctrt_with_chan is the fixture that registers a new Payment Channel
contract and creates a channel.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
Tuple[pv.PayChanCtrt, str]: The PayChanCtrt instance & channel id
"""
pc = new_ctrt
load_amount = self.INIT_LOAD
later = int(time.time()) + 60 * 10
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
chan_id = resp["id"]
return pc, chan_id
async def test_register(
self,
acnt0: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.PayChanCtrt,
) -> pv.PayChanCtrt:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
pv.PayChanCtrt: The PayChanCtrt instance.
"""
tc = new_tok_ctrt
pc = new_ctrt
assert (await pc.maker) == acnt0.addr
assert (await pc.tok_id) == tc.tok_id
ctrt_bal = await pc.get_ctrt_bal(acnt0.addr.data)
assert ctrt_bal.amount == self.TOK_MAX
return pc
async def test_create_and_load(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> str:
"""
test_create_and_load tests the method create_and_load.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
str: The channel ID.
"""
pc = new_ctrt
api = acnt0.api
load_amount = self.INIT_LOAD
later = int(time.time()) + 60 * 10
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_id = resp["id"]
chan_creator = await pc.get_chan_creator(chan_id)
assert chan_creator == acnt0.addr
chan_creator_pub_key = await pc.get_chan_creator_pub_key(chan_id)
assert chan_creator_pub_key == acnt0.key_pair.pub
chan_accum_load = await pc.get_chan_accum_load(chan_id)
assert chan_accum_load.amount == load_amount
chan_accum_pay = await pc.get_chan_accum_pay(chan_id)
assert chan_accum_pay.amount == 0
chan_exp_time = await pc.get_chan_exp_time(chan_id)
assert chan_exp_time.unix_ts == later
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is True
return chan_id
async def test_extend_exp_time(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_extend_exp_time tests the method extend_exp_time.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_exp_time_old = await pc.get_chan_exp_time(chan_id)
new_later = chan_exp_time_old.unix_ts + 300
resp = await pc.extend_exp_time(
by=acnt0,
chan_id=chan_id,
expire_at=new_later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_exp_time = await pc.get_chan_exp_time(chan_id)
assert chan_exp_time.unix_ts == new_later
async def test_load(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_load tests the method load.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_load_old = await pc.get_chan_accum_load(chan_id)
assert chan_load_old.amount == self.INIT_LOAD
more_load = self.INIT_LOAD // 2
resp = await pc.load(acnt0, chan_id, more_load)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_load = await pc.get_chan_accum_load(chan_id)
assert chan_load.amount == self.INIT_LOAD + more_load
async def test_abort(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_abort tests the method abort.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is True
resp = await pc.abort(acnt0, chan_id)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is False
async def test_unload(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> None:
"""
test_unload tests the method unload.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
"""
pc = new_ctrt
api = acnt0.api
load_amount = self.TOK_MAX // 10
later = int(time.time()) + cft.AVG_BLOCK_DELAY * 2
# create a channel
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_id = resp["id"]
bal_old = await pc.get_ctrt_bal(acnt0.addr.data)
# wait until the channel expires
await asyncio.sleep(cft.AVG_BLOCK_DELAY * 2)
resp = await pc.unload(acnt0, chan_id)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
bal = await pc.get_ctrt_bal(acnt0.addr.data)
assert bal.amount == bal_old.amount + load_amount
async def test_offchain_pay_and_collect_payment(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_offchain_pay_and_collect_payment tests the method
- offchain_pay
- collect_payment.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
sig = await pc.offchain_pay(
key_pair=acnt0.key_pair,
chan_id=chan_id,
amount=self.INIT_LOAD,
)
resp = await pc.collect_payment(
by=acnt1,
chan_id=chan_id,
amount=self.INIT_LOAD,
signature=sig,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
accum_pay = await pc.get_chan_accum_pay(chan_id)
assert accum_pay.amount == self.INIT_LOAD
acnt1_bal = await pc.get_ctrt_bal(acnt1.addr.data)
assert acnt1_bal.amount == self.INIT_LOAD
@pytest.mark.whole
async def test_as_whole(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.PayChanCtrt,
) -> None:
"""
test_as_whole tests methods of PayChanCtrt as a whole so as to reduce resource consumption.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The token contract instance.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
"""
tc = new_tok_ctrt
pc = new_ctrt
await self.test_register(acnt0, tc, pc)
chan_id = await self.test_create_and_load(acnt0, acnt1, pc)
pc_with_chan = (pc, chan_id)
await self.test_extend_exp_time(acnt0, pc_with_chan)
await self.test_load(acnt0, pc_with_chan)
await self.test_offchain_pay_and_collect_payment(acnt0, acnt1, pc_with_chan)
await self.test_abort(acnt0, pc_with_chan)
await self.test_unload(acnt0, acnt1, pc)
| en | 0.61532 | TestPayChanCtrt is the collection of functional tests of Payment Channel Contract. new_tok_ctrt is the fixture that registers a new token contract without split instance. Args: acnt0 (pv.Account): The account of nonce 0. Returns: pv.TokCtrtWithoutSplit: The token contract instance. new_ctrt is the fixture that registers a new Payment Channel contract. Args: acnt0 (pv.Account): The account of nonce 0. new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract. Returns: pv.PayChanCtrt: The PayChanCtrt instance. new_ctrt_with_chan is the fixture that registers a new Payment Channel contract and creates a channel. Args: acnt0 (pv.Account): The account of nonce 0. acnt1 (pv.Account): The account of nonce 1. new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract. Returns: Tuple[pv.PayChanCtrt, str]: The PayChanCtrt instance & channel id test_register tests the method register. Args: acnt0 (pv.Account): The account of nonce 0. new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract. new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract. Returns: pv.PayChanCtrt: The PayChanCtrt instance. test_create_and_load tests the method create_and_load. Args: acnt0 (pv.Account): The account of nonce 0. acnt1 (pv.Account): The account of nonce 1. new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract. Returns: str: The channel ID. test_extend_exp_time tests the method extend_exp_time. Args: acnt0 (pv.Account): The account of nonce 0. new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract and creates a new channel. test_load tests the method load. Args: acnt0 (pv.Account): The account of nonce 0. new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract and creates a new channel. test_abort tests the method abort. Args: acnt0 (pv.Account): The account of nonce 0. new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract and creates a new channel. test_unload tests the method unload. Args: acnt0 (pv.Account): The account of nonce 0. acnt1 (pv.Account): The account of nonce 1. new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract. # create a channel # wait until the channel expires test_offchain_pay_and_collect_payment tests the method - offchain_pay - collect_payment. Args: acnt0 (pv.Account): The account of nonce 0. acnt1 (pv.Account): The account of nonce 1. new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract and creates a new channel. test_as_whole tests methods of PayChanCtrt as a whole so as to reduce resource consumption. Args: acnt0 (pv.Account): The account of nonce 0. acnt1 (pv.Account): The account of nonce 1. new_tok_ctrt (pv.TokCtrtWithoutSplit): The token contract instance. new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract. | 2.444988 | 2 |
cfgov/data_research/tests/test_views.py | thephillipsequation/cfgov-refresh | 0 | 6632204 | import datetime
import json
import unittest
import django
from django.core.urlresolvers import NoReverseMatch, reverse
from model_bakery import baker
from data_research.models import (
County, CountyMortgageData, MetroArea, MSAMortgageData,
NationalMortgageData, NonMSAMortgageData, State, StateMortgageData
)
from data_research.views import validate_year_month
class YearMonthValidatorTests(unittest.TestCase):
"""check the year_month validator"""
good_pair = '2016-09'
future_year = '2040-08'
too_old_year = '1957-08'
bad_month = '2015-13'
non_integer = '201A-12'
bad_format = '201609'
def test_validate_year_month_good(self):
self.assertTrue(validate_year_month(self.good_pair))
def test_validate_year_month_future_year(self):
self.assertFalse(validate_year_month(self.future_year))
def test_validate_year_month_too_old(self):
self.assertFalse(validate_year_month(self.too_old_year))
def test_validate_year_month_bad_month(self):
self.assertFalse(validate_year_month(self.bad_month))
def test_validate_year_month_non_integer(self):
self.assertFalse(validate_year_month(self.non_integer))
def test_validate_year_month_bad_format(self):
self.assertFalse(validate_year_month(self.bad_format))
class TimeseriesViewTests(django.test.TestCase):
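    """Tests for the mortgage metadata, timeseries, and map-data API views."""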
fixtures = ['mortgage_constants.json', 'mortgage_metadata.json']
def setUp(self):
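        # Build a minimal geography hierarchy (one state, one county, two
        # metro areas) plus one mortgage-data record at each geography level,
        # all dated 2008-01-01.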
baker.make(
State,
fips='12',
abbr='FL',
ap_abbr='Fla.',
counties=["12081"],
msas=["52081"],
name='Florida',
non_msa_counties=["12001"],
non_msa_valid=True)
baker.make(
County,
fips='12081',
name='<NAME>',
state=State.objects.get(fips='12'),
valid=True)
baker.make(
MetroArea,
fips='35840',
name='North Port-Sarasota-Bradenton, FL',
states=["12"],
counties=["12081", "12115"],
valid=True)
baker.make(
MetroArea,
fips='16220',
name='<NAME>',
states=["56"],
counties=["12081", "12115"],
valid=True)
baker.make(
NationalMortgageData,
current=2500819,
date=datetime.date(2008, 1, 1),
fips='-----',
id=1,
ninety=40692,
other=36196,
sixty=27586,
thirty=67668,
total=2674899)
baker.make(
StateMortgageData,
current=250081,
date=datetime.date(2008, 1, 1),
fips='12',
id=1,
state=State.objects.get(fips='12'),
ninety=4069,
other=3619,
sixty=2758,
thirty=6766,
total=26748)
baker.make(
MSAMortgageData,
current=5250,
date=datetime.date(2008, 1, 1),
msa=MetroArea.objects.get(fips='35840'),
fips='35840',
id=1,
ninety=1406,
other=361,
sixty=1275,
thirty=3676,
total=22674)
baker.make(
NonMSAMortgageData,
current=5250,
date=datetime.date(2008, 1, 1),
state=State.objects.get(fips='12'),
fips='12-non',
id=1,
ninety=1406,
other=361,
sixty=1275,
thirty=3676,
total=22674)
baker.make(
CountyMortgageData,
current=250,
date=datetime.date(2008, 1, 1),
county=County.objects.get(fips='12081'),
fips='12081',
id=1,
ninety=406,
other=361,
sixty=275,
thirty=676,
total=2674)
def test_metadata_request(self):
response = self.client.get(
reverse(
'data_research_api_metadata',
kwargs={'meta_name': 'sampling_dates'}))
self.assertEqual(response.status_code, 200)
self.assertIn(
'2008-01-01',
json.loads(response.content)
)
def test_metadata_request_bad_meta_name(self):
response = self.client.get(
reverse(
'data_research_api_metadata',
kwargs={'meta_name': 'xxx'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No metadata object found.')
def test_national_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries_national',
kwargs={'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_national_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries_national',
kwargs={'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_state_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12',
'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_state_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_msa_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '35840',
'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_msa_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '35840',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_msa_timeseries_90_below_threshold(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '16220',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'not valid')
def test_non_msa_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12-non',
'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_non_msa_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12-non',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_county_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12081',
'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_county_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12081',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_timeseries_bad_fips(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '99999',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'FIPS code not found')
def test_map_data_bad_date(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '90',
'year_month': '0000-01'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Invalid year-month pair')
def test_map_data_disallowed_delinquency_digit(self):
with self.assertRaises(NoReverseMatch):
self.client.get(reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '100',
'year_month': '2008-01'}))
def test_map_data_disallowed_delinquency_range(self):
response = self.client.get(reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '38-89',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Unknown delinquency range')
def test_timeseries_disallowed_delinquency_range(self):
response = self.client.get(reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12081', 'days_late': '38-89'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Unknown delinquency range')
def test_national_timeseries_disallowed_delinquency_range(self):
response = self.client.get(reverse(
'data_research_api_mortgage_timeseries_national',
kwargs={'days_late': '38-89'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Unknown delinquency range')
def test_map_data_unknown_geo(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'parish',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Unkown geographic unit')
def test_county_map_data_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '30-89',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(
sorted(response_data.get('data').get('12081').keys()),
['name', 'value'])
def test_county_map_data_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(
sorted(response_data.get('data').get('12081').keys()),
['name', 'value'])
def test_msa_map_data_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'metros',
'days_late': '30-89',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
def test_msa_map_data_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'metros',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
def test_map_view_msa_below_threshold(self):
"""The view should deliver a below-threshold MSA with value of None"""
msa = MSAMortgageData.objects.get(fips='35840')
geo = msa.msa
geo.valid = False
geo.save()
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'metros',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
msa_value = json.loads(response.content)['data'][msa.fips]['value']
self.assertIs(msa_value, None)
def test_map_view_non_msa_below_threshold(self):
"""Should deliver a below-threshold non-MSA with value of None"""
non_msa = NonMSAMortgageData.objects.get(fips='12-non')
geo = non_msa.state
geo.non_msa_valid = False
geo.save()
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'metros',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
msa_value = json.loads(response.content)['data'][non_msa.fips]['value']
self.assertIs(msa_value, None)
def test_national_map_data_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'national',
'days_late': '30-89',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
def test_national_map_data_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'national',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
def test_county_timeseries_data_invalid(self):
county = County.objects.get(fips='12081')
county.valid = False
county.save()
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12081', 'days_late': '90'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'County is below display threshold')
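# --- Illustrative note (not part of the original test module) ---------------
# The map-data tests above only assert on keys, so the full payload never
# appears in this file. The literal below sketches the shape those assertions
# imply for one county FIPS; the name and value are made-up placeholders, and
# below-threshold geographies would carry a 'value' of None instead.
_EXAMPLE_MAPDATA_PAYLOAD = {
    'data': {
        '12081': {'name': 'Placeholder County, FL', 'value': 2.5},
    },
}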
| import datetime
import json
import unittest
import django
from django.core.urlresolvers import NoReverseMatch, reverse
from model_bakery import baker
from data_research.models import (
County, CountyMortgageData, MetroArea, MSAMortgageData,
NationalMortgageData, NonMSAMortgageData, State, StateMortgageData
)
from data_research.views import validate_year_month
class YearMonthValidatorTests(unittest.TestCase):
"""check the year_month validator"""
good_pair = '2016-09'
future_year = '2040-08'
too_old_year = '1957-08'
bad_month = '2015-13'
non_integer = '201A-12'
bad_format = '201609'
def test_validate_year_month_good(self):
self.assertTrue(validate_year_month(self.good_pair))
def test_validate_year_month_future_year(self):
self.assertFalse(validate_year_month(self.future_year))
def test_validate_year_month_too_old(self):
self.assertFalse(validate_year_month(self.too_old_year))
def test_validate_year_month_bad_month(self):
self.assertFalse(validate_year_month(self.bad_month))
def test_validate_year_month_non_integer(self):
self.assertFalse(validate_year_month(self.non_integer))
def test_validate_year_month_bad_format(self):
self.assertFalse(validate_year_month(self.bad_format))
class TimeseriesViewTests(django.test.TestCase):
fixtures = ['mortgage_constants.json', 'mortgage_metadata.json']
def setUp(self):
baker.make(
State,
fips='12',
abbr='FL',
ap_abbr='Fla.',
counties=["12081"],
msas=["52081"],
name='Florida',
non_msa_counties=["12001"],
non_msa_valid=True)
baker.make(
County,
fips='12081',
name='<NAME>',
state=State.objects.get(fips='12'),
valid=True)
baker.make(
MetroArea,
fips='35840',
name='North Port-Sarasota-Bradenton, FL',
states=["12"],
counties=["12081", "12115"],
valid=True)
baker.make(
MetroArea,
fips='16220',
name='<NAME>',
states=["56"],
counties=["12081", "12115"],
valid=True)
baker.make(
NationalMortgageData,
current=2500819,
date=datetime.date(2008, 1, 1),
fips='-----',
id=1,
ninety=40692,
other=36196,
sixty=27586,
thirty=67668,
total=2674899)
baker.make(
StateMortgageData,
current=250081,
date=datetime.date(2008, 1, 1),
fips='12',
id=1,
state=State.objects.get(fips='12'),
ninety=4069,
other=3619,
sixty=2758,
thirty=6766,
total=26748)
baker.make(
MSAMortgageData,
current=5250,
date=datetime.date(2008, 1, 1),
msa=MetroArea.objects.get(fips='35840'),
fips='35840',
id=1,
ninety=1406,
other=361,
sixty=1275,
thirty=3676,
total=22674)
baker.make(
NonMSAMortgageData,
current=5250,
date=datetime.date(2008, 1, 1),
state=State.objects.get(fips='12'),
fips='12-non',
id=1,
ninety=1406,
other=361,
sixty=1275,
thirty=3676,
total=22674)
baker.make(
CountyMortgageData,
current=250,
date=datetime.date(2008, 1, 1),
county=County.objects.get(fips='12081'),
fips='12081',
id=1,
ninety=406,
other=361,
sixty=275,
thirty=676,
total=2674)
def test_metadata_request(self):
response = self.client.get(
reverse(
'data_research_api_metadata',
kwargs={'meta_name': 'sampling_dates'}))
self.assertEqual(response.status_code, 200)
self.assertIn(
'2008-01-01',
json.loads(response.content)
)
def test_metadata_request_bad_meta_name(self):
response = self.client.get(
reverse(
'data_research_api_metadata',
kwargs={'meta_name': 'xxx'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No metadata object found.')
def test_national_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries_national',
kwargs={'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_national_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries_national',
kwargs={'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_state_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12',
'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_state_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_msa_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '35840',
'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_msa_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '35840',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_msa_timeseries_90_below_threshold(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '16220',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'not valid')
def test_non_msa_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12-non',
'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_non_msa_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12-non',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_county_timeseries_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12081',
'days_late': '30-89'}))
self.assertEqual(response.status_code, 200)
def test_county_timeseries_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12081',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
def test_timeseries_bad_fips(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '99999',
'days_late': '90'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'FIPS code not found')
def test_map_data_bad_date(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '90',
'year_month': '0000-01'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Invalid year-month pair')
def test_map_data_disallowed_delinquency_digit(self):
with self.assertRaises(NoReverseMatch):
self.client.get(reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '100',
'year_month': '2008-01'}))
def test_map_data_disallowed_delinquency_range(self):
response = self.client.get(reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '38-89',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Unknown delinquency range')
def test_timeseries_disallowed_delinquency_range(self):
response = self.client.get(reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12081', 'days_late': '38-89'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Unknown delinquency range')
def test_national_timeseries_disallowed_delinquency_range(self):
response = self.client.get(reverse(
'data_research_api_mortgage_timeseries_national',
kwargs={'days_late': '38-89'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Unknown delinquency range')
def test_map_data_unknown_geo(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'parish',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Unkown geographic unit')
def test_county_map_data_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '30-89',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(
sorted(response_data.get('data').get('12081').keys()),
['name', 'value'])
def test_county_map_data_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'counties',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(
sorted(response_data.get('data').get('12081').keys()),
['name', 'value'])
def test_msa_map_data_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'metros',
'days_late': '30-89',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
def test_msa_map_data_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'metros',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
def test_map_view_msa_below_threshold(self):
"""The view should deliver a below-threshold MSA with value of None"""
msa = MSAMortgageData.objects.get(fips='35840')
geo = msa.msa
geo.valid = False
geo.save()
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'metros',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
msa_value = json.loads(response.content)['data'][msa.fips]['value']
self.assertIs(msa_value, None)
def test_map_view_non_msa_below_threshold(self):
"""Should deliver a below-threshold non-MSA with value of None"""
non_msa = NonMSAMortgageData.objects.get(fips='12-non')
geo = non_msa.state
geo.non_msa_valid = False
geo.save()
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'metros',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
msa_value = json.loads(response.content)['data'][non_msa.fips]['value']
self.assertIs(msa_value, None)
def test_national_map_data_30_89(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'national',
'days_late': '30-89',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
def test_national_map_data_90(self):
response = self.client.get(
reverse(
'data_research_api_mortgage_mapdata',
kwargs={'geo': 'national',
'days_late': '90',
'year_month': '2008-01'}))
self.assertEqual(response.status_code, 200)
def test_county_timeseries_data_invalid(self):
county = County.objects.get(fips='12081')
county.valid = False
county.save()
response = self.client.get(
reverse(
'data_research_api_mortgage_timeseries',
kwargs={'fips': '12081', 'days_late': '90'}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'County is below display threshold')
| en | 0.811758 | check the year_month validator The view should deliver a below-threshold MSA with value of None Should deliver a below-threshold non-MSA with value of None | 2.475304 | 2 |
gdutils/datamine.py | InnovativeInventor/gdutils | 0 | 6632205 | """
gdutils.datamine
================
Provides
- A ``python`` module for mining and listing data sources.
Metadata
--------
:Module: ``gdutils.datamine``
:Filename: `datamine.py <https://github.com/mggg/gdutils/>`_
:Author: `@KeiferC <https://github.com/keiferc>`_
:Date: 27 July 2020
:Version: 1.1.0
:Description: Module for data mining
:Contributors: `@InnovativeInventor <https://github.com/InnovativeInventor>`_
Documentation
-------------
Documentation for the ``datamine`` module can be found as docstrings.
Run ``import gdutils.datamine; help(gdutils.datamine)`` to view documentation.
::
$ python
>>> import gdutils.datamine; help(gdutils.datamine)
Additionally, documentation can be found on `Read the Docs
<https://gdutils.readthedocs.io>`_.
"""
import json
import os
import pathlib
import requests
import subprocess
import sys
import urllib.parse
from typing import (Dict, Hashable, Iterable, List, NoReturn, Optional, Tuple,
Union)
#########################################
# #
# Function Definitions #
# #
#########################################
def list_gh_repos(account: str, account_type: str) -> List[Tuple[str, str]]:
"""
Returns a list of tuples of public GitHub repositories and their URLs
associated with the given account and account type.
Parameters
----------
account : str
GitHub account whose public repos are to be listed.
account_type: str
Type of GitHub account whose public repos are to be listed.
Valid options: ``'users'``, ``'orgs'``.
Returns
-------
List[Tuple[str, str]]
A list of tuples of public Github repositories and their URLs.
E.g.
::
[('boysenberry-repo-1',
'https://github.com/octocat/boysenberry-repo-1.git'),
('git-consortium',
'https://github.com/octocat/git-consortium.git'),
...
('test-repo1', 'https://github.com/octocat/test-repo1.git')]
Raises
------
ValueError
Raised if the given account_type is neither ``'users'`` nor ``'orgs'``.
RuntimeError
Raised if unable to query GitHub for repo information.
Examples
--------
>>> repos = datamine.list_gh_repos('octocat', 'users')
# gets a list of all repos and their GitHub URLs for account 'octocat'
>>> for repo, url in repos:
... print('{} : {}'.format(repo, url))
boysenberry-repo-1 : https://github.com/octocat/boysenberry-repo-1.git
git-consortium : https://github.com/octocat/git-consortium.git
hello-worId : https://github.com/octocat/hello-worId.git
Hello-World : https://github.com/octocat/Hello-World.git
linguist : https://github.com/octocat/linguist.git
octocat.github.io : https://github.com/octocat/octocat.github.io.git
Spoon-Knife : https://github.com/octocat/Spoon-Knife.git
test-repo1 : https://github.com/octocat/test-repo1.git
"""
valid_acc_types = ['users', 'orgs']
gh_api = 'https://api.github.com'
gh_endpt = 'repos'
if account_type not in valid_acc_types:
raise ValueError(
"Invalid account type. Valid options: {}.".format(valid_acc_types))
gh_api_url = gh_api + '/' + account_type + '/' + account + '/' + gh_endpt
raw_response = requests.get(gh_api_url)
response = json.loads(raw_response.text)
try:
return [(__get_repo_name(repo['clone_url']), repo['clone_url'])
for repo in response]
except Exception as err:
    msg = "Unable to list repos for account {}. ".format(account)
    try:
        raise RuntimeError(msg + response['message'])
    except (TypeError, KeyError):
        raise RuntimeError(msg + str(err))
def clone_gh_repos(account: str,
account_type: str,
repos: Optional[List[str]] = None,
outpath: Optional[Union[str, pathlib.Path]] = None,
shallow: bool = True,
silent: bool = False
) -> NoReturn:
"""
Clones public GitHub repositories into the given directory. If
directory path is not provided, clones repos into the current
working directory.
Parameters
----------
account : str
GitHub account whose public repos are to be cloned.
account_type : str
Type of GitHub account whose public repos are to be cloned.
Valid options: ``'users'``, ``'orgs'``.
repos : List[str], optional, default = ``None``
List of specific repositories to clone.
outpath : str | pathlib.Path, optional, default = ``None``
Path to which repos are to be cloned. If not specified, clones
repos into current working directory.
shallow : bool | optional, default = ``True``
Determines whether the clone will be shallow or not. If not specified,
defaults to a shallow git clone.
silent : bool | optional, default = ``False``
Determines whether the clone will be silent or not. If not specified,
defaults to a loud git clone.
Raises
------
ValueError
Raised if provided an account type other than ``'users'`` or
``'orgs'``.
Examples
--------
>>> datamine.clone_gh_repos('mggg-states', 'orgs')
# clones all repositories of 'mggg-states' into the current directory
>>> datamine.clone_gh_repos('mggg-states', 'orgs', ['AZ-shapefiles'])
# clones repo 'AZ-shapefiles' from 'mggg-states' into current directory
>>> datamine.clone_gh_repos('mggg-states', 'orgs',
... ['AZ-shapefiles', 'HI-shapefiles'])
# clones repos 'AZ-shapefiles' & 'HI-shapefiles' into current directory
>>> datamine.clone_gh_repos('mggg-states', 'orgs', ['HI-shapefiles'], 'shps/')
# clones repo 'HI-shapefiles' into directory 'shps/'
>>> datamine.clone_gh_repos('octocat', 'users', outpath='cloned-repos/')
# clones all repos of 'octocat' into directory 'cloned-repos/'
>>> datamine.clone_gh_repos('octocat', 'users', outpath='cloned-repos/',
... shallow=False)
# deep clones all repos of 'octocat' into directory 'cloned-repos/'
>>> datamine.clone_gh_repos('octocat', 'users', outpath='cloned-repos/',
... silent=True)
# silently clones (shallow by default) all repos of 'octocat' into 'cloned-repos/'
"""
try:
if repos is None:
queried_repos = [repo for _, repo
in list_gh_repos(account, account_type)]
cmds = __generate_clone_cmds(queried_repos, outpath, shallow=shallow)
else:
repo_urls = [__create_gh_repo_url(account, rname)
for rname in repos]
cmds = __generate_clone_cmds(repo_urls, outpath, shallow=shallow)
if silent:
responses = list(map(lambda cmd : subprocess.run(cmd, stderr=open(os.devnull, 'wb'), stdout=open(os.devnull, 'wb')), cmds))
else:
responses = list(map(lambda cmd : subprocess.run(cmd), cmds))
for res in responses:
if res.returncode != 0:
sys.stderr.write("Failed to clone {}.\n".format(res.args[2]))
except Exception as e:
raise RuntimeError("Unable to clone repos. {}".format(e))
def remove_repos(dirpath: Union[str, pathlib.Path]) -> NoReturn:
"""
Given a name/path of a directory, recursively removes all git repositories
starting from the given directory. This action cannot be undone.
*Warning:* this function will remove the given directory if the given
directory itself is a git repo.
Parameters
----------
dirpath: str | pathlib.Path
Name/path of directory from which recursive removal of repos begins.
Raises
------
FileNotFoundError
Raised if unable to find the given directory.
Examples
--------
>>> datamine.remove_repos('repos_to_remove/')
# removes all repos in directory 'repos_to_remove/'
>>> datamine.remove_repos('repos_to_remove/repo1')
# removes repo 'repo1' in directory 'repos_to_remove/'
"""
try:
repos = __list_repos(dirpath)
cmds = [['rm', '-rf', repo] for repo in repos]
responses = list(map(lambda cmd : subprocess.run(cmd), cmds))
for res in responses:
if res.returncode != 0:
sys.stderr.write("Failed to remove repo {}.\n".format(res.args[2]))
except Exception as e:
raise RuntimeError("Unable to remove repo. {}".format(e))
def list_files_of_type(filetype: Union[str, List[str]],
dirpath: Optional[Union[str, pathlib.Path]] = '.',
exclude_hidden: Optional[bool] = True
) -> List[str]:
"""
Given a file extension and an optional directory path, returns a list of
file paths of files containing the extension. If the directory path is not
specified, function defaults to listing files from the current
working directory.
Parameters
----------
filetype: str | List[str]
File extension of files to list (e.g. ``'.zip'``). Can be a list of
extensions (e.g. ``['.zip', '.shp', '.csv']``).
dirpath: str | pathlib.Path, optional, default = ``'.'``.
Path to directory from which file listing begins. Defaults to
current working directory if not specified.
exclude_hidden: bool, optional, default = ``True``
If false, function includes hidden files in the search.
Returns
-------
List[str]
List of file paths of files containing the given extension.
Raises
------
FileNotFoundError
Raised if unable to find given directory.
Examples
--------
>>> list_of_zips = datamine.list_files_of_type('.zip')
# recursively gets a list of '.zip' files from the current directory
>>> print(list_of_zips)
['./zipfile1.zip', './zipfile2.zip', './shapefiles/shape1.zip',
'./shapefiles/shape2.zip']
>>> list_of_shps = datamine.list_files_of_type('.shp', 'shapefiles/')
# recursively gets a list of '.shp' files from the 'shapefiles/' directory
>>> print(list_of_shps)
['./shapefiles/shape1/shape1.shp', './shapefiles/shape2/shape2.shp']
>>> list_of_csvs = datamine.list_files_of_type('.csv',
... exclude_hidden = False)
# recursively gets a list of '.csv' files, including hidden files
>>> print(list_of_csvs)
['./csv1.csv', './.csv_hidden.csv']
>>> list_of_mix = datamine.list_files_of_type(['.shp', '.zip'])
# recursively gets a list of '.shp' and '.zip' files
>>> print(list_of_mix)
['./shapefiles/shape1/shape1.shp', './shapefiles/shape2/shape2.shp',
'./zipfile1.zip', './zipfile2.zip', './shapefiles/shape1.zip',
'./shapefiles/shape2.zip']
"""
root_path = __get_validated_path(dirpath)
if isinstance(filetype, str):
filetype = [filetype]
all_files = []
for path, _, files in os.walk(root_path):
[all_files.append(os.path.join(path, file))
for file in files if not (exclude_hidden and file[0] == '.')]
return [file for file in all_files
if any([file.endswith(ftype) for ftype in filetype])]
def get_keys_by_category(dictionary: Dict[Hashable, List[Iterable]],
category: Union[Hashable, List[Hashable]]
) -> List[Hashable]:
"""
Given a dictionary with categories, returns a list of keys in the
given category.
Examples of accepted forms of dictionary input:
::
{category1 : [{key1 : value1}, {key2 : value2}],
category2 : [{key3 : value3},]}
::
{category1 : [[key1, key2, key3]]}
::
{category1 : [[key1]],
category2 : [[key2], {key3: value3}]}
Parameters
----------
dictionary : Dict[Hashable, List[Iterable]]
Dictionary containing categories in which keys are stored.
category : Hashable | List[Hashable]
Category containing keys.
Returns
-------
List[Hashable]
List of keys of every key-value pair in the given category of the
given dictionary.
Examples
--------
>>> sample_dict = {'category1' : [{'key1': 1}],
... 'category2' : [{'key2' : 2}, {'key3' : 3}]}
>>> keys = datamine.get_keys_by_category(sample_dict, 'category2')
# gets a list of keys under 'category2' from the dictionary 'sample_dict'
>>> print(keys)
['key2', 'key3']
>>> sample_dict = {'category1' : [['key1', 'key4']],
... 'category2' : [['key2'], {'key3': 'value3'}]}
>>> keys = datamine.get_keys_by_category(sample_dict, 'category2')
# note: keys can be stored in both list and dictionary form
>>> print(keys)
['key2', 'key3']
>>> keys = datamine.get_keys_by_category(sample_dict,
... ['category1', 'category2'])
# gets a list of keys under categories 'category1' and 'category2'
>>> print(keys)
['key1', 'key2', 'key3']
"""
flatten = lambda xs : [x for sublist in xs for x in sublist]
try:
return flatten([list(key) for key in dictionary[category]])
except: # category is a list
return flatten([list(key) for item in category
for key in dictionary[item]])
#########################################
# #
# Helper Definitions #
# #
#########################################
def __generate_clone_cmds(
repos: Optional[Union[Dict[str, str], List[str]]] = None,
dirpath: Optional[Union[str, pathlib.Path]] = None,
shallow: bool = True
) -> List[List[str]]:
"""
Given a list of repos, returns a list of subprocess-valid
git clone commands.
"""
try: # if repos is a Dict - EAFP
cmds = [['git', 'clone', repo['clone_url']] for repo in repos]
except Exception:
pass
try: # if repos is a List - EAFP
cmds = [['git', 'clone', repo] for repo in repos]
except Exception as e:
raise RuntimeError(
'Unable to generate clone commands. {}'.format(e))
if dirpath is not None:
[cmd.append(os.path.join(dirpath, __get_repo_name(cmd[2])))
for cmd in cmds]
if shallow:
[cmd.extend(['--depth', '1']) for cmd in cmds]
return cmds
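# --- Illustrative note (not part of the original module) --------------------
# For a single URL, an output directory and the default shallow=True, the
# helper above yields nested argv lists shaped like the example below
# (example values only; the joined path is shown POSIX-style):
_EXAMPLE_CLONE_CMD = ['git', 'clone',
                      'https://github.com/octocat/Hello-World.git',
                      'out/Hello-World', '--depth', '1']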
def __get_repo_name(url: str) -> str:
"""
Returns the name of the repository from its given URL.
"""
parsed = urllib.parse.urlparse(url)
name = os.path.basename(parsed.path)
if name.endswith('.git'):
name = name[:-4]
return name
def __create_gh_repo_url(account: str, repo: str) -> str:
"""
Given an account name and a repo name, returns a cloneable
gh repo url.
"""
return 'https://github.com/' + account + '/' + repo + '.git'
def __list_repos(dirpath: Optional[Union[str, pathlib.Path]] = '.'
) -> List[str]:
"""
Given a starting search directory, returns a list of paths to git repos
on the local machine.
"""
root_path = __get_validated_path(dirpath)
subdirs = []
for path, dirs, _ in os.walk(root_path):
[subdirs.append(os.path.join(path, directory)) for directory in dirs]
return [str(pathlib.Path(subdir).parent) for subdir in subdirs
if pathlib.Path(subdir).name == '.git']
def __get_validated_path(dirpath: Union[str, pathlib.Path]) -> pathlib.Path:
try:
root_path = pathlib.Path(dirpath)
if not os.path.isdir(root_path):
raise FileNotFoundError("'{}.'".format(dirpath))
return root_path
except FileNotFoundError as e:
raise FileNotFoundError("Unable to find directory", e)
except Exception as e:
raise Exception("Failed to traverse path.".format(e))
| """
gdutils.datamine
================
Provides
- A ``python`` module for mining and listing data sources.
Metadata
--------
:Module: ``gdutils.datamine``
:Filename: `datamine.py <https://github.com/mggg/gdutils/>`_
:Author: `@KeiferC <https://github.com/keiferc>`_
:Date: 27 July 2020
:Version: 1.1.0
:Description: Module for data mining
:Contributors: `@InnovativeInventor <https://github.com/InnovativeInventor>`_
Documentation
-------------
Documentation for the ``datamine`` module can be found as docstrings.
Run ``import gdutils.datamine; help(gdutils.datamine)`` to view documentation.
::
$ python
>>> import gdutils.datamine; help(gdutils.datamine)
Additionally, documentation can be found on `Read the Docs
<https://gdutils.readthedocs.io>`_.
"""
import json
import os
import pathlib
import requests
import subprocess
import sys
import urllib.parse
from typing import (Dict, Hashable, Iterable, List, NoReturn, Optional, Tuple,
Union)
#########################################
# #
# Function Definitions #
# #
#########################################
def list_gh_repos(account: str, account_type: str) -> List[Tuple[str, str]]:
"""
Returns a list of tuples of public GitHub repositories and their URLs
associated with the given account and account type.
Parameters
----------
account : str
GitHub account whose public repos are to be listed.
account_type: str
Type of GitHub account whose public repos are to be listed.
Valid options: ``'users'``, ``'orgs'``.
Returns
-------
List[Tuple[str, str]]
A list of tuples of public Github repositories and their URLs.
E.g.
::
[('boysenberry-repo-1',
'https://github.com/octocat/boysenberry-repo-1.git'),
('git-consortium',
'https://github.com/octocat/git-consortium.git'),
...
('test-repo1', 'https://github.com/octocat/test-repo1.git')]
Raises
------
ValueError
Raised if the given account_type is neither ``'users'`` nor ``'orgs'``.
RuntimeError
Raised if unable to query GitHub for repo information.
Examples
--------
>>> repos = datamine.list_gh_repos('octocat', 'users')
# gets a list of all repos and their GitHub URLs for account 'octocat'
>>> for repo, url in repos:
... print('{} : {}'.format(repo, url))
boysenberry-repo-1 : https://github.com/octocat/boysenberry-repo-1.git
git-consortium : https://github.com/octocat/git-consortium.git
hello-worId : https://github.com/octocat/hello-worId.git
Hello-World : https://github.com/octocat/Hello-World.git
linguist : https://github.com/octocat/linguist.git
octocat.github.io : https://github.com/octocat/octocat.github.io.git
Spoon-Knife : https://github.com/octocat/Spoon-Knife.git
test-repo1 : https://github.com/octocat/test-repo1.git
"""
valid_acc_types = ['users', 'orgs']
gh_api = 'https://api.github.com'
gh_endpt = 'repos'
if account_type not in valid_acc_types:
raise ValueError(
"Invalid account type. Valid options: {}.".format(valid_acc_types))
gh_api_url = gh_api + '/' + account_type + '/' + account + '/' + gh_endpt
raw_response = requests.get(gh_api_url)
response = json.loads(raw_response.text)
try:
return [(__get_repo_name(repo['clone_url']), repo['clone_url'])
for repo in response]
except Exception as err:
    msg = "Unable to list repos for account {}. ".format(account)
    try:
        raise RuntimeError(msg + response['message'])
    except (TypeError, KeyError):
        raise RuntimeError(msg + str(err))
def clone_gh_repos(account: str,
account_type: str,
repos: Optional[List[str]] = None,
outpath: Optional[Union[str, pathlib.Path]] = None,
shallow: bool = True,
silent: bool = False
) -> NoReturn:
"""
Clones public GitHub repositories into the given directory. If
directory path is not provided, clones repos into the current
working directory.
Parameters
----------
account : str
GitHub account whose public repos are to be cloned.
account_type : str
Type of GitHub account whose public repos are to be cloned.
Valid options: ``'users'``, ``'orgs'``.
repos : List[str], optional, default = ``None``
List of specific repositories to clone.
outpath : str | pathlib.Path, optional, default = ``None``
Path to which repos are to be cloned. If not specified, clones
repos into current working directory.
shallow : bool | optional, default = ``True``
Determines whether the clone will be shallow or not. If not specified,
defaults to a shallow git clone.
silent : bool | optional, default = ``False``
Determines whether the clone will be silent or not. If not specified,
defaults to a loud git clone.
Raises
------
ValueError
Raised if provided an account type other than ``'users'`` or
``'orgs'``.
Examples
--------
>>> datamine.clone_gh_repos('mggg-states', 'orgs')
# clones all repositories of 'mggg-states' into the current directory
>>> datamine.clone_gh_repos('mggg-states', 'orgs', ['AZ-shapefiles'])
# clones repo 'AZ-shapefiles' from 'mggg-states' into current directory
>>> datamine.clone_gh_repos('mggg-states', 'orgs',
... ['AZ-shapefiles', 'HI-shapefiles'])
# clones repos 'AZ-shapefiles' & 'HI-shapefiles' into current directory
>>> datamine.clone_gh_repos('mggg-states', 'orgs', ['HI-shapefiles'], 'shps/')
# clones repo 'HI-shapefiles' into directory 'shps/'
>>> datamine.clone_gh_repos('octocat', 'users', outpath='cloned-repos/')
# clones all repos of 'octocat' into directory 'cloned-repos/'
>>> datamine.clone_gh_repos('octocat', 'users', outpath='cloned-repos/',
... shallow=False)
# deep clones all repos of 'octocat' into directory 'cloned-repos/'
>>> datamine.clone_gh_repos('octocat', 'users', outpath='cloned-repos/',
... silent=True)
# silently clones (shallow by default) all repos of 'octocat' into 'cloned-repos/'
"""
try:
if repos is None:
queried_repos = [repo for _, repo
in list_gh_repos(account, account_type)]
cmds = __generate_clone_cmds(queried_repos, outpath, shallow=shallow)
else:
repo_urls = [__create_gh_repo_url(account, rname)
for rname in repos]
cmds = __generate_clone_cmds(repo_urls, outpath, shallow=shallow)
if silent:
responses = list(map(lambda cmd : subprocess.run(cmd, stderr=open(os.devnull, 'wb'), stdout=open(os.devnull, 'wb')), cmds))
else:
responses = list(map(lambda cmd : subprocess.run(cmd), cmds))
for res in responses:
if res.returncode != 0:
sys.stderr.write("Failed to clone {}.\n".format(res.args[2]))
except Exception as e:
raise RuntimeError("Unable to clone repos. {}".format(e))
def remove_repos(dirpath: Union[str, pathlib.Path]) -> NoReturn:
"""
Given a name/path of a directory, recursively removes all git repositories
starting from the given directory. This action cannot be undone.
*Warning:* this function will remove the given directory if the given
directory itself is a git repo.
Parameters
----------
dirpath: str | pathlib.Path
Name/path of directory from which recursive removal of repos begins.
Raises
------
FileNotFoundError
Raised if unable to find the given directory.
Examples
--------
>>> datamine.remove_repos('repos_to_remove/')
# removes all repos in directory 'repos_to_remove/'
>>> datamine.remove_repos('repos_to_remove/repo1')
# removes repo 'repo1' in directory 'repos_to_remove/'
"""
try:
repos = __list_repos(dirpath)
cmds = [['rm', '-rf', repo] for repo in repos]
responses = list(map(lambda cmd : subprocess.run(cmd), cmds))
for res in responses:
if res.returncode != 0:
sys.stderr.write("Failed to remove repo {}.\n".format(res.args[2]))
except Exception as e:
raise RuntimeError("Unable to remove repo. {}".format(e))
def list_files_of_type(filetype: Union[str, List[str]],
dirpath: Optional[Union[str, pathlib.Path]] = '.',
exclude_hidden: Optional[bool] = True
) -> List[str]:
"""
Given a file extension and an optional directory path, returns a list of
file paths of files containing the extension. If the directory path is not
specified, function defaults to listing files from the current
working directory.
Parameters
----------
filetype: str | List[str]
File extension of files to list (e.g. ``'.zip'``). Can be a list of
extensions (e.g. ``['.zip', '.shp', '.csv']``).
dirpath: str | pathlib.Path, optional, default = ``'.'``.
Path to directory from which file listing begins. Defaults to
current working directory if not specified.
exclude_hidden: bool, optional, default = ``True``
If false, function includes hidden files in the search.
Returns
-------
List[str]
List of file paths of files containing the given extension.
Raises
------
FileNotFoundError
Raised if unable to find given directory.
Examples
--------
>>> list_of_zips = datamine.list_files_of_type('.zip')
# recursively gets a list of '.zip' files from the current directory
>>> print(list_of_zips)
['./zipfile1.zip', './zipfile2.zip', './shapefiles/shape1.zip',
'./shapefiles/shape2.zip']
>>> list_of_shps = datamine.list_files_of_type('.shp', 'shapefiles/')
# recursively gets a list of '.shp' files from the 'shapefiles/' directory
>>> print(list_of_shps)
['./shapefiles/shape1/shape1.shp', './shapefiles/shape2/shape2.shp']
>>> list_of_csvs = datamine.list_files_of_type('.csv',
... exclude_hidden = False)
# recursively gets a list of '.csv' files, including hidden files
>>> print(list_of_csvs)
['./csv1.csv', './.csv_hidden.csv']
>>> list_of_mix = datamine.list_files_of_type(['.shp', '.zip'])
# recursively gets a list of '.shp' and '.zip' files
>>> print(list_of_mix)
['./shapefiles/shape1/shape1.shp', './shapefiles/shape2/shape2.shp',
'./zipfile1.zip', './zipfile2.zip', './shapefiles/shape1.zip',
'./shapefiles/shape2.zip']
"""
root_path = __get_validated_path(dirpath)
if isinstance(filetype, str):
filetype = [filetype]
all_files = []
for path, _, files in os.walk(root_path):
[all_files.append(os.path.join(path, file))
for file in files if not (exclude_hidden and file[0] == '.')]
return [file for file in all_files
if any([file.endswith(ftype) for ftype in filetype])]
def get_keys_by_category(dictionary: Dict[Hashable, List[Iterable]],
category: Union[Hashable, List[Hashable]]
) -> List[Hashable]:
"""
Given a dictionary with categories, returns a list of keys in the
given category.
Examples of accepted forms of dictionary input:
::
{category1 : [{key1 : value1}, {key2 : value2}],
category2 : [{key3 : value3},]}
::
{category1 : [[key1, key2, key3]]}
::
{category1 : [[key1]],
category2 : [[key2], {key3: value3}]}
Parameters
----------
dictionary : Dict[Hashable, List[Iterable]]
Dictionary containing categories in which keys are stored.
category : Hashable | List[Hashable]
Category containing keys.
Returns
-------
List[Hashable]
List of keys of every key-value pair in the given category of the
given dictionary.
Examples
--------
>>> sample_dict = {'category1' : [{'key1': 1}],
... 'category2' : [{'key2' : 2}, {'key3' : 3}]}
>>> keys = datamine.get_keys_by_category(sample_dict, 'category2')
# gets a list of keys under 'category2' from the dictionary 'sample_dict'
>>> print(keys)
['key2', 'key3']
>>> sample_dict = {'category1' : [['key1', 'key4']],
... 'category2' : [['key2'], {'key3': 'value3'}]}
>>> keys = datamine.get_keys_by_category(sample_dict, 'category2')
# note: keys can be stored in both list and dictionary form
>>> print(keys)
['key2', 'key3']
>>> keys = datamine.get_keys_by_category(sample_dict,
... ['category1', 'category2'])
# gets a list of keys under categories 'category1' and 'category2'
>>> print(keys)
['key1', 'key2', 'key3']
"""
flatten = lambda xs : [x for sublist in xs for x in sublist]
try:
return flatten([list(key) for key in dictionary[category]])
except: # category is a list
return flatten([list(key) for item in category
for key in dictionary[item]])
#########################################
# #
# Helper Definitions #
# #
#########################################
def __generate_clone_cmds(
repos: Optional[Union[Dict[str, str], List[str]]] = None,
dirpath: Optional[Union[str, pathlib.Path]] = None,
shallow: bool = True
) -> List[List[str]]:
"""
Given a list of repos, returns a list of subprocess-valid
git clone commands.
"""
try: # if repos is a Dict - EAFP
cmds = [['git', 'clone', repo['clone_url']] for repo in repos]
except Exception:
pass
try: # if repos is a List - EAFP
cmds = [['git', 'clone', repo] for repo in repos]
except Exception as e:
raise RuntimeError(
'Unable to generate clone commands. {}'.format(e))
if dirpath is not None:
[cmd.append(os.path.join(dirpath, __get_repo_name(cmd[2])))
for cmd in cmds]
if shallow:
[cmd.extend(['--depth', '1']) for cmd in cmds]
return cmds
def __get_repo_name(url: str) -> str:
"""
Returns the name of the repository from its given URL.
"""
parsed = urllib.parse.urlparse(url)
name = os.path.basename(parsed.path)
if name.endswith('.git'):
name = name[:-4]
return name
def __create_gh_repo_url(account: str, repo: str) -> str:
"""
Given an account name and a repo name, returns a cloneable
gh repo url.
"""
return 'https://github.com/' + account + '/' + repo + '.git'
def __list_repos(dirpath: Optional[Union[str, pathlib.Path]] = '.'
) -> List[str]:
"""
Given a starting search directory, returns a list of paths to git repos
on the local machine.
"""
root_path = __get_validated_path(dirpath)
subdirs = []
for path, dirs, _ in os.walk(root_path):
[subdirs.append(os.path.join(path, directory)) for directory in dirs]
return [str(pathlib.Path(subdir).parent) for subdir in subdirs
if pathlib.Path(subdir).name == '.git']
def __get_validated_path(dirpath: Union[str, pathlib.Path]) -> pathlib.Path:
try:
root_path = pathlib.Path(dirpath)
if not os.path.isdir(root_path):
raise FileNotFoundError("'{}.'".format(dirpath))
return root_path
except FileNotFoundError as e:
raise FileNotFoundError("Unable to find directory", e)
except Exception as e:
raise Exception("Failed to traverse path.".format(e))
| en | 0.594174 | gdutils.datamine ================ Provides - A ``python`` module for mining and listing data sources. Metadata -------- :Module: ``gdutils.datamine`` :Filename: `datamine.py <https://github.com/mggg/gdutils/>`_ :Author: `@KeiferC <https://github.com/keiferc>`_ :Date: 27 July 2020 :Version: 1.1.0 :Description: Module for data mining :Contributors: `@InnovativeInventor <https://github.com/InnovativeInventor>`_ Documentation ------------- Documentation for the ``datamine`` module can be found as docstrings. Run ``import gdutils.datamine; help(gdutils.datamine)`` to view documentation. :: $ python >>> import gdutils.datamine; help(gdutils.datamine) Additionally, documentation can be found on `Read the Docs <https://gdutils.readthedocs.io>`_. ######################################### # # # Function Definitions # # # ######################################### Returns a list of tuples of public GitHub repositories and their URLs associated with the given account and account type. Parameters ---------- account : str Github account whose public repos are to be cloned. account_type: str Type of github account whose public repos are to be cloned. Valid options: ``'users'``, ``'orgs'``. Returns ------- List[Tuple[str, str]] A list of tuples of public Github repositories and their URLs. E.g. :: [('boysenberry-repo-1', 'https://github.com/octocat/boysenberry-repo-1.git'), ('git-consortium', 'https://github.com/octocat/git-consortium.git'), ... ('test-repo1', https://github.com/octocat/test-repo1.git)] Raises ------ ValueError Raised if the given account_type is neither ``'users'`` nor ``'orgs'``. RuntimeError Raised if unable to query GitHub for repo information. Examples -------- >>> repos = datamine.list_gh_repos('octocat', 'users') # gets a list of all repos and their GitHub URLs for account 'octocat' >>> for repo, url in repos: ... print('{} : {}'.format(repo, url)) boysenberry-repo-1 : https://github.com/octocat/boysenberry-repo-1.git git-consortium : https://github.com/octocat/git-consortium.git hello-worId : https://github.com/octocat/hello-worId.git Hello-World : https://github.com/octocat/Hello-World.git linguist : https://github.com/octocat/linguist.git octocat.github.io : https://github.com/octocat/octocat.github.io.git Spoon-Knife : https://github.com/octocat/Spoon-Knife.git test-repo1 : https://github.com/octocat/test-repo1.git Clones public GitHub repositories into the given directory. If directory path is not provided, clones repos into the current working directory. Parameters ---------- account : str GitHub account whose public repos are to be cloned. account_type : str Type of GitHub account whose public repos are to be cloned. Valid options: ``'users'``, ``'orgs'``. repos : List[str], optional, default = ``None`` List of specific repositories to clone. outpath : str | pathlib.Path, optional, default = ``None`` Path to which repos are to be cloned. If not specified, clones repos into current working directory. shallow : bool | optional, default = ``True`` Determines whether the clone will be shallow or not. If not specified, defaults to a shallow git clone. silent : bool | optional, default = ``False`` Determines whether the clone will be silent or not. If not specified, defaults to a loud git clone. Raises ------ ValueError Raised if provided an account type other than ``'users'`` or ``'orgs'``. 
Examples -------- >>> datamine.clone_repos('mggg-states', 'orgs') # clones all repositories of 'mggg-states' into the current directory >>> datamine.clone_repos('mggg-states', 'orgs', ['AZ-shapefiles']) # clones repo 'AZ-shapefiles' from 'mggg-states' into current directory >>> datamine.clone_repos('mggg-states', 'orgs', ... ['AZ-shapefiles', 'HI-shapefiles']) # clones repos 'AZ-shapefiles' & 'HI-shapefiles' into current directory >>> datamine.clone_repos('mggg-states', 'orgs', ['HI-shapefiles'], 'shps/') # clones repo 'HI-shapefiles' into directory 'shps/' >>> datamine.clone_repos('octocat', 'users', outpath='cloned-repos/') # clones all repos of 'octocat' into directory 'cloned-repos/' >>> datamine.clone_repos('octocat', 'users', outpath='cloned-repos/', ... shallow=False) # deep clones all repos of 'octocat' into directory 'cloned-repos/' >>> datamine.clone_repos('octocat', 'users', outpath='cloned-repos/', ... silent=True) # silently deep clones all repos of 'octocat' into directory 'cloned-repos/' Given a name/path of a directory, recursively removes all git repositories starting from the given directory. This action cannot be undone. *Warning:* this function will remove the given directory if the given directory itself is a git repo. Parameters ---------- dirpath: str | pathlib.Path Name/path of directory from which recursive removal of repos begins. Raises ------ FileNotFoundError Raised if unable to find the given directory. Examples -------- >>> datamine.remove_repos('repos_to_remove/') # removes all repos in directory 'repos_to_remove/' >>> datamine.remove_repos('repos_to_remove/repo1') # removes repo 'repo1' in directory 'repos_to_remove/' Given a file extension and an optional directory path, returns a list of file paths of files containing the extension. If the directory path is not specified, function defaults to listing files from the current working directory. Parameters ---------- filetype: str | List[str] File extension of files to list (e.g. ``'.zip'``). Can be a list of extensions (e.g. ``['.zip', '.shp', '.csv']``). dirpath: str | pathlib.Path, optional, default = ``'.'``. Path to directory from which file listing begins. Defaults to current working directory if not specified. exclude_hidden: bool, option, default = ``True`` If false, function includes hidden files in the search. Returns ------- List[str] List of file paths of files containing the given extension. Raises ------ FileNotFoundError Raised if unable to find given directory. Examples -------- >>> list_of_zips = datamine.list_files_of_type('.zip') # recursively gets a list of '.zip' files from the current directory >>> print(list_of_zips) ['./zipfile1.zip', './zipfile2.zip', './shapefiles/shape1.zip', './shapefiles/shape2.zip'] >>> list_of_shps = datamine.list_files_of_type('.shp', 'shapefiles/') # recursively gets a list of '.shp' files from the 'shapefiles/' directory >>> print(list_of_shps) ['./shapefiles/shape1/shape1.shp', './shapefiles/shape2/shape2.shp'] >>> list_of_csvs = datamine.list_files_of_type('.csv', ... 
exclude_hidden = False) # recursively gets a list of '.csv' files, including hidden files >>> print(list_of_csvs) ['./csv1.csv', './.csv_hidden.csv'] >>> list_of_mix = datamine.list_files_of_type(['.shp', '.zip']) # recursively gets a list of '.shp' and '.zip' files >>> print(list_of_mix) ['./shapefiles/shape1/shape1.shp', './shapefiles/shape2/shape2.shp', './zipfile1.zip', './zipfile2.zip', './shapefiles/shape1.zip', './shapefiles/shape2.zip'] Given a dictionary with categories, returns a list of keys in the given category. Examples of accepted forms of dictionary input: :: {category1 : [{key1 : value1}, {key2 : value2}] category2 : [{key3 : value3},]} :: {category1 : [[key1, key2, key3]]} :: {category1 : [[key1]], category2 : [[key2], {key3: value3}]} Parameters ---------- dictionary : Dict[Hashable, List[Iterable]] Dictionary containing categories in which keys are stored. category : Hashable | List[Hashable] Category containing keys. Returns ------- List[Hashable] List of keys of every key-value pair in the given category of the given dictionary. Examples -------- >>> sample_dict = {'category1' : [{'key1': 1}], ... 'category2' : [{'key2' : 2}, {'key3' : 3}]} >>> keys = datamine.get_keys_by_category(sample_dict, 'category2') # gets a list of keys under 'category2' from the dictionary 'sample_dict' >>> print(keys) ['key2', 'key3'] >>> sample_dict = {'category1' : [['key1', 'key4']], ... 'category2' : [['key2'], {'key3': 'value3'}]} >>> keys = datamine.get_keys_by_category(sample_dict, 'category2') # note: keys can be stored in both list and dictionary form >>> print(keys) ['key2', 'key3'] >>> keys = datamine.get_keys_by_category(sample_dict, ... ['category1', 'category2']) # gets a list of keys under categories 'category1' and 'category2' >>> print(keys) ['key1', 'key2', 'key3'] # category is a list ######################################### # # # Helper Definitions # # # ######################################### Given a list of repos, returns a list of subprocess-valid git clone commands. # if repos is a Dict - EAFP # if repos is a List - EAFP Returns the name of the repository from its given URL. Given an account name and a repo name, returns a cloneable gh repo url. Given a starting search directory, returns a list of paths to git repos on the local machine. | 2.506256 | 3 |
examples/example_ocr.py | Ichunjo/vardefunc | 18 | 6632206 | <reponame>Ichunjo/vardefunc<filename>examples/example_ocr.py
import vapoursynth as vs
from vardefunc.ocr import OCR
from vsutil import get_y
core = vs.core
# Import your clip
SOURCE = core.std.BlankClip(format=vs.YUV410P8)
def ocring() -> None:
clip = SOURCE
ocr = OCR(get_y(clip), (1900, 125, 70), coord_alt=(1500, 125, 70))
ocr.preview_cropped.set_output(0)
ocr.preview_cleaned.set_output(1)
ocr.launch(datapath=r'C:\Users\Varde\AppData\Roaming\VapourSynth\plugins64\tessdata', language='fra+eng')
ocr.write_ass(
'output.ass',
[('_', '-'), ('…', '...'), ('‘', "'"), ('’', "'"), (" '", "'"),
('—', '-'), ('- ', '– '), ('0u', 'Ou'), ('Gomme', 'Comme'), ('A ', 'À '),
('II', 'Il'), ('ees', 'ces'), ('@', 'O'), ('oe', 'œ'), ('téte', 'tête')]
)
if __name__ == '__main__':
ocring()
else:
ocring()
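# --- Illustrative sketch (not part of the original example) -----------------
# write_ass() above receives a list of (wrong, right) pairs used to clean up
# common OCR mistakes. How vardefunc applies them internally is not shown
# here; a plain-Python equivalent for a single recognised line would be:
def _apply_ocr_fixes(line: str, pairs) -> str:
    for wrong, right in pairs:
        line = line.replace(wrong, right)
    return line
# e.g. _apply_ocr_fixes('II etait 0u presque', [('II', 'Il'), ('0u', 'Ou')])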
| import vapoursynth as vs
from vardefunc.ocr import OCR
from vsutil import get_y
core = vs.core
# Import your clip
SOURCE = core.std.BlankClip(format=vs.YUV410P8)
def ocring() -> None:
clip = SOURCE
ocr = OCR(get_y(clip), (1900, 125, 70), coord_alt=(1500, 125, 70))
ocr.preview_cropped.set_output(0)
ocr.preview_cleaned.set_output(1)
ocr.launch(datapath=r'C:\Users\Varde\AppData\Roaming\VapourSynth\plugins64\tessdata', language='fra+eng')
ocr.write_ass(
'output.ass',
[('_', '-'), ('…', '...'), ('‘', "'"), ('’', "'"), (" '", "'"),
('—', '-'), ('- ', '– '), ('0u', 'Ou'), ('Gomme', 'Comme'), ('A ', 'À '),
('II', 'Il'), ('ees', 'ces'), ('@', 'O'), ('oe', 'œ'), ('téte', 'tête')]
)
if __name__ == '__main__':
ocring()
else:
ocring() | en | 0.516469 | # Import your clip | 2.132953 | 2 |
dags/tableau/.ipynb_checkpoints/config-checkpoint.py | divinorum-webb/docker-airflow | 1 | 6632207 | <reponame>divinorum-webb/docker-airflow<gh_stars>1-10
"""
Reference Notes
tableau_server_config details configuration for TableauServer class
'password' -> Note that this is NOT your AD password.
If you use your AD password, authentication will fail.
You will need to obtain your actual Tableau Server password for this.
For example, your AD password could be '<PASSWORD>', but your Tableau Server password is '<PASSWORD>'.
You probably need a Tableau Server Admin to help you set / discover your Tableau Server password.
'api_version' -> Change this value to match the API version which corresponds to the current Tableau Server version.
Look here to look up the appropriate api_version, given the current Tableau Server version:
https://onlinehelp.tableau.com/current/api/rest_api/en-us/REST/rest_api_concepts_versions.htm
'cache_buster' -> This value references a dummy parameter within a Tableau Workbook file.
We use this value when downloading images from Tableau Server so that the image returned is not a cached ancient relic of days past.
Your workbook needs a parameter whose name matches that of your 'cache_buster' name for this to work.
The subscription_email_config dict details configuration for the EmailSubscription class
"""
tableau_server_config = {
'tableau_prod': {
'server': 'https://tableaupoc.interworks.com',
'api_version': '3.2',
'username': 'estam',
'password': '<PASSWORD>!',
'site': 'estam',
'cache_buster': 'Donut',
'temp_dir': '/dags/tableau_subscriptions/temp/'
}
}
subscription_email_config = {
'default': {
'smtp': {
'username': '<EMAIL>',
'password': '<PASSWORD>!',
'server': 'smtp.office365.com:587'
},
'assets': {
'html_file': '/dags/tableau_subscriptions/assets/html_test.html'
}
}
}
| """
Reference Notes
tableau_server_config details configuration for TableauServer class
'password' -> Note that this is NOT your AD password.
If you use your AD password, authentication will fail.
You will need to obtain your actual Tableau Server password for this.
For example, your AD password could be '<PASSWORD>', but your Tableau Server password is '<PASSWORD>'.
You probably need a Tableau Server Admin to help you set / discover your Tableau Server password.
'api_version' -> Change this value to match the API version which corresponds to the current Tableau Server version.
Look here to look up the appropriate api_version, given the current Tableau Server version:
https://onlinehelp.tableau.com/current/api/rest_api/en-us/REST/rest_api_concepts_versions.htm
'cache_buster' -> This value references a dummy parameter within a Tableau Workbook file.
We use this value when downloading images from Tableau Server so that the image returned is not a cached ancient relic of days past.
Your workbook needs a parameter whose name matches that of your 'cache_buster' name for this to work.
The subscription_email_config dict details configuration for the EmailSubscription class
"""
tableau_server_config = {
'tableau_prod': {
'server': 'https://tableaupoc.interworks.com',
'api_version': '3.2',
'username': 'estam',
'password': '<PASSWORD>!',
'site': 'estam',
'cache_buster': 'Donut',
'temp_dir': '/dags/tableau_subscriptions/temp/'
}
}
subscription_email_config = {
'default': {
'smtp': {
'username': '<EMAIL>',
'password': '<PASSWORD>!',
'server': 'smtp.office365.com:587'
},
'assets': {
'html_file': '/dags/tableau_subscriptions/assets/html_test.html'
}
}
} | en | 0.68281 | Reference Notes tableau_server_config details configuration for TableauServer class 'password' -> Note that this is NOT your AD password. If you use your AD password, authentication will fail. You will need to obtain your actual Tableau Server password for this. For example, your AD password could be '<PASSWORD>', but your Tableau Server password is '<PASSWORD>'. You probably need a Tableau Server Admin to help you set / discover your Tableau Server password. 'api_version' -> Change this value to match the API version which corresponds to the current Tableau Server version. Look here to look up the appropriate api_version, given the current Tableau Server version: https://onlinehelp.tableau.com/current/api/rest_api/en-us/REST/rest_api_concepts_versions.htm 'cache_buster' -> This value references a dummy parameter within a Tableau Workbook file. We use this value when downloading images from Tableau Server so that the image returned is not a cached ancient relic of days past. Your workbook needs a parameter whose name matches that of your 'cache_buster' name for this to work. The subscription_email_config dict details configuration for the EmailSubscription class | 1.482136 | 1 |
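A small sketch of how a consumer might read one environment out of tableau_server_config and build a REST sign-in URL; the TableauServer class the docstring mentions is not shown here, so the /api/{version}/auth/signin endpoint pattern below is an assumption based on Tableau's documented REST sign-in route:

def build_signin_url(config, env='tableau_prod'):
    # Pull one environment's settings and fail loudly on missing keys.
    env_conf = config[env]
    missing = [k for k in ('server', 'api_version', 'username', 'password', 'site')
               if k not in env_conf]
    if missing:
        raise KeyError('missing config keys: {}'.format(', '.join(missing)))
    return '{server}/api/{api_version}/auth/signin'.format(**env_conf)

# build_signin_url(tableau_server_config) -> 'https://tableaupoc.interworks.com/api/3.2/auth/signin'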
spacenav_remote/src/spacenav_remote/server.py | carlosvquezada/lg_ros_nodes | 0 | 6632208 | #!/usr/bin/env python
import SocketServer
import thread
import socket
def print_handler(data):
print data
class MyTCPHandler(SocketServer.StreamRequestHandler):
def __init__(self, callback, *args, **keys):
self.callback = callback
SocketServer.StreamRequestHandler.__init__(self, *args, **keys)
def handle(self):
data = self.rfile.readline().strip()
while data:
self.callback(data)
data = self.rfile.readline().strip()
def handler_factory(callback):
def createHandler(*args, **keys):
return MyTCPHandler(callback, *args, **keys)
return createHandler
class SpacenavRemote(object):
def __init__(self, handler=print_handler, port=6465):
HOST, PORT = '', port
# Create the TCP server, binding to all interfaces on the given port (default 6465)
SocketServer.TCPServer.allow_reuse_address = True
self.server = SocketServer.TCPServer((HOST, PORT), handler_factory(handler))
self.server.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def fork_and_run(self):
"""
Activate the server in separate thread
"""
thread.start_new_thread(self.serve_forever, ())
def serve_forever(self):
try:
self.server.serve_forever()
except KeyboardInterrupt:
self.shutdown()
def shutdown(self):
self.server.shutdown()
self.server.socket.close()
if __name__ == "__main__":
print "Start spacenav remote server"
server = SpacenavRemote()
server.serve_forever()
| #!/usr/bin/env python
import SocketServer
import thread
import socket
def print_handler(data):
print data
class MyTCPHandler(SocketServer.StreamRequestHandler):
def __init__(self, callback, *args, **keys):
self.callback = callback
SocketServer.StreamRequestHandler.__init__(self, *args, **keys)
def handle(self):
data = self.rfile.readline().strip()
while data:
self.callback(data)
data = self.rfile.readline().strip()
def handler_factory(callback):
def createHandler(*args, **keys):
return MyTCPHandler(callback, *args, **keys)
return createHandler
class SpacenavRemote(object):
def __init__(self, handler=print_handler, port=6465):
HOST, PORT = '', port
# Create the TCP server, binding to all interfaces on the given port (default 6465)
SocketServer.TCPServer.allow_reuse_address = True
self.server = SocketServer.TCPServer((HOST, PORT), handler_factory(handler))
self.server.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def fork_and_run(self):
"""
Activate the server in separate thread
"""
thread.start_new_thread(self.serve_forever, ())
def serve_forever(self):
try:
self.server.serve_forever()
except KeyboardInterrupt:
self.shutdown()
def shutdown(self):
self.server.shutdown()
self.server.socket.close()
if __name__ == "__main__":
print "Start spacenav remote server"
server = SpacenavRemote()
server.serve_forever()
| en | 0.639124 | #!/usr/bin/env python # Create the server, binding to localhost on port 6564 Activate the server in separate thread | 2.911752 | 3 |
examples/splunk_to_argus.py | salesforce/python-argusclient | 16 | 6632209 | <gh_stars>10-100
#
# Copyright (c) 2016, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import requests, sys, json, os, time, calendar, csv, getpass, logging
from optparse import OptionParser, Option, OptionValueError
from six import itervalues
from six.moves import urllib
import splunklib.client as splunkclient
from argusclient import ArgusServiceClient, Metric
class MyOptionParser(OptionParser,object):
def check_values(self, values, args):
opt, args = super(MyOptionParser, self).check_values(values, args)
if not opt.password:
opt.password = <PASSWORD>("Password: ")
return opt, args
parser = MyOptionParser()
parser.add_option("-q", "--quite", dest="quite", action="store_true",
help="Quite mode, output only errors", default=False)
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="Verbose mode", default=False)
parser.add_option("-d", "--debug", dest="debug", action="store_true",
help="Debug mode", default=False)
parser.add_option("-u", "--user", dest="user", default=getpass.getuser(),
help="Specify username for Argus/Splunk connection")
parser.add_option("-p", "--pass", dest="password", default=None,
help="Specify password for Argus/Splunk connection (not specifying this option will result in getting prompted")
parser.add_option("--argusws", dest="argusws",
help="Specify Argus webservice endpoint")
parser.add_option("--splunkapi", dest="splunkapi",
help="Specify Splunk API endpoint")
parser.add_option("--alert", dest="alert", action="store_true",
help="Create/update alert", default=False)
parser.add_option("--dashboard", dest="dashboard", action="store_true",
help="Create/update dashboard", default=False)
parser.add_option("-I", "--index", dest="index", default="na44",
help="Specify the Splunk index to search against")
parser.add_option("-S", "--earliest", dest="earliest", default="-1d@d",
help="Specify splunk time expression for the start of the time range")
parser.add_option("-E", "--latest", dest="latest", default="-0d@d",
help="Specify splunk time expression for the end of the time range")
parser.add_option("-P", "--pattern", dest="pattern", default=None,
help="Specify a Splunk pattern to search for")
parser.add_option("-T", "--span", dest="span", default="15m",
help="Specify an alternative span for bucketing option")
parser.add_option("--scope", dest="scope", default="patternStats",
help="The Argus scope name for posting metrics")
parser.add_option("--namespace", dest="namespace", default="testNamespace",
help="The Argus namespace name for posting metrics")
(opts, args) = parser.parse_args()
if not opts.pattern:
parser.error("Please specify a Splunk pattern to search for")
if not opts.argusws:
parser.error("Need the URL to the Argus endpoint")
if not opts.splunkapi:
parser.error("Need the URL to the Splunk endpoint")
logging.basicConfig()
if not opts.quite:
logging.root.setLevel(opts.quite and logging.WARN or (opts.debug and logging.DEBUG or logging.INFO))
def to_gmt_epoch(tsstr):
# tsstr is expected to be in the default Splunk format: "2015-11-01T00:00:00.000+00:00"
return calendar.timegm(time.strptime(tsstr[:19], "%Y-%m-%dT%H:%M:%S"))
def get_splunk_metrics(opts):
splunkendpoint = urllib.parse.urlsplit(opts.splunkapi)
splunk_opts = {
"scheme": splunkendpoint.scheme,
"host": splunkendpoint.hostname,
"port": splunkendpoint.port,
"username": opts.user,
"password": opts.password,
}
try:
if not opts.quite:
logging.info("Logging into Splunk service")
service = splunkclient.connect(**splunk_opts)
if not opts.quite:
logging.info("Splunk login successful")
except:
logging.exception("Splunk login failed")
return None
splunkquery = """
search index={index} earliest={earliest} latest={latest} "{pattern}"
| bucket _time span={span}
| stats count by _time, host
| appendpipe [stats avg(count) as avgCount, sum(count) as sumCount, min(count) as minCount, max(count) as maxCount, stdev(count) as stdevCount by host]
"""
if not opts.quite:
logging.info("Submitting job to Splunk..")
job = service.jobs.create(splunkquery.format(index=opts.index, pattern=opts.pattern, earliest=opts.earliest, latest=opts.latest, span=opts.span))
if not opts.quite:
logging.info("Waiting for job to be ready..")
while not job.is_ready():
if opts.verbose:
logging.info("Still waiting for job to be ready..")
time.sleep(1)
else:
if not opts.quite:
logging.info("Job is ready, waiting for completion..")
while not job.is_done():
if opts.verbose:
logging.info("Still waiting for job to be completed..")
time.sleep(2)
else:
if not opts.quite:
logging.info("Job is done, collecting results..")
results = job.results(output_mode="csv", count=0)
csvr = csv.reader(results)
cols = None
data = []
for row in csvr:
logging.debug("Got row: %s", row)
if not cols:
cols = row
continue
data.append(dict(zip(cols, row)))
if not opts.quite:
logging.info("Total result count: %s", len(data))
runts = int(time.time())
m_dict = {}
patternTagVal = opts.pattern.replace(" ", "__") # We can't have spaces in tag values.
for row in data:
host = row["host"]
if not host:
logging.warn("Skipping row with no host: %s", row)
continue
for col in row:
if col in ("_time", "host") or not row[col]:
continue
m_key = (host, col)
if not m_key in m_dict:
m_dict[m_key] = Metric(opts.scope, patternTagVal+"."+col, tags=dict(host=host, patternStr=patternTagVal), namespace=opts.namespace)
m = m_dict[m_key]
ts = row["_time"] and to_gmt_epoch(row["_time"]) or runts
val = row[col]
if "." in val:
val = float(val)
else:
val = int(val)
if logging.root.isEnabledFor(logging.DEBUG):
logging.debug("Adding %s at timestamp: %s for metric: %s", val, ts, m.desc())
m.datapoints[ts] = val
if not opts.quite:
logging.info("Total metric count: %s", len(m_dict))
job.cancel()
return list(itervalues(m_dict))
metrics = get_splunk_metrics(opts)
if metrics:
argus = ArgusServiceClient(opts.user,
opts.password,
endpoint=opts.argusws)
if not opts.quite:
logging.info("Logging into Argus service")
try:
argus.login()
if opts.verbose:
logging.info("Argus login successful")
if not opts.quite:
logging.info("Posting metrics to Argus..")
argus.metrics.add(metrics);
if not opts.quite:
logging.info("Done.")
except:
logging.exception("Argus failure")
| #
# Copyright (c) 2016, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import requests, sys, json, os, time, calendar, csv, getpass, logging
from optparse import OptionParser, Option, OptionValueError
from six import itervalues
from six.moves import urllib
import splunklib.client as splunkclient
from argusclient import ArgusServiceClient, Metric
class MyOptionParser(OptionParser,object):
def check_values(self, values, args):
opt, args = super(MyOptionParser, self).check_values(values, args)
if not opt.password:
opt.password = <PASSWORD>("Password: ")
return opt, args
parser = MyOptionParser()
parser.add_option("-q", "--quite", dest="quite", action="store_true",
help="Quite mode, output only errors", default=False)
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="Verbose mode", default=False)
parser.add_option("-d", "--debug", dest="debug", action="store_true",
help="Debug mode", default=False)
parser.add_option("-u", "--user", dest="user", default=getpass.getuser(),
help="Specify username for Argus/Splunk connection")
parser.add_option("-p", "--pass", dest="password", default=None,
help="Specify password for Argus/Splunk connection (not specifying this option will result in getting prompted")
parser.add_option("--argusws", dest="argusws",
help="Specify Argus webservice endpoint")
parser.add_option("--splunkapi", dest="splunkapi",
help="Specify Splunk API endpoint")
parser.add_option("--alert", dest="alert", action="store_true",
help="Create/update alert", default=False)
parser.add_option("--dashboard", dest="dashboard", action="store_true",
help="Create/update dashboard", default=False)
parser.add_option("-I", "--index", dest="index", default="na44",
help="Specify the Splunk index to search against")
parser.add_option("-S", "--earliest", dest="earliest", default="-1d@d",
help="Specify splunk time expression for the start of the time range")
parser.add_option("-E", "--latest", dest="latest", default="-0d@d",
help="Specify splunk time expression for the end of the time range")
parser.add_option("-P", "--pattern", dest="pattern", default=None,
help="Specify a Splunk pattern to search for")
parser.add_option("-T", "--span", dest="span", default="15m",
help="Specify an alternative span for bucketing option")
parser.add_option("--scope", dest="scope", default="patternStats",
help="The Argus scope name for posting metrics")
parser.add_option("--namespace", dest="namespace", default="testNamespace",
help="The Argus namespace name for posting metrics")
(opts, args) = parser.parse_args()
if not opts.pattern:
parser.error("Please specify a Splunk pattern to search for")
if not opts.argusws:
parser.error("Need the URL to the Argus endpoint")
if not opts.splunkapi:
parser.error("Need the URL to the Splunk endpoint")
logging.basicConfig()
if not opts.quite:
logging.root.setLevel(opts.quite and logging.WARN or (opts.debug and logging.DEBUG or logging.INFO))
def to_gmt_epoch(tsstr):
# tsstr is expected to be in the default Splunk format: "2015-11-01T00:00:00.000+00:00"
return calendar.timegm(time.strptime(tsstr[:19], "%Y-%m-%dT%H:%M:%S"))
def get_splunk_metrics(opts):
splunkendpoint = urllib.parse.urlsplit(opts.splunkapi)
splunk_opts = {
"scheme": splunkendpoint.scheme,
"host": splunkendpoint.hostname,
"port": splunkendpoint.port,
"username": opts.user,
"password": opts.password,
}
try:
if not opts.quite:
logging.info("Logging into Splunk service")
service = splunkclient.connect(**splunk_opts)
if not opts.quite:
logging.info("Splunk login successful")
except:
logging.exception("Splunk login failed")
return None
splunkquery = """
search index={index} earliest={earliest} latest={latest} "{pattern}"
| bucket _time span={span}
| stats count by _time, host
| appendpipe [stats avg(count) as avgCount, sum(count) as sumCount, min(count) as minCount, max(count) as maxCount, stdev(count) as stdevCount by host]
"""
if not opts.quite:
logging.info("Submitting job to Splunk..")
job = service.jobs.create(splunkquery.format(index=opts.index, pattern=opts.pattern, earliest=opts.earliest, latest=opts.latest, span=opts.span))
if not opts.quite:
logging.info("Waiting for job to be ready..")
while not job.is_ready():
if opts.verbose:
logging.info("Still waiting for job to be ready..")
time.sleep(1)
else:
if not opts.quite:
logging.info("Job is ready, waiting for completion..")
while not job.is_done():
if opts.verbose:
logging.info("Still waiting for job to be completed..")
time.sleep(2)
else:
if not opts.quite:
logging.info("Job is done, collecting results..")
results = job.results(output_mode="csv", count=0)
csvr = csv.reader(results)
cols = None
data = []
for row in csvr:
logging.debug("Got row: %s", row)
if not cols:
cols = row
continue
data.append(dict(zip(cols, row)))
if not opts.quite:
logging.info("Total result count: %s", len(data))
runts = int(time.time())
m_dict = {}
patternTagVal = opts.pattern.replace(" ", "__") # We can't have spaces in tag values.
for row in data:
host = row["host"]
if not host:
logging.warn("Skipping row with no host: %s", row)
continue
for col in row:
if col in ("_time", "host") or not row[col]:
continue
m_key = (host, col)
if not m_key in m_dict:
m_dict[m_key] = Metric(opts.scope, patternTagVal+"."+col, tags=dict(host=host, patternStr=patternTagVal), namespace=opts.namespace)
m = m_dict[m_key]
ts = row["_time"] and to_gmt_epoch(row["_time"]) or runts
val = row[col]
if "." in val:
val = float(val)
else:
val = int(val)
if logging.root.isEnabledFor(logging.DEBUG):
logging.debug("Adding %s at timestamp: %s for metric: %s", val, ts, m.desc())
m.datapoints[ts] = val
if not opts.quite:
logging.info("Total metric count: %s", len(m_dict))
job.cancel()
return list(itervalues(m_dict))
metrics = get_splunk_metrics(opts)
if metrics:
argus = ArgusServiceClient(opts.user,
opts.password,
endpoint=opts.argusws)
if not opts.quite:
logging.info("Logging into Argus service")
try:
argus.login()
if opts.verbose:
logging.info("Argus login successful")
if not opts.quite:
logging.info("Posting metrics to Argus..")
argus.metrics.add(metrics);
if not opts.quite:
logging.info("Done.")
except:
logging.exception("Argus failure") | en | 0.834089 | # # Copyright (c) 2016, salesforce.com, inc. # All rights reserved. # Licensed under the BSD 3-Clause license. # For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause # # tsstr is expected to be in the default Splunk format: "2015-11-01T00:00:00.000+00:00" search index={index} earliest={earliest} latest={latest} "{pattern}" | bucket _time span={span} | stats count by _time, host | appendpipe [stats avg(count) as avgCount, sum(count) as sumCount, min(count) as minCount, max(count) as maxCount, stdev(count) as stdevCount by host] # We can't have spaces in tag values. | 2.03605 | 2 |
heat/engine/resources/openstack/mistral/workflow.py | maestro-hybrid-cloud/heat | 0 | 6632210 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
import yaml
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine import support
class Workflow(signal_responder.SignalResponder,
resource.Resource):
support_status = support.SupportStatus(version='2015.1')
default_client_name = 'mistral'
entity = 'workflows'
PROPERTIES = (
NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS, TASK_DEFAULTS
) = (
'name', 'type', 'description', 'input', 'output', 'tasks', 'params',
'task_defaults'
)
_TASKS_KEYS = (
TASK_NAME, TASK_DESCRIPTION, ON_ERROR, ON_COMPLETE, ON_SUCCESS,
POLICIES, ACTION, WORKFLOW, PUBLISH, TASK_INPUT, REQUIRES,
RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT,
WITH_ITEMS, KEEP_RESULT, TARGET
) = (
'name', 'description', 'on_error', 'on_complete', 'on_success',
'policies', 'action', 'workflow', 'publish', 'input', 'requires',
'retry', 'wait_before', 'wait_after', 'pause_before', 'timeout',
'with_items', 'keep_result', 'target'
)
_TASKS_TASK_DEFAULTS = [
ON_ERROR, ON_COMPLETE, ON_SUCCESS,
REQUIRES, RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT
]
_SIGNAL_DATA_KEYS = (
SIGNAL_DATA_INPUT, SIGNAL_DATA_PARAMS
) = (
'input', 'params'
)
ATTRIBUTES = (
WORKFLOW_DATA, ALARM_URL, EXECUTIONS
) = (
'data', 'alarm_url', 'executions'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Workflow name.')
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('Workflow type.'),
constraints=[
constraints.AllowedValues(['direct', 'reverse'])
],
required=True,
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Workflow description.'),
update_allowed=True
),
INPUT: properties.Schema(
properties.Schema.MAP,
_('Dictionary which contains input for workflow.'),
update_allowed=True
),
OUTPUT: properties.Schema(
properties.Schema.MAP,
_('Any data structure arbitrarily containing YAQL '
'expressions that defines workflow output. May be '
'nested.'),
update_allowed=True
),
PARAMS: properties.Schema(
properties.Schema.MAP,
_("Workflow additional parameters. If Workflow is reverse typed, "
"params requires 'task_name', which defines initial task."),
update_allowed=True
),
TASK_DEFAULTS: properties.Schema(
properties.Schema.MAP,
_("Default settings for some of task "
"attributes defined "
"at workflow level."),
support_status=support.SupportStatus(version='5.0.0'),
schema={
ON_SUCCESS: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed successfully.')
),
ON_ERROR: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed with an error.')
),
ON_COMPLETE: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed regardless of whether '
'it is successful or not.')
),
REQUIRES: properties.Schema(
properties.Schema.LIST,
_('List of tasks which should be executed before '
'this task. Used only in reverse workflows.')
),
RETRY: properties.Schema(
properties.Schema.MAP,
_('Defines a pattern how task should be repeated in '
'case of an error.')
),
WAIT_BEFORE: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine'
' should wait before starting a task.')
),
WAIT_AFTER: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine'
' should wait after a task has completed before'
' starting next tasks defined in '
'on-success, on-error or on-complete.')
),
PAUSE_BEFORE: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines whether Mistral Engine should put the '
'workflow on hold or not before starting a task')
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Defines a period of time in seconds after which '
'a task will be failed automatically '
'by engine if hasn\'t completed.')
),
},
update_allowed=True
),
TASKS: properties.Schema(
properties.Schema.LIST,
_('Dictionary containing workflow tasks.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
TASK_NAME: properties.Schema(
properties.Schema.STRING,
_('Task name.'),
required=True
),
TASK_DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Task description.')
),
TASK_INPUT: properties.Schema(
properties.Schema.MAP,
_('Actual input parameter values of the task.')
),
ACTION: properties.Schema(
properties.Schema.STRING,
_('Name of the action associated with the task. '
'Either action or workflow may be defined in the '
'task.')
),
WORKFLOW: properties.Schema(
properties.Schema.STRING,
_('Name of the workflow associated with the task. '
'Can be defined by intrinsic function get_resource '
'or by name of the referenced workflow, i.e. '
'{ workflow: wf_name } or '
'{ workflow: { get_resource: wf_name }}. Either '
'action or workflow may be defined in the task.')
),
PUBLISH: properties.Schema(
properties.Schema.MAP,
_('Dictionary of variables to publish to '
'the workflow context.')
),
ON_SUCCESS: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed successfully.')
),
ON_ERROR: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed with an error.')
),
ON_COMPLETE: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed regardless of whether '
'it is successful or not.')
),
POLICIES: properties.Schema(
properties.Schema.MAP,
_('Dictionary-like section defining task policies '
'that influence how Mistral Engine runs tasks. Must '
'satisfy Mistral DSL v2.'),
support_status=support.SupportStatus(
status=support.DEPRECATED,
version='5.0.0',
message=_('Add needed policies directly to '
'the task, Policy keyword is not '
'needed'),
previous_status=support.SupportStatus(
version='2015.1'))
),
REQUIRES: properties.Schema(
properties.Schema.LIST,
_('List of tasks which should be executed before '
'this task. Used only in reverse workflows.')
),
RETRY: properties.Schema(
properties.Schema.MAP,
_('Defines a pattern how task should be repeated in '
'case of an error.'),
support_status=support.SupportStatus(version='5.0.0')
),
WAIT_BEFORE: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine '
'should wait before starting a task.'),
support_status=support.SupportStatus(version='5.0.0')
),
WAIT_AFTER: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral '
'Engine should wait after '
'a task has completed before starting next tasks '
'defined in on-success, on-error or on-complete.'),
support_status=support.SupportStatus(version='5.0.0')
),
PAUSE_BEFORE: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines whether Mistral Engine should '
'put the workflow on hold '
'or not before starting a task.'),
support_status=support.SupportStatus(version='5.0.0')
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Defines a period of time in seconds after which a '
'task will be failed automatically by engine '
'if hasn\'t completed.'),
support_status=support.SupportStatus(version='5.0.0')
),
WITH_ITEMS: properties.Schema(
properties.Schema.STRING,
_('If configured, it allows to run action or workflow '
'associated with a task multiple times '
'on a provided list of items.'),
support_status=support.SupportStatus(version='5.0.0')
),
KEEP_RESULT: properties.Schema(
properties.Schema.BOOLEAN,
_('Allowing not to store action results '
'after task completion.'),
support_status=support.SupportStatus(version='5.0.0')
),
TARGET: properties.Schema(
properties.Schema.STRING,
_('It defines an executor to which task action '
'should be sent to.'),
support_status=support.SupportStatus(version='5.0.0')
),
},
),
required=True,
update_allowed=True
)
}
attributes_schema = {
WORKFLOW_DATA: attributes.Schema(
_('A dictionary which contains name and input of the workflow.'),
type=attributes.Schema.MAP
),
ALARM_URL: attributes.Schema(
_("A signed url to create executions for workflows specified in "
"Workflow resource."),
type=attributes.Schema.STRING
),
EXECUTIONS: attributes.Schema(
_("List of workflows' executions, each of them is a dictionary "
"with information about execution. Each dictionary returns "
"values for next keys: id, workflow_name, created_at, "
"updated_at, state for current execution state, input, output."),
type=attributes.Schema.LIST
)
}
def get_reference_id(self):
return self._workflow_name()
def _validate_signal_data(self, data):
if data is not None:
input_value = data.get(self.SIGNAL_DATA_INPUT)
params_value = data.get(self.SIGNAL_DATA_PARAMS)
if input_value is not None:
if not isinstance(input_value, dict):
message = (_('Input in signal data must be a map, '
'find a %s') % type(input_value))
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
for key in six.iterkeys(input_value):
if (self.properties.get(self.INPUT) is None
or key not in self.properties.get(self.INPUT)):
message = _('Unknown input %s') % key
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
if params_value is not None and not isinstance(params_value, dict):
message = (_('Params must be a map, find a '
'%s') % type(params_value))
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
def validate(self):
super(Workflow, self).validate()
if self.properties.get(self.TYPE) == 'reverse':
params = self.properties.get(self.PARAMS)
if params is None or not params.get('task_name'):
raise exception.StackValidationFailed(
error=_('Mistral resource validation error'),
path=[self.name,
('properties'
if self.stack.t.VERSION == 'heat_template_version'
else 'Properties'),
self.PARAMS],
message=_("'task_name' is not assigned in 'params' "
"in case of reverse type workflow.")
)
for task in self.properties.get(self.TASKS):
wf_value = task.get(self.WORKFLOW)
action_value = task.get(self.ACTION)
if wf_value and action_value:
raise exception.ResourcePropertyConflict(self.WORKFLOW,
self.ACTION)
if not wf_value and not action_value:
raise exception.PropertyUnspecifiedError(self.WORKFLOW,
self.ACTION)
if (task.get(self.REQUIRES) is not None
and self.properties.get(self.TYPE) == 'direct'):
msg = _("task %(task)s contains property 'requires' "
"in case of direct workflow. Only reverse workflows "
"can contain property 'requires'.") % {
'name': self.name,
'task': task.get(self.TASK_NAME)
}
raise exception.StackValidationFailed(
error=_('Mistral resource validation error'),
path=[self.name,
('properties'
if self.stack.t.VERSION == 'heat_template_version'
else 'Properties'),
self.TASKS,
task.get(self.TASK_NAME),
self.REQUIRES],
message=msg)
if task.get(self.POLICIES) is not None:
for task_item in task.get(self.POLICIES):
if task.get(task_item) is not None:
msg = _('Property %(policies)s and %(item)s cannot be '
'used both at one time.') % {
'policies': self.POLICIES,
'item': task_item
}
raise exception.StackValidationFailed(message=msg)
def _workflow_name(self):
return self.properties.get(self.NAME) or self.physical_resource_name()
def build_tasks(self, props):
for task in props[self.TASKS]:
current_task = {}
wf_value = task.get(self.WORKFLOW)
if wf_value is not None:
if wf_value in [res.resource_id
for res in six.itervalues(self.stack)]:
current_task.update({self.WORKFLOW: wf_value})
else:
msg = _("No such workflow %s") % wf_value
raise ValueError(msg)
# backward support for kilo.
if task.get(self.POLICIES) is not None:
task.update(task.get(self.POLICIES))
task_keys = [key for key in self._TASKS_KEYS
if key not in [
self.WORKFLOW,
self.TASK_NAME,
self.POLICIES
]]
for task_prop in task_keys:
if task.get(task_prop) is not None:
current_task.update(
{task_prop.replace('_', '-'): task[task_prop]})
yield {task[self.TASK_NAME]: current_task}
def prepare_properties(self, props):
"""Prepare correct YAML-formatted definition for Mistral."""
defn_name = self._workflow_name()
definition = {'version': '2.0',
defn_name: {self.TYPE: props.get(self.TYPE),
self.DESCRIPTION: props.get(
self.DESCRIPTION),
self.OUTPUT: props.get(self.OUTPUT)}}
for key in list(definition[defn_name].keys()):
if definition[defn_name][key] is None:
del definition[defn_name][key]
if props.get(self.INPUT) is not None:
definition[defn_name][self.INPUT] = list(props.get(
self.INPUT).keys())
definition[defn_name][self.TASKS] = {}
for task in self.build_tasks(props):
definition.get(defn_name).get(self.TASKS).update(task)
if props.get(self.TASK_DEFAULTS) is not None:
definition[defn_name][self.TASK_DEFAULTS.replace('_', '-')] = {
k.replace('_', '-'): v for k, v in
six.iteritems(props.get(self.TASK_DEFAULTS)) if v}
return yaml.dump(definition, Dumper=yaml.CSafeDumper
if hasattr(yaml, 'CSafeDumper')
else yaml.SafeDumper)
def handle_create(self):
super(Workflow, self).handle_create()
props = self.prepare_properties(self.properties)
try:
workflow = self.client().workflows.create(props)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
# NOTE(prazumovsky): Mistral uses unique names for resource
# identification.
self.resource_id_set(workflow[0].name)
def handle_signal(self, details=None):
self._validate_signal_data(details)
result_input = {}
result_params = {}
if details is not None:
if details.get(self.INPUT) is not None:
# NOTE(prazumovsky): Signal can contains some data, interesting
# for workflow, e.g. inputs. So, if signal data contains input
# we update override inputs, other leaved defined in template.
for key, value in six.iteritems(
self.properties.get(self.INPUT)):
result_input.update(
{key: details.get(
self.SIGNAL_DATA_INPUT).get(key) or value})
if details.get(self.SIGNAL_DATA_PARAMS) is not None:
if self.properties.get(self.PARAMS) is not None:
result_params.update(self.properties.get(self.PARAMS))
result_params.update(details.get(self.SIGNAL_DATA_PARAMS))
if not result_input and self.properties.get(self.INPUT):
result_input.update(self.properties.get(self.INPUT))
if not result_params and self.properties.get(self.PARAMS):
result_params.update(self.properties.get(self.PARAMS))
try:
execution = self.client().executions.create(
self._workflow_name(),
jsonutils.dumps(result_input),
**result_params)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
executions = [execution.id]
if self.EXECUTIONS in self.data():
executions.extend(self.data().get(self.EXECUTIONS).split(','))
self.data_set(self.EXECUTIONS, ','.join(executions))
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
update_allowed = [self.INPUT, self.PARAMS, self.DESCRIPTION]
for prop in update_allowed:
if prop in prop_diff:
del prop_diff[prop]
if len(prop_diff) > 0:
new_props = self.prepare_properties(tmpl_diff['Properties'])
try:
workflow = self.client().workflows.update(new_props)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
self.data_set(self.NAME, workflow[0].name)
self.resource_id_set(workflow[0].name)
def _delete_executions(self):
if self.data().get(self.EXECUTIONS):
for id in self.data().get(self.EXECUTIONS).split(','):
try:
self.client().executions.delete(id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
self.data_delete('executions')
def handle_delete(self):
self._delete_executions()
return super(Workflow, self).handle_delete()
def _resolve_attribute(self, name):
if name == self.EXECUTIONS:
if self.EXECUTIONS not in self.data():
return []
def parse_execution_response(execution):
return {
'id': execution.id,
'workflow_name': execution.workflow_name,
'created_at': execution.created_at,
'updated_at': execution.updated_at,
'state': execution.state,
'input': jsonutils.loads(six.text_type(execution.input)),
'output': jsonutils.loads(six.text_type(execution.output))
}
return [parse_execution_response(
self.client().executions.get(exec_id))
for exec_id in
self.data().get(self.EXECUTIONS).split(',')]
elif name == self.WORKFLOW_DATA:
return {self.NAME: self.resource_id,
self.INPUT: self.properties.get(self.INPUT)}
elif name == self.ALARM_URL:
return six.text_type(self._get_ec2_signed_url())
# TODO(tlashchova): remove this method when mistralclient>1.0.0 is used.
def _show_resource(self):
workflow = self.client().workflows.get(self.resource_id)
if hasattr(workflow, 'to_dict'):
super(Workflow, self)._show_resource()
return workflow._data
def resource_mapping():
return {
'OS::Mistral::Workflow': Workflow
}
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
import yaml
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine import support
class Workflow(signal_responder.SignalResponder,
resource.Resource):
support_status = support.SupportStatus(version='2015.1')
default_client_name = 'mistral'
entity = 'workflows'
PROPERTIES = (
NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS, TASK_DEFAULTS
) = (
'name', 'type', 'description', 'input', 'output', 'tasks', 'params',
'task_defaults'
)
_TASKS_KEYS = (
TASK_NAME, TASK_DESCRIPTION, ON_ERROR, ON_COMPLETE, ON_SUCCESS,
POLICIES, ACTION, WORKFLOW, PUBLISH, TASK_INPUT, REQUIRES,
RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT,
WITH_ITEMS, KEEP_RESULT, TARGET
) = (
'name', 'description', 'on_error', 'on_complete', 'on_success',
'policies', 'action', 'workflow', 'publish', 'input', 'requires',
'retry', 'wait_before', 'wait_after', 'pause_before', 'timeout',
'with_items', 'keep_result', 'target'
)
_TASKS_TASK_DEFAULTS = [
ON_ERROR, ON_COMPLETE, ON_SUCCESS,
REQUIRES, RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT
]
_SIGNAL_DATA_KEYS = (
SIGNAL_DATA_INPUT, SIGNAL_DATA_PARAMS
) = (
'input', 'params'
)
ATTRIBUTES = (
WORKFLOW_DATA, ALARM_URL, EXECUTIONS
) = (
'data', 'alarm_url', 'executions'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Workflow name.')
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('Workflow type.'),
constraints=[
constraints.AllowedValues(['direct', 'reverse'])
],
required=True,
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Workflow description.'),
update_allowed=True
),
INPUT: properties.Schema(
properties.Schema.MAP,
_('Dictionary which contains input for workflow.'),
update_allowed=True
),
OUTPUT: properties.Schema(
properties.Schema.MAP,
_('Any data structure arbitrarily containing YAQL '
'expressions that defines workflow output. May be '
'nested.'),
update_allowed=True
),
PARAMS: properties.Schema(
properties.Schema.MAP,
_("Workflow additional parameters. If Workflow is reverse typed, "
"params requires 'task_name', which defines initial task."),
update_allowed=True
),
TASK_DEFAULTS: properties.Schema(
properties.Schema.MAP,
_("Default settings for some of task "
"attributes defined "
"at workflow level."),
support_status=support.SupportStatus(version='5.0.0'),
schema={
ON_SUCCESS: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed successfully.')
),
ON_ERROR: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed with an error.')
),
ON_COMPLETE: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed regardless of whether '
'it is successful or not.')
),
REQUIRES: properties.Schema(
properties.Schema.LIST,
_('List of tasks which should be executed before '
'this task. Used only in reverse workflows.')
),
RETRY: properties.Schema(
properties.Schema.MAP,
_('Defines a pattern how task should be repeated in '
'case of an error.')
),
WAIT_BEFORE: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine'
' should wait before starting a task.')
),
WAIT_AFTER: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine'
' should wait after a task has completed before'
' starting next tasks defined in '
'on-success, on-error or on-complete.')
),
PAUSE_BEFORE: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines whether Mistral Engine should put the '
'workflow on hold or not before starting a task')
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Defines a period of time in seconds after which '
'a task will be failed automatically '
'by engine if hasn\'t completed.')
),
},
update_allowed=True
),
TASKS: properties.Schema(
properties.Schema.LIST,
_('Dictionary containing workflow tasks.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
TASK_NAME: properties.Schema(
properties.Schema.STRING,
_('Task name.'),
required=True
),
TASK_DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Task description.')
),
TASK_INPUT: properties.Schema(
properties.Schema.MAP,
_('Actual input parameter values of the task.')
),
ACTION: properties.Schema(
properties.Schema.STRING,
_('Name of the action associated with the task. '
'Either action or workflow may be defined in the '
'task.')
),
WORKFLOW: properties.Schema(
properties.Schema.STRING,
_('Name of the workflow associated with the task. '
'Can be defined by intrinsic function get_resource '
'or by name of the referenced workflow, i.e. '
'{ workflow: wf_name } or '
'{ workflow: { get_resource: wf_name }}. Either '
'action or workflow may be defined in the task.')
),
PUBLISH: properties.Schema(
properties.Schema.MAP,
_('Dictionary of variables to publish to '
'the workflow context.')
),
ON_SUCCESS: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed successfully.')
),
ON_ERROR: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed with an error.')
),
ON_COMPLETE: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed regardless of whether '
'it is successful or not.')
),
POLICIES: properties.Schema(
properties.Schema.MAP,
_('Dictionary-like section defining task policies '
'that influence how Mistral Engine runs tasks. Must '
'satisfy Mistral DSL v2.'),
support_status=support.SupportStatus(
status=support.DEPRECATED,
version='5.0.0',
message=_('Add needed policies directly to '
'the task, Policy keyword is not '
'needed'),
previous_status=support.SupportStatus(
version='2015.1'))
),
REQUIRES: properties.Schema(
properties.Schema.LIST,
_('List of tasks which should be executed before '
'this task. Used only in reverse workflows.')
),
RETRY: properties.Schema(
properties.Schema.MAP,
_('Defines a pattern how task should be repeated in '
'case of an error.'),
support_status=support.SupportStatus(version='5.0.0')
),
WAIT_BEFORE: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine '
'should wait before starting a task.'),
support_status=support.SupportStatus(version='5.0.0')
),
WAIT_AFTER: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral '
'Engine should wait after '
'a task has completed before starting next tasks '
'defined in on-success, on-error or on-complete.'),
support_status=support.SupportStatus(version='5.0.0')
),
PAUSE_BEFORE: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines whether Mistral Engine should '
'put the workflow on hold '
'or not before starting a task.'),
support_status=support.SupportStatus(version='5.0.0')
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Defines a period of time in seconds after which a '
'task will be failed automatically by engine '
'if hasn\'t completed.'),
support_status=support.SupportStatus(version='5.0.0')
),
WITH_ITEMS: properties.Schema(
properties.Schema.STRING,
_('If configured, it allows to run action or workflow '
'associated with a task multiple times '
'on a provided list of items.'),
support_status=support.SupportStatus(version='5.0.0')
),
KEEP_RESULT: properties.Schema(
properties.Schema.BOOLEAN,
_('Allowing not to store action results '
'after task completion.'),
support_status=support.SupportStatus(version='5.0.0')
),
TARGET: properties.Schema(
properties.Schema.STRING,
_('It defines an executor to which task action '
'should be sent to.'),
support_status=support.SupportStatus(version='5.0.0')
),
},
),
required=True,
update_allowed=True
)
}
attributes_schema = {
WORKFLOW_DATA: attributes.Schema(
_('A dictionary which contains name and input of the workflow.'),
type=attributes.Schema.MAP
),
ALARM_URL: attributes.Schema(
_("A signed url to create executions for workflows specified in "
"Workflow resource."),
type=attributes.Schema.STRING
),
EXECUTIONS: attributes.Schema(
_("List of workflows' executions, each of them is a dictionary "
"with information about execution. Each dictionary returns "
"values for next keys: id, workflow_name, created_at, "
"updated_at, state for current execution state, input, output."),
type=attributes.Schema.LIST
)
}
def get_reference_id(self):
return self._workflow_name()
def _validate_signal_data(self, data):
if data is not None:
input_value = data.get(self.SIGNAL_DATA_INPUT)
params_value = data.get(self.SIGNAL_DATA_PARAMS)
if input_value is not None:
if not isinstance(input_value, dict):
message = (_('Input in signal data must be a map, '
'find a %s') % type(input_value))
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
for key in six.iterkeys(input_value):
if (self.properties.get(self.INPUT) is None
or key not in self.properties.get(self.INPUT)):
message = _('Unknown input %s') % key
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
if params_value is not None and not isinstance(params_value, dict):
message = (_('Params must be a map, find a '
'%s') % type(params_value))
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
def validate(self):
super(Workflow, self).validate()
if self.properties.get(self.TYPE) == 'reverse':
params = self.properties.get(self.PARAMS)
if params is None or not params.get('task_name'):
raise exception.StackValidationFailed(
error=_('Mistral resource validation error'),
path=[self.name,
('properties'
if self.stack.t.VERSION == 'heat_template_version'
else 'Properties'),
self.PARAMS],
message=_("'task_name' is not assigned in 'params' "
"in case of reverse type workflow.")
)
for task in self.properties.get(self.TASKS):
wf_value = task.get(self.WORKFLOW)
action_value = task.get(self.ACTION)
if wf_value and action_value:
raise exception.ResourcePropertyConflict(self.WORKFLOW,
self.ACTION)
if not wf_value and not action_value:
raise exception.PropertyUnspecifiedError(self.WORKFLOW,
self.ACTION)
if (task.get(self.REQUIRES) is not None
and self.properties.get(self.TYPE) == 'direct'):
msg = _("task %(task)s contains property 'requires' "
"in case of direct workflow. Only reverse workflows "
"can contain property 'requires'.") % {
'name': self.name,
'task': task.get(self.TASK_NAME)
}
raise exception.StackValidationFailed(
error=_('Mistral resource validation error'),
path=[self.name,
('properties'
if self.stack.t.VERSION == 'heat_template_version'
else 'Properties'),
self.TASKS,
task.get(self.TASK_NAME),
self.REQUIRES],
message=msg)
if task.get(self.POLICIES) is not None:
for task_item in task.get(self.POLICIES):
if task.get(task_item) is not None:
msg = _('Property %(policies)s and %(item)s cannot be '
'used both at one time.') % {
'policies': self.POLICIES,
'item': task_item
}
raise exception.StackValidationFailed(message=msg)
def _workflow_name(self):
return self.properties.get(self.NAME) or self.physical_resource_name()
def build_tasks(self, props):
for task in props[self.TASKS]:
current_task = {}
wf_value = task.get(self.WORKFLOW)
if wf_value is not None:
if wf_value in [res.resource_id
for res in six.itervalues(self.stack)]:
current_task.update({self.WORKFLOW: wf_value})
else:
msg = _("No such workflow %s") % wf_value
raise ValueError(msg)
# backward support for kilo.
if task.get(self.POLICIES) is not None:
task.update(task.get(self.POLICIES))
task_keys = [key for key in self._TASKS_KEYS
if key not in [
self.WORKFLOW,
self.TASK_NAME,
self.POLICIES
]]
for task_prop in task_keys:
if task.get(task_prop) is not None:
current_task.update(
{task_prop.replace('_', '-'): task[task_prop]})
yield {task[self.TASK_NAME]: current_task}
def prepare_properties(self, props):
"""Prepare correct YAML-formatted definition for Mistral."""
defn_name = self._workflow_name()
definition = {'version': '2.0',
defn_name: {self.TYPE: props.get(self.TYPE),
self.DESCRIPTION: props.get(
self.DESCRIPTION),
self.OUTPUT: props.get(self.OUTPUT)}}
for key in list(definition[defn_name].keys()):
if definition[defn_name][key] is None:
del definition[defn_name][key]
if props.get(self.INPUT) is not None:
definition[defn_name][self.INPUT] = list(props.get(
self.INPUT).keys())
definition[defn_name][self.TASKS] = {}
for task in self.build_tasks(props):
definition.get(defn_name).get(self.TASKS).update(task)
if props.get(self.TASK_DEFAULTS) is not None:
definition[defn_name][self.TASK_DEFAULTS.replace('_', '-')] = {
k.replace('_', '-'): v for k, v in
six.iteritems(props.get(self.TASK_DEFAULTS)) if v}
return yaml.dump(definition, Dumper=yaml.CSafeDumper
if hasattr(yaml, 'CSafeDumper')
else yaml.SafeDumper)
def handle_create(self):
super(Workflow, self).handle_create()
props = self.prepare_properties(self.properties)
try:
workflow = self.client().workflows.create(props)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
# NOTE(prazumovsky): Mistral uses unique names for resource
# identification.
self.resource_id_set(workflow[0].name)
def handle_signal(self, details=None):
self._validate_signal_data(details)
result_input = {}
result_params = {}
if details is not None:
if details.get(self.INPUT) is not None:
# NOTE(prazumovsky): Signal can contains some data, interesting
# for workflow, e.g. inputs. So, if signal data contains input
# we update override inputs, other leaved defined in template.
for key, value in six.iteritems(
self.properties.get(self.INPUT)):
result_input.update(
{key: details.get(
self.SIGNAL_DATA_INPUT).get(key) or value})
if details.get(self.SIGNAL_DATA_PARAMS) is not None:
if self.properties.get(self.PARAMS) is not None:
result_params.update(self.properties.get(self.PARAMS))
result_params.update(details.get(self.SIGNAL_DATA_PARAMS))
if not result_input and self.properties.get(self.INPUT):
result_input.update(self.properties.get(self.INPUT))
if not result_params and self.properties.get(self.PARAMS):
result_params.update(self.properties.get(self.PARAMS))
try:
execution = self.client().executions.create(
self._workflow_name(),
jsonutils.dumps(result_input),
**result_params)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
executions = [execution.id]
if self.EXECUTIONS in self.data():
executions.extend(self.data().get(self.EXECUTIONS).split(','))
self.data_set(self.EXECUTIONS, ','.join(executions))
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
update_allowed = [self.INPUT, self.PARAMS, self.DESCRIPTION]
for prop in update_allowed:
if prop in prop_diff:
del prop_diff[prop]
if len(prop_diff) > 0:
new_props = self.prepare_properties(tmpl_diff['Properties'])
try:
workflow = self.client().workflows.update(new_props)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
self.data_set(self.NAME, workflow[0].name)
self.resource_id_set(workflow[0].name)
def _delete_executions(self):
if self.data().get(self.EXECUTIONS):
for id in self.data().get(self.EXECUTIONS).split(','):
try:
self.client().executions.delete(id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
self.data_delete('executions')
def handle_delete(self):
self._delete_executions()
return super(Workflow, self).handle_delete()
def _resolve_attribute(self, name):
if name == self.EXECUTIONS:
if self.EXECUTIONS not in self.data():
return []
def parse_execution_response(execution):
return {
'id': execution.id,
'workflow_name': execution.workflow_name,
'created_at': execution.created_at,
'updated_at': execution.updated_at,
'state': execution.state,
'input': jsonutils.loads(six.text_type(execution.input)),
'output': jsonutils.loads(six.text_type(execution.output))
}
return [parse_execution_response(
self.client().executions.get(exec_id))
for exec_id in
self.data().get(self.EXECUTIONS).split(',')]
elif name == self.WORKFLOW_DATA:
return {self.NAME: self.resource_id,
self.INPUT: self.properties.get(self.INPUT)}
elif name == self.ALARM_URL:
return six.text_type(self._get_ec2_signed_url())
# TODO(tlashchova): remove this method when mistralclient>1.0.0 is used.
def _show_resource(self):
workflow = self.client().workflows.get(self.resource_id)
if hasattr(workflow, 'to_dict'):
super(Workflow, self)._show_resource()
return workflow._data
def resource_mapping():
return {
'OS::Mistral::Workflow': Workflow
}
| en | 0.756955 | # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # backward support for kilo. Prepare correct YAML-formatted definition for Mistral. # NOTE(prazumovsky): Mistral uses unique names for resource # identification. # NOTE(prazumovsky): Signal can contains some data, interesting # for workflow, e.g. inputs. So, if signal data contains input # we update override inputs, other leaved defined in template. # TODO(tlashchova): remove this method when mistralclient>1.0.0 is used. | 1.63323 | 2 |
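A recurring detail in build_tasks() and prepare_properties() above is the underscore-to-dash rename of task keys for the Mistral v2 DSL; a standalone illustration of that convention (the task values are made up for the example) is:

# Heat property names use underscores, the Mistral v2 DSL expects dashes.
task = {'action': 'std.echo output="hi"', 'wait_before': 5, 'on_success': ['next_task']}
mistral_task = {k.replace('_', '-'): v for k, v in task.items() if v is not None}
print(mistral_task)
# {'action': 'std.echo output="hi"', 'wait-before': 5, 'on-success': ['next_task']}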
keyvaultlib/key_vault.py | Tom-Ganor/keyvaultlib | 0 | 6632211 | import logging
from logging import Logger
# noinspection PyPackageRequirements
from azure.keyvault import KeyVaultClient
from msrestazure.azure_active_directory import MSIAuthentication as MSICredentials, ServicePrincipalCredentials
class KeyVaultOAuthClient(KeyVaultClient):
"""
KeyVaultOAuthClient is a KeyVault client wrapper that supports both MSI and ADAL
authentication mechanisms.
It's helpful for scenarios where one is transitioning from ADAL authentication to MSI,
and exists to save the small code duplication of using either MSIAuthentication or ServicePrincipalCredentials.
"""
KEY_VAULT_RESOURCE_URL = 'https://vault.azure.net'
KEY_VAULT_URL_TEMPLATE = 'https://{key_vault_name}.vault.azure.net/'
LATEST_SECRET_VERSION = ''
def __init__(self, client_id=None, client_secret=None, tenant_id=None, use_msi=False, logger=None, *args, **kwargs):
# type: (str, str, str, bool, Logger|logging) -> None
"""
Initiates a new key vault client with either MSI or ADAL token providers underneath.
:param client_id: An optional (When using MSI) client ID - Of a user or an application that is authorized
with your KeyVault resources
:param client_secret: An optional (When using MSI) client secret - Of a user or an application that is authorized
with your KeyVault resources
:param tenant_id: An optional (When using MSI) tenant ID of your KeyVault resources
:param use_msi: A flag indicating whether the client should use MSI (Managed-Service-Identity) to get an OAuth
token for your KeyVault resources
:param logger: An optional logger to use in case of initialization errors
"""
self._logger = logger or logging.getLogger(KeyVaultOAuthClient.__class__.__name__)
if not use_msi and (not client_id or not client_secret or not tenant_id):
err = 'You should either use MSI, or pass a valid client ID, secret and tenant ID'
self._logger.error(err)
raise ValueError(err)
self._using_msi = use_msi
if use_msi:
msi_creds = MSICredentials(resource=self.KEY_VAULT_RESOURCE_URL)
super(KeyVaultOAuthClient, self).__init__(msi_creds, *args, **kwargs)
else:
adal_creds = ServicePrincipalCredentials(client_id, client_secret, tenant=tenant_id,
resource=self.KEY_VAULT_RESOURCE_URL)
super(KeyVaultOAuthClient, self).__init__(adal_creds, *args, **kwargs)
def get_secret_with_key_vault_name(self, key_vault_name, secret_name, secret_version=LATEST_SECRET_VERSION):
# type: (str, str, str) -> basestring
"""
Use this wrapper to get a KeyVault secret by KeyVault name (i.e. not by a full URL).
If successful, the secret's value will be returned, otherwise an error will be logged and an exception thrown.
:param key_vault_name: Name of KeyVault resource (e.g. For 'https://mykv.vault.azure.net/' the name is 'mykv')
:param secret_name: The secret's name inside the KeyVault resource
:param secret_version: An optional version of the secret to fetch (latest being the default)
:return: The secret's value as a string
"""
key_vault_url = self.KEY_VAULT_URL_TEMPLATE.format(key_vault_name=key_vault_name)
try:
return self.get_secret(key_vault_url, secret_name, secret_version).value
except Exception as e:
self._logger.error('Failed retrieving secret vault={} secret={} version={} using_msi={}'.format(
key_vault_url, secret_name, secret_version, self._using_msi
))
raise e
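# Example usage (sketch; the vault and secret names below are illustrative only):
#   client = KeyVaultOAuthClient(use_msi=True)
#   value = client.get_secret_with_key_vault_name('mykv', 'my-secret')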
| import logging
from logging import Logger
# noinspection PyPackageRequirements
from azure.keyvault import KeyVaultClient
from msrestazure.azure_active_directory import MSIAuthentication as MSICredentials, ServicePrincipalCredentials
class KeyVaultOAuthClient(KeyVaultClient):
"""
KeyVaultOAuthClient is a KeyVault client wrapper that supports both MSI and ADAL
authentication mechanisms.
It's helpful for scenarios where one is transitioning from ADAL authentication to MSI,
and exists to save the small code duplication of using either MSIAuthentication or ServicePrincipalCredentials.
"""
KEY_VAULT_RESOURCE_URL = 'https://vault.azure.net'
KEY_VAULT_URL_TEMPLATE = 'https://{key_vault_name}.vault.azure.net/'
LATEST_SECRET_VERSION = ''
def __init__(self, client_id=None, client_secret=None, tenant_id=None, use_msi=False, logger=None, *args, **kwargs):
# type: (str, str, str, bool, Logger|logging) -> None
"""
Initiates a new key vault client with either MSI or ADAL token providers underneath.
:param client_id: An optional (When using MSI) client ID - Of a user or an application that is authorized
with your KeyVault resources
:param client_secret: An optional (When using MSI) client secret - Of a user or an application that is authorized
with your KeyVault resources
:param tenant_id: An optional (When using MSI) tenant ID of your KeyVault resources
        :param use_msi: A flag indicating whether the client should use MSI (Managed-Service-Identity) to get an OAuth
token for your KeyVault resources
:param logger: An optional logger to use in case of initialization errors
"""
self._logger = logger or logging.getLogger(KeyVaultOAuthClient.__class__.__name__)
if not use_msi and (not client_id or not client_secret or not tenant_id):
err = 'You should either use MSI, or pass a valid client ID, secret and tenant ID'
self._logger.error(err)
raise ValueError(err)
self._using_msi = use_msi
if use_msi:
msi_creds = MSICredentials(resource=self.KEY_VAULT_RESOURCE_URL)
super(KeyVaultOAuthClient, self).__init__(msi_creds, *args, **kwargs)
else:
adal_creds = ServicePrincipalCredentials(client_id, client_secret, tenant=tenant_id,
resource=self.KEY_VAULT_RESOURCE_URL)
super(KeyVaultOAuthClient, self).__init__(adal_creds, *args, **kwargs)
def get_secret_with_key_vault_name(self, key_vault_name, secret_name, secret_version=LATEST_SECRET_VERSION):
# type: (str, str, str) -> basestring
"""
Use this wrapper to get a KeyVault secret by KeyVault name (i.e. not by a full URL).
If successful, the secret's value will be returned, otherwise an error will be logged and an exception thrown.
:param key_vault_name: Name of KeyVault resource (e.g. For 'https://mykv.vault.azure.net/' the name is 'mykv')
:param secret_name: The secret's name inside the KeyVault resource
:param secret_version: An optional version of the secret to fetch (latest being the default)
:return: The secret's value as a string
"""
key_vault_url = self.KEY_VAULT_URL_TEMPLATE.format(key_vault_name=key_vault_name)
try:
return self.get_secret(key_vault_url, secret_name, secret_version).value
except Exception as e:
self._logger.error('Failed retrieving secret vault={} secret={} version={} using_msi={}'.format(
key_vault_url, secret_name, secret_version, self._using_msi
))
raise e
| en | 0.704915 | # noinspection PyPackageRequirements KeyVaultOAuthClient is a KeyVault client wrapper that supports both MSI and ADAL authentication mechanisms. It's helpful for scenarios where one is transitioning from ADAL authentication to MSI, and exists to save the small code duplication of using either MSIAuthentication or ServicePrincipalCredentials. # type: (str, str, str, bool, Logger|logging) -> None Initiates a new key vault client with either MSI or ADAL token providers underneath. :param client_id: An optional (When using MSI) client ID - Of a user or an application that is authorized with your KeyVault resources :param client_secret: An optional (When using MSI) client secret - Of a user or an application that is authorized with your KeyVault resources :param tenant_id: An optional (When using MSI) tenant ID of your KeyVault resources :param use_msi: A flag indicated if the client should use MSI (Managed-Service-Identity) to get an OAuth token for your KeyVault resources :param logger: An optional logger to use in case of initialization errors # type: (str, str, str) -> basestring Use this wrapper to get a KeyVault secret by KeyVault name (i.e. not by a full URL). If successful, the secret's value will be returned, otherwise an error will be logged and an exception thrown. :param key_vault_name: Name of KeyVault resource (e.g. For 'https://mykv.vault.azure.net/' the name is 'mykv') :param secret_name: The secret's name inside the KeyVault resource :param secret_version: An optional version of the secret to fetch (latest being the default) :return: The secret's value as a string | 2.373065 | 2 |
blog/models.py | asemarian/weCode | 1 | 6632212 | <reponame>asemarian/weCode<filename>blog/models.py
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from datetime import datetime, date
from ckeditor.fields import RichTextField
from django.db.models.signals import pre_save
from hitcount.models import HitCountMixin, HitCount
from django.contrib.contenttypes.fields import GenericRelation
from taggit.managers import TaggableManager
# from django.utils.text import slugify
class Category(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField(unique=True, allow_unicode=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def get_absolute_url(self):
return f'/{self.slug}/'
class Post(models.Model, HitCountMixin):
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True, allow_unicode=True)
summary = models.CharField(max_length=500)
header_image = models.ImageField(null=True, blank=True, upload_to="images/")
tags = TaggableManager()
author = models.ForeignKey(User, on_delete=models.CASCADE)
body = RichTextField(blank=True, null=True)
post_date = models.DateField(auto_now_add=True)
update_date = models.DateField(auto_now=True)
category = models.ForeignKey(Category, related_name='posts', on_delete=models.CASCADE)
featured = models.BooleanField(default=False)
views = GenericRelation(HitCount, object_id_field='object_pk',related_query_name='hit_count_generic_relation')
class Meta:
ordering = ('-post_date',)
def __str__(self):
return self.title + ' | ' + str(self.author) + ' | ' + str(self.post_date)
def get_absolute_url(self):
return reverse('post-detail', kwargs={"slug":self.slug})
def create_slug(instance, new_slug=None):
slug = arabic_slugify(instance.title)
if new_slug is not None:
slug = new_slug
qs = Post.objects.filter(slug=slug).order_by("-id")
exists = qs.exists()
if exists:
new_slug = "%s-%s" %(slug, qs.first().id)
return create_slug(instance, new_slug=new_slug)
return slug
def arabic_slugify(str):
str = str.replace(" ", "-")
str = str.replace(",", "-")
str = str.replace("(", "-")
str = str.replace(")", "")
str = str.replace("؟", "")
return str
def pre_save_post_reciever(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = create_slug(instance)
pre_save.connect(pre_save_post_reciever, sender=Post)
class Comment(models.Model):
post = models.ForeignKey(Post, related_name="comments", on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
body = models.TextField()
post_date = models.DateField(auto_now_add=True)
update_date = models.DateField(auto_now=True)
def __str__(self):
return '%s - %s' % (self.post.title, self.user) | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from datetime import datetime, date
from ckeditor.fields import RichTextField
from django.db.models.signals import pre_save
from hitcount.models import HitCountMixin, HitCount
from django.contrib.contenttypes.fields import GenericRelation
from taggit.managers import TaggableManager
# from django.utils.text import slugify
class Category(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField(unique=True, allow_unicode=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def get_absolute_url(self):
return f'/{self.slug}/'
class Post(models.Model, HitCountMixin):
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True, allow_unicode=True)
summary = models.CharField(max_length=500)
header_image = models.ImageField(null=True, blank=True, upload_to="images/")
tags = TaggableManager()
author = models.ForeignKey(User, on_delete=models.CASCADE)
body = RichTextField(blank=True, null=True)
post_date = models.DateField(auto_now_add=True)
update_date = models.DateField(auto_now=True)
category = models.ForeignKey(Category, related_name='posts', on_delete=models.CASCADE)
featured = models.BooleanField(default=False)
views = GenericRelation(HitCount, object_id_field='object_pk',related_query_name='hit_count_generic_relation')
class Meta:
ordering = ('-post_date',)
def __str__(self):
return self.title + ' | ' + str(self.author) + ' | ' + str(self.post_date)
def get_absolute_url(self):
return reverse('post-detail', kwargs={"slug":self.slug})
def create_slug(instance, new_slug=None):
slug = arabic_slugify(instance.title)
if new_slug is not None:
slug = new_slug
qs = Post.objects.filter(slug=slug).order_by("-id")
exists = qs.exists()
if exists:
new_slug = "%s-%s" %(slug, qs.first().id)
return create_slug(instance, new_slug=new_slug)
return slug
def arabic_slugify(str):
str = str.replace(" ", "-")
str = str.replace(",", "-")
str = str.replace("(", "-")
str = str.replace(")", "")
str = str.replace("؟", "")
return str
def pre_save_post_reciever(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = create_slug(instance)
pre_save.connect(pre_save_post_reciever, sender=Post)
class Comment(models.Model):
post = models.ForeignKey(Post, related_name="comments", on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
body = models.TextField()
post_date = models.DateField(auto_now_add=True)
update_date = models.DateField(auto_now=True)
def __str__(self):
return '%s - %s' % (self.post.title, self.user) | en | 0.23675 | # from django.utils.text import slugify | 1.991292 | 2 |
examples/__init__.py | HANhuiyu/SpikingStereoMatching | 1 | 6632213 | from examples.nst_letters import run_experiment_nst
from examples.two_fans import run_experiment_fans
from examples.pendulum import run_experiment_pendulum
| from examples.nst_letters import run_experiment_nst
from examples.two_fans import run_experiment_fans
from examples.pendulum import run_experiment_pendulum
| none | 1 | 1.040603 | 1 |
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cdn/models/WafBlackRuleModel.py | Ureimu/weather-robot | 14 | 6632214 | <gh_stars>10-100
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class WafBlackRuleModel(object):
def __init__(self, id=None, matchOp=None, val=None, valZh=None, enable=None, action=None, updateTime=None):
"""
        :param id: (Optional) Blacklist rule ID
        :param matchOp: (Optional) Match mode, effective for the uri type; 0=exact match 1=prefix match 2=contains 3=regex 4=greater than 5=suffix match
        :param val: (Optional) Match value
        :param valZh: (Optional) Chinese display name of the match value
        :param enable: (Optional) Whether the rule is enabled; true=enabled, false=disabled
        :param action: (Optional) null
        :param updateTime: (Optional) Last modification time of the blacklist entry, in UTC
"""
self.id = id
self.matchOp = matchOp
self.val = val
self.valZh = valZh
self.enable = enable
self.action = action
self.updateTime = updateTime
| # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class WafBlackRuleModel(object):
def __init__(self, id=None, matchOp=None, val=None, valZh=None, enable=None, action=None, updateTime=None):
"""
        :param id: (Optional) Blacklist rule ID
        :param matchOp: (Optional) Match mode, effective for the uri type; 0=exact match 1=prefix match 2=contains 3=regex 4=greater than 5=suffix match
        :param val: (Optional) Match value
        :param valZh: (Optional) Chinese display name of the match value
        :param enable: (Optional) Whether the rule is enabled; true=enabled, false=disabled
        :param action: (Optional) null
        :param updateTime: (Optional) Last modification time of the blacklist entry, in UTC
"""
self.id = id
self.matchOp = matchOp
self.val = val
self.valZh = valZh
self.enable = enable
self.action = action
self.updateTime = updateTime | en | 0.623542 | # coding=utf8 # Copyright 2018 JDCLOUD.COM # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # NOTE: This class is auto generated by the jdcloud code generator program. :param id: (Optional) 黑名单规则id :param matchOp: (Optional) 匹配模式,uri类型有效,0=完全匹配 1=前缀匹配 2=包含 3=正则 4=大于 5=后缀 :param val: (Optional) 匹配值 :param valZh: (Optional) 匹配值的中文名 :param enable: (Optional) 是否启用, true为启用,false为未启用 :param action: (Optional) null :param updateTime: (Optional) 黑名单的修改时间,UTC时间 | 1.699171 | 2 |
src/pudl/workspace/setup.py | kevinsung/pudl | 285 | 6632215 | <filename>src/pudl/workspace/setup.py
"""Tools for setting up and managing PUDL workspaces."""
import importlib
import logging
import pathlib
import shutil
import yaml
from pudl import constants as pc
logger = logging.getLogger(__name__)
def set_defaults(pudl_in, pudl_out, clobber=False):
"""
Set default user input and output locations in ``$HOME/.pudl.yml``.
Create a user settings file for future reference, that defines the default
PUDL input and output directories. If this file already exists, behavior
depends on the clobber parameter, which is False by default. If it's True,
the existing file is replaced. If False, the existing file is not changed.
Args:
pudl_in (os.PathLike): Path to be used as the default input directory
for PUDL -- this is where :mod:`pudl.workspace.datastore` will look
to find the ``data`` directory, full of data from public agencies.
pudl_out (os.PathLike): Path to the default output directory for PUDL,
where results of data processing will be organized.
clobber (bool): If True and a user settings file exists, overwrite it.
If False, do not alter the existing file. Defaults to False.
Returns:
None
"""
settings_file = pathlib.Path.home() / '.pudl.yml'
if settings_file.exists():
if clobber:
logger.info(f"{settings_file} exists: clobbering.")
else:
logger.info(f"{settings_file} exists: not clobbering.")
return
with settings_file.open(mode='w') as f:
f.write(f"pudl_in: {pudl_in.expanduser().resolve()}\n")
f.write(f"pudl_out: {pudl_out.expanduser().resolve()}\n")
def get_defaults():
"""
Read paths to default PUDL input/output dirs from user's $HOME/.pudl.yml.
Args:
None
Returns:
dict: The contents of the user's PUDL settings file, with keys
``pudl_in`` and ``pudl_out`` defining their default PUDL workspace. If
the ``$HOME/.pudl.yml`` file does not exist, set these paths to None.
"""
settings_file = pathlib.Path.home() / '.pudl.yml'
try:
with pathlib.Path(settings_file).open() as f:
default_workspace = yaml.safe_load(f)
except FileNotFoundError:
logger.info("PUDL user settings file .pudl.yml not found.")
default_workspace = {"pudl_in": None, "pudl_out": None}
return default_workspace
# Ensure that no matter what the user has put in this file, we get fully
# specified absolute paths out when we read it:
pudl_in = (
pathlib.Path(default_workspace["pudl_in"]).
expanduser().
resolve()
)
pudl_out = (
pathlib.Path(default_workspace["pudl_out"]).
expanduser().
resolve()
)
return derive_paths(pudl_in, pudl_out)
def derive_paths(pudl_in, pudl_out):
"""
Derive PUDL paths based on given input and output paths.
If no configuration file path is provided, attempt to read in the user
configuration from a file called .pudl.yml in the user's HOME directory.
Presently the only values we expect are pudl_in and pudl_out, directories
    that store files that PUDL either depends on or that rely on PUDL.
Args:
pudl_in (os.PathLike): Path to the directory containing the PUDL input
files, most notably the ``data`` directory which houses the raw
data downloaded from public agencies by the
:mod:`pudl.workspace.datastore` tools. ``pudl_in`` may be the same
directory as ``pudl_out``.
pudl_out (os.PathLike): Path to the directory where PUDL should write
the outputs it generates. These will be organized into directories
according to the output format (sqlite, datapackage, etc.).
Returns:
dict: A dictionary containing common PUDL settings, derived from those
read out of the YAML file. Mostly paths for inputs & outputs.
"""
# ps is short for pudl settings -- a dictionary of paths, etc.
ps = {}
# The only "inputs" are the datastore and example settings files:
# Convert from input string to Path and make it absolute w/ resolve()
pudl_in = pathlib.Path(pudl_in).expanduser().resolve()
data_dir = pudl_in / "data"
settings_dir = pudl_in / "settings"
# Store these as strings... since we aren't using Paths everywhere yet:
ps["pudl_in"] = str(pudl_in)
ps["data_dir"] = str(data_dir)
ps["settings_dir"] = str(settings_dir)
# Everything else goes into outputs, generally organized by type of file:
pudl_out = pathlib.Path(pudl_out).expanduser().resolve()
ps["pudl_out"] = str(pudl_out)
# One directory per output format, datapackage, sqlite, etc.:
for fmt in pc.output_formats:
ps[f"{fmt}_dir"] = str(pudl_out / fmt)
ferc1_db_file = pathlib.Path(ps['sqlite_dir'], 'ferc1.sqlite')
ps['ferc1_db'] = "sqlite:///" + str(ferc1_db_file.resolve())
ps['pudl_db'] = "sqlite:///" + str(pathlib.Path(
ps['sqlite_dir'], 'pudl.sqlite'))
ps['censusdp1tract_db'] = "sqlite:///" + str(pathlib.Path(
ps['sqlite_dir'], 'censusdp1tract.sqlite'))
return ps
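# Example (hypothetical workspace paths), sketching the dictionary derive_paths() returns:
#   ps = derive_paths("~/pudl_in", "~/pudl_out")
#   ps["data_dir"]   -> <pudl_in>/data
#   ps["sqlite_dir"] -> <pudl_out>/sqlite      (one such key per format in pc.output_formats)
#   ps["pudl_db"]    -> "sqlite:///<pudl_out>/sqlite/pudl.sqlite"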
def init(pudl_in, pudl_out, clobber=False):
"""
Set up a new PUDL working environment based on the user settings.
Args:
pudl_in (os.PathLike): Path to the directory containing the PUDL input
files, most notably the ``data`` directory which houses the raw
data downloaded from public agencies by the
:mod:`pudl.workspace.datastore` tools. ``pudl_in`` may be the same
directory as ``pudl_out``.
pudl_out (os.PathLike): Path to the directory where PUDL should write
the outputs it generates. These will be organized into directories
according to the output format (sqlite, datapackage, etc.).
clobber (bool): if True, replace existing files. If False (the default)
do not replace existing files.
Returns:
None
"""
# Generate paths for the workspace:
ps = derive_paths(pudl_in, pudl_out)
# Create tmp directory
tmp_dir = pathlib.Path(ps["data_dir"], "tmp")
tmp_dir.mkdir(parents=True, exist_ok=True)
# These are files that may exist in the package_data directory, but that
# we do not want to deploy into a user workspace:
ignore_files = ['__init__.py', '.gitignore']
# Make a settings directory in the workspace, and deploy settings files:
settings_dir = pathlib.Path(ps['settings_dir'])
settings_dir.mkdir(parents=True, exist_ok=True)
settings_pkg = "pudl.package_data.settings"
deploy(settings_pkg, settings_dir, ignore_files, clobber=clobber)
# Make several output directories:
for fmt in pc.output_formats:
format_dir = pathlib.Path(ps["pudl_out"], fmt)
format_dir.mkdir(parents=True, exist_ok=True)
def deploy(pkg_path, deploy_dir, ignore_files, clobber=False):
"""
Deploy all files from a package_data directory into a workspace.
Args:
pkg_path (str): Dotted module path to the subpackage inside of
package_data containing the resources to be deployed.
deploy_dir (os.PathLike): Directory on the filesystem to which the
files within pkg_path should be deployed.
ignore_files (iterable): List of filenames (strings) that may be
present in the pkg_path subpackage, but that should be ignored.
clobber (bool): if True, replace existing copies of the files that are
being deployed from pkg_path to deploy_dir. If False, do not
replace existing files.
Returns:
None
"""
files = [
file for file in
importlib.resources.contents(pkg_path)
if importlib.resources.is_resource(pkg_path, file)
and file not in ignore_files
]
for file in files:
with importlib.resources.path(pkg_path, file) as f:
dest_file = pathlib.Path(deploy_dir, file)
if pathlib.Path.exists(dest_file):
if clobber:
logger.info(f"CLOBBERING existing file at {dest_file}.")
else:
logger.info(f"Skipping existing file at {dest_file}")
continue
shutil.copy(f, dest_file)
| <filename>src/pudl/workspace/setup.py
"""Tools for setting up and managing PUDL workspaces."""
import importlib
import logging
import pathlib
import shutil
import yaml
from pudl import constants as pc
logger = logging.getLogger(__name__)
def set_defaults(pudl_in, pudl_out, clobber=False):
"""
Set default user input and output locations in ``$HOME/.pudl.yml``.
Create a user settings file for future reference, that defines the default
PUDL input and output directories. If this file already exists, behavior
depends on the clobber parameter, which is False by default. If it's True,
the existing file is replaced. If False, the existing file is not changed.
Args:
pudl_in (os.PathLike): Path to be used as the default input directory
for PUDL -- this is where :mod:`pudl.workspace.datastore` will look
to find the ``data`` directory, full of data from public agencies.
pudl_out (os.PathLike): Path to the default output directory for PUDL,
where results of data processing will be organized.
clobber (bool): If True and a user settings file exists, overwrite it.
If False, do not alter the existing file. Defaults to False.
Returns:
None
"""
settings_file = pathlib.Path.home() / '.pudl.yml'
if settings_file.exists():
if clobber:
logger.info(f"{settings_file} exists: clobbering.")
else:
logger.info(f"{settings_file} exists: not clobbering.")
return
with settings_file.open(mode='w') as f:
f.write(f"pudl_in: {pudl_in.expanduser().resolve()}\n")
f.write(f"pudl_out: {pudl_out.expanduser().resolve()}\n")
def get_defaults():
"""
Read paths to default PUDL input/output dirs from user's $HOME/.pudl.yml.
Args:
None
Returns:
dict: The contents of the user's PUDL settings file, with keys
``pudl_in`` and ``pudl_out`` defining their default PUDL workspace. If
the ``$HOME/.pudl.yml`` file does not exist, set these paths to None.
"""
settings_file = pathlib.Path.home() / '.pudl.yml'
try:
with pathlib.Path(settings_file).open() as f:
default_workspace = yaml.safe_load(f)
except FileNotFoundError:
logger.info("PUDL user settings file .pudl.yml not found.")
default_workspace = {"pudl_in": None, "pudl_out": None}
return default_workspace
# Ensure that no matter what the user has put in this file, we get fully
# specified absolute paths out when we read it:
pudl_in = (
pathlib.Path(default_workspace["pudl_in"]).
expanduser().
resolve()
)
pudl_out = (
pathlib.Path(default_workspace["pudl_out"]).
expanduser().
resolve()
)
return derive_paths(pudl_in, pudl_out)
def derive_paths(pudl_in, pudl_out):
"""
Derive PUDL paths based on given input and output paths.
If no configuration file path is provided, attempt to read in the user
configuration from a file called .pudl.yml in the user's HOME directory.
Presently the only values we expect are pudl_in and pudl_out, directories
    that store files that PUDL either depends on or that rely on PUDL.
Args:
pudl_in (os.PathLike): Path to the directory containing the PUDL input
files, most notably the ``data`` directory which houses the raw
data downloaded from public agencies by the
:mod:`pudl.workspace.datastore` tools. ``pudl_in`` may be the same
directory as ``pudl_out``.
pudl_out (os.PathLike): Path to the directory where PUDL should write
the outputs it generates. These will be organized into directories
according to the output format (sqlite, datapackage, etc.).
Returns:
dict: A dictionary containing common PUDL settings, derived from those
read out of the YAML file. Mostly paths for inputs & outputs.
"""
# ps is short for pudl settings -- a dictionary of paths, etc.
ps = {}
# The only "inputs" are the datastore and example settings files:
# Convert from input string to Path and make it absolute w/ resolve()
pudl_in = pathlib.Path(pudl_in).expanduser().resolve()
data_dir = pudl_in / "data"
settings_dir = pudl_in / "settings"
# Store these as strings... since we aren't using Paths everywhere yet:
ps["pudl_in"] = str(pudl_in)
ps["data_dir"] = str(data_dir)
ps["settings_dir"] = str(settings_dir)
# Everything else goes into outputs, generally organized by type of file:
pudl_out = pathlib.Path(pudl_out).expanduser().resolve()
ps["pudl_out"] = str(pudl_out)
# One directory per output format, datapackage, sqlite, etc.:
for fmt in pc.output_formats:
ps[f"{fmt}_dir"] = str(pudl_out / fmt)
ferc1_db_file = pathlib.Path(ps['sqlite_dir'], 'ferc1.sqlite')
ps['ferc1_db'] = "sqlite:///" + str(ferc1_db_file.resolve())
ps['pudl_db'] = "sqlite:///" + str(pathlib.Path(
ps['sqlite_dir'], 'pudl.sqlite'))
ps['censusdp1tract_db'] = "sqlite:///" + str(pathlib.Path(
ps['sqlite_dir'], 'censusdp1tract.sqlite'))
return ps
def init(pudl_in, pudl_out, clobber=False):
"""
Set up a new PUDL working environment based on the user settings.
Args:
pudl_in (os.PathLike): Path to the directory containing the PUDL input
files, most notably the ``data`` directory which houses the raw
data downloaded from public agencies by the
:mod:`pudl.workspace.datastore` tools. ``pudl_in`` may be the same
directory as ``pudl_out``.
pudl_out (os.PathLike): Path to the directory where PUDL should write
the outputs it generates. These will be organized into directories
according to the output format (sqlite, datapackage, etc.).
clobber (bool): if True, replace existing files. If False (the default)
do not replace existing files.
Returns:
None
"""
# Generate paths for the workspace:
ps = derive_paths(pudl_in, pudl_out)
# Create tmp directory
tmp_dir = pathlib.Path(ps["data_dir"], "tmp")
tmp_dir.mkdir(parents=True, exist_ok=True)
# These are files that may exist in the package_data directory, but that
# we do not want to deploy into a user workspace:
ignore_files = ['__init__.py', '.gitignore']
# Make a settings directory in the workspace, and deploy settings files:
settings_dir = pathlib.Path(ps['settings_dir'])
settings_dir.mkdir(parents=True, exist_ok=True)
settings_pkg = "pudl.package_data.settings"
deploy(settings_pkg, settings_dir, ignore_files, clobber=clobber)
# Make several output directories:
for fmt in pc.output_formats:
format_dir = pathlib.Path(ps["pudl_out"], fmt)
format_dir.mkdir(parents=True, exist_ok=True)
def deploy(pkg_path, deploy_dir, ignore_files, clobber=False):
"""
Deploy all files from a package_data directory into a workspace.
Args:
pkg_path (str): Dotted module path to the subpackage inside of
package_data containing the resources to be deployed.
deploy_dir (os.PathLike): Directory on the filesystem to which the
files within pkg_path should be deployed.
ignore_files (iterable): List of filenames (strings) that may be
present in the pkg_path subpackage, but that should be ignored.
clobber (bool): if True, replace existing copies of the files that are
being deployed from pkg_path to deploy_dir. If False, do not
replace existing files.
Returns:
None
"""
files = [
file for file in
importlib.resources.contents(pkg_path)
if importlib.resources.is_resource(pkg_path, file)
and file not in ignore_files
]
for file in files:
with importlib.resources.path(pkg_path, file) as f:
dest_file = pathlib.Path(deploy_dir, file)
if pathlib.Path.exists(dest_file):
if clobber:
logger.info(f"CLOBBERING existing file at {dest_file}.")
else:
logger.info(f"Skipping existing file at {dest_file}")
continue
shutil.copy(f, dest_file)
| en | 0.83142 | Tools for setting up and managing PUDL workspaces. Set default user input and output locations in ``$HOME/.pudl.yml``. Create a user settings file for future reference, that defines the default PUDL input and output directories. If this file already exists, behavior depends on the clobber parameter, which is False by default. If it's True, the existing file is replaced. If False, the existing file is not changed. Args: pudl_in (os.PathLike): Path to be used as the default input directory for PUDL -- this is where :mod:`pudl.workspace.datastore` will look to find the ``data`` directory, full of data from public agencies. pudl_out (os.PathLike): Path to the default output directory for PUDL, where results of data processing will be organized. clobber (bool): If True and a user settings file exists, overwrite it. If False, do not alter the existing file. Defaults to False. Returns: None Read paths to default PUDL input/output dirs from user's $HOME/.pudl.yml. Args: None Returns: dict: The contents of the user's PUDL settings file, with keys ``pudl_in`` and ``pudl_out`` defining their default PUDL workspace. If the ``$HOME/.pudl.yml`` file does not exist, set these paths to None. # Ensure that no matter what the user has put in this file, we get fully # specified absolute paths out when we read it: Derive PUDL paths based on given input and output paths. If no configuration file path is provided, attempt to read in the user configuration from a file called .pudl.yml in the user's HOME directory. Presently the only values we expect are pudl_in and pudl_out, directories that store files that PUDL either depends on that rely on PUDL. Args: pudl_in (os.PathLike): Path to the directory containing the PUDL input files, most notably the ``data`` directory which houses the raw data downloaded from public agencies by the :mod:`pudl.workspace.datastore` tools. ``pudl_in`` may be the same directory as ``pudl_out``. pudl_out (os.PathLike): Path to the directory where PUDL should write the outputs it generates. These will be organized into directories according to the output format (sqlite, datapackage, etc.). Returns: dict: A dictionary containing common PUDL settings, derived from those read out of the YAML file. Mostly paths for inputs & outputs. # ps is short for pudl settings -- a dictionary of paths, etc. # The only "inputs" are the datastore and example settings files: # Convert from input string to Path and make it absolute w/ resolve() # Store these as strings... since we aren't using Paths everywhere yet: # Everything else goes into outputs, generally organized by type of file: # One directory per output format, datapackage, sqlite, etc.: Set up a new PUDL working environment based on the user settings. Args: pudl_in (os.PathLike): Path to the directory containing the PUDL input files, most notably the ``data`` directory which houses the raw data downloaded from public agencies by the :mod:`pudl.workspace.datastore` tools. ``pudl_in`` may be the same directory as ``pudl_out``. pudl_out (os.PathLike): Path to the directory where PUDL should write the outputs it generates. These will be organized into directories according to the output format (sqlite, datapackage, etc.). clobber (bool): if True, replace existing files. If False (the default) do not replace existing files. 
Returns: None # Generate paths for the workspace: # Create tmp directory # These are files that may exist in the package_data directory, but that # we do not want to deploy into a user workspace: # Make a settings directory in the workspace, and deploy settings files: # Make several output directories: Deploy all files from a package_data directory into a workspace. Args: pkg_path (str): Dotted module path to the subpackage inside of package_data containing the resources to be deployed. deploy_dir (os.PathLike): Directory on the filesystem to which the files within pkg_path should be deployed. ignore_files (iterable): List of filenames (strings) that may be present in the pkg_path subpackage, but that should be ignored. clobber (bool): if True, replace existing copies of the files that are being deployed from pkg_path to deploy_dir. If False, do not replace existing files. Returns: None | 2.645661 | 3 |
ts/torch_handler/unit_tests/test_image_classifier.py | akarazniewicz/serve | 1 | 6632216 | # pylint: disable=W0621
# Using the same name as global function is part of pytest
"""
Basic unit test for ImageClassifier class.
Ensures it can load and execute an example model
"""
import sys
import pytest
from ts.torch_handler.image_classifier import ImageClassifier
from .test_utils.mock_context import MockContext
sys.path.append('ts/torch_handler/unit_tests/models/tmp')
@pytest.fixture()
def model_setup():
context = MockContext()
with open('ts/torch_handler/unit_tests/models/tmp/images/kitten.jpg', 'rb') as fin:
image_bytes = fin.read()
return (context, image_bytes)
def test_initialize(model_setup):
model_context, _ = model_setup
handler = ImageClassifier()
handler.initialize(model_context)
assert(True)
return handler
def test_handle(model_setup):
_, image_bytes = model_setup
handler = test_initialize(model_setup)
test_data = [{'data': image_bytes}] * 2
results = handler.handle(test_data, image_bytes)
assert(len(results) == 2)
assert('tiger_cat' in results[0])
| # pylint: disable=W0621
# Using the same name as global function is part of pytest
"""
Basic unit test for ImageClassifier class.
Ensures it can load and execute an example model
"""
import sys
import pytest
from ts.torch_handler.image_classifier import ImageClassifier
from .test_utils.mock_context import MockContext
sys.path.append('ts/torch_handler/unit_tests/models/tmp')
@pytest.fixture()
def model_setup():
context = MockContext()
with open('ts/torch_handler/unit_tests/models/tmp/images/kitten.jpg', 'rb') as fin:
image_bytes = fin.read()
return (context, image_bytes)
def test_initialize(model_setup):
model_context, _ = model_setup
handler = ImageClassifier()
handler.initialize(model_context)
assert(True)
return handler
def test_handle(model_setup):
_, image_bytes = model_setup
handler = test_initialize(model_setup)
test_data = [{'data': image_bytes}] * 2
results = handler.handle(test_data, image_bytes)
assert(len(results) == 2)
assert('tiger_cat' in results[0])
| en | 0.729742 | # pylint: disable=W0621 # Using the same name as global function is part of pytest Basic unit test for ImageClassifier class. Ensures it can load and execute an example model | 2.631016 | 3 |
venv/Lib/site-packages/pandas/tests/series/indexing/test_delitem.py | OliviaNabbosa89/Disaster_Responses | 0 | 6632217 | import pytest
from pandas import Index, Series
import pandas._testing as tm
class TestSeriesDelItem:
def test_delitem(self):
# GH#5542
# should delete the item inplace
s = Series(range(5))
del s[0]
expected = Series(range(1, 5), index=range(1, 5))
tm.assert_series_equal(s, expected)
del s[1]
expected = Series(range(2, 5), index=range(2, 5))
tm.assert_series_equal(s, expected)
# only 1 left, del, add, del
s = Series(1)
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
s[0] = 1
tm.assert_series_equal(s, Series(1))
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
def test_delitem_object_index(self):
# Index(dtype=object)
s = Series(1, index=["a"])
del s["a"]
tm.assert_series_equal(
s, Series(dtype="int64", index=Index([], dtype="object"))
)
s["a"] = 1
tm.assert_series_equal(s, Series(1, index=["a"]))
del s["a"]
tm.assert_series_equal(
s, Series(dtype="int64", index=Index([], dtype="object"))
)
def test_delitem_missing_key(self):
# empty
s = Series(dtype=object)
with pytest.raises(KeyError, match=r"^0$"):
del s[0]
| import pytest
from pandas import Index, Series
import pandas._testing as tm
class TestSeriesDelItem:
def test_delitem(self):
# GH#5542
# should delete the item inplace
s = Series(range(5))
del s[0]
expected = Series(range(1, 5), index=range(1, 5))
tm.assert_series_equal(s, expected)
del s[1]
expected = Series(range(2, 5), index=range(2, 5))
tm.assert_series_equal(s, expected)
# only 1 left, del, add, del
s = Series(1)
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
s[0] = 1
tm.assert_series_equal(s, Series(1))
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
def test_delitem_object_index(self):
# Index(dtype=object)
s = Series(1, index=["a"])
del s["a"]
tm.assert_series_equal(
s, Series(dtype="int64", index=Index([], dtype="object"))
)
s["a"] = 1
tm.assert_series_equal(s, Series(1, index=["a"]))
del s["a"]
tm.assert_series_equal(
s, Series(dtype="int64", index=Index([], dtype="object"))
)
def test_delitem_missing_key(self):
# empty
s = Series(dtype=object)
with pytest.raises(KeyError, match=r"^0$"):
del s[0]
| en | 0.644094 | # GH#5542 # should delete the item inplace # only 1 left, del, add, del # Index(dtype=object) # empty | 2.68467 | 3 |
azext_iot/sdk/iothub/service/models/registry_statistics_py3.py | YingXue/azure-iot-cli-extension | 0 | 6632218 | <gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RegistryStatistics(Model):
"""RegistryStatistics.
:param total_device_count:
:type total_device_count: long
:param enabled_device_count:
:type enabled_device_count: long
:param disabled_device_count:
:type disabled_device_count: long
"""
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'},
'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'},
'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'},
}
def __init__(self, *, total_device_count: int=None, enabled_device_count: int=None, disabled_device_count: int=None, **kwargs) -> None:
super(RegistryStatistics, self).__init__(**kwargs)
self.total_device_count = total_device_count
self.enabled_device_count = enabled_device_count
self.disabled_device_count = disabled_device_count
| # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RegistryStatistics(Model):
"""RegistryStatistics.
:param total_device_count:
:type total_device_count: long
:param enabled_device_count:
:type enabled_device_count: long
:param disabled_device_count:
:type disabled_device_count: long
"""
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'},
'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'},
'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'},
}
def __init__(self, *, total_device_count: int=None, enabled_device_count: int=None, disabled_device_count: int=None, **kwargs) -> None:
super(RegistryStatistics, self).__init__(**kwargs)
self.total_device_count = total_device_count
self.enabled_device_count = enabled_device_count
self.disabled_device_count = disabled_device_count | en | 0.564893 | # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- RegistryStatistics. :param total_device_count: :type total_device_count: long :param enabled_device_count: :type enabled_device_count: long :param disabled_device_count: :type disabled_device_count: long | 1.842679 | 2 |
Apps/rsp/eqcheck.py | zhanghongce/ila-mcm-fmcad18 | 0 | 6632219 | from traceStep import *
import axiom
# ---------------------------
# Configurations
# ---------------------------
FORCE_ALL_INST_DECODE_TRUE = True
DEBUG = False
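# Overview (as read from the code below): this module builds z3 "scenes" that relate the OpenCL
# RSP instructions in `openclrsp` to the GPU micro-instruction model in `gpuModel`. Each PO*
# method of `eqchecker` encodes one litmus-style scenario as trace steps plus happens-before,
# decode, and read-from (pi) constraints collected in ConstraintList.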
# ---------------------------
# Prerequisites
# ---------------------------
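# LAnd / LOr fold a Python list of z3 expressions into a single conjunction / disjunction,
# handling the empty and singleton cases explicitly (note that both return BoolVal(True) for
# an empty list).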
def LAnd(l):
if len(l) == 0:
return z3.BoolVal(True)
elif len(l) == 1:
return l[0]
else:
return z3.And(l)
def LOr(l):
if len(l) == 0:
return z3.BoolVal(True)
elif len(l) == 1:
return l[0]
else:
return z3.Or(l)
# ---------------------------
# clInstruction = openclrsp.CL_load_DV_N
# gpuInstruction = [ gpuModel.ev_FETCH_L1 , gpuModel.inst_LD , gpuModel.inst_INV_L1_WG ]
wvopListTp = namedtuple('wvopListTp','WG DV addr value')
rvopListTp = namedtuple('rvopListTp','WG DV addr value')
prevEVsequence = { gpuModel.inst_LD: [gpuModel.ev_FETCH_L1] ,
gpuModel.inst_INC_L1 : [gpuModel.ev_FETCH_L1] }
postEVsequence = {
gpuModel.inst_ST: [ gpuModel.ev_FLUSH_L1, gpuModel.ev_DEQLOC_L1 ], # fetch will also need this
gpuModel.inst_INC_L1 : [ gpuModel.ev_FLUSH_L1 , gpuModel.ev_DEQLOC_L1 ]
}
blockSequence = {
gpuModel.inst_FLU_L1_DV : gpuModel.ev_DEQMARKER_L1, # will wait for many of it
gpuModel.inst_FLU_L1_WG : gpuModel.ev_DEQMARKER_L1
}
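# The tables above relate micro-instructions to their surrounding environment events:
#   prevEVsequence : events that must precede an instruction (e.g. an L1 FETCH before LD / INC).
#   postEVsequence : events that must follow it (e.g. FLUSH and DEQLOC after ST / INC).
#   blockSequence  : instructions whose completion is tied to DEQMARKER events being dequeued.
# wvopListTp / rvopListTp bundle the WG- and DV-level trace steps of a write / read operation
# together with its address and value.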
# pre is already modeled in the sequence
rfMapInst = {
openclrsp.CL_load_na_or_WG: [ gpuModel.inst_LD ],
    openclrsp.CL_load_DV_N: [ gpuModel.inst_LD , gpuModel.inst_INV_L1_WG ], # correct: [ inst_LD, inst_INV_L1_WG ]; a wrong implementation would be [ inst_INV_L1_WG, inst_LD ]
openclrsp.CL_load_DV_R: [ gpuModel.inst_LD , gpuModel.inst_FLU_L1_DV , gpuModel.inst_INV_L1_WG ], #[ gpuModel.inst_LK_L2, gpuModel.inst_FLU_L1_DV, gpuModel.inst_INV_L1_WG, gpuModel.inst_LD, gpuModel.inst_UL_L2 ], # new: [ gpuModel.inst_LD , gpuModel.inst_FLU_L1_DV , gpuModel.inst_INV_L1_WG ] original : [ gpuModel.inst_LK_L2, gpuModel.inst_FLU_L1_DV, gpuModel.inst_INV_L1_WG, gpuModel.inst_LD, gpuModel.inst_UL_L2 ]
openclrsp.CL_store_na_or_WG:[ gpuModel.inst_ST ],
openclrsp.CL_store_DV_N: [ gpuModel.inst_FLU_L1_WG, gpuModel.inst_ST ],
openclrsp.CL_store_DV_R: [ gpuModel.inst_LK_rmw_DV, gpuModel.inst_FLU_L1_DV, gpuModel.inst_INV_L1_DV, gpuModel.inst_ST, gpuModel.inst_FLU_L1_WG, gpuModel.inst_INV_L1_DV, gpuModel.inst_UL_rmw_DV ], # Wrong : [ gpuModel.inst_LK_L2, gpuModel.inst_FLU_L1_WG, gpuModel.inst_ST, gpuModel.inst_INV_L1_DV, gpuModel.inst_UL_L2 ], # this was the wrong thing
openclrsp.CL_fetch_inc_WG: [ gpuModel.inst_INC_L1 ]
}
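# rfMapInst maps each OpenCL RSP parent instruction to the chain of GPU micro-instructions that
# implements it; the commented-out alternatives above record earlier (incorrect) decompositions
# kept for reference.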
def HB(a,b):
return a.timestamp < b.timestamp
def HBd(a,b):
return z3.Implies( z3.And( a.inst.decodeFunc, b.inst.decodeFunc), a.timestamp < b.timestamp )
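# HB  : unconditional happens-before between two trace steps (strict timestamp order).
# HBd : happens-before required only when both instructions actually decode (execute).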
# =============================================
# EQ CHECKER
# =============================================
class eqchecker(object):
def __init__(self, num_dev, num_wg, num_t):
self.PiVarList = {} # pi_var_name (ts + statename) -> [ (sname, pivar, ts, addr ) ]
self.PiVarListOld = {}
self.TsThatWritesVar = {}
self.TsThatWritesVarOld = {}
self.TsNameToObj = {}
self.ConstraintList = []
self.runProp = []
self.num_of_parent_inst_in_scene = 0
self.tsList = [] # Note this is not a list of PO
        self.PrevDecode = None # Note: a pending decode condition here may still be in effect for the next instruction chain
self.num_dev, self.num_wg, self.num_t = num_dev, num_wg, num_t
self.ConstrainedPiVarNames = set([]) # set of String : name of the PiVar
        self.L1fr_read_from = {} # ld --> fetch (if fetch.decode holds, the load must read from that fetch; otherwise it must read from some other write)
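        # PiVarList / TsThatWritesVar / ConstraintList are shared with every TraceStep created below:
        # pi variables stand for values read from shared state, TsThatWritesVar indexes the trace
        # steps that may write each state variable, and ConstraintList accumulates the z3 formula.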
# ======================================
# below is the content of the collector
# ======================================
self.load_na_or_WG_list = []
self.load_DV_N_list = []
self.load_DV_R_list = []
self.store_na_or_WG_list = []
self.store_DV_N_list = []
self.store_DV_R_list = []
self.LOAD_list = []
self.STORE_list = []
self.rmw_list = []
self.name_to_list_mapping = {'load_na_or_WG':[self.load_na_or_WG_list, self.LOAD_list],
'load_DV_N':[self.load_DV_N_list ,self.LOAD_list],
'load_DV_R':[self.load_DV_R_list ,self.LOAD_list],
'store_na_or_WG':[self.store_na_or_WG_list ,self.STORE_list],
'store_DV_N':[self.store_DV_N_list ,self.STORE_list],
'store_DV_R':[self.store_DV_R_list ,self.STORE_list],
'fetch_inc_WG':[self.rmw_list, self.LOAD_list,self.STORE_list],
}
def addToList(self,inst):
for lref in self.name_to_list_mapping[inst.name]:
lref.append(inst)
def addParentInst(self, addr, reg, e1, inst_func):
def dummyRead(sname,addr = 0, entity = 0):
return 0
#state = lambda stateName, addr = None , overWriteVA = None: ts1.stateReadFunc(stateName,addr,overWriteVA)
paramDict = {'x': addr, 'r' :reg, 'Eid': e1 , 'state':dummyRead }
inst = inst_func( **paramDict )
self.addToList(inst)
return inst
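    # addInstToScene creates one TraceStep for a single GPU micro-instruction (or environment
    # event), hooks it into the shared bookkeeping above, and returns the concretized instruction
    # together with its trace step.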
def addInstToScene(self, addr, reg, e1, inst_func, decodeWaitList, slot = None):
        # If decodeWaitList is not None, an additional decode condition is conjoined with the
        # instruction's own decode; the slot argument is only passed through when it is given.
# addr = z3.Int('addr'+str(self.num_of_inst_in_scene) ) # the order here matters
# reg = z3.Int('reg' +str(self.num_of_inst_in_scene) ) # the order here matters
# and you need to check if you need an entity or not
# e1 = self._genEntity() # entity for the instruction
ts1 = TraceStep(entity = e1,
TsThatWritesVar = self.TsThatWritesVar ,
TsNameToObj = self.TsNameToObj,
ConstraintList = self.ConstraintList,
PiVarList = self.PiVarList )
self.tsList.append(ts1) # tsList is gathering all these
# concretize the instruction
state = lambda stateName, addr = None , entity = None: ts1.stateReadFunc(stateName,addr,entity)
paramDict = {'x': addr, 'r' :reg, 'Eid': e1 , 'state':state, 'num_dev':self.num_dev, 'num_wg': self.num_wg , 'num_t': self.num_t }
if slot is not None:
paramDict['slot'] = slot
inst = inst_func( **paramDict ) # get the instruction
# here is the logic for the pause after flush
if decodeWaitList is not None: # then it should be a list
if len( decodeWaitList ) == 1:
extraDecodeExpr = ts1.stateReadFunc( 'L1fifotail' ) == decodeWaitList[0] # Previous is FLU_WG
else:
assert len( decodeWaitList ) == self.num_wg
extraDecodeExpr = z3.And( [ \
ts1.stateReadFunc('L1fifotail' , entity = gpuModel.entity(e1.d, wgId ) ) == decodeWaitList[wgId] \
for wgId in range(self.num_wg) ] )
inst.decodeFunc = z3.And( inst.decodeFunc, extraDecodeExpr )
# change the decode function when necessary
        ts1.assignInst(inst)
return inst,ts1
# PO relations are added by axioms
# w/r vop relations are added by axioms
# prev decode may be in effect for another instruction, but not in our case
    # but it would still be best to add the parent instructions according to their program order
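    # addAChain instantiates the whole micro-instruction chain of one parent (OpenCL) instruction:
    # it records the parent, creates one trace step per child micro-instruction plus the
    # surrounding FETCH / FLUSH / DEQ environment steps, and adds the intra-chain happens-before
    # and decode constraints.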
def addAChain(self, Eid, parentInstruction, childInstructions, prevTimeStamp): # for one parent-instruction -> should return one child instruction
# entity
# gen addrs
addr = z3.Int('addr'+str(self.num_of_parent_inst_in_scene) ) # the order here matters
reg = z3.Int('reg' +str(self.num_of_parent_inst_in_scene) ) # the order here matters
# and you need to check if you need an entity or not
# e1 = self._genEntity() # entity for the instruction
self.num_of_parent_inst_in_scene += 1
pInst_ref = self.addParentInst(addr = addr, reg = reg, e1 = Eid, inst_func = parentInstruction) #addr, reg, e1, inst_func
prevTs = None
blockSeqSlotList = []
instructions_in_the_chain = []
for uinst in childInstructions:
_ , tsRef = self.addInstToScene(addr = addr, reg = reg, e1 = Eid, inst_func = uinst, decodeWaitList = self.PrevDecode )
instructions_in_the_chain.append(tsRef)
self.ConstraintList.append( tsRef.timestamp > prevTimeStamp )
# every child instruction step is later than init or the previous barrier (not including evs)
if prevTs is not None:
self.ConstraintList.append( HB(prevTs, tsRef) ) # tsRef.timestamp > prevTs.timestamp )
# constraint 2: child instruction po
else: # prevTs is None
assert self.PrevDecode is None
# check that flush is not the last child instruction !!!
self.PrevDecode = None # reset it
# constraint #3: child instructions must be executed
            self.ConstraintList.append( tsRef.inst.decodeFunc ) # constraint: DECODE must hold
            # From here on, check whether additional environment (ev_*) transitions are needed.
if uinst in prevEVsequence: # FETCH -> LD
seq = prevEVsequence[uinst]
assert len(seq) == 1 # we only handle the case with one fetch (RVOP)
fetch_ev = seq[0]
inst_sub, tsRef_sub = self.addInstToScene(addr = addr , reg = reg, e1 = Eid, inst_func = fetch_ev, decodeWaitList = None, slot = None)
# we don't require its decode to be true
valueToBeWritten = tsRef.inst.updateDict[gpuModel.setSname('r',Eid)][0] # get from updateDict
pInst_ref.rvop = rvopListTp( WG = tsRef, DV = tsRef_sub, addr = addr, value = valueToBeWritten )
pInst_ref.timestamp = tsRef.timestamp
self.ConstraintList.append( HBd(tsRef_sub, tsRef) ) # only guarantee if both decodes are true
self.ConstraintList.append( tsRef_sub.timestamp > 0 )
self.L1fr_read_from[tsRef] = tsRef_sub
            if uinst in postEVsequence: # ST / INC are followed by FLUSH and DEQLOC events
seq = postEVsequence[uinst]
parent_or_prev_ts = tsRef # the one that will happenBefore it
tmpTsList = []
# get the slot
storeFIFOSlot = tsRef.stateReadFunc('L1fifotail')
for post_inst in seq:
inst_sub, tsRef_sub = self.addInstToScene(addr = addr , reg = reg, e1 = Eid, inst_func = post_inst, decodeWaitList = None, slot = storeFIFOSlot)
self.ConstraintList.append( HB(parent_or_prev_ts, tsRef_sub) ) # parent_or_prev_ts.timestamp < tsRef_sub.timestamp )
self.ConstraintList.append( tsRef_sub.timestamp > 0 )
#
parent_or_prev_ts = tsRef_sub
tmpTsList.append(tsRef_sub)
                valueToBeRead = tsRef.stateReadFunc('r', addr = reg) # read the value from its local register
pInst_ref.wvop = wvopListTp( WG = tsRef, DV = tmpTsList[0], addr = addr, value = valueToBeRead )
pInst_ref.timestamp = tsRef.timestamp
if uinst in blockSequence: # FLUSH: either the device or workgroup
blockSeqSlotList = []
flush_ev = blockSequence[uinst] # it will only be one instruction
# but still you won't need to change decode ?
if uinst is gpuModel.inst_FLU_L1_DV:
                    # in this case we need to create one flush MARKER per work-group (num_wg of them)
for wgId in range(self.num_wg):
# read the slot from tsRef
DEQ_MARKER_ENTITY = gpuModel.entity(Eid.d, wgId )
slot = tsRef.stateReadFunc( 'L1fifohead' , entity = DEQ_MARKER_ENTITY) # dep marker will need this
blockSeqSlotList.append(slot + 1)
# create a ts after it
inst_sub, tsRef_sub = self.addInstToScene(addr = addr , reg = reg, e1 = DEQ_MARKER_ENTITY, inst_func = flush_ev, decodeWaitList = None, slot = slot)
self.ConstraintList.append( HB(tsRef, tsRef_sub) ) # tsRef_sub.timestamp > tsRef.timestamp )
self.ConstraintList.append( tsRef_sub.timestamp > 0 )
                    # concatenate the decode conditions for all work-groups
                    # maybe we could directly instantiate the function (to a z3 expr) here without calling it
self.PrevDecode = blockSeqSlotList
elif uinst is gpuModel.inst_FLU_L1_WG:
slot = tsRef.stateReadFunc('L1fifohead') # dep marker will need this
flush_ev = blockSequence[uinst] # it will only be one instruction
self.PrevDecode = [ slot +1 ]
inst_sub, tsRef_sub = self.addInstToScene(addr = addr , reg = reg, e1 = Eid, inst_func = flush_ev, decodeWaitList = None, slot = slot)
self.ConstraintList.append( HB(tsRef, tsRef_sub) ) # tsRef_sub.timestamp > tsRef.timestamp )
else:
assert False # we won't support other cases
            # depending on whether the flush is WG- or DV-level,
            # the decode expression is added once or once per work-group,
            # and happens-before edges are added before the following instructions
prevTs = tsRef
# set the end and start
pInst_ref.begin = instructions_in_the_chain[0]
pInst_ref.end = instructions_in_the_chain[-1]
return instructions_in_the_chain
# Let's add pi function
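    # setPiFunAW constrains every pending pi (read) variable to read-from some write: for an L1fr
    # read that has a matching FETCH step, from that fetch when the fetch decodes; otherwise from
    # one of the recorded writer trace steps or the "all-write" steps, via mm.build_rf_constraint.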
def setPiFunAW(self, PiVarList , TsThatWritesVar, AllWriteTsList ): # by default should be self.PiVarList
for sname, piVar, tsRef, VAddr in PiVarList.values():
            # check the name so the same pi variable is not constrained twice
piVarName = str(piVar)
if piVarName in self.ConstrainedPiVarNames:
continue # else:
self.ConstrainedPiVarNames.add(piVarName)
timestamp = tsRef.timestamp
writerTraceSteps = TsThatWritesVar.get(sname,[])
fetch_ts = None
if gpuModel.removeEntity(sname) == 'L1fr' and tsRef in self.L1fr_read_from:
fetch_ts = self.L1fr_read_from[tsRef]
# fetch_ts.inst.decodeFunc => rf
constraint = mm.build_rf_constraint(sname, piVar, VAddr, TsThatWritesVar, read = tsRef, write = fetch_ts, AllWriteTsList = []) # 3 update check
constraint = z3.Implies( fetch_ts.inst.decodeFunc , z3.Implies( tsRef.inst.decodeFunc , constraint ))
self.ConstraintList.append( constraint )
OrClauses = []
for traceStep in list(writerTraceSteps) + AllWriteTsList:
if traceStep is tsRef: continue
if traceStep is fetch_ts: continue # this will disallow read from the fetch
constraint = mm.build_rf_constraint(sname, piVar, VAddr, TsThatWritesVar, read = tsRef, write = traceStep, AllWriteTsList = AllWriteTsList) # 3 update check
constraint = z3.Implies( tsRef.inst.decodeFunc , constraint ) # check decode
OrClauses.append(constraint)
# but we will add all the inst to be true???
if fetch_ts is not None:
self.ConstraintList.append( z3.Implies( z3.Not(fetch_ts.inst.decodeFunc), z3.Or(OrClauses) ) )
else:
self.ConstraintList.append( z3.Or(OrClauses) )
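    # The PO* methods below each build one litmus-style scenario: an init step, the chains of the
    # parent instructions under check, and an interval step that roughly stands for whatever else
    # the system may do in between. A usage sketch (entity construction is assumed to come from
    # gpuModel, e.g. gpuModel.entity(dev, wg)):
    #   chk = eqchecker(num_dev = 1, num_wg = 2, num_t = 1)
    #   chk.POAhead2Store(openclrsp.CL_store_DV_N, e1, openclrsp.CL_store_na_or_WG, e2)
    #   s = z3.Solver(); s.add(chk.ConstraintList); s.check()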
def POAhead2Store(self, parent_instruction_1, e1, parent_instruction_2, e2):
def constrainIntervals(ts1,ts2):
# ts1 is the init , and ts2 is the interval
assert len(ts1.L1FifoTails) == len(ts2.L1FifoTailDeltas)
assert len(ts1.L1FifoHeads) == len(ts2.L1FifoHeadDeltas)
assert len(ts1.L1FifoTails) == len(ts2.L1FifoHeads)
            for sname in ts1.L1FifoHeads.keys(): # this iterates over all (device, work-group) FIFOs
head_name = sname
tail_name = sname.replace('head','tail')
self.ConstraintList.append( ts1.L1FifoHeads[head_name] >= 0 )
self.ConstraintList.append( ts1.L1FifoTails[tail_name] >= 0 )
self.ConstraintList.append( ts1.L1FifoHeads[head_name] >= ts1.L1FifoTails[tail_name] )
self.ConstraintList.append( ts2.L1FifoHeadDeltas[head_name] >= 0 )
self.ConstraintList.append( ts2.L1FifoTailDeltas[tail_name] >= 0 )
self.ConstraintList.append( ts2.L1FifoHeadDeltas[head_name] == ts2.L1FifoTailDeltas[tail_name] )
            # these are the environment constraints on the FIFO head/tail variables
        # first, build the micro-instruction chains
tsInit = InitStep_fifo( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj , overwriteTimeStamp = 0 )
self.tsList.append( tsInit )
tsBList = self.addAChain( parentInstruction = parent_instruction_1, Eid = e1,
childInstructions = rfMapInst[ parent_instruction_1 ], prevTimeStamp = tsInit.timestamp )
tsInterval = IntervalStep_fifo( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj,
TsThatWritesVar = self.TsThatWritesVar , ConstraintList = self.ConstraintList , PiVarList = self.PiVarList )
self.tsList.append( tsInterval )
tsU = self.addAChain( parentInstruction = parent_instruction_2, Eid = e2,
childInstructions = rfMapInst[ parent_instruction_2 ], prevTimeStamp = tsInterval.timestamp )
self.ConstraintList.append( HB(tsInit,tsBList[0]) ) # do we really need this?
self.ConstraintList.append( HB(tsBList[-1],tsInterval) )
# tsInterval --HB--> tsU is guaranteed by addAChain
self.setPiFunAW( PiVarList = self.PiVarList, TsThatWritesVar = self.TsThatWritesVar, AllWriteTsList = [tsInit, tsInterval] )
## TODO: add the invariants
constrainIntervals(tsInit,tsInterval)
def POAfter2Load(self, parent_instruction_1, e1, parent_instruction_2, e2):
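        # Scenario: parent_instruction_1 (the load under check, at e1) is program-order ahead of
        # parent_instruction_2 (a later load, at e2), separated by an interval step for
        # environment activity; reads-from (pi) constraints are then added for both chains.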
#first get the micro instructions
# add init frame
# add instr @ check frame
# add instr that separate
# add inst @ after frame
tsInit = InitStep_any( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj , overwriteTimeStamp = 0 )
self.tsList.append(tsInit)
tsUnderCheck = self.addAChain( parentInstruction = parent_instruction_1, Eid = e1,
childInstructions = rfMapInst[parent_instruction_1], prevTimeStamp = tsInit.timestamp )
tsInterval = IntervalStep_frKeep( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj,
ConstraintList = self.ConstraintList , PiVarList = self.PiVarList )
self.tsList.append(tsInterval)
# here let's backup the whole thing
""" push pi var to a stack """
#self.setPiFun(init = tsInit, PiVarList = self.PiVarList, TsThatWritesVar = self.TsThatWritesVar)
#self.PiVarListOld = self.PiVarList #.update(self.PiVarList)
#self.PiVarList = {}
""" push TsThatWritesVar """
#self.TsThatWritesVarOld = self.TsThatWritesVar #.update(self.TsThatWritesVar)
#self.TsThatWritesVar = {}
tsAfterCheck = self.addAChain( parentInstruction = parent_instruction_2, Eid = e2,
childInstructions = rfMapInst[parent_instruction_2], prevTimeStamp = tsInterval.timestamp ) # instructions are PO after tsInterval
# this will update the PiVarList associated with IntervalStep_frKeep
#
#self.setPiFun(init = tsInterval, PiVarList = self.PiVarList, TsThatWritesVar = self.TsThatWritesVar)
self.setPiFunAW( PiVarList = self.PiVarList, TsThatWritesVar = self.TsThatWritesVar, AllWriteTsList = [tsInit, tsInterval] )
        # run setPiFunAW a second time; already-constrained pi variables are skipped, so nothing is duplicated
        self.setPiFunAW( PiVarList = self.PiVarList, TsThatWritesVar = self.TsThatWritesVar, AllWriteTsList = [tsInit, tsInterval] )
        # the second pass is needed because the first pass can generate new pi variables on demand
#assert ('pi_intervalts5_L1fr@0,0addr0' in self.PiVarList)
#assert ('pi_intervalts5_L1fr@0,0addr1' in self.PiVarList)
self.ConstraintList.append( HB(tsInit,tsUnderCheck[0]) ) # do we really need this?
self.ConstraintList.append( HB(tsUnderCheck[-1],tsInterval) )
# self.ConstraintList.append( HB(tsInterval,tsAfterCheck[0]) ) # we don't need this, it is already enforced
## TODO: add the invariants
        # you will need the rf (reads-from) relation as a function
def PO3SLL(self, parent_instruction_1, e1, parent_instruction_2, e2, parent_instruction_3, e3): # for store_DV_Rs : axiom 3
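        # Three-instruction scenario (store, then two loads): FIFO init step -> chain of parent 1
        # -> interval -> chain of parent 2 -> interval -> chain of parent 3, with happens-before
        # edges from each chain tail to the following interval, two passes of reads-from (pi)
        # constraints, and FIFO invariants on the init step only.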
def constrainIntervals(ts1): #,ts2):
            # ts1 is the init step; the interval-delta checks below are disabled for this scenario
#assert len(ts1.L1FifoTails) == len(ts2.L1FifoTailDeltas)
#assert len(ts1.L1FifoHeads) == len(ts2.L1FifoHeadDeltas)
#assert len(ts1.L1FifoTails) == len(ts2.L1FifoHeads)
            for sname in ts1.L1FifoHeads.keys(): # iterate over every (device, workgroup) FIFO head/tail pair
head_name = sname
tail_name = sname.replace('head','tail')
self.ConstraintList.append( ts1.L1FifoHeads[head_name] >= 0 )
self.ConstraintList.append( ts1.L1FifoTails[tail_name] >= 0 )
self.ConstraintList.append( ts1.L1FifoHeads[head_name] >= ts1.L1FifoTails[tail_name] )
#self.ConstraintList.append( ts2.L1FifoHeadDeltas[head_name] >= 0 )
#self.ConstraintList.append( ts2.L1FifoTailDeltas[tail_name] >= 0 )
#self.ConstraintList.append( ts2.L1FifoHeadDeltas[head_name] == ts2.L1FifoTailDeltas[tail_name] )
#first get the micro instructions
tsInit = InitStep_fifo( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj , overwriteTimeStamp = 0 ) #InitStep_any( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj , overwriteTimeStamp = 0 )
self.tsList.append( tsInit )
tsBListS1 = self.addAChain( parentInstruction = parent_instruction_1, Eid = e1,
childInstructions = rfMapInst[ parent_instruction_1 ], prevTimeStamp = tsInit.timestamp )
tsIntervalSL = IntervalStep_frKeep( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj,
ConstraintList = self.ConstraintList , PiVarList = self.PiVarList )
self.tsList.append( tsIntervalSL )
tsBListS2 = self.addAChain( parentInstruction = parent_instruction_2, Eid = e2,
childInstructions = rfMapInst[ parent_instruction_2 ], prevTimeStamp = tsIntervalSL.timestamp )
tsIntervalLL = IntervalStep_frKeep( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj,
ConstraintList = self.ConstraintList , PiVarList = self.PiVarList )
self.tsList.append( tsIntervalLL )
tsUListR = self.addAChain( parentInstruction = parent_instruction_3, Eid = e3,
childInstructions = rfMapInst[ parent_instruction_3 ], prevTimeStamp = tsIntervalLL.timestamp )
#self.ConstraintList.append( HB(tsInit,tsBListS1[0]) )
self.ConstraintList.append( HB(tsBListS1[-1], tsIntervalSL) )
self.ConstraintList.append( HB(tsBListS2[-1], tsIntervalLL) )
# tsIntervalSL --HB--> tsU is guaranteed by addAChain
self.setPiFunAW( PiVarList = self.PiVarList, TsThatWritesVar = self.TsThatWritesVar, AllWriteTsList = [tsInit, tsIntervalSL, tsIntervalLL] )
self.setPiFunAW( PiVarList = self.PiVarList, TsThatWritesVar = self.TsThatWritesVar, AllWriteTsList = [tsInit, tsIntervalSL, tsIntervalLL] )
constrainIntervals(tsInit)
def PO3SSL(self, parent_instruction_1, e1, parent_instruction_2, e2, parent_instruction_3, e3): # for load_DV_Rs : axiom 3
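        # Three-instruction scenario (two stores, then a load): FIFO init step -> chain of parent 1
        # -> FIFO interval -> chain of parent 2 -> FIFO interval -> chain of parent 3, with
        # happens-before edges, reads-from (pi) constraints, and FIFO head/tail invariants that
        # also bound the combined deltas of the two intervals.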
def constrainIntervals(ts1,ts2, ts3):
            # ts1 is the init step; ts2 and ts3 are the two interval steps
assert len(ts1.L1FifoTails) == len(ts2.L1FifoTailDeltas)
assert len(ts1.L1FifoHeads) == len(ts2.L1FifoHeadDeltas)
assert len(ts1.L1FifoTails) == len(ts2.L1FifoHeads)
assert len(ts1.L1FifoTails) == len(ts3.L1FifoTailDeltas)
assert len(ts1.L1FifoHeads) == len(ts3.L1FifoHeadDeltas)
assert len(ts1.L1FifoTails) == len(ts3.L1FifoHeads)
            for sname in ts1.L1FifoHeads.keys(): # iterate over every (device, workgroup) FIFO head/tail pair
head_name = sname
tail_name = sname.replace('head','tail')
self.ConstraintList.append( ts1.L1FifoHeads[head_name] >= 0 )
self.ConstraintList.append( ts1.L1FifoTails[tail_name] >= 0 )
self.ConstraintList.append( ts1.L1FifoHeads[head_name] >= ts1.L1FifoTails[tail_name] )
self.ConstraintList.append( ts2.L1FifoHeadDeltas[head_name] >= 0 )
self.ConstraintList.append( ts2.L1FifoTailDeltas[tail_name] >= 0 )
self.ConstraintList.append( ts2.L1FifoHeads[head_name] >= ts2.L1FifoTails[tail_name] )
self.ConstraintList.append( ts3.L1FifoHeadDeltas[head_name] >= 0 )
self.ConstraintList.append( ts3.L1FifoTailDeltas[tail_name] >= 0 )
self.ConstraintList.append( ts3.L1FifoHeads[head_name] >= ts3.L1FifoTails[tail_name] )
self.ConstraintList.append( ts2.L1FifoHeadDeltas[head_name] + ts3.L1FifoHeadDeltas[head_name] >=
ts2.L1FifoTailDeltas[tail_name] + ts3.L1FifoTailDeltas[tail_name] )
                # these are the environmental constraints
#first get the micro instructions
tsInit = InitStep_fifo( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj , overwriteTimeStamp = 0 )
self.tsList.append( tsInit )
tsBListS1 = self.addAChain( parentInstruction = parent_instruction_1, Eid = e1,
childInstructions = rfMapInst[ parent_instruction_1 ], prevTimeStamp = tsInit.timestamp )
tsIntervalSS = IntervalStep_fifo( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj,
TsThatWritesVar = self.TsThatWritesVar , ConstraintList = self.ConstraintList , PiVarList = self.PiVarList )
self.tsList.append( tsIntervalSS )
tsBListS2 = self.addAChain( parentInstruction = parent_instruction_2, Eid = e2,
childInstructions = rfMapInst[ parent_instruction_2 ], prevTimeStamp = tsIntervalSS.timestamp )
tsIntervalSL = IntervalStep_fifo( num_dev = self.num_dev, num_wg = self.num_wg, num_t = self.num_t, TsNameToObj = self.TsNameToObj,
TsThatWritesVar = self.TsThatWritesVar , ConstraintList = self.ConstraintList , PiVarList = self.PiVarList )
self.tsList.append( tsIntervalSL )
tsUListR = self.addAChain( parentInstruction = parent_instruction_3, Eid = e3,
childInstructions = rfMapInst[ parent_instruction_3 ], prevTimeStamp = tsIntervalSL.timestamp )
#self.ConstraintList.append( HB(tsInit,tsBListS1[0]) )
self.ConstraintList.append( HB(tsBListS1[-1], tsIntervalSS) )
self.ConstraintList.append( HB(tsBListS2[-1], tsIntervalSL) )
# tsIntervalSS --HB--> tsU is guaranteed by addAChain
self.setPiFunAW( PiVarList = self.PiVarList, TsThatWritesVar = self.TsThatWritesVar, AllWriteTsList = [tsInit, tsIntervalSS, tsIntervalSL] )
## TODO: add the invariants
constrainIntervals(tsInit,tsIntervalSS,tsIntervalSL)
def add_check_axiom(self):
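        # add the axioms (program-order and w/r vop relations) over the parent instructions in the scene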
#axiom.store_dv_r_3(self)
#axiom.load_dv_r_3(self)
axiom.PO2Axioms(self)
def pushInstConstraintAndCheck(self):
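        # Assert every collected constraint (individually tracked when DEBUG is set, so unsat
        # cores can be inspected) and check satisfiability without the run property; the
        # scenario is only meaningful if this check is sat.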
self.solver = z3.Solver()
s = self.solver
s.set(unsat_core=True if DEBUG else False)
idx = 0
for c in self.ConstraintList:
if DEBUG:
print >>self.logcat, idx,':', c
#raw_input()
if c is False:
print '<W>: False cannot be satisfied'
elif c is True:
pass
else:
s.assert_and_track(c,'n'+str(idx))
idx += 1
else:
s.add(c)
result = s.check()
print 'w.o. run Property:', result
self.modelCheckResult = result
if result == z3.sat:
self.model = s.model()
else: # unsat
if DEBUG:
print >>self.logcat, s.unsat_core()
print '<W>: Not all the reads can be executed! Trace debug info dumped.'
return result
def solve(self):
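        # On top of the instruction constraints, assert the negation of the conjoined run
        # properties and re-check: unsat means the ordering property holds, sat means a
        # violating witness (counterexample model) was found.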
if self.modelCheckResult == z3.unsat:
print '<W>: inconsistent model. Won\'t continue to check the properties.'
return
s = self.solver
if len(self.runProp) == 0:
print '<W>: no property specified.'
return
else:
runProp = z3.Not( LAnd(self.runProp) )
if DEBUG:
print >> self.logcat, 'rp : ', runProp
s.assert_and_track(runProp,'rp')
else:
s.add(runProp)
# extract module
result = s.check()
print result
if result == z3.unsat: # this may use the old one
if DEBUG:
print >>self.logcat, s.unsat_core()
elif result == z3.sat:
self.model = s.model()
print 'negation is witnessed'
#self.printModel()
else:
print 'status Unknown'
return result
def printModel(self):
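        # Dump the concrete z3 model to the log: for every trace step, print its timestamp and
        # instruction, the values it read, and the state updates it performed.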
print >> self.logcat, '====================== MODEL ====================='
for ts in self.tsList:
print >>self.logcat, ts.name, ' entity:', ts.entity.toName() if 'entity' in ts.__dict__ else 'GLOBAL'
t,d,val,readCol = ts.getConcreteModel(self.model)
instName = ts.inst.name if 'inst' in ts.__dict__ else 'INIT'
print >>self.logcat, '@%s, %s:%s'%(str(t), instName, d)
for n,v in readCol.items():
if isinstance(v, tuple):
value = str(v[0]) + '\taddr: ' + str('ANY' if v[1].anyAddr else ( 'z3:' + str(v[1].eval(self.model) ) + ', id:' + v[1].vaid ) )
print >> self.logcat, '\t (Read %s = %s)' % (n, value )
else:
print >>self.logcat, '\t (Read %s = %s)' % (n, str(v) )
for n,v in val.items():
if isinstance(v, tuple):
value = str(v[0]) + '\taddr: ' + str('ANY' if v[1].anyAddr else ( 'z3:' + str(v[1].eval(self.model) ) + ', id:' + v[1].vaid ) )
print >>self.logcat, '\t %s:%s' % (n,value)
else:
print >>self.logcat, '\t %s:%s' % (n,str(v))
self.logcat.flush()
def check2store(num_dev,num_wg,num_t):
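    # Store->store check: with the first entity fixed at (0,0,0), enumerate every second entity
    # and verify that CL_store_DV_R program-order ahead of CL_store_na_or_WG is realizable
    # (constraints sat) and that the ordering property cannot be violated (its negation unsat).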
#for devIdx1 in range(num_dev):
# for wgId1 in range(num_wg):
# for tIdx1 in range(num_t):
devIdx1 = 0; wgId1 = 0; tIdx1 = 0
for devIdx2 in range(num_dev):
for wgId2 in range(num_wg):
for tIdx2 in range(num_t):
eqc = eqchecker(num_dev,num_wg,num_t)
eqc.logcat = open('eqcheck.log', 'wt')
eid1 = gpuModel.entity(devIdx1,wgId1,tIdx1)
eid2 = gpuModel.entity(devIdx2,wgId2,tIdx2)
eqc.POAhead2Store(parent_instruction_1 = openclrsp.CL_store_DV_R, e1 = eid1,
parent_instruction_2 = openclrsp.CL_store_na_or_WG, e2 = eid2 )
eqc.add_check_axiom()
if eqc.pushInstConstraintAndCheck() != z3.sat:
print 'unrealistic scenario!'
assert False
if eqc.solve() == z3.sat:
print 'Ordering is not guaranteed.'
eqc.printModel()
assert False
eqc.logcat.close()
def checkSSL(num_dev,num_wg,num_t):
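    # Store-store-load check: with the first entity fixed, enumerate the second and third
    # entities and verify that two CL_store_DV_N followed by a CL_load_DV_R is realizable and
    # that the ordering property cannot be violated.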
devIdx1 = 0; wgId1 = 0; tIdx1 = 0
#for devIdx1 in range(num_dev):
# for wgId1 in range(num_wg):
# for tIdx1 in range(num_t):
for devIdx2 in range(num_dev):
for wgId2 in range(num_wg):
for tIdx2 in range(num_t):
for devIdx3 in range(num_dev):
for wgId3 in range(num_wg):
for tIdx3 in range(num_t):
eqc = eqchecker(num_dev,num_wg,num_t)
eqc.logcat = open('eqcheck.log', 'wt')
eid1 = gpuModel.entity(devIdx1,wgId1,tIdx1)
eid2 = gpuModel.entity(devIdx2,wgId2,tIdx2)
eid3 = gpuModel.entity(devIdx3,wgId3,tIdx3)
eqc.PO3SSL(
parent_instruction_1 = openclrsp.CL_store_DV_N, e1 = eid1,
parent_instruction_2 = openclrsp.CL_store_DV_N, e2 = eid2,
parent_instruction_3 = openclrsp.CL_load_DV_R, e3 = eid3
)
eqc.add_check_axiom()
if eqc.pushInstConstraintAndCheck() != z3.sat:
print 'unrealistic scenario!'
assert False
#eqc.printModel()
#eqc.logcat.close()
#exit(1)
if eqc.solve() == z3.sat:
print 'Ordering is not guaranteed.'
eqc.printModel()
assert False
eqc.logcat.close()
#eqc.printModel()
#eqc.logcat.close()
#exit(1)
def checkSLL(num_dev,num_wg,num_t):
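    # Store-load-load check: with the first entity fixed, enumerate the second and third
    # entities and verify that a CL_store_DV_R followed by two CL_load_na_or_WG is realizable
    # and that the ordering property cannot be violated.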
devIdx1 = 0; wgId1 = 0; tIdx1 = 0
#devIdx2 = 0; wgId2 = 1; tIdx2 = 0
#devIdx3 = 0; wgId3 = 0; tIdx3 = 0
#for devIdx1 in range(num_dev):
# for wgId1 in range(num_wg):
# for tIdx1 in range(num_t):
for devIdx2 in range(num_dev):
for wgId2 in range(num_wg):
for tIdx2 in range(num_t):
for devIdx3 in range(num_dev):
for wgId3 in range(num_wg):
for tIdx3 in range(num_t):
eqc = eqchecker(num_dev,num_wg,num_t)
eqc.logcat = open('eqcheck.log', 'wt')
eid1 = gpuModel.entity(devIdx1,wgId1,tIdx1)
eid2 = gpuModel.entity(devIdx2,wgId2,tIdx2)
eid3 = gpuModel.entity(devIdx3,wgId3,tIdx3)
eqc.PO3SLL(
parent_instruction_1 = openclrsp.CL_store_DV_R, e1 = eid1,
parent_instruction_2 = openclrsp.CL_load_na_or_WG, e2 = eid2,
parent_instruction_3 = openclrsp.CL_load_na_or_WG, e3 = eid3
)
eqc.add_check_axiom()
if eqc.pushInstConstraintAndCheck() != z3.sat:
print 'unrealistic scenario!'
assert False
if eqc.solve() == z3.sat:
print 'Ordering is not guaranteed.'
eqc.printModel()
assert False
eqc.logcat.close()
def check2load(num_dev,num_wg,num_t):
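    # Load->load check: with the first entity fixed, enumerate every second entity and verify
    # that CL_load_DV_N program-order ahead of another CL_load_DV_N is realizable and that the
    # ordering property cannot be violated.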
devIdx1 = 0; wgId1 = 0; tIdx1 = 0
#for devIdx1 in range(num_dev):
# for wgId1 in range(num_wg):
# for tIdx1 in range(num_t):
for devIdx2 in range(num_dev):
for wgId2 in range(num_wg):
for tIdx2 in range(num_t):
eqc = eqchecker(num_dev,num_wg,num_t)
eqc.logcat = open('eqcheck.log', 'wt')
eid1 = gpuModel.entity(devIdx1,wgId1,tIdx1)
eid2 = gpuModel.entity(devIdx2,wgId2,tIdx2)
eqc.POAfter2Load(parent_instruction_1 = openclrsp.CL_load_DV_N, e1 = eid1,
parent_instruction_2 = openclrsp.CL_load_DV_N, e2 = eid2 )
eqc.add_check_axiom()
if eqc.pushInstConstraintAndCheck() != z3.sat:
print 'unrealistic scenario!'
assert False
#eqc.printModel()
#eqc.logcat.close()
#exit(1)
if eqc.solve() == z3.sat:
print 'Ordering is not guaranteed.'
eqc.printModel()
assert False
eqc.logcat.close()
if __name__ == '__main__':
#check2store(2,2,2) # should also check for store_DV_R to previous stores AXIOM 1 2
check2load(2,2,2)
#checkSSL(2,2,2)
#checkSLL(2,2,2)
|  | en | 0.667276 |  | 1.903766 | 2 |
src/python/pants/backend/python/target_types_rules.py | rcuza/pants | 0 | 6632220 | <reponame>rcuza/pants
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Rules for the core Python target types.
This is a separate module to avoid circular dependencies. Note that all types used by call sites are
defined in `target_types.py`.
"""
import dataclasses
import os.path
from pants.backend.python.dependency_inference.module_mapper import PythonModule, PythonModuleOwners
from pants.backend.python.dependency_inference.rules import PythonInferSubsystem, import_rules
from pants.backend.python.target_types import (
PexBinaryDependencies,
PexEntryPointField,
PythonDistributionDependencies,
PythonProvidesField,
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest,
)
from pants.engine.addresses import Address, Addresses, UnparsedAddressInputs
from pants.engine.fs import GlobMatchErrorBehavior, PathGlobs, Paths
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
ExplicitlyProvidedDependencies,
InjectDependenciesRequest,
InjectedDependencies,
InvalidFieldException,
WrappedTarget,
)
from pants.engine.unions import UnionRule
from pants.source.source_root import SourceRoot, SourceRootRequest
# -----------------------------------------------------------------------------------------------
# `pex_binary` rules
# -----------------------------------------------------------------------------------------------
@rule(desc="Determining the entry point for a `pex_binary` target")
async def resolve_pex_entry_point(request: ResolvePexEntryPointRequest) -> ResolvedPexEntryPoint:
ep_val = request.entry_point_field.value
address = request.entry_point_field.address
# We support several different schemes:
# 1) `<none>` or `<None>` => set to `None`.
# 2) `path.to.module` => preserve exactly.
# 3) `path.to.module:func` => preserve exactly.
# 4) `app.py` => convert into `path.to.app`.
# 5) `app.py:func` => convert into `path.to.app:func`.
# Case #1.
if ep_val.module in ("<none>", "<None>"):
return ResolvedPexEntryPoint(None)
# If it's already a module (cases #2 and #3), simply use that. Otherwise, convert the file name
# into a module path (cases #4 and #5).
if not ep_val.module.endswith(".py"):
return ResolvedPexEntryPoint(ep_val)
# Use the engine to validate that the file exists and that it resolves to only one file.
full_glob = os.path.join(address.spec_path, ep_val.module)
entry_point_paths = await Get(
Paths,
PathGlobs(
[full_glob],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin=f"{address}'s `{request.entry_point_field.alias}` field",
),
)
# We will have already raised if the glob did not match, i.e. if there were no files. But
# we need to check if they used a file glob (`*` or `**`) that resolved to >1 file.
if len(entry_point_paths.files) != 1:
raise InvalidFieldException(
f"Multiple files matched for the `{request.entry_point_field.alias}` "
f"{ep_val.spec!r} for the target {address}, but only one file expected. Are you using "
f"a glob, rather than a file name?\n\n"
f"All matching files: {list(entry_point_paths.files)}."
)
entry_point_path = entry_point_paths.files[0]
source_root = await Get(
SourceRoot,
SourceRootRequest,
SourceRootRequest.for_file(entry_point_path),
)
stripped_source_path = os.path.relpath(entry_point_path, source_root.path)
module_base, _ = os.path.splitext(stripped_source_path)
normalized_path = module_base.replace(os.path.sep, ".")
return ResolvedPexEntryPoint(dataclasses.replace(ep_val, module=normalized_path))
class InjectPexBinaryEntryPointDependency(InjectDependenciesRequest):
inject_for = PexBinaryDependencies
@rule(desc="Inferring dependency from the pex_binary `entry_point` field")
async def inject_pex_binary_entry_point_dependency(
request: InjectPexBinaryEntryPointDependency, python_infer_subsystem: PythonInferSubsystem
) -> InjectedDependencies:
if not python_infer_subsystem.entry_points:
return InjectedDependencies()
original_tgt = await Get(WrappedTarget, Address, request.dependencies_field.address)
explicitly_provided_deps, entry_point = await MultiGet(
Get(ExplicitlyProvidedDependencies, DependenciesRequest(original_tgt.target[Dependencies])),
Get(
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest(original_tgt.target[PexEntryPointField]),
),
)
if entry_point.val is None:
return InjectedDependencies()
owners = await Get(PythonModuleOwners, PythonModule(entry_point.val.module))
address = original_tgt.target.address
explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
owners.ambiguous,
address,
import_reference="module",
context=(
f"The pex_binary target {address} has the field "
f"`entry_point={repr(original_tgt.target[PexEntryPointField].value.spec)}`, which "
f"maps to the Python module `{entry_point.val.module}`"
),
)
maybe_disambiguated = explicitly_provided_deps.disambiguated_via_ignores(owners.ambiguous)
unambiguous_owners = owners.unambiguous or (
(maybe_disambiguated,) if maybe_disambiguated else ()
)
return InjectedDependencies(unambiguous_owners)
# -----------------------------------------------------------------------------------------------
# `python_distribution` rules
# -----------------------------------------------------------------------------------------------
class InjectPythonDistributionDependencies(InjectDependenciesRequest):
inject_for = PythonDistributionDependencies
@rule
async def inject_python_distribution_dependencies(
request: InjectPythonDistributionDependencies,
) -> InjectedDependencies:
"""Inject any `.with_binaries()` values, as it would be redundant to have to include in the
`dependencies` field."""
original_tgt = await Get(WrappedTarget, Address, request.dependencies_field.address)
with_binaries = original_tgt.target[PythonProvidesField].value.binaries
if not with_binaries:
return InjectedDependencies()
# Note that we don't validate that these are all `pex_binary` targets; we don't care about
# that here. `setup_py.py` will do that validation.
addresses = await Get(
Addresses,
UnparsedAddressInputs(
with_binaries.values(), owning_address=request.dependencies_field.address
),
)
return InjectedDependencies(addresses)
def rules():
return (
*collect_rules(),
*import_rules(),
UnionRule(InjectDependenciesRequest, InjectPexBinaryEntryPointDependency),
UnionRule(InjectDependenciesRequest, InjectPythonDistributionDependencies),
)
| # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Rules for the core Python target types.
This is a separate module to avoid circular dependencies. Note that all types used by call sites are
defined in `target_types.py`.
"""
import dataclasses
import os.path
from pants.backend.python.dependency_inference.module_mapper import PythonModule, PythonModuleOwners
from pants.backend.python.dependency_inference.rules import PythonInferSubsystem, import_rules
from pants.backend.python.target_types import (
PexBinaryDependencies,
PexEntryPointField,
PythonDistributionDependencies,
PythonProvidesField,
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest,
)
from pants.engine.addresses import Address, Addresses, UnparsedAddressInputs
from pants.engine.fs import GlobMatchErrorBehavior, PathGlobs, Paths
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
ExplicitlyProvidedDependencies,
InjectDependenciesRequest,
InjectedDependencies,
InvalidFieldException,
WrappedTarget,
)
from pants.engine.unions import UnionRule
from pants.source.source_root import SourceRoot, SourceRootRequest
# -----------------------------------------------------------------------------------------------
# `pex_binary` rules
# -----------------------------------------------------------------------------------------------
@rule(desc="Determining the entry point for a `pex_binary` target")
async def resolve_pex_entry_point(request: ResolvePexEntryPointRequest) -> ResolvedPexEntryPoint:
ep_val = request.entry_point_field.value
address = request.entry_point_field.address
# We support several different schemes:
# 1) `<none>` or `<None>` => set to `None`.
# 2) `path.to.module` => preserve exactly.
# 3) `path.to.module:func` => preserve exactly.
# 4) `app.py` => convert into `path.to.app`.
# 5) `app.py:func` => convert into `path.to.app:func`.
# Case #1.
if ep_val.module in ("<none>", "<None>"):
return ResolvedPexEntryPoint(None)
# If it's already a module (cases #2 and #3), simply use that. Otherwise, convert the file name
# into a module path (cases #4 and #5).
if not ep_val.module.endswith(".py"):
return ResolvedPexEntryPoint(ep_val)
# Use the engine to validate that the file exists and that it resolves to only one file.
full_glob = os.path.join(address.spec_path, ep_val.module)
entry_point_paths = await Get(
Paths,
PathGlobs(
[full_glob],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin=f"{address}'s `{request.entry_point_field.alias}` field",
),
)
# We will have already raised if the glob did not match, i.e. if there were no files. But
# we need to check if they used a file glob (`*` or `**`) that resolved to >1 file.
if len(entry_point_paths.files) != 1:
raise InvalidFieldException(
f"Multiple files matched for the `{request.entry_point_field.alias}` "
f"{ep_val.spec!r} for the target {address}, but only one file expected. Are you using "
f"a glob, rather than a file name?\n\n"
f"All matching files: {list(entry_point_paths.files)}."
)
entry_point_path = entry_point_paths.files[0]
source_root = await Get(
SourceRoot,
SourceRootRequest,
SourceRootRequest.for_file(entry_point_path),
)
stripped_source_path = os.path.relpath(entry_point_path, source_root.path)
module_base, _ = os.path.splitext(stripped_source_path)
normalized_path = module_base.replace(os.path.sep, ".")
return ResolvedPexEntryPoint(dataclasses.replace(ep_val, module=normalized_path))
class InjectPexBinaryEntryPointDependency(InjectDependenciesRequest):
inject_for = PexBinaryDependencies
@rule(desc="Inferring dependency from the pex_binary `entry_point` field")
async def inject_pex_binary_entry_point_dependency(
request: InjectPexBinaryEntryPointDependency, python_infer_subsystem: PythonInferSubsystem
) -> InjectedDependencies:
if not python_infer_subsystem.entry_points:
return InjectedDependencies()
original_tgt = await Get(WrappedTarget, Address, request.dependencies_field.address)
explicitly_provided_deps, entry_point = await MultiGet(
Get(ExplicitlyProvidedDependencies, DependenciesRequest(original_tgt.target[Dependencies])),
Get(
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest(original_tgt.target[PexEntryPointField]),
),
)
if entry_point.val is None:
return InjectedDependencies()
owners = await Get(PythonModuleOwners, PythonModule(entry_point.val.module))
address = original_tgt.target.address
explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
owners.ambiguous,
address,
import_reference="module",
context=(
f"The pex_binary target {address} has the field "
f"`entry_point={repr(original_tgt.target[PexEntryPointField].value.spec)}`, which "
f"maps to the Python module `{entry_point.val.module}`"
),
)
maybe_disambiguated = explicitly_provided_deps.disambiguated_via_ignores(owners.ambiguous)
unambiguous_owners = owners.unambiguous or (
(maybe_disambiguated,) if maybe_disambiguated else ()
)
return InjectedDependencies(unambiguous_owners)
# -----------------------------------------------------------------------------------------------
# `python_distribution` rules
# -----------------------------------------------------------------------------------------------
class InjectPythonDistributionDependencies(InjectDependenciesRequest):
inject_for = PythonDistributionDependencies
@rule
async def inject_python_distribution_dependencies(
request: InjectPythonDistributionDependencies,
) -> InjectedDependencies:
"""Inject any `.with_binaries()` values, as it would be redundant to have to include in the
`dependencies` field."""
original_tgt = await Get(WrappedTarget, Address, request.dependencies_field.address)
with_binaries = original_tgt.target[PythonProvidesField].value.binaries
if not with_binaries:
return InjectedDependencies()
# Note that we don't validate that these are all `pex_binary` targets; we don't care about
# that here. `setup_py.py` will do that validation.
addresses = await Get(
Addresses,
UnparsedAddressInputs(
with_binaries.values(), owning_address=request.dependencies_field.address
),
)
return InjectedDependencies(addresses)
def rules():
return (
*collect_rules(),
*import_rules(),
UnionRule(InjectDependenciesRequest, InjectPexBinaryEntryPointDependency),
UnionRule(InjectDependenciesRequest, InjectPythonDistributionDependencies),
) | en | 0.707392 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). Rules for the core Python target types. This is a separate module to avoid circular dependencies. Note that all types used by call sites are defined in `target_types.py`. # ----------------------------------------------------------------------------------------------- # `pex_binary` rules # ----------------------------------------------------------------------------------------------- # We support several different schemes: # 1) `<none>` or `<None>` => set to `None`. # 2) `path.to.module` => preserve exactly. # 3) `path.to.module:func` => preserve exactly. # 4) `app.py` => convert into `path.to.app`. # 5) `app.py:func` => convert into `path.to.app:func`. # Case #1. # If it's already a module (cases #2 and #3), simply use that. Otherwise, convert the file name # into a module path (cases #4 and #5). # Use the engine to validate that the file exists and that it resolves to only one file. # We will have already raised if the glob did not match, i.e. if there were no files. But # we need to check if they used a file glob (`*` or `**`) that resolved to >1 file. # ----------------------------------------------------------------------------------------------- # `python_distribution` rules # ----------------------------------------------------------------------------------------------- Inject any `.with_binaries()` values, as it would be redundant to have to include in the `dependencies` field. # Note that we don't validate that these are all `pex_binary` targets; we don't care about # that here. `setup_py.py` will do that validation. | 1.916352 | 2 |
reskit/test/test_CosmoSource.py | r-beer/RESKit | 0 | 6632221 | def test___init__():
# (s, source, bounds=None, indexPad=0, **kwargs):
print( "__init__ not tested...")
def test_loc2Index():
# (s, loc, outsideOkay=False, asInt=True):
print( "loc2Index not tested...")
def test_loadRadiation():
# (s):
print( "loadRadiation not tested...")
def test_loadWindSpeedLevels():
# (s):
print( "loadWindSpeedLevels not tested...")
def test_loadWindSpeedAtHeight():
# (s, height=100):
print( "loadWindSpeedAtHeight not tested...")
def test_loadTemperature():
# (s, processor=lambda x: x-273.15):
print( "loadTemperature not tested...")
def test_loadPressure():
# (s):
print( "loadPressure not tested...")
def test_loadSet_PV():
# (s):
print( "loadSet_PV not tested...")
def test_getWindSpeedAtHeights():
# (s, locations, heights, spatialInterpolation='near', forceDataFrame=False, outsideOkay=False, _indicies=None):
print( "getWindSpeedAtHeights not tested...")
if __name__ == "__main__":
test___init__()
test_loc2Index()
test_loadRadiation()
test_loadWindSpeedLevels()
test_loadWindSpeedAtHeight()
test_loadTemperature()
test_loadPressure()
test_loadSet_PV()
test_getWindSpeedAtHeights() | def test___init__():
# (s, source, bounds=None, indexPad=0, **kwargs):
print( "__init__ not tested...")
def test_loc2Index():
# (s, loc, outsideOkay=False, asInt=True):
print( "loc2Index not tested...")
def test_loadRadiation():
# (s):
print( "loadRadiation not tested...")
def test_loadWindSpeedLevels():
# (s):
print( "loadWindSpeedLevels not tested...")
def test_loadWindSpeedAtHeight():
# (s, height=100):
print( "loadWindSpeedAtHeight not tested...")
def test_loadTemperature():
# (s, processor=lambda x: x-273.15):
print( "loadTemperature not tested...")
def test_loadPressure():
# (s):
print( "loadPressure not tested...")
def test_loadSet_PV():
# (s):
print( "loadSet_PV not tested...")
def test_getWindSpeedAtHeights():
# (s, locations, heights, spatialInterpolation='near', forceDataFrame=False, outsideOkay=False, _indicies=None):
print( "getWindSpeedAtHeights not tested...")
if __name__ == "__main__":
test___init__()
test_loc2Index()
test_loadRadiation()
test_loadWindSpeedLevels()
test_loadWindSpeedAtHeight()
test_loadTemperature()
test_loadPressure()
test_loadSet_PV()
test_getWindSpeedAtHeights() | en | 0.74998 | # (s, source, bounds=None, indexPad=0, **kwargs): # (s, loc, outsideOkay=False, asInt=True): # (s): # (s): # (s, height=100): # (s, processor=lambda x: x-273.15): # (s): # (s): # (s, locations, heights, spatialInterpolation='near', forceDataFrame=False, outsideOkay=False, _indicies=None): | 2.254461 | 2 |
hdf2mic/writer_dri.py | ralph0101/hdf2mic-converter | 1 | 6632222 | # -*- coding: utf-8 -*-
r"""
This module contains the MICRESS input driving file writer for the script hdf2mic.py.
"""
import os
import re
from hdf2mic.data import *
from hdf2mic.writer import *
from hdf2mic.arg_mapping import ArgMap_settingsOutputDri
class DriWriter(Writer):
"""
Reads a tagged MICRESS driving template file, replaces tags and writes result.
Tags in the template are of the form <mytagname>.
Notes
-----
Use with a context manager (see example).
Examples
--------
Write to file.
>>> #...
>>> data = reader.data
>>> writer = DriWriter(ouputfile_txt)
>>> with writer as f:
... writer.write(data)
"""
def __init__(self, filepath):
Writer.__init__(self, filepath)
self.template = None
self._msg_warn_tag = "Warning: dri template is missing tag: {}."
self._msg_err_outfile = "Error: driving template file requires output file {}, but was not found. Abort."
self._inputfile_template = None
def _replace(self, tag, replacement):
match = re.match('(<.*>)', tag)
if not match or len(match.groups()) != 1:
if not tag:
print("Warning: tag not specified for:\n\treplacement = {}\n\tin template = {}."
.format(replacement, self._inputfile_template))
else:
print("Warning: tag '{}' does not comply with recommended format '<mytagname>' for "
"\n\treplacement = {}".format(tag, replacement))
if tag in self.template:
self.template = self.template.replace(tag, str(replacement))
else:
print(self._msg_warn_tag.format(tag))
def write(self, data, settings, verbose, inputfile_template, f_vtk='', f_txt=''):
"""
Parameters
----------
data : Data
hdf2mic Data object
settings : ArgMap_settingsOutputDri
verbose : bool
inputfile_template : str
the hdf2mic input MICRESS driving template file
f_vtk : str
the optional hdf2mic output vtk file
f_txt : str
the optional hdf2mic output txt file (grain properties)
Raises
------
AttributeError
If not called inside a context manager.
"""
super(DriWriter, self).write(data, settings, verbose)
self._inputfile_template = inputfile_template
# check output files existence
# os.path.isfile(filename_txt)
# os.path.isfile(filename_vtk)
# read driving template file
template = ""
try:
with open(inputfile_template, 'r') as f_t:
template = f_t.read()
if not template:
raise Hdf2Mic_DrivingFileTaggingError(
"Dri_writer: processing driving template file was implied, "
"but template file {} was empty."
.format(inputfile_template)
)
else:
self.template = template
# replace implicit tags:
# regarding TAG_CELLS: DRI requires #cells, not #nodes,
# so decrement by one.
for i in range(data.dim):
self._replace(Data.DRI_TAG_CELLS[i],
str(data.dimensions[i] - 1))
self._replace(Data.DRI_TAG_SPACING,
data.spacing[0])
# DEVNOTE: tag distinct-phases deactivated in v0.5
# as per request by RA
# self._replace(Data.INPUT_DRI_TAG_PHASES,
# len(np.unique(data.phases)))
if f_txt:
if (os.path.isfile(f_txt)):
if self.settings.makePathsAbsolute:
f_txt = os.path.abspath(f_txt)
self._replace(Data.DRI_TAG_GRAIN_PROPERTIES, f_txt)
else:
raise Hdf2Mic_DrivingFileTaggingError(
self._msg_err_outfile.format(f_txt))
# replace user-defined tags
if f_vtk:
if (os.path.isfile(f_vtk)):
for tupl in data.celldata:
# from Data.celldata:
# tuples have either length 5 or 7
dataName = tupl[1]
tag = tupl[-1]
# replacement:
fieldArrayName = ""
if (len(tupl) == 7):
fieldArrayName = tupl[-2]
if self.settings.makePathsAbsolute:
f_vtk = os.path.abspath(f_vtk)
replacement = f_vtk + " " + dataName
if fieldArrayName:
replacement += " " + fieldArrayName
self._replace(tag, replacement)
else:
raise Hdf2Mic_DrivingFileTaggingError(
self._msg_err_outfile.format(f_vtk))
# save formatted driving template file
self._f.write(self.template)
except IOError as e:
print("ERROR: could not find or open input file \'{}\'.".format(inputfile_template))
| # -*- coding: utf-8 -*-
r"""
This module contains the MICRESS input driving file writer for the script hdf2mic.py.
"""
import os
import re
from hdf2mic.data import *
from hdf2mic.writer import *
from hdf2mic.arg_mapping import ArgMap_settingsOutputDri
class DriWriter(Writer):
"""
Reads a tagged MICRESS driving template file, replaces tags and writes result.
Tags in the template are of the form <mytagname>.
Notes
-----
Use with a context manager (see example).
Examples
--------
Write to file.
>>> #...
>>> data = reader.data
>>> writer = DriWriter(ouputfile_txt)
>>> with writer as f:
... writer.write(data)
"""
def __init__(self, filepath):
Writer.__init__(self, filepath)
self.template = None
self._msg_warn_tag = "Warning: dri template is missing tag: {}."
self._msg_err_outfile = "Error: driving template file requires output file {}, but was not found. Abort."
self._inputfile_template = None
def _replace(self, tag, replacement):
match = re.match('(<.*>)', tag)
if not match or len(match.groups()) != 1:
if not tag:
print("Warning: tag not specified for:\n\treplacement = {}\n\tin template = {}."
.format(replacement, self._inputfile_template))
else:
print("Warning: tag '{}' does not comply with recommended format '<mytagname>' for "
"\n\treplacement = {}".format(tag, replacement))
if tag in self.template:
self.template = self.template.replace(tag, str(replacement))
else:
print(self._msg_warn_tag.format(tag))
def write(self, data, settings, verbose, inputfile_template, f_vtk='', f_txt=''):
"""
Parameters
----------
data : Data
hdf2mic Data object
settings : ArgMap_settingsOutputDri
verbose : bool
inputfile_template : str
the hdf2mic input MICRESS driving template file
f_vtk : str
the optional hdf2mic output vtk file
f_txt : str
the optional hdf2mic output txt file (grain properties)
Raises
------
AttributeError
If not called inside a context manager.
"""
super(DriWriter, self).write(data, settings, verbose)
self._inputfile_template = inputfile_template
# check output files existence
# os.path.isfile(filename_txt)
# os.path.isfile(filename_vtk)
# read driving template file
template = ""
try:
with open(inputfile_template, 'r') as f_t:
template = f_t.read()
if not template:
raise Hdf2Mic_DrivingFileTaggingError(
"Dri_writer: processing driving template file was implied, "
"but template file {} was empty."
.format(inputfile_template)
)
else:
self.template = template
# replace implicit tags:
# regarding TAG_CELLS: DRI requires #cells, not #nodes,
# so decrement by one.
for i in range(data.dim):
self._replace(Data.DRI_TAG_CELLS[i],
str(data.dimensions[i] - 1))
self._replace(Data.DRI_TAG_SPACING,
data.spacing[0])
# DEVNOTE: tag distinct-phases deactivated in v0.5
# as per request by RA
# self._replace(Data.INPUT_DRI_TAG_PHASES,
# len(np.unique(data.phases)))
if f_txt:
if (os.path.isfile(f_txt)):
if self.settings.makePathsAbsolute:
f_txt = os.path.abspath(f_txt)
self._replace(Data.DRI_TAG_GRAIN_PROPERTIES, f_txt)
else:
raise Hdf2Mic_DrivingFileTaggingError(
self._msg_err_outfile.format(f_txt))
# replace user-defined tags
if f_vtk:
if (os.path.isfile(f_vtk)):
for tupl in data.celldata:
# from Data.celldata:
# tuples have either length 5 or 7
dataName = tupl[1]
tag = tupl[-1]
# replacement:
fieldArrayName = ""
if (len(tupl) == 7):
fieldArrayName = tupl[-2]
if self.settings.makePathsAbsolute:
f_vtk = os.path.abspath(f_vtk)
replacement = f_vtk + " " + dataName
if fieldArrayName:
replacement += " " + fieldArrayName
self._replace(tag, replacement)
else:
raise Hdf2Mic_DrivingFileTaggingError(
self._msg_err_outfile.format(f_vtk))
# save formatted driving template file
self._f.write(self.template)
except IOError as e:
print("ERROR: could not find or open input file \'{}\'.".format(inputfile_template))
| en | 0.591926 | # -*- coding: utf-8 -*- This module contains the MICRESS input driving file writer for the script hdf2mic.py. Reads a tagged MICRESS driving template file, replaces tags and writes result. Tags in the template are of the form <mytagname>. Notes ----- Use with a context manager (see example). Examples -------- Write to file. >>> #... >>> data = reader.data >>> writer = DriWriter(ouputfile_txt) >>> with writer as f: ... writer.write(data) Parameters ---------- data : Data hdf2mic Data object settings : ArgMap_settingsOutputDri verbose : bool inputfile_template : str the hdf2mic input MICRESS driving template file f_vtk : str the optional hdf2mic output vtk file f_txt : str the optional hdf2mic output txt file (grain properties) Raises ------ AttributeError If not called inside a context manager. # check output files existence # os.path.isfile(filename_txt) # os.path.isfile(filename_vtk) # read driving template file # replace implicit tags: # regarding TAG_CELLS: DRI requires #cells, not #nodes, # so decrement by one. # DEVNOTE: tag distinct-phases deactivated in v0.5 # as per request by RA # self._replace(Data.INPUT_DRI_TAG_PHASES, # len(np.unique(data.phases))) # replace user-defined tags # from Data.celldata: # tuples have either length 5 or 7 # replacement: # save formatted driving template file | 2.842708 | 3 |
camera-test.py | AluminatiFRC/Vision2016 | 4 | 6632223 | <gh_stars>1-10
import cv2
cap = cv2.VideoCapture(0)
print "OpenCV version: " + cv2.__version__
while(True):
ret, frame = cap.read()
if ret:
cv2.imshow('source', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| import cv2
cap = cv2.VideoCapture(0)
print "OpenCV version: " + cv2.__version__
while(True):
ret, frame = cap.read()
if ret:
cv2.imshow('source', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows() | none | 1 | 2.889409 | 3 |
|
hwt/synthesizer/rtlLevel/signalUtils/ops.py | mgielda/hwt | 0 | 6632224 | <reponame>mgielda/hwt
from hwt.doc_markers import internal
from hwt.hdl.assignment import Assignment
from hwt.hdl.operatorDefs import AllOps
from hwt.hdl.types.defs import BOOL
from hwt.hdl.types.sliceUtils import slice_to_SLICE
from hwt.hdl.types.typeCast import toHVal
from hwt.synthesizer.exceptions import TypeConversionErr
from hwt.synthesizer.interfaceLevel.mainBases import InterfaceBase
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
from hwt.synthesizer.rtlLevel.signalUtils.exceptions import MultipleDriversErr,\
NoDriverErr
def tv(signal):
"""
Value class for type of signal
"""
return signal._dtype.getValueCls()
class RtlSignalOps():
"""
Definitions of operators and other operator functions for RtlSignal
:ivar _usedOps: cache for expressions with this signal
"""
def _auto_cast(self, toT):
return self._dtype.auto_cast(self, toT)
def _reinterpret_cast(self, toT):
return self._dtype.reinterpret_cast(self, toT)
@internal
def naryOp(self, operator, opCreateDelegate, *otherOps) -> RtlSignalBase:
"""
Try lookup operator with this parameters in _usedOps
if not found create new one and soter it in _usedOps
:param operator: instance of OpDefinition
:param opCreateDelegate: function (*ops) to create operator
:param otherOps: other operands (ops = self + otherOps)
:return: RtlSignal which is result of newly created operator
"""
k = (operator, *otherOps)
used = self._usedOps
try:
return used[k]
except KeyError:
pass
o = opCreateDelegate(self, *otherOps)
# input operads may be type converted,
# search if this happend, and return always same result signal
try:
op_instanciated = (o.origin.operator == operator
and o.origin.operands[0] is self)
except AttributeError:
op_instanciated = False
if op_instanciated:
k_real = (operator, *o.origin.operands[1:])
real_o = used.get(k_real, None)
if real_o is not None:
# destroy newly created operator and result, because it is same
# as
ctx = self.ctx
if ctx is not None:
ctx.signals.remove(o)
op = o.origin
o.origin = None
o.drivers.clear()
for inp in op.operands:
if isinstance(inp, RtlSignalBase):
inp.endpoints.remove(op)
o = real_o
else:
used[k_real] = o
used[k] = o
return o
def __invert__(self):
return self.naryOp(AllOps.NOT, tv(self).__invert__)
def _onRisingEdge(self, now=None):
return self.naryOp(AllOps.RISING_EDGE, tv(self)._onRisingEdge, now)
def _onFallingEdge(self, now=None):
return self.naryOp(AllOps.FALLING_EDGE, tv(self)._onFallingEdge, now)
def _isOn(self):
return self._auto_cast(BOOL)
# conversions
def _convSign(self, signed):
return tv(self)._convSign(self, signed)
def _signed(self):
return tv(self)._signed(self)
def _unsigned(self):
return tv(self)._unsigned(self)
def _vec(self):
return tv(self)._vec(self)
# logic
def __and__(self, other):
return self.naryOp(AllOps.AND, tv(self).__and__, other)
def __xor__(self, other):
return self.naryOp(AllOps.XOR, tv(self).__xor__, other)
def __or__(self, other):
return self.naryOp(AllOps.OR, tv(self).__or__, other)
# cmp
def _eq(self, other):
"""
__eq__ is not overloaded because it will destroy hashability of object
"""
return self.naryOp(AllOps.EQ, tv(self)._eq, other)
def __ne__(self, other):
return self.naryOp(AllOps.NEQ, tv(self).__ne__, other)
def __ge__(self, other):
return self.naryOp(AllOps.GE, tv(self).__ge__, other)
def __gt__(self, other):
return self.naryOp(AllOps.GT, tv(self).__gt__, other)
def __lt__(self, other):
return self.naryOp(AllOps.LT, tv(self).__lt__, other)
def __le__(self, other):
return self.naryOp(AllOps.LE, tv(self).__le__, other)
# arithmetic
def __add__(self, other):
return self.naryOp(AllOps.ADD, tv(self).__add__, other)
def __sub__(self, other):
return self.naryOp(AllOps.SUB, tv(self).__sub__, other)
def __mul__(self, other):
return self.naryOp(AllOps.MUL, tv(self).__mul__, other)
def __floordiv__(self, divider):
return self.naryOp(AllOps.DIV, tv(self).__floordiv__, divider)
# selections
def _downto(self, to):
return self.naryOp(AllOps.DOWNTO, tv(self)._downto, to)
def __getitem__(self, key):
if isinstance(key, slice):
key = slice_to_SLICE(key, self._dtype.bit_length())
return self.naryOp(AllOps.INDEX, tv(self).__getitem__, key)
def _concat(self, *operands):
return self.naryOp(AllOps.CONCAT, tv(self)._concat, *operands)
def _ternary(self, ifTrue, ifFalse):
return self.naryOp(AllOps.TERNARY, tv(self)._ternary, ifTrue, ifFalse)
@internal
def _getIndexCascade(self):
"""
Find out if this signal is something indexed
"""
try:
# now I am result of the index xxx[xx] <= source
# get index op
d = self.singleDriver()
try:
op = d.operator
except AttributeError:
return
if op == AllOps.INDEX:
# get signal on which is index applied
indexedOn = d.operands[0]
if isinstance(indexedOn, RtlSignalBase):
# [TODO] multidimensional indexing
return indexedOn, [d.operands[1]]
else:
raise Exception(
"can not drive static value %r" % indexedOn)
except (MultipleDriversErr, NoDriverErr):
pass
def __call__(self, source) -> Assignment:
"""
Create assignment to this signal
:attention: it is not call of function it is operator of assignment
:return: list of assignments
"""
if isinstance(source, InterfaceBase):
assert source._isAccessible
source = source._sig
if source is None:
source = self._dtype.fromPy(None)
else:
source = toHVal(source, suggestedType=self._dtype)
err = False
try:
source = source._auto_cast(self._dtype)
except TypeConversionErr:
err = True
if err:
raise TypeConversionErr(
("Can not connect %r (of type %r) to %r "
"(of type %r) due type incompatibility")
% (source, source._dtype, self, self._dtype))
tmp = self._getIndexCascade()
if tmp:
mainSig, indexCascade = tmp
self = mainSig
else:
indexCascade = None
# self = self._tryMyIndexToEndpoint()
return Assignment(source, self, indexCascade)
def __int__(self):
if not self._const:
raise TypeError("Int value of signal can be evaluated"
" because it is not constant expression:", self)
else:
return int(self._val)
def __bool__(self):
if not self._const:
raise TypeError("Bool value of signal can be evaluated"
" because it is not constant expression:", self)
else:
return bool(self._val)
def _isFullVld(self):
return self._const and self._val._isFullVld()
| from hwt.doc_markers import internal
from hwt.hdl.assignment import Assignment
from hwt.hdl.operatorDefs import AllOps
from hwt.hdl.types.defs import BOOL
from hwt.hdl.types.sliceUtils import slice_to_SLICE
from hwt.hdl.types.typeCast import toHVal
from hwt.synthesizer.exceptions import TypeConversionErr
from hwt.synthesizer.interfaceLevel.mainBases import InterfaceBase
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
from hwt.synthesizer.rtlLevel.signalUtils.exceptions import MultipleDriversErr,\
NoDriverErr
def tv(signal):
"""
Value class for type of signal
"""
return signal._dtype.getValueCls()
class RtlSignalOps():
"""
Definitions of operators and other operator functions for RtlSignal
:ivar _usedOps: cache for expressions with this signal
"""
def _auto_cast(self, toT):
return self._dtype.auto_cast(self, toT)
def _reinterpret_cast(self, toT):
return self._dtype.reinterpret_cast(self, toT)
@internal
def naryOp(self, operator, opCreateDelegate, *otherOps) -> RtlSignalBase:
"""
Try lookup operator with this parameters in _usedOps
if not found create new one and soter it in _usedOps
:param operator: instance of OpDefinition
:param opCreateDelegate: function (*ops) to create operator
:param otherOps: other operands (ops = self + otherOps)
:return: RtlSignal which is result of newly created operator
"""
k = (operator, *otherOps)
used = self._usedOps
try:
return used[k]
except KeyError:
pass
o = opCreateDelegate(self, *otherOps)
# input operads may be type converted,
# search if this happend, and return always same result signal
try:
op_instanciated = (o.origin.operator == operator
and o.origin.operands[0] is self)
except AttributeError:
op_instanciated = False
if op_instanciated:
k_real = (operator, *o.origin.operands[1:])
real_o = used.get(k_real, None)
if real_o is not None:
# destroy newly created operator and result, because it is same
# as
ctx = self.ctx
if ctx is not None:
ctx.signals.remove(o)
op = o.origin
o.origin = None
o.drivers.clear()
for inp in op.operands:
if isinstance(inp, RtlSignalBase):
inp.endpoints.remove(op)
o = real_o
else:
used[k_real] = o
used[k] = o
return o
def __invert__(self):
return self.naryOp(AllOps.NOT, tv(self).__invert__)
def _onRisingEdge(self, now=None):
return self.naryOp(AllOps.RISING_EDGE, tv(self)._onRisingEdge, now)
def _onFallingEdge(self, now=None):
return self.naryOp(AllOps.FALLING_EDGE, tv(self)._onFallingEdge, now)
def _isOn(self):
return self._auto_cast(BOOL)
# conversions
def _convSign(self, signed):
return tv(self)._convSign(self, signed)
def _signed(self):
return tv(self)._signed(self)
def _unsigned(self):
return tv(self)._unsigned(self)
def _vec(self):
return tv(self)._vec(self)
# logic
def __and__(self, other):
return self.naryOp(AllOps.AND, tv(self).__and__, other)
def __xor__(self, other):
return self.naryOp(AllOps.XOR, tv(self).__xor__, other)
def __or__(self, other):
return self.naryOp(AllOps.OR, tv(self).__or__, other)
# cmp
def _eq(self, other):
"""
__eq__ is not overloaded because it will destroy hashability of object
"""
return self.naryOp(AllOps.EQ, tv(self)._eq, other)
def __ne__(self, other):
return self.naryOp(AllOps.NEQ, tv(self).__ne__, other)
def __ge__(self, other):
return self.naryOp(AllOps.GE, tv(self).__ge__, other)
def __gt__(self, other):
return self.naryOp(AllOps.GT, tv(self).__gt__, other)
def __lt__(self, other):
return self.naryOp(AllOps.LT, tv(self).__lt__, other)
def __le__(self, other):
return self.naryOp(AllOps.LE, tv(self).__le__, other)
# arithmetic
def __add__(self, other):
return self.naryOp(AllOps.ADD, tv(self).__add__, other)
def __sub__(self, other):
return self.naryOp(AllOps.SUB, tv(self).__sub__, other)
def __mul__(self, other):
return self.naryOp(AllOps.MUL, tv(self).__mul__, other)
def __floordiv__(self, divider):
return self.naryOp(AllOps.DIV, tv(self).__floordiv__, divider)
# selections
def _downto(self, to):
return self.naryOp(AllOps.DOWNTO, tv(self)._downto, to)
def __getitem__(self, key):
if isinstance(key, slice):
key = slice_to_SLICE(key, self._dtype.bit_length())
return self.naryOp(AllOps.INDEX, tv(self).__getitem__, key)
def _concat(self, *operands):
return self.naryOp(AllOps.CONCAT, tv(self)._concat, *operands)
def _ternary(self, ifTrue, ifFalse):
return self.naryOp(AllOps.TERNARY, tv(self)._ternary, ifTrue, ifFalse)
@internal
def _getIndexCascade(self):
"""
Find out if this signal is something indexed
"""
try:
# now I am result of the index xxx[xx] <= source
# get index op
d = self.singleDriver()
try:
op = d.operator
except AttributeError:
return
if op == AllOps.INDEX:
# get signal on which is index applied
indexedOn = d.operands[0]
if isinstance(indexedOn, RtlSignalBase):
# [TODO] multidimensional indexing
return indexedOn, [d.operands[1]]
else:
raise Exception(
"can not drive static value %r" % indexedOn)
except (MultipleDriversErr, NoDriverErr):
pass
def __call__(self, source) -> Assignment:
"""
Create assignment to this signal
:attention: it is not call of function it is operator of assignment
:return: list of assignments
"""
if isinstance(source, InterfaceBase):
assert source._isAccessible
source = source._sig
if source is None:
source = self._dtype.fromPy(None)
else:
source = toHVal(source, suggestedType=self._dtype)
err = False
try:
source = source._auto_cast(self._dtype)
except TypeConversionErr:
err = True
if err:
raise TypeConversionErr(
("Can not connect %r (of type %r) to %r "
"(of type %r) due type incompatibility")
% (source, source._dtype, self, self._dtype))
tmp = self._getIndexCascade()
if tmp:
mainSig, indexCascade = tmp
self = mainSig
else:
indexCascade = None
# self = self._tryMyIndexToEndpoint()
return Assignment(source, self, indexCascade)
def __int__(self):
if not self._const:
raise TypeError("Int value of signal can be evaluated"
" because it is not constant expression:", self)
else:
return int(self._val)
def __bool__(self):
if not self._const:
raise TypeError("Bool value of signal can be evaluated"
" because it is not constant expression:", self)
else:
return bool(self._val)
def _isFullVld(self):
return self._const and self._val._isFullVld() | en | 0.819578 | Value class for type of signal Definitions of operators and other operator functions for RtlSignal :ivar _usedOps: cache for expressions with this signal Try lookup operator with this parameters in _usedOps if not found create new one and soter it in _usedOps :param operator: instance of OpDefinition :param opCreateDelegate: function (*ops) to create operator :param otherOps: other operands (ops = self + otherOps) :return: RtlSignal which is result of newly created operator # input operads may be type converted, # search if this happend, and return always same result signal # destroy newly created operator and result, because it is same # as # conversions # logic # cmp __eq__ is not overloaded because it will destroy hashability of object # arithmetic # selections Find out if this signal is something indexed # now I am result of the index xxx[xx] <= source # get index op # get signal on which is index applied # [TODO] multidimensional indexing Create assignment to this signal :attention: it is not call of function it is operator of assignment :return: list of assignments # self = self._tryMyIndexToEndpoint() | 1.802101 | 2 |
neo_utils/core.py | Pierre-Thibault/neo-utils | 0 | 6632225 | <filename>neo_utils/core.py
# -*- coding: utf-8 -*-
'''
Fundamental functions and class helpers to support everyday programming.
@author: <NAME> (<EMAIL>re.thibault1 -at- gmail.com)
@license: MIT
@since: 2010-11-10
'''
__docformat__ = "epytext en"
import functools
class Prototype:
"""
An empty class to create objects as prototypes.
Client are free to add properties to instances of this type. I created
this class because we cannot do the same with object type.
"""
def count(predicate, iterable):
"""
Iterate over iterable, pass the value to the predicate predicate and
return the number of times the predicate returns value considered True.
@param predicate: Predicate function.
@param iterable: Iterable containing the elements to count.
@return: The number of true element.
"""
result = 0L
for i in iterable:
if predicate(i):
result += 1
return result
def every(predicate, iterable):
"""
Iterate over iterable, pass the value to the predicate predicate and
return True if all the predicate returns True for all the values.
@param predicate: Predicate function.
@param iterable: Iterable containing the elements to test.
@return: Return True if all the elements are True based on the predicate.
If iterable is empty, it returns True.
"""
for i in iterable:
if not predicate(i):
return False
return True
def inverse_linked_list(list_to_inverse, next_node_property_name="next"):
"""
A linked list inversor. No new nodes are created.
@param list_to_inverse: The linked list to inverse.
@param next_node_property_name: The name of property pointing to the next
node.
@return: The head of the inversed list.
"""
source_current_node = list_to_inverse
dest_head = None
while source_current_node:
old_source_head_next = getattr(source_current_node,
next_node_property_name)
setattr(source_current_node, next_node_property_name, dest_head)
dest_head = source_current_node
source_current_node = old_source_head_next
return dest_head
def negate(function):
"""
Return the opposite function of function.
@param function: The function to negate.
@return: The negated function.
"""
@functools.wraps(function)
def result(*args, **keywords):
return not function(*args, **keywords)
return result
def some(predicate, iterable):
"""
Iterate over iterable, pass the value to the predicate predicate and
return True if the predicate returns True at least one time.
@param predicate: Predicate function.
@param iterable: Iterable containing the elements to test.
@return: Return True if any element is True based on the predicate.
If iterable is empty, it returns False.
"""
for i in iterable:
if predicate(i):
return True
return False
def transform(func, sequence):
"""
Process all the elements in sequence with func and store the result again
in sequence at the same place. Sequence must be mutable.
@param func: Transformation function.
@param sequence: The sequence to process.
"""
for index, value in enumerate(sequence):
sequence[index] = func(value)
| <filename>neo_utils/core.py
# -*- coding: utf-8 -*-
'''
Fundamental functions and class helpers to support everyday programming.
@author: <NAME> (<EMAIL>re.thibault1 -at- gmail.com)
@license: MIT
@since: 2010-11-10
'''
__docformat__ = "epytext en"
import functools
class Prototype:
"""
An empty class to create objects as prototypes.
Client are free to add properties to instances of this type. I created
this class because we cannot do the same with object type.
"""
def count(predicate, iterable):
"""
Iterate over iterable, pass the value to the predicate predicate and
return the number of times the predicate returns value considered True.
@param predicate: Predicate function.
@param iterable: Iterable containing the elements to count.
@return: The number of true element.
"""
result = 0L
for i in iterable:
if predicate(i):
result += 1
return result
def every(predicate, iterable):
"""
Iterate over iterable, pass the value to the predicate predicate and
return True if all the predicate returns True for all the values.
@param predicate: Predicate function.
@param iterable: Iterable containing the elements to test.
@return: Return True if all the elements are True based on the predicate.
If iterable is empty, it returns True.
"""
for i in iterable:
if not predicate(i):
return False
return True
def inverse_linked_list(list_to_inverse, next_node_property_name="next"):
"""
A linked list inversor. No new nodes are created.
@param list_to_inverse: The linked list to inverse.
@param next_node_property_name: The name of property pointing to the next
node.
@return: The head of the inversed list.
"""
source_current_node = list_to_inverse
dest_head = None
while source_current_node:
old_source_head_next = getattr(source_current_node,
next_node_property_name)
setattr(source_current_node, next_node_property_name, dest_head)
dest_head = source_current_node
source_current_node = old_source_head_next
return dest_head
def negate(function):
"""
Return the opposite function of function.
@param function: The function to negate.
@return: The negated function.
"""
@functools.wraps(function)
def result(*args, **keywords):
return not function(*args, **keywords)
return result
def some(predicate, iterable):
"""
Iterate over iterable, pass the value to the predicate predicate and
return True if the predicate returns True at least one time.
@param predicate: Predicate function.
@param iterable: Iterable containing the elements to test.
@return: Return True if any element is True based on the predicate.
If iterable is empty, it returns False.
"""
for i in iterable:
if predicate(i):
return True
return False
def transform(func, sequence):
"""
Process all the elements in sequence with func and store the result again
in sequence at the same place. Sequence must be mutable.
@param func: Transformation function.
@param sequence: The sequence to process.
"""
for index, value in enumerate(sequence):
sequence[index] = func(value)
| en | 0.685306 | # -*- coding: utf-8 -*- Fundamental functions and class helpers to support everyday programming. @author: <NAME> (<EMAIL>re.thibault1 -at- gmail.com) @license: MIT @since: 2010-11-10 An empty class to create objects as prototypes. Client are free to add properties to instances of this type. I created this class because we cannot do the same with object type. Iterate over iterable, pass the value to the predicate predicate and return the number of times the predicate returns value considered True. @param predicate: Predicate function. @param iterable: Iterable containing the elements to count. @return: The number of true element. Iterate over iterable, pass the value to the predicate predicate and return True if all the predicate returns True for all the values. @param predicate: Predicate function. @param iterable: Iterable containing the elements to test. @return: Return True if all the elements are True based on the predicate. If iterable is empty, it returns True. A linked list inversor. No new nodes are created. @param list_to_inverse: The linked list to inverse. @param next_node_property_name: The name of property pointing to the next node. @return: The head of the inversed list. Return the opposite function of function. @param function: The function to negate. @return: The negated function. Iterate over iterable, pass the value to the predicate predicate and return True if the predicate returns True at least one time. @param predicate: Predicate function. @param iterable: Iterable containing the elements to test. @return: Return True if any element is True based on the predicate. If iterable is empty, it returns False. Process all the elements in sequence with func and store the result again in sequence at the same place. Sequence must be mutable. @param func: Transformation function. @param sequence: The sequence to process. | 3.338797 | 3 |
core/models.py | ResearchKernel/search | 2 | 6632226 | <reponame>ResearchKernel/search
from uuid import uuid4
from django.db import models
class BaseModelMixin(models.Model):
created = models.DateTimeField(auto_now_add=True, null=True)
modified = models.DateTimeField(auto_now=True)
id = models.UUIDField(
primary_key=True, default=uuid4, editable=False,
max_length=36, unique=True
)
class Meta:
abstract = True
| from uuid import uuid4
from django.db import models
class BaseModelMixin(models.Model):
created = models.DateTimeField(auto_now_add=True, null=True)
modified = models.DateTimeField(auto_now=True)
id = models.UUIDField(
primary_key=True, default=uuid4, editable=False,
max_length=36, unique=True
)
class Meta:
abstract = True | none | 1 | 2.237287 | 2 |
|
predict.py | TerenceChen95/Retina-Unet-Pytorch | 5 | 6632227 | import torch
from PIL import Image
from torch.autograd import Variable
import numpy as np
from matplotlib import pyplot as plt
from models.net2 import UNET
from torchvision import transforms as transforms
import torch.nn.functional as F
from config import config
from posprocess import rgb2gray, pad_border, recover_overlap, get_data_testing_overlap, clahe_equal, adjust_gamma
from torch.utils.data import DataLoader
import cv2
def prepro(img):
img = cv2.imread(img)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img[9:574,:]
img = clahe_equal(img)
img = adjust_gamma(img)
img = np.expand_dims(img, 0)
img = np.expand_dims(img, 0)
return img
img = '/home/tianshu/unet/data/training/images/22_training.tif'
state = torch.load('./saved/BEST_checkpoint.pth.tar', map_location={'cuda:1':'cuda:0'})['model']
net = UNET(1, 1)
net.load_state_dict(state)
device = torch.device('cuda:0')
#normalize input
img = prepro(img)
stride_h = config['stride_h']
stride_w = config['stride_w']
patch_h = config['patch_height']
patch_w = config['patch_width']
patches_img_test, new_h, new_w = get_data_testing_overlap(img, patch_h, patch_w, stride_h, stride_w)
batch_size = config['batch_size']
#batch_size = 32
test_loader = DataLoader(patches_img_test, batch_size=batch_size, shuffle=False)
net = net.to(device)
net.eval()
outsize = patches_img_test.shape
msks = np.empty((outsize))
activate = torch.nn.Sigmoid()
#patches too large to be put into model all at once
for i, data in enumerate(test_loader):
with torch.no_grad():
data = data.to(device, dtype=torch.float32)
msk = net(data)
msk = activate(msk)
try:
msks[i*batch_size:(i+1)*batch_size] = msk.detach().data.cpu().numpy()
except Exception as e:
print(e)
msks[i*batch_size:] = msk.detach().data.cpu().numpy()
pred_img = recover_overlap(msks, new_h, new_w, stride_h, stride_w)
print(pred_img.shape)
pred_img = pred_img[0][0]
print(np.max(pred_img), np.min(pred_img))
threshold = np.zeros((pred_img.shape))
for j in range(pred_img.shape[1]):
for i in range(pred_img.shape[0]):
if pred_img[i,j] > 0.5:
threshold[i,j] = 1
else:
threshold[i,j] = 0
out_img = Image.fromarray((threshold*255))
plt.figure()
plt.imshow(out_img)
plt.savefig('out.jpg')
| import torch
from PIL import Image
from torch.autograd import Variable
import numpy as np
from matplotlib import pyplot as plt
from models.net2 import UNET
from torchvision import transforms as transforms
import torch.nn.functional as F
from config import config
from posprocess import rgb2gray, pad_border, recover_overlap, get_data_testing_overlap, clahe_equal, adjust_gamma
from torch.utils.data import DataLoader
import cv2
def prepro(img):
img = cv2.imread(img)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img[9:574,:]
img = clahe_equal(img)
img = adjust_gamma(img)
img = np.expand_dims(img, 0)
img = np.expand_dims(img, 0)
return img
img = '/home/tianshu/unet/data/training/images/22_training.tif'
state = torch.load('./saved/BEST_checkpoint.pth.tar', map_location={'cuda:1':'cuda:0'})['model']
net = UNET(1, 1)
net.load_state_dict(state)
device = torch.device('cuda:0')
#normalize input
img = prepro(img)
stride_h = config['stride_h']
stride_w = config['stride_w']
patch_h = config['patch_height']
patch_w = config['patch_width']
patches_img_test, new_h, new_w = get_data_testing_overlap(img, patch_h, patch_w, stride_h, stride_w)
batch_size = config['batch_size']
#batch_size = 32
test_loader = DataLoader(patches_img_test, batch_size=batch_size, shuffle=False)
net = net.to(device)
net.eval()
outsize = patches_img_test.shape
msks = np.empty((outsize))
activate = torch.nn.Sigmoid()
#patches too large to be put into model all at once
for i, data in enumerate(test_loader):
with torch.no_grad():
data = data.to(device, dtype=torch.float32)
msk = net(data)
msk = activate(msk)
try:
msks[i*batch_size:(i+1)*batch_size] = msk.detach().data.cpu().numpy()
except Exception as e:
print(e)
msks[i*batch_size:] = msk.detach().data.cpu().numpy()
pred_img = recover_overlap(msks, new_h, new_w, stride_h, stride_w)
print(pred_img.shape)
pred_img = pred_img[0][0]
print(np.max(pred_img), np.min(pred_img))
threshold = np.zeros((pred_img.shape))
for j in range(pred_img.shape[1]):
for i in range(pred_img.shape[0]):
if pred_img[i,j] > 0.5:
threshold[i,j] = 1
else:
threshold[i,j] = 0
out_img = Image.fromarray((threshold*255))
plt.figure()
plt.imshow(out_img)
plt.savefig('out.jpg')
| en | 0.92714 | #normalize input #batch_size = 32 #patches too large to be put into model all at once | 2.189336 | 2 |
demo/examples/sum.summa/usage.py | YourNorth/rezak-summarizator | 3 | 6632228 | """
NOTE: Здесь описываем базовое использование модуля (без прочих настроек)
"""
from summa.summarizer import summarize
example = """
Automatic summarization is the process of reducing a text document with a \
computer program in order to create a summary that retains the most important points \
of the original document. As the problem of information overload has grown, and as \
the quantity of data has increased, so has interest in automatic summarization. \
Technologies that can make a coherent summary take into account variables such as \
length, writing style and syntax. An example of the use of summarization technology \
is search engines such as Google. Document summarization is another.
"""
summarize(example)
"""
Automatic summarization is the process of reducing a text document with a computer
program in order to create a summary that retains the most important points of the
original document.
"""
| """
NOTE: Здесь описываем базовое использование модуля (без прочих настроек)
"""
from summa.summarizer import summarize
example = """
Automatic summarization is the process of reducing a text document with a \
computer program in order to create a summary that retains the most important points \
of the original document. As the problem of information overload has grown, and as \
the quantity of data has increased, so has interest in automatic summarization. \
Technologies that can make a coherent summary take into account variables such as \
length, writing style and syntax. An example of the use of summarization technology \
is search engines such as Google. Document summarization is another.
"""
summarize(example)
"""
Automatic summarization is the process of reducing a text document with a computer
program in order to create a summary that retains the most important points of the
original document.
"""
| en | 0.844707 | NOTE: Здесь описываем базовое использование модуля (без прочих настроек) Automatic summarization is the process of reducing a text document with a \ computer program in order to create a summary that retains the most important points \ of the original document. As the problem of information overload has grown, and as \ the quantity of data has increased, so has interest in automatic summarization. \ Technologies that can make a coherent summary take into account variables such as \ length, writing style and syntax. An example of the use of summarization technology \ is search engines such as Google. Document summarization is another. Automatic summarization is the process of reducing a text document with a computer program in order to create a summary that retains the most important points of the original document. | 3.959 | 4 |
01-presentation-example/01_simple_open.py | ryansmccoy/201911-spreadsheets-to-dataframes | 28 | 6632229 |
filename = r'data\WMT_US.csv'
f = open(filename, 'r')
print(f)
data = f.read()
print(data)
f.close()
f = open(filename, 'r') # open file
for line in f:
print(line)
f.close() # close file
|
filename = r'data\WMT_US.csv'
f = open(filename, 'r')
print(f)
data = f.read()
print(data)
f.close()
f = open(filename, 'r') # open file
for line in f:
print(line)
f.close() # close file
| en | 0.848327 | # open file # close file | 3.352008 | 3 |
source/CRRMonitor/CRRMonitor.py | AugusYin/aws-crr-monitor-master-GCR | 41 | 6632230 | #!/usr/bin/python
# -*- coding: utf-8 -*-
######################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
from __future__ import print_function
import json
import boto3
from botocore.exceptions import ClientError
import os
from datetime import datetime, timedelta
import urllib.request
def getparm(parmname, defaultval):
    # Read a configuration value from the Lambda environment, falling back to
    # defaultval when the variable is missing (or cannot be parsed as an int
    # when the default is an int). The result is coerced to the default's type.
    try:
        myval = os.environ[parmname]
        print('Environment variable \'' + parmname + '\' = ' + str(myval))
        if isinstance(defaultval, int):
            return int(myval)
        else:
            return myval
    except Exception:
        print('Environment variable \'' + parmname + '\' not found. Using default [' + \
              str(defaultval) + ']')
        return defaultval
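# Example (hypothetical value): if the function's environment sets maxtask=2400,
# getparm('maxtask', 1800) returns the int 2400; with the variable unset it
# falls back to the default 1800.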
# =====================================================================
# Configuration
#
# appname: the names of AWS resources are derived from this. It is not
# recommended that you change this from the default 'CRRMonitor'
appname = getparm('appname', 'CRRMonitor')
# maxtask: Tune this parameter to get the most effective use of a single
# instance of your lambda. It should be roughly 300,000 ms (the 5-minute
# lambda timeout) divided by the average time in ms required to process a
# single SQS record (about 160ms). Example: if it takes an average of 500ms
# to process a single SQS record you would set this to 300 / 0.5 = 600.
# This parameter tells the lambda when to ask for help: if the queue depth
# is > maxtask it will spawn a copy of itself.
maxtask = getparm('maxtask', 1800)
# maxspawn: This parameter limits how many copies of itself the lambda
# can spawn. This should not allow you to exceed your maximum concurrent
# lambda execution limit (default 100). By default the lambda is set
# to execute every minute and time out after 5. With the default maxspawn
# of 20 this will allow up to 100 concurrent lambdas to execute. This should
# allow a capacity of 200 events per second at an average processing time
# of 500ms per event, or 100 CRR replications per second. Scale and
# request limits accordingly.
maxspawn = getparm('maxspawn', 20)
# How long to keep records for completed transfers
purge_thresh = getparm('purge_thresh', 24)
# DEBUG
DEBUG = getparm('debug', 0)
# VERSION_ID: The version of this solution
VERSION_ID = getparm('SolutionVersion', "").strip()
# ANONYMOUS_SOLUTION_ID: An anonymous identifier for this instance of the solution
ANONYMOUS_SOLUTION_ID = getparm('UUID', "").strip()
# SEND_ANONYMOUS_USAGE_METRIC: A flag indicating whether the solution should
# report anonymous usage metrics to AWS
SEND_ANONYMOUS_USAGE_METRIC = (getparm('AnonymousUsage', 'No') == 'Yes')
# Make sure the VERSION_ID and ANONYMOUS_SOLUTION_ID are valid
if VERSION_ID is None or VERSION_ID == "":
SEND_ANONYMOUS_USAGE_METRIC = False
if ANONYMOUS_SOLUTION_ID is None or ANONYMOUS_SOLUTION_ID == "":
SEND_ANONYMOUS_USAGE_METRIC = False
#
# ddbtable and stattable: names of the DynamoDB tables. The tables are
# created in the CloudFormation stack; ddbtable defaults to the value of
# appname. Do not change this without changing the template.
ddbtable = appname
stattable = ddbtable + 'Statistics'
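# e.g. 'CRRMonitor' and 'CRRMonitorStatistics' when appname is left at its default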
# queue: name of the SQS queue. Derived from the appname. The SQS queue
# is created in the CloudFormation template. Do not change this without
# changing the template
queue = appname + 'Queue'
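# e.g. 'CRRMonitorQueue' when appname is left at its default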
# timefmt: used to format timestamps. Do not change.
timefmt = '%Y-%m-%dT%H:%M:%SZ'
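# Example: datetime(2017, 1, 1, 12, 34, 56).strftime(timefmt) -> '2017-01-01T12:34:56Z'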
# client: defines the api client connections to create
client={
'ddb': {'service': 'dynamodb'},
'sqs': {'service': 'sqs'},
'lbd': {'service': 'lambda'}
}
s3client = {} # will hold client handle for s3 per region
initfail = {} # hash of source buckets to handle FAILED counter initialization
# =====================================================================
# connect_clients
# ---------------
# Connect to all the clients. We will do this once per instantiation of
# the Lambda function (not per execution)
# =====================================================================
def connect_clients(clients_to_connect):
for c in clients_to_connect:
try:
if 'region' in clients_to_connect[c]:
clients_to_connect[c]['handle'] = boto3.client(clients_to_connect[c]['service'], region_name=clients_to_connect[c]['region'])
else:
clients_to_connect[c]['handle'] = boto3.client(clients_to_connect[c]['service'])
except Exception as e:
print(e)
print('Error connecting to ' + clients_to_connect[c]['service'])
raise e
return clients_to_connect
def message_handler(event):
def log_statistics(Src, Dst, Tstamp, Size, ET, roundTo):
# -------------------------------------------------------------
# Derive the statistic bucket from source/dest and time bucket
# (5 minute rolling window)
#
statbucket = Src + ':' + Dst
ts = datetime.strptime(Tstamp, timefmt)
secs = (ts.replace(tzinfo=None) - ts.min).seconds
rounding = (secs+roundTo/2) // roundTo * roundTo
ts = ts + timedelta(0, rounding-secs, -ts.microsecond)
timebucket = datetime.strftime(ts, timefmt)
statbucket += ':' + timebucket
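        # Illustrative example (hypothetical values): with Src='src-bkt',
        # Dst='dst-bkt', Tstamp='2017-02-09T13:57:42Z' and roundTo=300, the
        # timestamp rounds to the nearest 5-minute boundary and the statistic
        # key becomes 'src-bkt:dst-bkt:2017-02-09T14:00:00Z'.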
# -------------------------------------------------------------
# Init a dict to use to hold our attrs for DDB
stat_exp_attrs = {}
# -------------------------------------------------------------
# Build the DDB UpdateExpression
stat_update_exp = 'SET timebucket = :t, source_bucket = :o, dest_bucket = :r ADD objects :a, size :c, elapsed :d'
# -------------------------------------------------------------
# push the first attr: s3Object
stat_exp_attrs[':a'] = {'N': '1'}
stat_exp_attrs[':c'] = {'N': Size}
stat_exp_attrs[':d'] = {'N': ET}
stat_exp_attrs[':t'] = {'S': timebucket}
stat_exp_attrs[':o'] = {'S': Src}
stat_exp_attrs[':r'] = {'S': Dst}
# Update the DDB table
try:
response = client['ddb']['handle'].update_item(
TableName=stattable,
Key={'OriginReplicaBucket': {'S': statbucket}},
UpdateExpression=stat_update_exp,
ExpressionAttributeValues=stat_exp_attrs)
except Exception as e:
print(e)
print('Table ' + stattable + ' update failed')
raise e
# Initialize a counter for failed replications for the source bucket
        if Src not in initfail:
initfail[Src] = 'foo'
if Dst != 'FAILED' and initfail[Src] != timebucket:
print('Initializing FAILED bucket for ' + Src + ':' + timebucket)
statbucket = Src + ':FAILED:' + timebucket
stat_exp_attrs = {}
# -------------------------------------------------------------
# Build the DDB UpdateExpression
stat_update_exp = 'SET timebucket = :t, source_bucket = :o, dest_bucket = :r ADD objects :a, size :c, elapsed :d'
# -------------------------------------------------------------
# push the first attr: s3Object
stat_exp_attrs[':a'] = {'N': '0'}
stat_exp_attrs[':c'] = {'N': '1'}
stat_exp_attrs[':d'] = {'N': '1'}
stat_exp_attrs[':t'] = {'S': timebucket}
stat_exp_attrs[':o'] = {'S': Src}
stat_exp_attrs[':r'] = {'S': 'FAILED'}
try:
response = client['ddb']['handle'].update_item(
TableName=stattable,
Key={'OriginReplicaBucket': {'S': statbucket }},
UpdateExpression=stat_update_exp,
ExpressionAttributeValues=stat_exp_attrs)
initfail[Src] = timebucket
except Exception as e:
print(e)
print('Table ' + stattable + ' update failed')
raise e
#print('Stats written to ' + statbucket)
# So this will work with CloudWatch Events directly or via SNS, let's look
# at the structure of the incoming JSON. Note that this has not been
# tested with CloudWatch events directly, but should be a simple matter.
# I kept the code here as it adds no overhead but is a solid flexible
# example.
#
# A Cloudwatch Event looks like event[event json]
# An SNS notification looks like event['Records'][0][event json]
# print("Received raw event: " + json.dumps(event, indent=2))
# Create a reference in evdata that points to the correct element in the
# event dictionary
if 'detail-type' in event:
evdata = event
elif 'Records' in event:
# An SNS notification will have another layer in the dict. Look for
# EventSource = aws:sns. Otherwise generate an exception and get out.
if event['Records'][0]['EventSource'] == 'aws:sns':
#print('Message is ' + event['Records'][0]['Sns']['Message'])
evdata = json.loads(event['Records'][0]['Sns']['Message'])
#print("Message event: " + json.dumps(evdata, indent=2))
else:
# Unrecognized event format: uncomment print statements to
# identify the format and enhance this logic. At the end of
# the day, evdata must contain the dict for the event record
# of the Cloudwatch log event for the S3 update notification
print('Error: unrecognized event format received')
raise Exception('Unrecognized event format')
elif 'MessageId' in event:
evdata = json.loads(event['Message'])
else:
evdata = event
if DEBUG > 1:
print(json.dumps(evdata))
#-----------------------------------------------------------------
# Quietly ignore all but PutObject
#
if evdata['detail']['eventName'] != 'PutObject':
if DEBUG > 0:
print('Ignoring ' + evdata['detail']['eventName'] + ' event')
return
#-----------------------------------------------------------------
#
# Collect the data we want for the DynamoDB table
#
region = evdata['region']
bucket = evdata['detail']['requestParameters']['bucketName']
key = evdata['detail']['requestParameters']['key']
# This timestamp is from the CW Event record and is most accurate
now = evdata['detail']['eventTime']
# Init a dict to use to hold our attrs for DDB
ddb_exp_attrs = {}
    # Build the DDB UpdateExpression
ddb_update_exp = 'set s3Object = :a'
# push the first attr: s3Object
ddb_exp_attrs[':a'] = {'S': key}
# establish s3 client per region, but only once.
    if region not in s3client:
s3client[region] = boto3.client('s3', region)
# -----------------------------------------------------------------
# Do a head_object. If the object no longer exists just return.
#
try:
response = s3client[region].head_object(
Bucket=bucket,
Key=key
)
except ClientError as e:
# { "Error": {
# "Code": "403",
# "Message": "Forbidden"
# },
# "ResponseMetadata": {
# "RequestId": "B7C8873E3C067128",
# "HostId": "kYARs5PKMuah57ewyzYq6l5laO4xu9fcWFYVnEPLMHeqNSF4yLhrYIhbbUT0Tw7hp3f2PgCQO9E=",
# "HTTPStatusCode": 403,
# "HTTPHeaders": {
# "x-amz-request-id": "B7C8873E3C067128",
# "x-amz-id-2": "<KEY>
# "content-type": "application/xml",
# "transfer-encoding": "chunked",
# "date": "Tue, 25 Sep 2018 11:58:48 GMT",
# "server": "AmazonS3"
# },
# "RetryAttempts": 0
# }
# }
if e.response['Error']['Code'] == '403':
print('IGNORING: CRRMonitor does not have access to Object - ' + \
evdata['detail']['requestParameters']['bucketName'] + '/' + \
evdata['detail']['requestParameters']['key'])
elif e.response['Error']['Code'] == '404':
print('IGNORING: Object no longer exists - ' + \
evdata['detail']['requestParameters']['bucketName'] + '/' + \
evdata['detail']['requestParameters']['key'])
else:
# Need to improve this to recognize specifically a 404
print('Unhandled ClientError ' + str(e))
print(json.dumps(e.response))
#print('Removing from queue / ignoring')
return
except Exception as e:
# Need to improve this to recognize specifically a 404
        print('Unhandled Exception ' + str(e))
print('Removing from queue / ignoring')
return
# 2) check that the x-amz-replication-status header is present
# response['ResponseMetadata']['HTTPHeaders']['x-amz-replication-status']
#
# Note that this function is only called when an object is written. Assume that
# the object was written and the x-amz-replication-status is a final status for
# this object in this bucket. So, if it is the source it can be COMPLETED, PENDING,
# or FAILED. If it is the replica it can only be REPLICA.
#
# That in mind, the update date/time for the REPLICA will always be definitive for
# the end_datetime column
#
# Conversely, the source object is always definitive for the start_datetime.
#
# Code must not assume that the events (source and dest) are processed in the correct
# order. Any process consuming the DynamoDB table should do their own Elapsed Time
# calculation.
#
# Reference the dict we want for clarity in the code
headers = response['ResponseMetadata']['HTTPHeaders']
# If this object has no x-amz-replication-status header then we can leave
if 'x-amz-replication-status' not in headers:
# This is not a replicated object - get out
if DEBUG > 0:
print('Not a replicated object')
return()
# repstatus is a pointer to the headers (for code clarity)
repstatus = headers['x-amz-replication-status']
# -----------------------------------------------------------------
# Verify that the DynamoDB table exists. Note: we could create it
# but that takes so long that the lambda function may time out.
# Better to create it in the CFn template and handle this as a
# failure condition
#
try:
response = client['ddb']['handle'].describe_table(
TableName=ddbtable
)
except Exception as e:
print(e)
print('Table ' + ddbtable + ' does not exist - need to create it')
raise e
# Update object size
objsize = headers['content-length']
ddb_update_exp += ', ObjectSize = :s'
ddb_exp_attrs[':s'] = {'N': objsize}
ETag = {'S': headers['etag'][1:-1] + ':' + headers['x-amz-version-id'][1:-1]}
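    # The composite key combines etag and version id (illustrative form:
    # 'abc123:v1'), so the source and replica events for the same object
    # version update the same DynamoDB item.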
# -----------------------------------------------------------------
# If the object already has a DDB record get it
#
ddbdata = client['ddb']['handle'].get_item(
TableName=ddbtable,
Key={'ETag': ETag},
ConsistentRead=True
)
ddbitem = {} # reset the dict
if 'Item' in ddbdata:
ddbitem = ddbdata['Item']
if DEBUG > 4:
print("DDB record: " + json.dumps(ddbitem, indent=2))
#
# Is this a REPLICA? Use timestamp as completion time
#
# Note: replica only updates s3Replica, replication_status, and end_datetime.
#
# We do this so we don't have to handle conditional update of fields that might get
    # stepped on if the events are processed out of order.
#
if repstatus == 'REPLICA':
# print('Processing a REPLICA object: ' + ETag['S'])
ddb_update_exp += ', s3Replica = :d'
ddb_exp_attrs[':d'] = {'S': bucket}
#print('s3Replica: ' + bucket)
ddb_update_exp += ', end_datetime = :e'
ddb_exp_attrs[':e'] = {'S': now} # 'now' is from the event data
#print('end_datetime: ' + now)
# Set the ttl
        purge = datetime.strptime(now, timefmt) + timedelta(hours=purge_thresh)  # datetime object: keep the record for purge_thresh hours
ttl = purge.strftime('%s')
ddb_update_exp += ', itemttl = :p'
ddb_exp_attrs[':p'] = {'N': ttl}
# If this is a replica then status is COMPLETE
ddb_update_exp += ', replication_status = :b'
ddb_exp_attrs[':b'] = {'S': 'COMPLETED'}
#print('replication_status: COMPLETED (implied)')
if 'start_datetime' in ddbitem and 'crr_rate' not in ddbitem:
etime = datetime.strptime(now, timefmt) - datetime.strptime(ddbitem['start_datetime']['S'], timefmt)
etimesecs = (etime.days * 24 * 60 * 60) + etime.seconds
#print("Calculate elapsed time in seconds")
crr_rate = int(objsize) * 8 / (etimesecs + 1) # Add 1 to prevent /0 errors
ddb_update_exp += ', crr_rate = :r'
ddb_exp_attrs[':r'] = {'N': str(crr_rate)}
#print('crr_rate: ', crr_rate)
ddb_update_exp += ', elapsed = :t'
ddb_exp_attrs[':t'] = {'N': str(etimesecs)}
#print('elapsed: ', etimesecs)
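            # Illustrative numbers (hypothetical): a 1,000,000-byte object
            # that replicated in 4 seconds gives crr_rate = 8,000,000 bits
            # / (4 + 1) s = 1,600,000 bits per second.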
log_statistics(
ddbitem['s3Origin']['S'],
bucket,
ddbitem['start_datetime']['S'],
objsize,
str(etimesecs),
300)
# -----------------------------------------------------------------
# Or is this a SOURCE? Use timestamp as replication start time
#
else:
ddb_update_exp += ', s3Origin = :f'
ddb_exp_attrs[':f'] = {'S': bucket}
# If this is not a replica then do not report status. It's not important and
# makes the DynamoDB update much more complicated. Just get the start time
#
# We also do not care what the status is. If it has a FAILED status we could
# write code to send a notification, but that's outside our scope.
if repstatus == 'COMPLETED' or repstatus == 'FAILED' or repstatus == 'PENDING':
# print('Processing a ORIGINAL object: ' + ETag['S'] + ' status: ' + repstatus)
ddb_update_exp += ', start_datetime = :g'
ddb_exp_attrs[':g'] = {'S': now}
# ---------------------------------------------------------
# If we already got the replica event...
#
if 'end_datetime' in ddbitem and 'crr_rate' not in ddbitem:
etime = datetime.strptime(ddbitem['end_datetime']['S'], timefmt) - datetime.strptime(now, timefmt)
etimesecs = (etime.days * 24 * 60 * 60) + etime.seconds
#print("Calculate elapsed time in seconds")
crr_rate = int(objsize) * 8 / (etimesecs + 1) # Add 1 to prevent /0 errors
ddb_update_exp += ', crr_rate = :r'
ddb_exp_attrs[':r'] = {'N': str(crr_rate)}
# Set the ttl
                purge = datetime.strptime(ddbitem['end_datetime']['S'], timefmt) + timedelta(hours=purge_thresh)  # datetime object: keep the record for purge_thresh hours
ttl = purge.strftime('%s')
ddb_update_exp += ', itemttl = :p'
ddb_exp_attrs[':p'] = {'N': ttl}
ddb_update_exp += ', elapsed = :t'
ddb_exp_attrs[':t'] = {'N': str(etimesecs)}
log_statistics(
bucket,ddbitem['s3Replica']['S'],
ddbitem['end_datetime']['S'],
objsize,
str(etimesecs),300)
# ---------------------------------------------------------
# We did not yet get the replica event
#
else:
if repstatus == 'FAILED':
# If replication failed this is the only time we will see this object.
# Update the status to FAILED
ddb_update_exp += ', replication_status = :b'
ddb_exp_attrs[':b'] = {'S': 'FAILED'}
log_statistics(
bucket,
'FAILED',
now,
'0',
'1',
300)
else:
print('Unknown Replication Status: ' + repstatus)
raise Exception('Unknown Replication Status')
# Create a record in the DDB table
try:
response = client['ddb']['handle'].update_item(
TableName=ddbtable,
Key={'ETag': ETag},
UpdateExpression=ddb_update_exp,
ExpressionAttributeValues=ddb_exp_attrs)
except Exception as e:
print(e)
print('Table ' + ddbtable + ' update failed')
raise e
# =====================================================================
# queue_handler
# -------------
# Main entry point
# Count the SQS queue and manage scale.
# Here's what my event looks like:
# {
# "account": "SAMPLE12345",
# "region": "us-east-2",
# "detail": {},
# "detail-type": "Scheduled Event",
# "source": "aws.events",
# "version": "0",
# "time": "2017-02-09T13:56:03Z",
# "id": "a8b4f046-06c5-4b3c-b543-90c3fdaaac14",
# "resources": [
# "arn:aws:events:us-east-2:SAMPLE12345:rule/CRRMonitor-2"
# ]
# }
#
# When I spawn a child process I will change "detail-type" to "Spawned Event"
# and add "child-number", where 0 is the top-level
# =====================================================================
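# Illustrative scale-out math (assumed defaults): with maxtask=1800 and
# child-number=2 the message floor is 3600; a queue depth of 6000 leaves a
# backlog of 2400 (> maxtask), so this invocation spawns child 3, provided
# maxspawn has not been reached.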
def queue_handler(event, context):
cnum = 0
if 'child-number' in event:
cnum = int(event['child-number'])
message_floor = cnum * maxtask
# {
# "Attributes": {"ApproximateNumberOfMessages": "1040"},
# "ResponseMetadata": {
# "RetryAttempts": 0,
# "HTTPStatusCode": 200,
# "RequestId": "51c43b7e-9b05-59c8-b68e-6a68f3f3b999",
# "HTTPHeaders": {
# "x-amzn-requestid": "51c43b7e-9b05-59c8-b68e-6a68f3f3b999",
# "content-length": "360",
# "server": "Server",
# "connection": "keep-alive",
# "date": "Thu, 09 Feb 2017 12:55:18 GMT",
# "content-type": "text/xml"
# }
# }
# }
response = client['sqs']['handle'].get_queue_attributes(
QueueUrl=queue_endpoint,
AttributeNames=['ApproximateNumberOfMessages']
)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
        print('Bad status from ' + queue + ': ' + str(response['ResponseMetadata']['HTTPStatusCode']))
return
queue_sz = int(response['Attributes']['ApproximateNumberOfMessages'])
queue_backlog = queue_sz - message_floor
print('INFO [CNUM-' + str(cnum) + '] Queue is ' + str(queue_sz) + \
' deep. Backlog is ' + str(queue_backlog))
# We subtracted the number of messages for which processes are already
    # running. If the backlog is still too deep then first spawn another child,
# updating child-number + 1
if queue_backlog > maxtask:
# increment child-number (or initialize to 1) in the event dict
# spawn another lambda, passing the event and context dicts
if cnum < maxspawn:
event['child-number'] = cnum + 1
try:
client['lbd']['handle'].invoke(
FunctionName=context.function_name,
InvocationType='Event',
Payload=json.dumps(event)
)
print('Spawning a child because there are ' + str(queue_sz) + ' messages in the queue. I am child ' + str(cnum) + ' with a max capacity of ' + str(maxtask) + '. Message floor is ' + str(message_floor))
print('Reproduction successful - child ' + str(cnum+1) + ' spawned')
except Exception as e:
print(e)
print('ERROR[CNUM-' + str(cnum) + '] Failed to reproduce')
raise e
else:
print('WARNING: maxspawn(' + str(maxspawn) + ') exceeded. Not spawning a helper.')
# -----------------------------------------------------------------
# Now we get to work. Process messages from the queue until empty
# or we time out. This is the secret sauce to our horizontal scale
print('INFO [CNUM-' + str(cnum) + '] Priming read from SQS...')
msg_ctr = 0 # keep a count of messages processed
sqs_msgs = client['sqs']['handle'].receive_message(
QueueUrl=queue_endpoint,
AttributeNames=['All'],
MaxNumberOfMessages=10,
VisibilityTimeout=60
)
sqs_delete = []
while 'Messages' in sqs_msgs:
print('INFO [CNUM-' + str(cnum) + '] Processing ' + str(len(sqs_msgs['Messages'])) + ' messages')
for message in sqs_msgs['Messages']:
rc = message_handler(json.loads(message['Body']))
            # If we did not get a 0 return code, let the record time out and
            # go back into the queue
if not rc:
sqs_delete.append({'Id': message['MessageId'], 'ReceiptHandle': message['ReceiptHandle']})
msg_ctr += 1 # keep a count of messages processed
if len(sqs_delete) > 0:
# Delete the messages we just processed
response = client['sqs']['handle'].delete_message_batch(
QueueUrl=queue_endpoint,
Entries=sqs_delete
)
if len(response['Successful']) < len(sqs_delete):
                print('ERROR[CNUM-' + str(cnum) + ']: processed ' + str(len(sqs_msgs['Messages'])) + ' messages but only deleted ' + str(len(response['Successful'])) + ' messages')
sqs_delete = [] # reset the list
print('INFO [CNUM-' + str(cnum) + '] Reading from SQS...')
sqs_msgs = client['sqs']['handle'].receive_message(
QueueUrl=queue_endpoint,
AttributeNames=['All'],
MaxNumberOfMessages=10,
VisibilityTimeout=60
)
print('INFO [CNUM-' + str(cnum) + '] Completed - ' + str(msg_ctr) + ' messages processed')
if SEND_ANONYMOUS_USAGE_METRIC and msg_ctr > 0:
send_anonymous_usage_metric({
"Action": f"Num messages processed by CRRMonitor: {msg_ctr}"
})
def send_anonymous_usage_metric(metric_data={}):
try:
        if not isinstance(metric_data, dict) or not metric_data:
raise Exception('Invalid metric_data passed to send_anonymous_usage_metric')
metric_endpoint = 'https://metrics.awssolutionsbuilder.com/generic'
metric_payload = {
"Solution": "SO0022",
"UUID": ANONYMOUS_SOLUTION_ID,
"Version": VERSION_ID,
"Timestamp": str(datetime.utcnow()),
"Data": metric_data
}
data = bytes(json.dumps(metric_payload), 'utf-8')
headers = { "Content-Type": "application/json" }
print(f"Sending anonymous usage metric: {str(metric_payload)}")
req = urllib.request.Request(url=metric_endpoint, data=data, method='POST', headers=headers)
with urllib.request.urlopen(req) as f:
print(f"Anonymous usage metric send status: {f.status}")
except Exception as e:
# Log the exception but do not raise it again
print(f'Exception while sending anonymous usage metric: {e}')
###### M A I N ######
client = connect_clients(client)
try:
queue_endpoint = client['sqs']['handle'].get_queue_url(
QueueName=queue
)['QueueUrl']
except Exception as e:
print(e)
print('Could not get the url for ' + queue)
raise e
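# ---------------------------------------------------------------------
# Minimal local smoke-test sketch (illustration only, not part of the deployed
# solution). The event below is a hand-built, hypothetical SNS-wrapped
# CloudWatch record; the bucket and key values are made up, and running this
# still requires AWS credentials plus the DynamoDB tables and SQS queue
# created by the CloudFormation template.
if __name__ == '__main__':
    sample_event = {
        'Records': [{
            'EventSource': 'aws:sns',
            'Sns': {
                'Message': json.dumps({
                    'detail-type': 'AWS API Call via CloudTrail',
                    'region': 'us-east-1',
                    'detail': {
                        'eventName': 'PutObject',
                        'eventTime': '2017-02-09T13:56:03Z',
                        'requestParameters': {
                            'bucketName': 'example-source-bucket',
                            'key': 'example/object.txt'
                        }
                    }
                })
            }
        }]
    }
    message_handler(sample_event)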
app/controllers/authControllers.py | nicolunardi/travela-server | 0 | 6632231 | <reponame>nicolunardi/travela-server
from fastapi import HTTPException, status, Depends
from email_validator import EmailNotValidError
from sqlalchemy.orm import Session
from app.schemas.tokens import Token
from app.models.users import User as UserModel
from app.schemas.users import UserCreate, UserLogin
from app.dependencies.authentication import (
get_password_hash,
check_valid_email,
get_user_by_email,
verify_password,
)
from app.dependencies.JWTtokens import create_access_token
from app.config.database import get_db
def create_user(db: Session, user: UserCreate):
# check the email address isn't already in use
db_user = get_user_by_email(db, user.email)
if db_user:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Email already registered.",
)
# ensure the email address is valid
try:
email = check_valid_email(user.email)
except EmailNotValidError as e:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
)
hashed_password = get_password_hash(user.password)
# create the user
new_user = UserModel(
email=user.email, name=user.name, hashed_password=hashed_password
)
# add the user to the db
db.add(new_user)
db.commit()
db.refresh(new_user)
return new_user
def register_user(db: Session, user: UserCreate):
new_user = create_user(db, user)
# if the user was created without problems, generate the jwt token
if new_user:
token = create_access_token(
data={
"email": new_user.email,
"name": new_user.name,
"id": new_user.id,
}
)
return Token(access_token=token, token_type="bearer")
else:
raise HTTPException(
status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Something went wrong.",
)
def login_user(form_data: UserLogin, db):
# check if the user exists in the db
curr_user = get_user_by_email(db, form_data.username)
if not curr_user:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="No user exists with that email address.",
)
# check if the passwords match
if not verify_password(form_data.password, curr_user.hashed_password):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Incorrect password.",
)
token = create_access_token(
data={
"email": curr_user.email,
"name": curr_user.name,
"id": curr_user.id,
}
)
return Token(access_token=token, token_type="bearer")
| from fastapi import HTTPException, status, Depends
from email_validator import EmailNotValidError
from sqlalchemy.orm import Session
from app.schemas.tokens import Token
from app.models.users import User as UserModel
from app.schemas.users import UserCreate, UserLogin
from app.dependencies.authentication import (
get_password_hash,
check_valid_email,
get_user_by_email,
verify_password,
)
from app.dependencies.JWTtokens import create_access_token
from app.config.database import get_db
def create_user(db: Session, user: UserCreate):
# check the email address isn't already in use
db_user = get_user_by_email(db, user.email)
if db_user:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Email already registered.",
)
# ensure the email address is valid
try:
email = check_valid_email(user.email)
except EmailNotValidError as e:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
)
hashed_password = get_password_hash(user.password)
# create the user
new_user = UserModel(
email=user.email, name=user.name, hashed_password=hashed_password
)
# add the user to the db
db.add(new_user)
db.commit()
db.refresh(new_user)
return new_user
def register_user(db: Session, user: UserCreate):
new_user = create_user(db, user)
# if the user was created without problems, generate the jwt token
if new_user:
token = create_access_token(
data={
"email": new_user.email,
"name": new_user.name,
"id": new_user.id,
}
)
return Token(access_token=token, token_type="bearer")
else:
raise HTTPException(
status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Something went wrong.",
)
def login_user(form_data: UserLogin, db):
# check if the user exists in the db
curr_user = get_user_by_email(db, form_data.username)
if not curr_user:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="No user exists with that email address.",
)
# check if the passwords match
if not verify_password(form_data.password, curr_user.hashed_password):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Incorrect password.",
)
token = create_access_token(
data={
"email": curr_user.email,
"name": curr_user.name,
"id": curr_user.id,
}
)
return Token(access_token=token, token_type="bearer") | en | 0.883382 | # check the email address isn't already in use # ensure the email address is valid # create the user # add the user to the db # if the user was created without problems, generate the jwt token # check if the user exists in the db # check if the passwords match | 2.785975 | 3 |
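For context on how the controllers above are typically consumed, the sketch below wires register_user and login_user into FastAPI routes. The router prefix and the OAuth2 password form are assumptions for illustration, not code taken from the travela-server repository; login_user only reads .username and .password, which the OAuth2 form provides.

from fastapi import APIRouter, Depends
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session

from app.config.database import get_db
from app.controllers.authControllers import login_user, register_user
from app.schemas.tokens import Token
from app.schemas.users import UserCreate

router = APIRouter(prefix="/auth", tags=["auth"])  # hypothetical prefix

@router.post("/register", response_model=Token)
def register(user: UserCreate, db: Session = Depends(get_db)):
    return register_user(db, user)

@router.post("/login", response_model=Token)
def login(form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(get_db)):
    # The OAuth2 form is duck-type compatible with the UserLogin schema used above.
    return login_user(form_data, db)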
tests/parsers/bsm.py | ir4n6/plaso | 0 | 6632232 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Basic Security Module (BSM) file parser."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import bsm as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers import bsm
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class MacOSBSMParserTest(test_lib.ParserTestCase):
"""Tests for Basic Security Module (BSM) file parser."""
@shared_test_lib.skipUnlessHasTestFile(['apple.bsm'])
def testParse(self):
"""Tests the Parse function on a MacOS BSM file."""
parser = bsm.BSMParser()
knowledge_base_values = {
'operating_system': definitions.OPERATING_SYSTEM_MACOS}
storage_writer = self._ParseFile(
['apple.bsm'], parser,
knowledge_base_values=knowledge_base_values)
self.assertEqual(storage_writer.number_of_events, 54)
events = list(storage_writer.GetEvents())
event = events[0]
self.assertEqual(event.data_type, 'bsm:event')
self.CheckTimestamp(event.timestamp, '2013-11-04 18:36:20.000381')
self.assertEqual(event.event_type, 'audit crash recovery (45029)')
expected_extra_tokens = {
'BSM_TOKEN_PATH': '/var/audit/20131104171720.crash_recovery',
'BSM_TOKEN_RETURN32': {
'call_status': 0,
'error': 'Success',
'token_status': 0},
'BSM_TOKEN_TEXT': 'launchctl::Audit recovery',
'BSM_TOKEN_TRAILER': 104
}
self.assertEqual(event.extra_tokens, expected_extra_tokens)
expected_return_value = {
'call_status': 0,
'error': 'Success',
'token_status': 0
}
self.assertEqual(event.return_value, expected_return_value)
event = events[15]
self.CheckTimestamp(event.timestamp, '2013-11-04 18:36:26.000171')
self.assertEqual(event.event_type, 'user authentication (45023)')
expected_extra_tokens = {
'BSM_TOKEN_RETURN32': {
'call_status': 5000,
'error': 'Unknown',
'token_status': 255},
'BSM_TOKEN_SUBJECT32': {
'aid': 4294967295,
'egid': 92,
'euid': 92,
'gid': 92,
'pid': 143,
'session_id': 100004,
'terminal_ip': '0.0.0.0',
'terminal_port': 143,
'uid': 92},
'BSM_TOKEN_TEXT': (
'Verify password for record type Users \'moxilo\' node '
'\'/Local/Default\''),
'BSM_TOKEN_TRAILER': 140
}
self.assertEqual(event.extra_tokens, expected_extra_tokens)
expected_return_value = {
'call_status': 5000,
'error': 'Unknown',
'token_status': 255
}
self.assertEqual(event.return_value, expected_return_value)
event = events[31]
self.CheckTimestamp(event.timestamp, '2013-11-04 18:36:26.000530')
self.assertEqual(event.event_type, 'SecSrvr AuthEngine (45025)')
expected_extra_tokens = {
'BSM_TOKEN_RETURN32': {
'call_status': 0,
'error': 'Success',
'token_status': 0},
'BSM_TOKEN_SUBJECT32': {
'aid': 4294967295,
'egid': 0,
'euid': 0,
'gid': 0,
'pid': 67,
'session_id': 100004,
'terminal_ip': '0.0.0.0',
'terminal_port': 67,
'uid': 0},
'BSM_TOKEN_TEXT': 'system.<PASSWORD>',
'BSM_TOKEN_TRAILER': 110
}
self.assertEqual(event.extra_tokens, expected_extra_tokens)
expected_return_value = {
'call_status': 0,
'error': 'Success',
'token_status': 0
}
self.assertEqual(event.return_value, expected_return_value)
event = events[50]
self.CheckTimestamp(event.timestamp, '2013-11-04 18:37:36.000399')
self.assertEqual(event.event_type, 'session end (44903)')
expected_extra_tokens = {
'BSM_TOKEN_ARGUMENT32': {
'is': 12288,
'num_arg': 3,
'string': 'am_failure'},
'BSM_TOKEN_ARGUMENT64': {
'is': 0,
'num_arg': 1,
'string': 'sflags'},
'BSM_TOKEN_RETURN32': {
'call_status': 0,
'error': 'Success',
'token_status': 0},
'BSM_TOKEN_SUBJECT32': {
'aid': 4294967295,
'egid': 0,
'euid': 0,
'gid': 0,
'pid': 0,
'session_id': 100015,
'terminal_ip': '0.0.0.0',
'terminal_port': 0,
'uid': 0},
'BSM_TOKEN_TRAILER': 125
}
self.assertEqual(event.extra_tokens, expected_extra_tokens)
expected_return_value = {
'call_status': 0,
'error': 'Success',
'token_status': 0
}
self.assertEqual(event.return_value, expected_return_value)
class OpenBSMParserTest(test_lib.ParserTestCase):
"""Tests for Basic Security Module (BSM) file parser."""
@shared_test_lib.skipUnlessHasTestFile(['openbsm.bsm'])
def testParse(self):
"""Tests the Parse function on a "generic" BSM file."""
parser = bsm.BSMParser()
knowledge_base_values = {
'operating_system': definitions.OPERATING_SYSTEM_LINUX}
storage_writer = self._ParseFile(
['openbsm.bsm'], parser,
knowledge_base_values=knowledge_base_values)
self.assertEqual(storage_writer.number_of_events, 50)
events = list(storage_writer.GetEvents())
expected_extra_tokens = [
{'BSM_TOKEN_ARGUMENT32': {
'is': 2882400000,
'num_arg': 3,
'string': 'test_arg32_token'},
'BSM_TOKEN_TRAILER': 50},
{'BSM_TOKEN_DATA':{
'data': 'SomeData',
'format': 'String'},
'BSM_TOKEN_TRAILER': 39},
{'BSM_TOKEN_FILE': {
'string': 'test',
'timestamp': '1970-01-01 20:42:45.000424'},
'BSM_TOKEN_TRAILER': 41},
{'BSM_TOKEN_ADDR': '192.168.100.15',
'BSM_TOKEN_TRAILER': 30},
{'BSM_TOKEN_TRAILER': 46,
'IPv4_Header': '0x400000145478000040010000c0a8649bc0a86e30]'},
{'BSM_TOKEN_IPC': {
'object_id': 305419896,
'object_type': 1},
'BSM_TOKEN_TRAILER': 31},
{'BSM_TOKEN_PORT': 20480,
'BSM_TOKEN_TRAILER': 28},
{'BSM_TOKEN_OPAQUE': 'aabb<PASSWORD>',
'BSM_TOKEN_TRAILER': 32},
{'BSM_TOKEN_PATH': '/test/this/is/a/test',
'BSM_TOKEN_TRAILER': 49},
{'BSM_TOKEN_PROCESS32': {
'aid': 305419896,
'egid': 591751049,
'euid': 19088743,
'gid': 159868227,
'pid': 321140038,
'session_id': 2542171492,
'terminal_ip': '127.0.0.1',
'terminal_port': 374945606,
'uid': 2557891634},
'BSM_TOKEN_TRAILER': 62},
{'BSM_TOKEN_PROCESS64': {
'aid': 305419896,
'egid': 591751049,
'euid': 19088743,
'gid': 159868227,
'pid': 321140038,
'session_id': 2542171492,
'terminal_ip': '127.0.0.1',
'terminal_port': 374945606,
'uid': 2557891634},
'BSM_TOKEN_TRAILER': 66},
{'BSM_TOKEN_RETURN32': {
'call_status': 305419896,
'error': 'Invalid argument',
'token_status': 22},
'BSM_TOKEN_TRAILER': 31},
{'BSM_TOKEN_SEQUENCE': 305419896,
'BSM_TOKEN_TRAILER': 30},
{'BSM_TOKEN_AUT_SOCKINET32_EX':{
'from': '127.0.0.1',
'from_port': 0,
'to': '127.0.0.1',
'to_port': 0},
'BSM_TOKEN_TRAILER': 44},
{'BSM_TOKEN_SUBJECT32': {
'aid': 305419896,
'egid': 591751049,
'euid': 19088743,
'gid': 159868227,
'pid': 321140038,
'session_id': 2542171492,
'terminal_ip': '127.0.0.1',
'terminal_port': 374945606,
'uid': 2557891634},
'BSM_TOKEN_TRAILER': 62},
{'BSM_TOKEN_SUBJECT32_EX': {
'aid': 305419896,
'egid': 591751049,
'euid': 19088743,
'gid': 159868227,
'pid': 321140038,
'session_id': 2542171492,
'terminal_ip': 'fe80::1',
'terminal_port': 374945606,
'uid': 2557891634},
'BSM_TOKEN_TRAILER': 78},
{'BSM_TOKEN_TEXT': 'This is a test.',
'BSM_TOKEN_TRAILER': 44},
{'BSM_TOKEN_TRAILER': 37,
'BSM_TOKEN_ZONENAME': '<PASSWORD>'},
{'BSM_TOKEN_RETURN32': {
'call_status': 4294967295,
'error':
'Argument list too long',
'token_status': 7},
'BSM_TOKEN_TRAILER': 31}
]
for event_index in range(0, 19):
event = events[event_index]
expected_extra_tokens_dict = expected_extra_tokens[event_index]
extra_tokens_dict = getattr(event, 'extra_tokens', {})
self.CheckDictContents(extra_tokens_dict, expected_extra_tokens_dict)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Basic Security Module (BSM) file parser."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import bsm as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers import bsm
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class MacOSBSMParserTest(test_lib.ParserTestCase):
"""Tests for Basic Security Module (BSM) file parser."""
@shared_test_lib.skipUnlessHasTestFile(['apple.bsm'])
def testParse(self):
"""Tests the Parse function on a MacOS BSM file."""
parser = bsm.BSMParser()
knowledge_base_values = {
'operating_system': definitions.OPERATING_SYSTEM_MACOS}
storage_writer = self._ParseFile(
['apple.bsm'], parser,
knowledge_base_values=knowledge_base_values)
self.assertEqual(storage_writer.number_of_events, 54)
events = list(storage_writer.GetEvents())
event = events[0]
self.assertEqual(event.data_type, 'bsm:event')
self.CheckTimestamp(event.timestamp, '2013-11-04 18:36:20.000381')
self.assertEqual(event.event_type, 'audit crash recovery (45029)')
expected_extra_tokens = {
'BSM_TOKEN_PATH': '/var/audit/20131104171720.crash_recovery',
'BSM_TOKEN_RETURN32': {
'call_status': 0,
'error': 'Success',
'token_status': 0},
'BSM_TOKEN_TEXT': 'launchctl::Audit recovery',
'BSM_TOKEN_TRAILER': 104
}
self.assertEqual(event.extra_tokens, expected_extra_tokens)
expected_return_value = {
'call_status': 0,
'error': 'Success',
'token_status': 0
}
self.assertEqual(event.return_value, expected_return_value)
event = events[15]
self.CheckTimestamp(event.timestamp, '2013-11-04 18:36:26.000171')
self.assertEqual(event.event_type, 'user authentication (45023)')
expected_extra_tokens = {
'BSM_TOKEN_RETURN32': {
'call_status': 5000,
'error': 'Unknown',
'token_status': 255},
'BSM_TOKEN_SUBJECT32': {
'aid': 4294967295,
'egid': 92,
'euid': 92,
'gid': 92,
'pid': 143,
'session_id': 100004,
'terminal_ip': '0.0.0.0',
'terminal_port': 143,
'uid': 92},
'BSM_TOKEN_TEXT': (
'Verify password for record type Users \'moxilo\' node '
'\'/Local/Default\''),
'BSM_TOKEN_TRAILER': 140
}
self.assertEqual(event.extra_tokens, expected_extra_tokens)
expected_return_value = {
'call_status': 5000,
'error': 'Unknown',
'token_status': 255
}
self.assertEqual(event.return_value, expected_return_value)
event = events[31]
self.CheckTimestamp(event.timestamp, '2013-11-04 18:36:26.000530')
self.assertEqual(event.event_type, 'SecSrvr AuthEngine (45025)')
expected_extra_tokens = {
'BSM_TOKEN_RETURN32': {
'call_status': 0,
'error': 'Success',
'token_status': 0},
'BSM_TOKEN_SUBJECT32': {
'aid': 4294967295,
'egid': 0,
'euid': 0,
'gid': 0,
'pid': 67,
'session_id': 100004,
'terminal_ip': '0.0.0.0',
'terminal_port': 67,
'uid': 0},
'BSM_TOKEN_TEXT': 'system.<PASSWORD>',
'BSM_TOKEN_TRAILER': 110
}
self.assertEqual(event.extra_tokens, expected_extra_tokens)
expected_return_value = {
'call_status': 0,
'error': 'Success',
'token_status': 0
}
self.assertEqual(event.return_value, expected_return_value)
event = events[50]
self.CheckTimestamp(event.timestamp, '2013-11-04 18:37:36.000399')
self.assertEqual(event.event_type, 'session end (44903)')
expected_extra_tokens = {
'BSM_TOKEN_ARGUMENT32': {
'is': 12288,
'num_arg': 3,
'string': 'am_failure'},
'BSM_TOKEN_ARGUMENT64': {
'is': 0,
'num_arg': 1,
'string': 'sflags'},
'BSM_TOKEN_RETURN32': {
'call_status': 0,
'error': 'Success',
'token_status': 0},
'BSM_TOKEN_SUBJECT32': {
'aid': 4294967295,
'egid': 0,
'euid': 0,
'gid': 0,
'pid': 0,
'session_id': 100015,
'terminal_ip': '0.0.0.0',
'terminal_port': 0,
'uid': 0},
'BSM_TOKEN_TRAILER': 125
}
self.assertEqual(event.extra_tokens, expected_extra_tokens)
expected_return_value = {
'call_status': 0,
'error': 'Success',
'token_status': 0
}
self.assertEqual(event.return_value, expected_return_value)
class OpenBSMParserTest(test_lib.ParserTestCase):
"""Tests for Basic Security Module (BSM) file parser."""
@shared_test_lib.skipUnlessHasTestFile(['openbsm.bsm'])
def testParse(self):
"""Tests the Parse function on a "generic" BSM file."""
parser = bsm.BSMParser()
knowledge_base_values = {
'operating_system': definitions.OPERATING_SYSTEM_LINUX}
storage_writer = self._ParseFile(
['openbsm.bsm'], parser,
knowledge_base_values=knowledge_base_values)
self.assertEqual(storage_writer.number_of_events, 50)
events = list(storage_writer.GetEvents())
expected_extra_tokens = [
{'BSM_TOKEN_ARGUMENT32': {
'is': 2882400000,
'num_arg': 3,
'string': 'test_arg32_token'},
'BSM_TOKEN_TRAILER': 50},
{'BSM_TOKEN_DATA':{
'data': 'SomeData',
'format': 'String'},
'BSM_TOKEN_TRAILER': 39},
{'BSM_TOKEN_FILE': {
'string': 'test',
'timestamp': '1970-01-01 20:42:45.000424'},
'BSM_TOKEN_TRAILER': 41},
{'BSM_TOKEN_ADDR': '192.168.100.15',
'BSM_TOKEN_TRAILER': 30},
{'BSM_TOKEN_TRAILER': 46,
'IPv4_Header': '0x400000145478000040010000c0a8649bc0a86e30]'},
{'BSM_TOKEN_IPC': {
'object_id': 305419896,
'object_type': 1},
'BSM_TOKEN_TRAILER': 31},
{'BSM_TOKEN_PORT': 20480,
'BSM_TOKEN_TRAILER': 28},
{'BSM_TOKEN_OPAQUE': 'aabb<PASSWORD>',
'BSM_TOKEN_TRAILER': 32},
{'BSM_TOKEN_PATH': '/test/this/is/a/test',
'BSM_TOKEN_TRAILER': 49},
{'BSM_TOKEN_PROCESS32': {
'aid': 305419896,
'egid': 591751049,
'euid': 19088743,
'gid': 159868227,
'pid': 321140038,
'session_id': 2542171492,
'terminal_ip': '127.0.0.1',
'terminal_port': 374945606,
'uid': 2557891634},
'BSM_TOKEN_TRAILER': 62},
{'BSM_TOKEN_PROCESS64': {
'aid': 305419896,
'egid': 591751049,
'euid': 19088743,
'gid': 159868227,
'pid': 321140038,
'session_id': 2542171492,
'terminal_ip': '127.0.0.1',
'terminal_port': 374945606,
'uid': 2557891634},
'BSM_TOKEN_TRAILER': 66},
{'BSM_TOKEN_RETURN32': {
'call_status': 305419896,
'error': 'Invalid argument',
'token_status': 22},
'BSM_TOKEN_TRAILER': 31},
{'BSM_TOKEN_SEQUENCE': 305419896,
'BSM_TOKEN_TRAILER': 30},
{'BSM_TOKEN_AUT_SOCKINET32_EX':{
'from': '127.0.0.1',
'from_port': 0,
'to': '127.0.0.1',
'to_port': 0},
'BSM_TOKEN_TRAILER': 44},
{'BSM_TOKEN_SUBJECT32': {
'aid': 305419896,
'egid': 591751049,
'euid': 19088743,
'gid': 159868227,
'pid': 321140038,
'session_id': 2542171492,
'terminal_ip': '127.0.0.1',
'terminal_port': 374945606,
'uid': 2557891634},
'BSM_TOKEN_TRAILER': 62},
{'BSM_TOKEN_SUBJECT32_EX': {
'aid': 305419896,
'egid': 591751049,
'euid': 19088743,
'gid': 159868227,
'pid': 321140038,
'session_id': 2542171492,
'terminal_ip': 'fe80::1',
'terminal_port': 374945606,
'uid': 2557891634},
'BSM_TOKEN_TRAILER': 78},
{'BSM_TOKEN_TEXT': 'This is a test.',
'BSM_TOKEN_TRAILER': 44},
{'BSM_TOKEN_TRAILER': 37,
'BSM_TOKEN_ZONENAME': '<PASSWORD>'},
{'BSM_TOKEN_RETURN32': {
'call_status': 4294967295,
'error':
'Argument list too long',
'token_status': 7},
'BSM_TOKEN_TRAILER': 31}
]
for event_index in range(0, 19):
event = events[event_index]
expected_extra_tokens_dict = expected_extra_tokens[event_index]
extra_tokens_dict = getattr(event, 'extra_tokens', {})
self.CheckDictContents(extra_tokens_dict, expected_extra_tokens_dict)
if __name__ == '__main__':
unittest.main()
| en | 0.523967 | #!/usr/bin/python # -*- coding: utf-8 -*- Tests for Basic Security Module (BSM) file parser. # pylint: disable=unused-import Tests for Basic Security Module (BSM) file parser. Tests the Parse function on a MacOS BSM file. Tests for Basic Security Module (BSM) file parser. Tests the Parse function on a "generic" BSM file. | 2.497015 | 2 |
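The loop at the end of OpenBSMParserTest relies on a CheckDictContents helper from plaso's test_lib. A minimal, hypothetical equivalent of that subset check is sketched below; the real plaso implementation may differ.

import unittest

class DictContentsCheckMixin(unittest.TestCase):
    def CheckDictContents(self, actual_dict, expected_dict):
        # Every expected key must be present with an equal value;
        # extra keys in actual_dict are tolerated.
        for key, expected_value in expected_dict.items():
            self.assertIn(key, actual_dict)
            self.assertEqual(actual_dict[key], expected_value)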
strategy/deribit_cross_remote_future.py | Hudie/crypto_algo_trading | 20 | 6632233 | # -*- coding: utf-8 -*-
import zmq.asyncio
import asyncio
import json
from crypto_trading.service.base import ServiceState, ServiceBase, start_service
# from crypto_trading.config import *
DERIBIT_ACCOUNT_ID = 'maxlu'
SYMBOL = 'BTC'
MINIMUM_TICK_SIZE = 0.5
NEAR_FUTURE = 'BTC-25SEP20'
FAR_FUTURE = 'BTC-25DEC20'
LONG_GAP = [180, 225, 270, 315, 360]
LONG_POSITION_SIZE_THRESHOLD = [300000 * i for i in [1, 2, 3, 4, 5]]
SHORT_GAP = [-135, -180, -225, -270, -315]
SHORT_POSITION_SIZE_THRESHOLD = [250000 * i for i in [1, 2, 3, 4, 5]]
SIZE_PER_TRADE = 1000
# margin: [equity, initial_margin, maintenance_margin]
margin = [0, 0, 0]
future = None
future_size = 0
perpetual = None
perpetual_size = 0
class OrderState():
def __init__(self, if_placed=False, if_changing=False, if_cancelling=False, label='', order={}):
self.if_placed = if_placed
self.if_changing = if_changing
self.if_cancelling = if_cancelling
self.label = label
self.order = order
def reset(self):
self.if_placed = False
self.if_changing = False
self.if_cancelling = False
self.label = ''
self.order = {}
f_limit_order = OrderState()
p_limit_order = OrderState()
class Quote():
def __init__(self, bid, bidsize, ask, asksize, index_price):
self.bid = bid
self.bidsize = bidsize
self.ask = ask
self.asksize = asksize
self.index_price = index_price
class FutureArbitrage(ServiceBase):
def __init__(self, logger_name):
ServiceBase.__init__(self, logger_name)
# subscribe market data
self.deribitmd = self.ctx.socket(zmq.SUB)
self.deribitmd.connect('tcp://localhost:9050')
self.deribitmd.setsockopt_string(zmq.SUBSCRIBE, '')
# request client for transaction
self.deribittdreq = self.ctx.socket(zmq.REQ)
self.deribittdreq.connect('tcp://localhost:9020')
# subscribe transaction data
self.deribittd = self.ctx.socket(zmq.SUB)
self.deribittd.connect('tcp://localhost:9010')
self.deribittd.setsockopt_string(zmq.SUBSCRIBE, '')
# async queue to sequentially combine market data and tx data
self.msg = asyncio.Queue()
# find gap between perpetual and current season future, and make transaction when conditions are satisfied
async def find_quotes_gap(self):
try:
global future, future_size, f_limit_order, perpetual, perpetual_size, p_limit_order, margin
if min(future.bid - perpetual.bid, future.ask - perpetual.ask) >= LONG_GAP[0]:
pos_idx = sum([1 if perpetual_size >= i else 0 for i in LONG_POSITION_SIZE_THRESHOLD])
pos_idx = min(pos_idx, len(LONG_POSITION_SIZE_THRESHOLD) - 1)
if all((min(future.bid - perpetual.bid, future.ask - perpetual.ask) >= LONG_GAP[pos_idx],
perpetual_size < LONG_POSITION_SIZE_THRESHOLD[pos_idx])):
if not f_limit_order.if_placed:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'sell',
'params': {'instrument_name': FAR_FUTURE,
'amount': min(SIZE_PER_TRADE, perpetual.asksize,
abs(future_size) if abs(future_size) > 0 else SIZE_PER_TRADE),
'type': 'limit',
'price': future.ask - MINIMUM_TICK_SIZE,
'post_only': True, }
}))
f_limit_order.label = json.loads(await self.deribittdreq.recv_string())['internalid']
f_limit_order.if_placed = True
else:
if f_limit_order.order:
if not f_limit_order.order['order_state'] in ('filled', 'cancelled'):
if future.ask < f_limit_order.order['price'] and not f_limit_order.if_changing:
self.logger.info('**** change price to: {}, future sell limit order ****'.format(future.ask - MINIMUM_TICK_SIZE))
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'edit',
'params': {'order_id': f_limit_order.order['order_id'],
'amount': min(SIZE_PER_TRADE, perpetual.asksize,
abs(future_size) if abs(future_size) > 0 else SIZE_PER_TRADE),
'price': future.ask - MINIMUM_TICK_SIZE,
'post_only': True, }
}))
await self.deribittdreq.recv_string()
f_limit_order.if_changing = True
else:
f_limit_order.reset()
if not p_limit_order.if_placed:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'buy',
'params': {'instrument_name': NEAR_FUTURE,
'amount': min(SIZE_PER_TRADE, future.bidsize,
abs(perpetual_size) if abs(perpetual_size) > 0 else SIZE_PER_TRADE),
'type': 'limit',
'price': perpetual.bid + MINIMUM_TICK_SIZE,
'post_only': True, }
}))
p_limit_order.label = json.loads(await self.deribittdreq.recv_string())['internalid']
p_limit_order.if_placed = True
else:
if p_limit_order.order:
if not p_limit_order.order['order_state'] in ('filled', 'cancelled'):
if perpetual.bid > p_limit_order.order['price'] and not p_limit_order.if_changing:
self.logger.info('**** change price to: {}, perpetual buy limit order ****'.format(perpetual.bid + MINIMUM_TICK_SIZE))
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'edit',
'params': {'order_id': p_limit_order.order['order_id'],
'amount': min(SIZE_PER_TRADE, future.bidsize,
abs(perpetual_size) if abs(perpetual_size) > 0 else SIZE_PER_TRADE),
'price': perpetual.bid + MINIMUM_TICK_SIZE,
'post_only': True, }
}))
await self.deribittdreq.recv_string()
p_limit_order.if_changing = True
else:
p_limit_order.reset()
# perpetual > future situation
elif max(future.bid - perpetual.bid, future.ask - perpetual.ask) <= SHORT_GAP[0]:
pos_idx = sum([1 if future_size >= i else 0 for i in SHORT_POSITION_SIZE_THRESHOLD])
pos_idx = min(pos_idx, len(SHORT_POSITION_SIZE_THRESHOLD) - 1)
if all((max(future.bid - perpetual.bid, future.ask - perpetual.ask) <= SHORT_GAP[pos_idx],
future_size < SHORT_POSITION_SIZE_THRESHOLD[pos_idx])):
if not f_limit_order.if_placed:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'buy',
'params': {'instrument_name': FAR_FUTURE,
'amount': min(SIZE_PER_TRADE, perpetual.bidsize,
abs(future_size) if abs(future_size) > 0 else SIZE_PER_TRADE),
'type': 'limit',
'price': future.bid + MINIMUM_TICK_SIZE,
'post_only': True, }
}))
f_limit_order.label = json.loads(await self.deribittdreq.recv_string())['internalid']
f_limit_order.if_placed = True
else:
if f_limit_order.order:
if not f_limit_order.order['order_state'] in ('filled', 'cancelled'):
if future.bid > f_limit_order.order['price'] and not f_limit_order.if_changing:
self.logger.info('**** change price to: {}, future buy limit order ****'.format(future.bid + MINIMUM_TICK_SIZE))
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'edit',
'params': {'order_id': f_limit_order.order['order_id'],
'amount': min(SIZE_PER_TRADE, perpetual.bidsize,
abs(future_size) if abs(future_size) > 0 else SIZE_PER_TRADE),
'price': future.bid + MINIMUM_TICK_SIZE,
'post_only': True, }
}))
await self.deribittdreq.recv_string()
f_limit_order.if_changing = True
else:
f_limit_order.reset()
if not p_limit_order.if_placed:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'sell',
'params': {'instrument_name': NEAR_FUTURE,
'amount': min(SIZE_PER_TRADE, future.asksize,
abs(perpetual_size) if abs(perpetual_size) > 0 else SIZE_PER_TRADE),
'type': 'limit',
'price': perpetual.ask - MINIMUM_TICK_SIZE,
'post_only': True, }
}))
p_limit_order.label = json.loads(await self.deribittdreq.recv_string())['internalid']
p_limit_order.if_placed = True
else:
if p_limit_order.order:
if not p_limit_order.order['order_state'] in ('filled', 'cancelled'):
if perpetual.ask < p_limit_order.order['price'] and not p_limit_order.if_changing:
self.logger.info('**** change price to: {}, perpetual sell limit order ****'.format(perpetual.ask - MINIMUM_TICK_SIZE))
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'edit',
'params': {'order_id': p_limit_order.order['order_id'],
'amount': min(SIZE_PER_TRADE, future.asksize,
abs(perpetual_size) if abs(perpetual_size) > 0 else SIZE_PER_TRADE),
'price': perpetual.ask - MINIMUM_TICK_SIZE,
'post_only': True, }
}))
await self.deribittdreq.recv_string()
p_limit_order.if_changing = True
else:
p_limit_order.reset()
else:
if f_limit_order.if_placed or p_limit_order.if_placed:
if not f_limit_order.if_cancelling or not p_limit_order.if_cancelling:
                        self.logger.info('**** gap disappeared, cancel_all ****')
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'cancel_all', 'params': {}
}))
await self.deribittdreq.recv_string()
f_limit_order.if_cancelling = True
p_limit_order.if_cancelling = True
except AttributeError:
pass
except Exception as e:
self.logger.exception(e)
async def process_msg(self):
try:
global future, future_size, perpetual, perpetual_size, margin, f_limit_order, p_limit_order
while self.state == ServiceState.started:
msg = await self.msg.get()
if msg['type'] not in ('quote', 'user.portfolio', 'buy', 'sell', 'edit'):
self.logger.info('---- td res: {}, {}'.format(msg['type'], msg['data']))
if msg.get('error', ''):
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'cancel_all', 'params': {}
}))
await self.deribittdreq.recv_string()
continue
if msg['type'] == 'quote':
d = msg['data']
if d['instrument_name'] == NEAR_FUTURE:
perpetual = Quote(d['best_bid_price'], d['best_bid_amount'], d['best_ask_price'], d['best_ask_amount'], d['index_price'])
elif d['instrument_name'] == FAR_FUTURE:
future = Quote(d['best_bid_price'], d['best_bid_amount'], d['best_ask_price'], d['best_ask_amount'], d['index_price'])
await self.find_quotes_gap()
elif msg['type'] == 'user.changes.future':
changes = msg['data']
if changes['instrument_name'] == FAR_FUTURE:
if changes['trades']:
filled = sum([tx['amount'] if tx['order_type'] == 'limit' else 0 for tx in changes['trades']])
if filled > 0:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID,
'method': 'buy' if changes['trades'][0]['direction'] == 'sell' else 'sell',
'params': {'instrument_name': NEAR_FUTURE, 'amount': filled, 'type': 'market', }
}))
await self.deribittdreq.recv_string()
if changes['positions']:
future_size = changes['positions'][0]['size']
if changes['orders']:
for order in changes['orders']:
if order['order_type'] == 'limit' and f_limit_order.if_placed == True and f_limit_order.label == order['label']:
f_limit_order.order = order
f_limit_order.if_changing = False
break
elif changes['instrument_name'] == NEAR_FUTURE:
if changes['trades']:
filled = sum([tx['amount'] if tx['order_type'] == 'limit' else 0 for tx in changes['trades']])
if filled > 0:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID,
'method': 'buy' if changes['trades'][0]['direction'] == 'sell' else 'sell',
'params': {'instrument_name': FAR_FUTURE, 'amount': filled, 'type': 'market', }
}))
await self.deribittdreq.recv_string()
if changes['positions']:
perpetual_size = changes['positions'][0]['size']
if changes['orders']:
for order in changes['orders']:
if order['order_type'] == 'limit' and p_limit_order.if_placed == True and p_limit_order.label == order['label']:
p_limit_order.order = order
p_limit_order.if_changing = False
break
elif msg['type'] == 'user.portfolio':
portfolio = msg['data']
margin = [portfolio['equity'], portfolio['initial_margin'], portfolio['maintenance_margin']]
elif msg['type'] == 'cancel_all':
f_limit_order.reset()
p_limit_order.reset()
elif msg['type'] == 'positions':
for d in msg['data']:
if d['instrument_name'] == FAR_FUTURE:
future_size = d['size']
elif d['instrument_name'] == NEAR_FUTURE:
perpetual_size = d['size']
elif msg['type'] == 'account_summary':
d = msg['data']
margin = [d['equity'], d['initial_margin'], d['maintenance_margin']]
elif msg['type'] in ('buy', 'sell', 'edit'):
pass
self.msg.task_done()
except Exception as e:
self.logger.exception(e)
await self.process_msg()
async def sub_msg_md(self):
try:
await asyncio.sleep(1)
while self.state == ServiceState.started:
task = asyncio.ensure_future(self.deribitmd.recv_string())
done, pending = await asyncio.wait({task}, timeout=5)
for t in pending:
t.cancel()
msg = json.loads(done.pop().result()) if done else {}
if msg:
if msg['type'] == 'quote':
await self.msg.put(msg)
else:
                    self.logger.info("can't receive msg from future md")
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'cancel_all', 'params': {}
}))
await self.deribittdreq.recv_string()
except Exception as e:
self.logger.exception(e)
await self.sub_msg_md()
async def sub_msg_td(self):
try:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'get_positions',
'params': {'currency': SYMBOL, 'kind': 'future'}
}))
await self.deribittdreq.recv_string()
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'get_account_summary',
'params': {'currency': SYMBOL, }
}))
await self.deribittdreq.recv_string()
while self.state == ServiceState.started:
msg = json.loads(await self.deribittd.recv_string())
if msg['accountid'] == DERIBIT_ACCOUNT_ID:
await self.msg.put(msg)
except Exception as e:
self.logger.exception(e)
await self.sub_msg_td()
async def balance_positions(self):
try:
global f_limit_order, p_limit_order
while self.state == ServiceState.started:
await asyncio.sleep(60)
if not (f_limit_order.if_placed or p_limit_order.if_placed):
unbalanced = future_size + perpetual_size
if unbalanced != 0:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID,
'method': 'sell' if unbalanced > 0 else 'buy',
'params': {'instrument_name': FAR_FUTURE if abs(future_size) > abs(perpetual_size) else NEAR_FUTURE,
'amount': abs(unbalanced),
'type': 'market', }
}))
await self.deribittdreq.recv_string()
except Exception as e:
self.logger.exception(e)
await self.balance_positions()
async def run(self):
if self.state == ServiceState.started:
self.logger.error('tried to run service, but state is %s' % self.state)
else:
self.state = ServiceState.started
asyncio.ensure_future(self.process_msg())
asyncio.ensure_future(self.sub_msg_md())
asyncio.ensure_future(self.sub_msg_td())
asyncio.ensure_future(self.balance_positions())
if __name__ == '__main__':
service = FutureArbitrage('cross-remote-future')
start_service(service, {})
| # -*- coding: utf-8 -*-
import zmq.asyncio
import asyncio
import json
from crypto_trading.service.base import ServiceState, ServiceBase, start_service
# from crypto_trading.config import *
DERIBIT_ACCOUNT_ID = 'maxlu'
SYMBOL = 'BTC'
MINIMUM_TICK_SIZE = 0.5
NEAR_FUTURE = 'BTC-25SEP20'
FAR_FUTURE = 'BTC-25DEC20'
LONG_GAP = [180, 225, 270, 315, 360]
LONG_POSITION_SIZE_THRESHOLD = [300000 * i for i in [1, 2, 3, 4, 5]]
SHORT_GAP = [-135, -180, -225, -270, -315]
SHORT_POSITION_SIZE_THRESHOLD = [250000 * i for i in [1, 2, 3, 4, 5]]
SIZE_PER_TRADE = 1000
# margin: [equity, initial_margin, maintenance_margin]
margin = [0, 0, 0]
future = None
future_size = 0
perpetual = None
perpetual_size = 0
class OrderState():
def __init__(self, if_placed=False, if_changing=False, if_cancelling=False, label='', order={}):
self.if_placed = if_placed
self.if_changing = if_changing
self.if_cancelling = if_cancelling
self.label = label
self.order = order
def reset(self):
self.if_placed = False
self.if_changing = False
self.if_cancelling = False
self.label = ''
self.order = {}
f_limit_order = OrderState()
p_limit_order = OrderState()
class Quote():
def __init__(self, bid, bidsize, ask, asksize, index_price):
self.bid = bid
self.bidsize = bidsize
self.ask = ask
self.asksize = asksize
self.index_price = index_price
class FutureArbitrage(ServiceBase):
def __init__(self, logger_name):
ServiceBase.__init__(self, logger_name)
# subscribe market data
self.deribitmd = self.ctx.socket(zmq.SUB)
self.deribitmd.connect('tcp://localhost:9050')
self.deribitmd.setsockopt_string(zmq.SUBSCRIBE, '')
# request client for transaction
self.deribittdreq = self.ctx.socket(zmq.REQ)
self.deribittdreq.connect('tcp://localhost:9020')
# subscribe transaction data
self.deribittd = self.ctx.socket(zmq.SUB)
self.deribittd.connect('tcp://localhost:9010')
self.deribittd.setsockopt_string(zmq.SUBSCRIBE, '')
# async queue to sequentially combine market data and tx data
self.msg = asyncio.Queue()
# find gap between perpetual and current season future, and make transaction when conditions are satisfied
async def find_quotes_gap(self):
try:
global future, future_size, f_limit_order, perpetual, perpetual_size, p_limit_order, margin
if min(future.bid - perpetual.bid, future.ask - perpetual.ask) >= LONG_GAP[0]:
pos_idx = sum([1 if perpetual_size >= i else 0 for i in LONG_POSITION_SIZE_THRESHOLD])
pos_idx = min(pos_idx, len(LONG_POSITION_SIZE_THRESHOLD) - 1)
if all((min(future.bid - perpetual.bid, future.ask - perpetual.ask) >= LONG_GAP[pos_idx],
perpetual_size < LONG_POSITION_SIZE_THRESHOLD[pos_idx])):
if not f_limit_order.if_placed:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'sell',
'params': {'instrument_name': FAR_FUTURE,
'amount': min(SIZE_PER_TRADE, perpetual.asksize,
abs(future_size) if abs(future_size) > 0 else SIZE_PER_TRADE),
'type': 'limit',
'price': future.ask - MINIMUM_TICK_SIZE,
'post_only': True, }
}))
f_limit_order.label = json.loads(await self.deribittdreq.recv_string())['internalid']
f_limit_order.if_placed = True
else:
if f_limit_order.order:
if not f_limit_order.order['order_state'] in ('filled', 'cancelled'):
if future.ask < f_limit_order.order['price'] and not f_limit_order.if_changing:
self.logger.info('**** change price to: {}, future sell limit order ****'.format(future.ask - MINIMUM_TICK_SIZE))
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'edit',
'params': {'order_id': f_limit_order.order['order_id'],
'amount': min(SIZE_PER_TRADE, perpetual.asksize,
abs(future_size) if abs(future_size) > 0 else SIZE_PER_TRADE),
'price': future.ask - MINIMUM_TICK_SIZE,
'post_only': True, }
}))
await self.deribittdreq.recv_string()
f_limit_order.if_changing = True
else:
f_limit_order.reset()
if not p_limit_order.if_placed:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'buy',
'params': {'instrument_name': NEAR_FUTURE,
'amount': min(SIZE_PER_TRADE, future.bidsize,
abs(perpetual_size) if abs(perpetual_size) > 0 else SIZE_PER_TRADE),
'type': 'limit',
'price': perpetual.bid + MINIMUM_TICK_SIZE,
'post_only': True, }
}))
p_limit_order.label = json.loads(await self.deribittdreq.recv_string())['internalid']
p_limit_order.if_placed = True
else:
if p_limit_order.order:
if not p_limit_order.order['order_state'] in ('filled', 'cancelled'):
if perpetual.bid > p_limit_order.order['price'] and not p_limit_order.if_changing:
self.logger.info('**** change price to: {}, perpetual buy limit order ****'.format(perpetual.bid + MINIMUM_TICK_SIZE))
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'edit',
'params': {'order_id': p_limit_order.order['order_id'],
'amount': min(SIZE_PER_TRADE, future.bidsize,
abs(perpetual_size) if abs(perpetual_size) > 0 else SIZE_PER_TRADE),
'price': perpetual.bid + MINIMUM_TICK_SIZE,
'post_only': True, }
}))
await self.deribittdreq.recv_string()
p_limit_order.if_changing = True
else:
p_limit_order.reset()
# perpetual > future situation
elif max(future.bid - perpetual.bid, future.ask - perpetual.ask) <= SHORT_GAP[0]:
pos_idx = sum([1 if future_size >= i else 0 for i in SHORT_POSITION_SIZE_THRESHOLD])
pos_idx = min(pos_idx, len(SHORT_POSITION_SIZE_THRESHOLD) - 1)
if all((max(future.bid - perpetual.bid, future.ask - perpetual.ask) <= SHORT_GAP[pos_idx],
future_size < SHORT_POSITION_SIZE_THRESHOLD[pos_idx])):
if not f_limit_order.if_placed:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'buy',
'params': {'instrument_name': FAR_FUTURE,
'amount': min(SIZE_PER_TRADE, perpetual.bidsize,
abs(future_size) if abs(future_size) > 0 else SIZE_PER_TRADE),
'type': 'limit',
'price': future.bid + MINIMUM_TICK_SIZE,
'post_only': True, }
}))
f_limit_order.label = json.loads(await self.deribittdreq.recv_string())['internalid']
f_limit_order.if_placed = True
else:
if f_limit_order.order:
if not f_limit_order.order['order_state'] in ('filled', 'cancelled'):
if future.bid > f_limit_order.order['price'] and not f_limit_order.if_changing:
self.logger.info('**** change price to: {}, future buy limit order ****'.format(future.bid + MINIMUM_TICK_SIZE))
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'edit',
'params': {'order_id': f_limit_order.order['order_id'],
'amount': min(SIZE_PER_TRADE, perpetual.bidsize,
abs(future_size) if abs(future_size) > 0 else SIZE_PER_TRADE),
'price': future.bid + MINIMUM_TICK_SIZE,
'post_only': True, }
}))
await self.deribittdreq.recv_string()
f_limit_order.if_changing = True
else:
f_limit_order.reset()
if not p_limit_order.if_placed:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'sell',
'params': {'instrument_name': NEAR_FUTURE,
'amount': min(SIZE_PER_TRADE, future.asksize,
abs(perpetual_size) if abs(perpetual_size) > 0 else SIZE_PER_TRADE),
'type': 'limit',
'price': perpetual.ask - MINIMUM_TICK_SIZE,
'post_only': True, }
}))
p_limit_order.label = json.loads(await self.deribittdreq.recv_string())['internalid']
p_limit_order.if_placed = True
else:
if p_limit_order.order:
if not p_limit_order.order['order_state'] in ('filled', 'cancelled'):
if perpetual.ask < p_limit_order.order['price'] and not p_limit_order.if_changing:
self.logger.info('**** change price to: {}, perpetual sell limit order ****'.format(perpetual.ask - MINIMUM_TICK_SIZE))
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'edit',
'params': {'order_id': p_limit_order.order['order_id'],
'amount': min(SIZE_PER_TRADE, future.asksize,
abs(perpetual_size) if abs(perpetual_size) > 0 else SIZE_PER_TRADE),
'price': perpetual.ask - MINIMUM_TICK_SIZE,
'post_only': True, }
}))
await self.deribittdreq.recv_string()
p_limit_order.if_changing = True
else:
p_limit_order.reset()
else:
if f_limit_order.if_placed or p_limit_order.if_placed:
if not f_limit_order.if_cancelling or not p_limit_order.if_cancelling:
                        self.logger.info('**** gap disappeared, cancel_all ****')
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'cancel_all', 'params': {}
}))
await self.deribittdreq.recv_string()
f_limit_order.if_cancelling = True
p_limit_order.if_cancelling = True
except AttributeError:
pass
except Exception as e:
self.logger.exception(e)
async def process_msg(self):
try:
global future, future_size, perpetual, perpetual_size, margin, f_limit_order, p_limit_order
while self.state == ServiceState.started:
msg = await self.msg.get()
if msg['type'] not in ('quote', 'user.portfolio', 'buy', 'sell', 'edit'):
self.logger.info('---- td res: {}, {}'.format(msg['type'], msg['data']))
if msg.get('error', ''):
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'cancel_all', 'params': {}
}))
await self.deribittdreq.recv_string()
continue
if msg['type'] == 'quote':
d = msg['data']
if d['instrument_name'] == NEAR_FUTURE:
perpetual = Quote(d['best_bid_price'], d['best_bid_amount'], d['best_ask_price'], d['best_ask_amount'], d['index_price'])
elif d['instrument_name'] == FAR_FUTURE:
future = Quote(d['best_bid_price'], d['best_bid_amount'], d['best_ask_price'], d['best_ask_amount'], d['index_price'])
await self.find_quotes_gap()
elif msg['type'] == 'user.changes.future':
changes = msg['data']
if changes['instrument_name'] == FAR_FUTURE:
if changes['trades']:
filled = sum([tx['amount'] if tx['order_type'] == 'limit' else 0 for tx in changes['trades']])
if filled > 0:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID,
'method': 'buy' if changes['trades'][0]['direction'] == 'sell' else 'sell',
'params': {'instrument_name': NEAR_FUTURE, 'amount': filled, 'type': 'market', }
}))
await self.deribittdreq.recv_string()
if changes['positions']:
future_size = changes['positions'][0]['size']
if changes['orders']:
for order in changes['orders']:
if order['order_type'] == 'limit' and f_limit_order.if_placed == True and f_limit_order.label == order['label']:
f_limit_order.order = order
f_limit_order.if_changing = False
break
elif changes['instrument_name'] == NEAR_FUTURE:
if changes['trades']:
filled = sum([tx['amount'] if tx['order_type'] == 'limit' else 0 for tx in changes['trades']])
if filled > 0:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID,
'method': 'buy' if changes['trades'][0]['direction'] == 'sell' else 'sell',
'params': {'instrument_name': FAR_FUTURE, 'amount': filled, 'type': 'market', }
}))
await self.deribittdreq.recv_string()
if changes['positions']:
perpetual_size = changes['positions'][0]['size']
if changes['orders']:
for order in changes['orders']:
if order['order_type'] == 'limit' and p_limit_order.if_placed == True and p_limit_order.label == order['label']:
p_limit_order.order = order
p_limit_order.if_changing = False
break
elif msg['type'] == 'user.portfolio':
portfolio = msg['data']
margin = [portfolio['equity'], portfolio['initial_margin'], portfolio['maintenance_margin']]
elif msg['type'] == 'cancel_all':
f_limit_order.reset()
p_limit_order.reset()
elif msg['type'] == 'positions':
for d in msg['data']:
if d['instrument_name'] == FAR_FUTURE:
future_size = d['size']
elif d['instrument_name'] == NEAR_FUTURE:
perpetual_size = d['size']
elif msg['type'] == 'account_summary':
d = msg['data']
margin = [d['equity'], d['initial_margin'], d['maintenance_margin']]
elif msg['type'] in ('buy', 'sell', 'edit'):
pass
self.msg.task_done()
except Exception as e:
self.logger.exception(e)
await self.process_msg()
async def sub_msg_md(self):
try:
await asyncio.sleep(1)
while self.state == ServiceState.started:
task = asyncio.ensure_future(self.deribitmd.recv_string())
done, pending = await asyncio.wait({task}, timeout=5)
for t in pending:
t.cancel()
msg = json.loads(done.pop().result()) if done else {}
if msg:
if msg['type'] == 'quote':
await self.msg.put(msg)
else:
                    self.logger.info("can't receive msg from future md")
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'cancel_all', 'params': {}
}))
await self.deribittdreq.recv_string()
except Exception as e:
self.logger.exception(e)
await self.sub_msg_md()
async def sub_msg_td(self):
try:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'get_positions',
'params': {'currency': SYMBOL, 'kind': 'future'}
}))
await self.deribittdreq.recv_string()
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID, 'method': 'get_account_summary',
'params': {'currency': SYMBOL, }
}))
await self.deribittdreq.recv_string()
while self.state == ServiceState.started:
msg = json.loads(await self.deribittd.recv_string())
if msg['accountid'] == DERIBIT_ACCOUNT_ID:
await self.msg.put(msg)
except Exception as e:
self.logger.exception(e)
await self.sub_msg_td()
async def balance_positions(self):
try:
global f_limit_order, p_limit_order
while self.state == ServiceState.started:
await asyncio.sleep(60)
if not (f_limit_order.if_placed or p_limit_order.if_placed):
unbalanced = future_size + perpetual_size
if unbalanced != 0:
await self.deribittdreq.send_string(json.dumps({
'accountid': DERIBIT_ACCOUNT_ID,
'method': 'sell' if unbalanced > 0 else 'buy',
'params': {'instrument_name': FAR_FUTURE if abs(future_size) > abs(perpetual_size) else NEAR_FUTURE,
'amount': abs(unbalanced),
'type': 'market', }
}))
await self.deribittdreq.recv_string()
except Exception as e:
self.logger.exception(e)
await self.balance_positions()
async def run(self):
if self.state == ServiceState.started:
self.logger.error('tried to run service, but state is %s' % self.state)
else:
self.state = ServiceState.started
asyncio.ensure_future(self.process_msg())
asyncio.ensure_future(self.sub_msg_md())
asyncio.ensure_future(self.sub_msg_td())
asyncio.ensure_future(self.balance_positions())
if __name__ == '__main__':
service = FutureArbitrage('cross-remote-future')
start_service(service, {})
| en | 0.816902 | # -*- coding: utf-8 -*- # from crypto_trading.config import * # margin: [equity, initial_margin, maintenance_margin] # subscribe market data # request client for transaction # subscribe transaction data # async queue to sequentially combine market data and tx data # find gap between perpetual and current season future, and make transaction when conditions are satisfied # perpetual > future situation | 2.196787 | 2 |
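The strategy above assumes three ZeroMQ endpoints on localhost: a market-data PUB feed on 9050, a trade-data PUB feed on 9010, and a REQ/REP trading gateway on 9020. The sketch below is a dummy market-data publisher shaped the way find_quotes_gap expects (type == 'quote' with Deribit ticker fields); the prices are fabricated placeholders, not part of the original repository.

import json
import time
import zmq

def publish_dummy_quotes():
    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.PUB)
    pub.bind('tcp://*:9050')  # the strategy subscribes to tcp://localhost:9050
    time.sleep(1)             # give SUB sockets time to connect
    while True:
        for name in ('BTC-25SEP20', 'BTC-25DEC20'):
            pub.send_string(json.dumps({
                'type': 'quote',
                'data': {
                    'instrument_name': name,
                    'best_bid_price': 10000.0,
                    'best_bid_amount': 5000,
                    'best_ask_price': 10000.5,
                    'best_ask_amount': 5000,
                    'index_price': 10000.2,
                },
            }))
        time.sleep(0.5)

if __name__ == '__main__':
    publish_dummy_quotes()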
tool/comparispawn.bzl | grencez/lace | 1 | 6632234 |
load("@fildesh//tool:spawn.bzl", "spawn_test")
def fildesh_expect_test(name, srcs, expect,
data=[], args=[], size="small",
**kwargs):
spawn_test(
name = name,
data = [
expect,
"@fildesh//tool:comparispawn",
"@fildesh//:fildesh",
] + srcs + data,
args = [
"$(location @fildesh//tool:comparispawn)",
"$(location " + expect + ")",
"$(location @fildesh//:fildesh)",
"-x",
"$(location " + srcs[0] + ")",
] + args,
size = size,
**kwargs,
)
| load("@fildesh//tool:spawn.bzl", "spawn_test")
def fildesh_expect_test(name, srcs, expect,
data=[], args=[], size="small",
**kwargs):
spawn_test(
name = name,
data = [
expect,
"@fildesh//tool:comparispawn",
"@fildesh//:fildesh",
] + srcs + data,
args = [
"$(location @fildesh//tool:comparispawn)",
"$(location " + expect + ")",
"$(location @fildesh//:fildesh)",
"-x",
"$(location " + srcs[0] + ")",
] + args,
size = size,
**kwargs,
) | none | 1 | 1.863409 | 2 |
|
tests/utils/thread.py | gururajo/Self-Driving-Car | 0 | 6632235 | import threading
import time
numli=[1,2,3,4,5]
def add():
global num,s1,s2
for numbur in numli:
print(threading.currentThread(),"waiting for s1")
s1.acquire()
num=numbur
print(threading.currentThread(),"got s1")
#num= int(input())
num+=3
print(threading.currentThread()," num= ",num)
s2.release()
# time.sleep(10)
def mul(bt):
print("inside mul")
global num,s1,s2,s3,num2
while True:
# bt.join()
print(threading.currentThread(),"waiting for s2")
s2.acquire()
print(threading.currentThread(),"got s2")
print(threading.currentThread(),"waiting for s3")
s3.acquire()
print(threading.currentThread(),"got s3")
temp=num
s1.release()
print(threading.currentThread(),"read num as",num)
temp*=10
time.sleep(1.8)
print(threading.currentThread()," num= ",temp)
num2=temp
s4.release()
# time.sleep(10)
def sub(bt):
print("inside sub")
global num,s3,s4,num2
# bt.join()
while True:
print(threading.currentThread(),"waiting for s4")
s4.acquire()
print(threading.currentThread(),"got s4")
temp=num2
s3.release()
temp/=10
print(threading.currentThread()," num= ",temp)
# time.sleep(10)
start_t=time.time()
num2=num=0
s1=threading.Semaphore(value=1)
s2=threading.Semaphore(value=0)
s3=threading.Semaphore(value=1)
s4=threading.Semaphore(value=0)
t1=threading.Thread(target=add)
t2=threading.Thread(target=mul,args=[t1])
t3=threading.Thread(target=sub,args=[t2])
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
# threading.excepthook(args=[])
end_t=time.time()
print(end_t-start_t)
| import threading
import time
numli=[1,2,3,4,5]
def add():
global num,s1,s2
for numbur in numli:
print(threading.currentThread(),"waiting for s1")
s1.acquire()
num=numbur
print(threading.currentThread(),"got s1")
#num= int(input())
num+=3
print(threading.currentThread()," num= ",num)
s2.release()
# time.sleep(10)
def mul(bt):
print("inside mul")
global num,s1,s2,s3,num2
while True:
# bt.join()
print(threading.currentThread(),"waiting for s2")
s2.acquire()
print(threading.currentThread(),"got s2")
print(threading.currentThread(),"waiting for s3")
s3.acquire()
print(threading.currentThread(),"got s3")
temp=num
s1.release()
print(threading.currentThread(),"read num as",num)
temp*=10
time.sleep(1.8)
print(threading.currentThread()," num= ",temp)
num2=temp
s4.release()
# time.sleep(10)
def sub(bt):
print("inside sub")
global num,s3,s4,num2
# bt.join()
while True:
print(threading.currentThread(),"waiting for s4")
s4.acquire()
print(threading.currentThread(),"got s4")
temp=num2
s3.release()
temp/=10
print(threading.currentThread()," num= ",temp)
# time.sleep(10)
start_t=time.time()
num2=num=0
s1=threading.Semaphore(value=1)
s2=threading.Semaphore(value=0)
s3=threading.Semaphore(value=1)
s4=threading.Semaphore(value=0)
t1=threading.Thread(target=add)
t2=threading.Thread(target=mul,args=[t1])
t3=threading.Thread(target=sub,args=[t2])
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
# threading.excepthook(args=[])
end_t=time.time()
print(end_t-start_t)
| en | 0.265228 | #num= int(input()) # time.sleep(10) # bt.join() # time.sleep(10) # bt.join() # time.sleep(10) # threading.excepthook(args=[]) | 3.997319 | 4 |
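tests/utils/thread.py above demonstrates a three-stage semaphore pipeline, but its mul and sub stages loop with while True, so the final joins never return and the elapsed-time print is never reached. The sketch below is a bounded variant of the same handshake pattern that terminates cleanly; the names and the stage arithmetic are illustrative, not taken from the original repository.

import threading

items = [1, 2, 3, 4, 5]
shared = {'a': 0, 'b': 0}
s1, s2 = threading.Semaphore(1), threading.Semaphore(0)
s3, s4 = threading.Semaphore(1), threading.Semaphore(0)

def stage_add():
    for n in items:
        s1.acquire()          # wait until 'a' has been consumed
        shared['a'] = n + 3
        s2.release()          # signal that 'a' is ready

def stage_mul():
    for _ in items:
        s2.acquire()          # wait for 'a'
        s3.acquire()          # wait until 'b' has been consumed
        value = shared['a']
        s1.release()          # let the producer write the next 'a'
        shared['b'] = value * 10
        s4.release()          # signal that 'b' is ready

def stage_div():
    for _ in items:
        s4.acquire()          # wait for 'b'
        value = shared['b']
        s3.release()          # let stage_mul write the next 'b'
        print(value / 10)

threads = [threading.Thread(target=f) for f in (stage_add, stage_mul, stage_div)]
for t in threads:
    t.start()
for t in threads:
    t.join()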
199_NSFW/demo/demo_nsfw_onnx.py | IgiArdiyanto/PINTO_model_zoo | 2 | 6632236 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import argparse
import cv2 as cv
import numpy as np
import onnxruntime
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--image",
type=str,
default='image/sample.jpg',
)
parser.add_argument(
"--model",
type=str,
default='saved_model_nsfw/model_float32.onnx',
)
parser.add_argument(
"--input_size",
type=str,
default='224,224',
)
args = parser.parse_args()
return args
def run_inference(
onnx_session,
input_size,
image,
):
    # Pre process: Resize, BGR->RGB, VGG mean subtraction, transpose, float32 cast
input_image = cv.resize(image, dsize=(input_size[1], input_size[0]))
input_image = cv.cvtColor(input_image, cv.COLOR_BGR2RGB)
vgg_mean = [104, 117, 123]
input_image = input_image - vgg_mean
input_image = input_image.transpose(2, 0, 1)
input_image = np.expand_dims(input_image, axis=0)
input_image = input_image.astype('float32')
# Inference
input_name = onnx_session.get_inputs()[0].name
output_name = onnx_session.get_outputs()[0].name
onnx_result = onnx_session.run(
[output_name],
{input_name: input_image},
)
# Post process
onnx_result = np.squeeze(onnx_result).astype(np.float32)
return onnx_result
def main():
args = get_args()
model_path = args.model
input_size = [int(i) for i in args.input_size.split(',')]
# Load model
onnx_session = onnxruntime.InferenceSession(model_path)
# read image
image_path = args.image
image = cv.imread(image_path)
# Inference execution
start_time = time.time()
result = run_inference(
onnx_session,
input_size,
image,
)
elapsed_time = time.time() - start_time
print('Elapsed Time :', '{:.1f}'.format(elapsed_time * 1000) + "ms")
print('sfw :', '{:.3f}'.format(result[0]))
print('nsfw:', '{:.3f}'.format(result[1]))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import argparse
import cv2 as cv
import numpy as np
import onnxruntime
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--image",
type=str,
default='image/sample.jpg',
)
parser.add_argument(
"--model",
type=str,
default='saved_model_nsfw/model_float32.onnx',
)
parser.add_argument(
"--input_size",
type=str,
default='224,224',
)
args = parser.parse_args()
return args
def run_inference(
onnx_session,
input_size,
image,
):
    # Pre process: Resize, BGR->RGB, VGG mean subtraction, transpose, float32 cast
input_image = cv.resize(image, dsize=(input_size[1], input_size[0]))
input_image = cv.cvtColor(input_image, cv.COLOR_BGR2RGB)
vgg_mean = [104, 117, 123]
input_image = input_image - vgg_mean
input_image = input_image.transpose(2, 0, 1)
input_image = np.expand_dims(input_image, axis=0)
input_image = input_image.astype('float32')
# Inference
input_name = onnx_session.get_inputs()[0].name
output_name = onnx_session.get_outputs()[0].name
onnx_result = onnx_session.run(
[output_name],
{input_name: input_image},
)
# Post process
onnx_result = np.squeeze(onnx_result).astype(np.float32)
return onnx_result
def main():
args = get_args()
model_path = args.model
input_size = [int(i) for i in args.input_size.split(',')]
# Load model
onnx_session = onnxruntime.InferenceSession(model_path)
# read image
image_path = args.image
image = cv.imread(image_path)
# Inference execution
start_time = time.time()
result = run_inference(
onnx_session,
input_size,
image,
)
elapsed_time = time.time() - start_time
print('Elapsed Time :', '{:.1f}'.format(elapsed_time * 1000) + "ms")
print('sfw :', '{:.3f}'.format(result[0]))
print('nsfw:', '{:.3f}'.format(result[1]))
if __name__ == '__main__':
main()
| en | 0.671448 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Pre process:Resize, RGB->BGR, Transpose, float32 cast # Inference # Post process # Load model # read image # Inference execution | 2.456852 | 2 |
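A short usage sketch for the demo above: load the ONNX session once and score several images by calling run_inference directly. The import assumes the demo file is importable as a module named demo_nsfw_onnx, and the image paths are placeholders.

import cv2 as cv
import onnxruntime

from demo_nsfw_onnx import run_inference  # assumes the demo script is on the Python path

session = onnxruntime.InferenceSession('saved_model_nsfw/model_float32.onnx')
for path in ('image/sample.jpg', 'image/other.jpg'):  # placeholder image paths
    image = cv.imread(path)
    sfw_score, nsfw_score = run_inference(session, [224, 224], image)
    print('{}: sfw={:.3f} nsfw={:.3f}'.format(path, sfw_score, nsfw_score))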
apps/zop/zop.py | herimonster/zoid | 0 | 6632237 |
import zapp
import zframe
import zborderframe
import zlabel
import zkeylabel
import zlineedit
import ztextedit
import ztable
import zutils
import os
import re
import pwd
class zop(zapp.zapp):
PROC_DIR = "/proc"
def get_user(self, id):
try:
return str(pwd.getpwuid( id ).pw_name)
except:
return str(id)
def rebuild(self):
pattern = re.compile("^[0-9]+$")
listing = os.listdir(self.PROC_DIR)
res = []
for infile in listing:
if not pattern.match(infile):
continue
if not os.path.isdir(self.PROC_DIR + "/" + infile):
continue
#print(infile)
data = ""
with open(self.PROC_DIR+"/"+infile+"/status", 'r') as f:
data = f.read()
with open(self.PROC_DIR+"/"+infile+"/cmdline", 'r') as f:
data += "CmdLine: " + f.read()
#d = [item.split(":") for item in data.split("\n")]
d = []
for pair in [item.split(":", 1) for item in data.split("\n")]:
if len(pair) < 2:
continue
pair[0] = pair[0].strip()
pair[1] = pair[1].strip()
d.append(pair)
data = dict(d)
res.append(data)
return res
def reinsert(self, tab):
procs = self.rebuild()
tab.rows.clear();
for i in range(len(procs)):
#tab.add_row([str(i), "Test", "root", "1024", "43", "1", "/bin/Test"])
self.HUI = i
self.HUI2 = procs[i]
pid = int(procs[i]["Pid"])
name = procs[i]["Name"]
owner = self.get_user(int(procs[i]["Uid"].split("\t")[0]))
mem = procs[i]["VmRSS"] if "VmRSS" in procs[i] else ""
if mem != "":
#mem = str(int(float(mem.split(" ")[0]) / 1000.0))
mem = int(mem.split(" ")[0])
else:
mem = 0;
cpu = "43"
parent = int(procs[i]["PPid"])
#tab.rows.append([str(i), procs[i]["Name"], self.get_user(int(procs[i]["Pid"])), procs[i]["VmSize"] if "VmSize" in procs[i] else "", "43", procs[i]["PPid"], procs[i]["CmdLine"]])
tab.rows.append([pid, name, owner, mem, cpu, parent, ""])
tab.resort()
def do_run(self):
r = self._root_frame
tab = ztable.ztable(r, (0,0), r._size, 7)
r.add_child(tab)
tab.head[0]["caption"] = "PID"
tab.head[0]["width"] = 6
tab.head[0]["align"] = ztable.ztable.AL_RIGHT
tab.head[1]["caption"] = "Name"
tab.head[1]["width"] = 16
tab.head[2]["caption"] = "Owner"
tab.head[2]["width"] = 10
tab.head[3]["caption"] = "Mem"
tab.head[3]["width"] = 9
tab.head[3]["align"] = ztable.ztable.AL_RIGHT
tab.head[4]["caption"] = "CPU"
tab.head[4]["width"] = 4
tab.head[4]["align"] = ztable.ztable.AL_RIGHT
tab.head[5]["caption"] = "Parent"
tab.head[5]["width"] = 6
tab.head[5]["align"] = ztable.ztable.AL_RIGHT
w = 0
for i in range(6):
w += tab.head[i]["width"]
w = tab._size[0] - w
tab.head[6]["caption"] = "Command Line"
tab.head[6]["width"] = w
#wupp = ztextedit.ztextedit(r, (10,5), (20, 8), "")
#r.add_child(wupp)
r.next_focus()
#bf = zborderframe.zborderframe(self._root_frame, (2,2), (70,20), "Hallo")
#bf2 = zborderframe.zborderframe(self._root_frame, (75,2), (20,10), "Hallo2")
#lbl = zkeylabel.zkeylabel(bf2, (1,1), (18,1), "HUHU")
#ed = zlineedit.zlineedit(bf, (1,1), (18,1), "Test text" )
#tab = ztable.ztable(bf, (1,2), (68,17), 4)
#bf2.add_child(lbl)
#bf.add_child(ed)
#bf.add_child(tab)
#tab.add_row(["Test " + str(i), "i*i = " + str(i*i), "i^i = " + str(i**i), "Ende", ])
#bf = zborderframe.zborderframe(None, (2,2), (20,20), "Hallo")
#self._root_frame.add_child(bf)
#self._root_frame.add_child(bf2)
#ed.on_enter = lambda sender: tab.add_row([ed.get_text(), "2", "3", "4"]) or ed.set_text("")
#ed.on_change = lambda sender: lbl2.set_caption(sender.get_text()) or lbl2.set_fcolor(-1)
self.reinsert(tab)
while True:
self.render()
c, buf = self._get_char()
if(buf == "r"):
self.reinsert(tab)
elif(buf == "d"):
tab.set_desc(not tab.get_desc())
elif(buf == ">"):
tab.set_order_by(tab.get_order_by()+1)
elif(buf == "<"):
tab.set_order_by(tab.get_order_by()-1)
self._root_frame.on_key(c, buf)
app = zop()
app.run()
 | en | 0.200178 | 2.193503 | 2 |
tests/gridworld_test.py | Duckie-town-isu/tulip-control | 91 | 6632238 |
"""Tests for the tulip.gridworld."""
from __future__ import division
from __future__ import print_function
try:
from dd import cudd as dd_cudd
except ImportError:
dd_cudd = None
import numpy as np
import pytest
import tulip.gridworld as gw
import unittest
from tulip.synth import is_realizable
REFERENCE_GWFILE = """
# A very small example, realizable by itself.
6 10
* G*
*** ***
*
I * * *
****** *
*
"""
UNREACHABLE_GOAL_GWFILE = """
4 4
**G
**
I* *
"""
TRIVIAL_GWFILE = """
2 2
*
"""
# Module-level fixture setup
def setup_module():
np.random.seed(0) # Make pseudorandom number sequence repeatable
class GridWorld_test(object):
def setup_method(self):
self.prefix = "testworld"
self.X = gw.GridWorld(REFERENCE_GWFILE, prefix=self.prefix)
self.Y_testpaths = gw.GridWorld(UNREACHABLE_GOAL_GWFILE,
prefix=self.prefix)
def teardown_method(self):
self.X = None
self.Y_testpaths = None
def test_reachability(self):
# Reachability is assumed to be bidirectional
assert not self.Y_testpaths.is_reachable((3,0), (0,2))
assert not self.Y_testpaths.is_reachable((0,2), (3,0))
assert self.Y_testpaths.is_reachable((1,1), (2,3))
assert self.Y_testpaths.is_reachable((2,3), (1,1))
def test_size(self):
assert self.X.size() == (6, 10)
def test_copy(self):
Z = self.X.copy()
assert Z is not self.X
assert Z.W is not self.X.W
assert Z == self.X
def test_getitem(self):
assert self.X.__getitem__(
(0,0), nonbool=False) == self.prefix+"_"+str(0)+"_"+str(0)
assert self.X.__getitem__(
(-1,0), nonbool=False) == self.prefix+"_"+str(5)+"_"+str(0)
assert self.X.__getitem__(
(-1,-2), nonbool=False) == self.prefix+"_"+str(5)+"_"+str(8)
def test_state(self):
assert self.X.state((2,3), nonbool=False) == {
'testworld_3_9': 0, 'testworld_1_8': 0, 'testworld_1_9': 0,
'testworld_1_4': 0, 'testworld_1_5': 0, 'testworld_1_6': 0,
'testworld_1_7': 0, 'testworld_1_0': 0, 'testworld_1_1': 0,
'testworld_1_2': 0, 'testworld_1_3': 0, 'testworld_0_5': 0,
'testworld_0_4': 0, 'testworld_0_7': 0, 'testworld_0_6': 0,
'testworld_0_1': 0, 'testworld_0_0': 0, 'testworld_0_3': 0,
'testworld_0_2': 0, 'testworld_5_7': 0, 'testworld_0_9': 0,
'testworld_0_8': 0, 'testworld_3_2': 0, 'testworld_3_3': 0,
'testworld_2_9': 0, 'testworld_2_8': 0, 'testworld_3_6': 0,
'testworld_3_7': 0, 'testworld_3_4': 0, 'testworld_3_5': 0,
'testworld_2_3': 1, 'testworld_2_2': 0, 'testworld_2_1': 0,
'testworld_2_0': 0, 'testworld_2_7': 0, 'testworld_2_6': 0,
'testworld_2_5': 0, 'testworld_2_4': 0, 'testworld_4_1': 0,
'testworld_4_0': 0, 'testworld_4_3': 0, 'testworld_4_2': 0,
'testworld_4_5': 0, 'testworld_4_4': 0, 'testworld_4_7': 0,
'testworld_4_6': 0, 'testworld_4_9': 0, 'testworld_4_8': 0,
'testworld_5_8': 0, 'testworld_5_2': 0, 'testworld_5_9': 0,
'testworld_3_0': 0, 'testworld_3_1': 0, 'testworld_5_3': 0,
'testworld_5_5': 0, 'testworld_5_0': 0, 'testworld_5_4': 0,
'testworld_5_1': 0, 'testworld_5_6': 0, 'testworld_3_8': 0}
assert self.X.state((-1,0), nonbool=False) == {
'testworld_3_9': 0, 'testworld_1_8': 0, 'testworld_1_9': 0,
'testworld_1_4': 0, 'testworld_1_5': 0, 'testworld_1_6': 0,
'testworld_1_7': 0, 'testworld_1_0': 0, 'testworld_1_1': 0,
'testworld_1_2': 0, 'testworld_1_3': 0, 'testworld_0_5': 0,
'testworld_0_4': 0, 'testworld_0_7': 0, 'testworld_0_6': 0,
'testworld_0_1': 0, 'testworld_0_0': 0, 'testworld_0_3': 0,
'testworld_0_2': 0, 'testworld_5_7': 0, 'testworld_0_9': 0,
'testworld_0_8': 0, 'testworld_3_2': 0, 'testworld_3_3': 0,
'testworld_2_9': 0, 'testworld_2_8': 0, 'testworld_3_6': 0,
'testworld_3_7': 0, 'testworld_3_4': 0, 'testworld_3_5': 0,
'testworld_2_3': 0, 'testworld_2_2': 0, 'testworld_2_1': 0,
'testworld_2_0': 0, 'testworld_2_7': 0, 'testworld_2_6': 0,
'testworld_2_5': 0, 'testworld_2_4': 0, 'testworld_4_1': 0,
'testworld_4_0': 0, 'testworld_4_3': 0, 'testworld_4_2': 0,
'testworld_4_5': 0, 'testworld_4_4': 0, 'testworld_4_7': 0,
'testworld_4_6': 0, 'testworld_4_9': 0, 'testworld_4_8': 0,
'testworld_5_8': 0, 'testworld_5_2': 0, 'testworld_5_9': 0,
'testworld_3_0': 0, 'testworld_3_1': 0, 'testworld_5_3': 0,
'testworld_5_5': 0, 'testworld_5_0': 1, 'testworld_5_4': 0,
'testworld_5_1': 0, 'testworld_5_6': 0, 'testworld_3_8': 0}
assert self.X.state((-1,-1), nonbool=False) == {
'testworld_3_9': 0, 'testworld_1_8': 0, 'testworld_1_9': 0,
'testworld_1_4': 0, 'testworld_1_5': 0, 'testworld_1_6': 0,
'testworld_1_7': 0, 'testworld_1_0': 0, 'testworld_1_1': 0,
'testworld_1_2': 0, 'testworld_1_3': 0, 'testworld_0_5': 0,
'testworld_0_4': 0, 'testworld_0_7': 0, 'testworld_0_6': 0,
'testworld_0_1': 0, 'testworld_0_0': 0, 'testworld_0_3': 0,
'testworld_0_2': 0, 'testworld_5_7': 0, 'testworld_0_9': 0,
'testworld_0_8': 0, 'testworld_3_2': 0, 'testworld_3_3': 0,
'testworld_2_9': 0, 'testworld_2_8': 0, 'testworld_3_6': 0,
'testworld_3_7': 0, 'testworld_3_4': 0, 'testworld_3_5': 0,
'testworld_2_3': 0, 'testworld_2_2': 0, 'testworld_2_1': 0,
'testworld_2_0': 0, 'testworld_2_7': 0, 'testworld_2_6': 0,
'testworld_2_5': 0, 'testworld_2_4': 0, 'testworld_4_1': 0,
'testworld_4_0': 0, 'testworld_4_3': 0, 'testworld_4_2': 0,
'testworld_4_5': 0, 'testworld_4_4': 0, 'testworld_4_7': 0,
'testworld_4_6': 0, 'testworld_4_9': 0, 'testworld_4_8': 0,
'testworld_5_8': 0, 'testworld_5_2': 0, 'testworld_5_9': 1,
'testworld_3_0': 0, 'testworld_3_1': 0, 'testworld_5_3': 0,
'testworld_5_5': 0, 'testworld_5_0': 0, 'testworld_5_4': 0,
'testworld_5_1': 0, 'testworld_5_6': 0, 'testworld_3_8': 0}
def test_equality(self):
assert self.X == gw.GridWorld(REFERENCE_GWFILE)
Y = gw.GridWorld()
assert self.X != Y
Y = gw.GridWorld(TRIVIAL_GWFILE)
assert self.X != Y
def test_dumploadloop(self):
assert self.X == gw.GridWorld(self.X.dumps())
@unittest.skipIf(dd_cudd is None,
'`dd.cudd` not installed')
def test_spec_realizable_bool(self):
spec = self.X.spec(nonbool=False)
spec.moore = False
spec.plus_one = False
spec.qinit = r'\A \E'
assert is_realizable(spec)
def test_spec_realizable(self):
spec = self.X.spec()
spec.moore = False
spec.plus_one = False
spec.qinit = r'\A \E'
assert is_realizable(spec)
@pytest.mark.parametrize('coord,expected',
[((0, 0), False), ((0, 1), True),
((-1, 0), False), ((0, -1), True)])
def test_is_empty(self, coord, expected):
assert self.X.is_empty(coord) == expected
@pytest.mark.parametrize('coord,expected',
[((0, 0), False), ((0, 1), True),
((-1, 0), False), ((0, -1), False)])
def test_is_empty_extend(self, coord, expected):
assert self.X.is_empty(coord, extend=True) == expected
def test_dump_subworld(self):
# No offset
X_local = self.X.dump_subworld((2,4), prefix="X")
assert X_local.size() == (2, 4)
assert X_local.__getitem__((0,0), nonbool=False) == "X_0_0"
assert not X_local.is_empty((0,0))
assert X_local.is_empty((0,1))
# Offset
X_local = self.X.dump_subworld((2,4), offset=(1,0), prefix="Xoff")
assert X_local.size() == (2, 4)
assert X_local.__getitem__((0,0), nonbool=False) == "Xoff_0_0"
assert X_local.is_empty((0,0))
assert X_local.is_empty((0,1))
assert not X_local.is_empty((0,3))
def test_dump_subworld_extend(self):
# No offset
Xsize = self.X.size()
X_local = self.X.dump_subworld((Xsize[0]+1, Xsize[1]), prefix="X",
extend=True)
X_local.goal_list = self.X.goal_list[:]
X_local.init_list = self.X.init_list[:]
assert X_local.size() == (7, 10)
assert X_local.__getitem__((0,0), nonbool=False) == "X_0_0"
assert not X_local.is_empty((0,0))
assert X_local.is_empty((0,1))
# Equal except for the last row, which should be all occupied in X_local
X_local_s = X_local.dumps().splitlines()
assert np.all(X_local_s[1:-1] == self.X.dumps().splitlines()[1:])
assert not X_local.is_empty((6,1))
assert X_local_s[-1] == "*"*10
# Offset
X_local = self.X.dump_subworld((3,4), offset=(-1,0), prefix="Xoff",
extend=True)
assert X_local.size() == (3, 4)
assert X_local.__getitem__((0,0), nonbool=False) == "Xoff_0_0"
assert not X_local.is_empty((0,0))
assert not X_local.is_empty((0,1))
assert not X_local.is_empty((0,3))
assert X_local.is_empty((1,1))
@pytest.mark.slow
class RandomWorld_test(object):
def setup_method(self):
self.wall_densities = [.2, .4, .6]
self.sizes = [(4,5), (4,5), (10,20)]
self.rworlds = [
gw.random_world(
self.sizes[r], wall_density=self.wall_densities[r], prefix="Y")
for r in range(len(self.sizes))]
self.rworlds_ensuredfeasible = [
gw.random_world(
self.sizes[r], self.wall_densities[r], num_init=2,
num_goals=2, ensure_feasible=True)
for r in range(len(self.sizes))]
def teardown_method(self):
self.rworlds = []
def test_feasibility(self):
for r in range(len(self.rworlds_ensuredfeasible)):
print('test "ensured feasible" world index', r)
print(self.rworlds_ensuredfeasible[r])
assert self.rworlds_ensuredfeasible[r].is_reachable(
self.rworlds_ensuredfeasible[r].init_list[0],
self.rworlds_ensuredfeasible[r].init_list[1])
assert self.rworlds_ensuredfeasible[r].is_reachable(
self.rworlds_ensuredfeasible[r].init_list[1],
self.rworlds_ensuredfeasible[r].goal_list[0])
assert self.rworlds_ensuredfeasible[r].is_reachable(
self.rworlds_ensuredfeasible[r].goal_list[0],
self.rworlds_ensuredfeasible[r].goal_list[1])
assert self.rworlds_ensuredfeasible[r].is_reachable(
self.rworlds_ensuredfeasible[r].goal_list[1],
self.rworlds_ensuredfeasible[r].init_list[0])
def test_size(self):
for r in range(len(self.rworlds)):
print("test world index", r)
print(self.rworlds[r])
assert self.sizes[r] == self.rworlds[r].size()
def test_density(self):
for r in range(len(self.rworlds)):
print("test world index", r)
print(self.rworlds[r])
(num_rows, num_cols) = self.rworlds[r].size()
num_occupied = 0
for i in range(num_rows):
for j in range(num_cols):
if not self.rworlds[r].is_empty((i,j)):
num_occupied += 1
assert (
float(num_occupied) / (num_rows*num_cols) ==
self.wall_densities[r])
@pytest.mark.parametrize('label,expected_coord',
[('test_3_0', ('test', 3, 0)),
('obstacle_5_4_11', ('obstacle_5', 4, 11)),
('test3_0', None)])
def extract_coord_test(label, expected_coord):
assert gw.extract_coord(label) == expected_coord
def eq_gridworld_param():
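    # Yields (G, H, expected_equality) triples covering empty worlds, occupancy,
    # goal-list and init-list differences, and size mismatches.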
empty = gw.GridWorld()
trivial_nonempty = gw.GridWorld(TRIVIAL_GWFILE)
trivial_diff = gw.GridWorld(TRIVIAL_GWFILE)
if trivial_diff.is_empty((0, 0)):
trivial_diff.mark_occupied((0, 0))
else:
trivial_diff.mark_empty((0, 0))
trivial_nonempty_2goals = gw.GridWorld(TRIVIAL_GWFILE)
trivial_nonempty_2goals.goal_list = [(0, 0), (1, 1)]
trivial_nonempty_2init = gw.GridWorld(TRIVIAL_GWFILE)
trivial_nonempty_2init.init_list = [(0, 0), (1, 1)]
for (G, H, is_equal) in [
(gw.GridWorld(), gw.GridWorld(), True),
(empty, trivial_nonempty, False),
(trivial_nonempty_2goals, trivial_nonempty, False),
(trivial_nonempty_2init, trivial_nonempty, False),
(trivial_nonempty, trivial_diff, False),
(gw.unoccupied((3, 5)), gw.unoccupied((1, 1)), False)]:
yield (G, H, is_equal)
@pytest.mark.parametrize('G,H,eq', eq_gridworld_param())
def eq_gridworld_test(G, H, eq):
    if eq:
        assert G == H
    else:
        assert not (G == H)
def narrow_passage_test():
G = gw.narrow_passage((5, 10), num_init=1, num_goals=1)
assert G.is_reachable(G.init_list[0], G.goal_list[0])
def scale_gridworld_test():
G = gw.unoccupied((1, 2))
assert G.size() == (1, 2)
assert G.scale().size() == G.size()
assert G.scale(xf=1, yf=1).size() == G.size()
assert G.scale(xf=2).size() == (1, 4)
assert G.scale(yf=2).size() == (2, 2)
assert G.scale(xf=3, yf=4).size() == (4, 6)
def add_trolls_test():
G = gw.unoccupied((3, 5))
G.init_list = [(0, 0)]
G.goal_list = [(0, 4)]
spc = gw.add_trolls(G, [((2, 2), 1)], get_moves_lists=False)
spc.moore = False
spc.plus_one = False
spc.qinit = r'\A \E'
assert is_realizable(spc)
 | en | 0.78507 | 2.585206 | 3 |
jackselect/devmonitor.py | SpotlightKid/jack-select | 12 | 6632239 | # -*- coding: utf-8 -*-
"""Set up an udev monitor to be notified about changes in attached sound devices."""
import logging
from pyudev import Context, Monitor
from .pyudev_gobject import MonitorObserver
log = logging.getLogger(__name__)
class AlsaDevMonitor:
def __init__(self, callback):
# set up udev device monitor
context = Context()
self._monitor = Monitor.from_netlink(context)
self._monitor.filter_by(subsystem='sound')
self._observer = MonitorObserver(self._monitor)
self._observer.connect('device-event', callback)
def start(self):
log.debug("Starting AlsaDevMonitor...")
self._monitor.start()
 | en | 0.806524 | 2.611645 | 3 |
objectModel/Python/cdm/objectmodel/cdm_folder_collection.py | Venkata1920/CDM | 1 | 6632240 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import Union, List
from cdm.enums import CdmObjectType
from .cdm_collection import CdmCollection
class CdmFolderCollection(CdmCollection):
def __init__(self, ctx: 'CdmCorpusContext', owner: 'CdmObject'):
super().__init__(ctx, owner, CdmObjectType.FOLDER_DEF)
def append(self, obj: Union[str, 'CdmFolderDefinition']) -> 'CdmFolderDefinition':
if isinstance(obj, str):
return self.append(self.ctx.corpus.make_object(self.default_type, obj))
self._add_item_modifications(obj)
return super().append(obj)
def insert(self, index: int, obj: 'CdmFolderDefinition') -> None:
self._add_item_modifications(obj)
super().insert(index, obj)
def extend(self, folder_list: List['CdmFolderDefinition']) -> None:
for folder in folder_list:
self.append(folder)
def _add_item_modifications(self, obj: 'CdmFolderDefinition') -> None:
obj._corpus = self.owner._corpus
obj.namespace = self.owner.namespace
obj.folder_path = '{}{}/'.format(self.owner.folder_path, obj.name)
# TODO: At this point we should also propagate the root adapter into the child folder
# and all its sub-folders and contained documents. For now, don't add things to the
# folder unless it's tied to an adapter root.
@property
def _owner(self) -> 'CdmFolderDefinition':
return super().owner
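# Usage sketch (names below are hypothetical): given a CdmFolderCollection `folders`,
#     folders.append('reports')          # a string creates a new CdmFolderDefinition named 'reports'
#     folders.append(existing_folder)    # an existing CdmFolderDefinition is added as-is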
 | en | 0.904216 | 2.031296 | 2 |
python/day10.py | JohanWranker/advent-of-code-2020 | 0 | 6632241 |
import os
import re
from datetime import datetime
import time
print("day10")
path = "input"
if not os.path.exists(path):
print(" File not exist ", os.path.abspath(path))
exit(1)
rawtext = open(path).readlines()
# Read all adapter joltages, add the charging outlet (0), and sort
data = sorted([0] + [int(x) for x in rawtext])
# List with "next" joltage - include output which is last +3
next =data[1:] + [data[-1]+3]
# Make a difflist
diff = [y-x for x,y in zip(data, next)]
print(f"first mission {diff.count(1)*diff.count(3)}")
assert 2210 == diff.count(1)*diff.count(3)
# Count the paths to the device, working backwards; the final jolt has exactly 1 path
paths = {data[-1] : 1}
for val in reversed(data[0:-1]):
# handle the jolts starting at the last-but-one
paths[val] = 0
# Test each jolt with offset 1-3
for step in [1,2,3]:
if val+step in paths:
# New path found, add based on the number of way "next step" has
paths[val] += paths[val+step]
print(f"ways {paths[0]} ")
assert 7086739046912 == paths[0]
 | en | 0.918999 | 3.072922 | 3 |
Test/test_DirtyWordOfFilter.py | Rainstyd/rainsty | 1 | 6632242 |
# test_DirtyWordOfFilter.py
from Algorithm.DirtyWordOfFilter import DFAFilter
import time
def main():
time1 = time.time()
gfw = DFAFilter()
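    # Test input: a Chinese sentence seeded with profanity that the DFA filter should mask.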
text = "你真是个大傻逼,大傻子,傻大个,大坏蛋,坏人。乱交, 乱小"
result = gfw.filter(text)
print(text)
print(result)
time2 = time.time()
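    # '总共耗时' means "total elapsed time".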
print('总共耗时:' + str(time2 - time1) + 's')
if __name__ == '__main__':
main()
 | en | 0.170308 | 2.89309 | 3 |
models/shum/ashum.py | INTENS-FI/intens | 0 | 6632243 | """Attempt to use asyncio.
Failed because asyncio subprocess support requires setup in the main
thread, which we do not control in the Dask worker.
"""
from concurrent.futures import CancelledError
import asyncio, os, traceback as tb
import dask
async def run_it(spec):
#TODO
script = os.path.join(os.path.dirname(__file__), "sum.sh")
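    # Note: asyncio subprocess support requires a child watcher configured in the
    # main thread, which a Dask worker does not provide (see module docstring).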
proc = await asyncio.create_subprocess_exec(
script, *(str(spec.inputs[n]) for n in ['x', 'y']),
cwd=spec.workdir)
out, err = await proc.communicate()
if err:
raise RuntimeError(err)
else:
return {'sum': int(out)}
async def poll_cancel(run, cancel):
while not cancel.get():
await asyncio.sleep(5)
run.cancel()
async def main(spec, cancel):
run = asyncio.ensure_future(run_it(spec))
watch = asyncio.ensure_future(poll_cancel(run, cancel))
try:
return await run
finally:
watch.cancel()
@dask.delayed
def task(spec, cancel):
try:
loop = asyncio.get_event_loop()
return loop.run_until_complete(main(spec, cancel))
except:
tb.print_exc()
raise
| """Attempt to use asyncio.
Failed because asyncio subprocess support requires setup in the main
thread, which we do not control in the Dask worker.
"""
from concurrent.futures import CancelledError
import asyncio, os, traceback as tb
import dask
async def run_it(spec):
#TODO
script = os.path.join(os.path.dirname(__file__), "sum.sh")
proc = await asyncio.create_subprocess_exec(
script, *(str(spec.inputs[n]) for n in ['x', 'y']),
cwd=spec.workdir)
out, err = await proc.communicate()
if err:
raise RuntimeError(err)
else:
return {'sum': int(out)}
async def poll_cancel(run, cancel):
while not cancel.get():
await asyncio.sleep(5)
run.cancel()
async def main(spec, cancel):
run = asyncio.ensure_future(run_it(spec))
watch = asyncio.ensure_future(poll_cancel(run, cancel))
try:
return await run
finally:
watch.cancel()
@dask.delayed
def task(spec, cancel):
try:
loop = asyncio.get_event_loop()
return loop.run_until_complete(main(spec, cancel))
except:
tb.print_exc()
raise
| en | 0.871562 | Attempt to use asyncio. Failed because asyncio subprocess support requires setup in the main thread, which we do not control in the Dask worker. #TODO | 2.497871 | 2 |
gdc_rnaseq_tools/merge_junctions.py | NCI-GDC/gdc-rnaseq-tool | 0 | 6632244 | """A gdc-rnaseq-tools subcommand to format and merge STAR junction counts
files from the same sample.
@author: <NAME> <<EMAIL>>
"""
from operator import itemgetter
from gdc_rnaseq_tools.utils import get_logger, get_open_function
COLUMN_NAMES = [
"chromosome",
"intron_start",
"intron_end",
"strand",
"intron_motif",
"annotation",
"n_unique_map",
"n_multi_map",
"max_splice_overhang",
]
class StarJunctionRecord:
"""Represents a row in the SJ file"""
def __init__(
self,
chromosome,
intron_first,
intron_last,
strand,
motif,
annotation,
n_unique_mapped,
n_multi_mapped,
max_splice_overhang,
):
self.chromosome = chromosome
self.intron_first = int(intron_first)
self.intron_last = int(intron_last)
self.strand = int(strand)
self.motif = int(motif)
self.annotation = int(annotation)
self.n_unique_mapped = int(n_unique_mapped)
self.n_multi_mapped = int(n_multi_mapped)
self.max_splice_overhang = int(max_splice_overhang)
@property
def key(self):
"""
The first six columns are the identifiers and are used to match
between input files.
"""
return (
self.chromosome,
self.intron_first,
self.intron_last,
self.strand,
self.motif,
self.annotation,
)
@classmethod
def from_line(cls, line):
"""
Initialize record from a line.
"""
return cls(*line.rstrip("\r\n").split("\t"))
def __iadd__(self, other):
"""
Used to merge overlapping records by adding the appropriate columns and
getting the max of the splice overhang column.
"""
assert self.key == other.key
self.n_unique_mapped += other.n_unique_mapped
self.n_multi_mapped += other.n_multi_mapped
self.max_splice_overhang = max(
self.max_splice_overhang, other.max_splice_overhang
)
return self
def __str__(self):
return "\t".join(
map(
str,
[
self.chromosome,
self.intron_first,
self.intron_last,
self.strand,
self.motif,
self.annotation,
self.n_unique_mapped,
self.n_multi_mapped,
self.max_splice_overhang,
],
)
)
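# Example sketch (hypothetical values): two records with the same key merge by
# summing the mapped-read counts and keeping the larger splice overhang:
#     a = StarJunctionRecord('chr1', 100, 200, 1, 1, 0, 5, 2, 20)
#     b = StarJunctionRecord('chr1', 100, 200, 1, 1, 0, 3, 1, 35)
#     a += b   # -> n_unique_mapped=8, n_multi_mapped=3, max_splice_overhang=35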
def process_files(args, logger):
"""
All the logic for formatting/merging STAR junction counts.
:param args: argparser
:param logger: `logging.Logger` instance
"""
writer = get_open_function(args.output)
logger.info("Writing outputs to {0}".format(args.output))
with writer(args.output, "wt") as o:
# Write header row as comment
o.write("#" + "\t".join(COLUMN_NAMES) + "\n")
if len(args.input) > 1:
logger.info("Merging {0} STAR gene counts files.".format(len(args.input)))
# Load
dic = dict()
for fil in args.input:
dic = load_junction_file(fil, dic)
logger.info(
"Writing merged STAR junction counts to {0}.".format(args.output)
)
# Merge and write
for key in sorted(dic, key=itemgetter(0, 1, 2)):
o.write(str(dic[key]) + "\n")
else:
logger.info(
"Only 1 STAR junction counts file provided. "
+ "A new STAR junction counts file will be produced "
+ "with a header line."
)
logger.info(
"Writing formatted STAR junction "
+ "counts to {0}.".format(args.output)
)
fil = args.input[0]
reader = get_open_function(fil)
with reader(fil, "rt") as fh:
for line in fh:
o.write(line)
def load_junction_file(fil, dic):
"""
Load star junction file into a dictionary.
:param fil: path to STAR counts file to load
:param dic: dict to load file to
:returns: updated dictionary
"""
reader = get_open_function(fil)
with reader(fil, "rt") as fh:
for line in fh:
rec = StarJunctionRecord.from_line(line)
if rec.key not in dic:
dic[rec.key] = rec
else:
dic[rec.key] += rec
return dic
def main(args):
"""
Main entrypoint for merge_star_gene_counts.
"""
logger = get_logger("merge_star_junction_counts")
logger.info(
"Merging/Formatting {0} STAR junction counts files.".format(len(args.input))
)
process_files(args, logger)
| """A gdc-rnaseq-tools subcommand to format and merge STAR junction counts
files from the same sample.
@author: <NAME> <<EMAIL>>
"""
from operator import itemgetter
from gdc_rnaseq_tools.utils import get_logger, get_open_function
COLUMN_NAMES = [
"chromosome",
"intron_start",
"intron_end",
"strand",
"intron_motif",
"annotation",
"n_unique_map",
"n_multi_map",
"max_splice_overhang",
]
class StarJunctionRecord:
"""Represents a row in the SJ file"""
def __init__(
self,
chromosome,
intron_first,
intron_last,
strand,
motif,
annotation,
n_unique_mapped,
n_multi_mapped,
max_splice_overhang,
):
self.chromosome = chromosome
self.intron_first = int(intron_first)
self.intron_last = int(intron_last)
self.strand = int(strand)
self.motif = int(motif)
self.annotation = int(annotation)
self.n_unique_mapped = int(n_unique_mapped)
self.n_multi_mapped = int(n_multi_mapped)
self.max_splice_overhang = int(max_splice_overhang)
@property
def key(self):
"""
The first six columns are the identifiers and are used to match
between input files.
"""
return (
self.chromosome,
self.intron_first,
self.intron_last,
self.strand,
self.motif,
self.annotation,
)
@classmethod
def from_line(cls, line):
"""
Initialize record from a line.
"""
return cls(*line.rstrip("\r\n").split("\t"))
def __iadd__(self, other):
"""
Used to merge overlapping records by adding the appropriate columns and
getting the max of the splice overhang column.
"""
assert self.key == other.key
self.n_unique_mapped += other.n_unique_mapped
self.n_multi_mapped += other.n_multi_mapped
self.max_splice_overhang = max(
self.max_splice_overhang, other.max_splice_overhang
)
return self
def __str__(self):
return "\t".join(
map(
str,
[
self.chromosome,
self.intron_first,
self.intron_last,
self.strand,
self.motif,
self.annotation,
self.n_unique_mapped,
self.n_multi_mapped,
self.max_splice_overhang,
],
)
)
def process_files(args, logger):
"""
All the logic for formatting/merging STAR junction counts.
:param args: argparser
:param logger: `logging.Logger` instance
"""
writer = get_open_function(args.output)
logger.info("Writing outputs to {0}".format(args.output))
with writer(args.output, "wt") as o:
# Write header row as comment
o.write("#" + "\t".join(COLUMN_NAMES) + "\n")
if len(args.input) > 1:
logger.info("Merging {0} STAR gene counts files.".format(len(args.input)))
# Load
dic = dict()
for fil in args.input:
dic = load_junction_file(fil, dic)
logger.info(
"Writing merged STAR junction counts to {0}.".format(args.output)
)
# Merge and write
for key in sorted(dic, key=itemgetter(0, 1, 2)):
o.write(str(dic[key]) + "\n")
else:
logger.info(
"Only 1 STAR junction counts file provided. "
+ "A new STAR junction counts file will be produced "
+ "with a header line."
)
logger.info(
"Writing formatted STAR junction "
+ "counts to {0}.".format(args.output)
)
fil = args.input[0]
reader = get_open_function(fil)
with reader(fil, "rt") as fh:
for line in fh:
o.write(line)
def load_junction_file(fil, dic):
"""
Load star junction file into a dictionary.
:param fil: path to STAR counts file to load
:param dic: dict to load file to
:returns: updated dictionary
"""
reader = get_open_function(fil)
with reader(fil, "rt") as fh:
for line in fh:
rec = StarJunctionRecord.from_line(line)
if rec.key not in dic:
dic[rec.key] = rec
else:
dic[rec.key] += rec
return dic
def main(args):
"""
Main entrypoint for merge_star_gene_counts.
"""
logger = get_logger("merge_star_junction_counts")
logger.info(
"Merging/Formatting {0} STAR junction counts files.".format(len(args.input))
)
process_files(args, logger)
| en | 0.83875 | A gdc-rnaseq-tools subcommand to format and merge STAR junction counts files from the same sample. @author: <NAME> <<EMAIL>> Represents a row in the SJ file The first six columns are the identifiers and are used to match between input files. Initialize record from a line. Used to merge overlapping records by adding the appropriate columns and getting the max of the splice overhang column. All the logic for formatting/merging STAR junction counts. :param args: argparser :param logger: `logging.Logger` instance # Write header row as comment # Load # Merge and write Load star junction file into a dictionary. :param fil: path to STAR counts file to load :param dic: dict to load file to :returns: updated dictionary Main entrypoint for merge_star_gene_counts. | 2.902023 | 3 |
django_messages/forms.py | mirumee/django-messages | 16 | 6632245 |
import datetime
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_noop
from django.contrib.auth.models import User
import uuid
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
from django_messages.models import Message
from django_messages.fields import CommaSeparatedUserField
from django_messages.utils import format_quote
class MessageForm(forms.ModelForm):
"""
base message form
"""
recipients = CommaSeparatedUserField(label=_(u"Recipient"))
subject = forms.CharField(label=_(u"Subject"))
body = forms.CharField(label=_(u"Body"),
widget=forms.Textarea(attrs={'rows': '12', 'cols':'55'}))
class Meta:
model = Message
fields = ('recipients', 'subject', 'body',)
def __init__(self, sender, *args, **kw):
recipient_filter = kw.pop('recipient_filter', None)
self.sender = sender
super(MessageForm, self).__init__(*args, **kw)
if recipient_filter is not None:
self.fields['recipients']._recipient_filter = recipient_filter
def create_recipient_message(self, recipient, message):
return Message(
owner = recipient,
sender = self.sender,
to = recipient.username,
recipient = recipient,
subject = message.subject,
body = message.body,
thread = message.thread,
sent_at = message.sent_at,
)
def get_thread(self, message):
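        # Keep the existing thread id if the message already has one; otherwise
        # start a new thread with a random hex identifier.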
return message.thread or uuid.uuid4().hex
def save(self, commit=True):
recipients = self.cleaned_data['recipients']
instance = super(MessageForm, self).save(commit=False)
instance.sender = self.sender
instance.owner = self.sender
instance.recipient = recipients[0]
instance.thread = self.get_thread(instance)
instance.unread = False
instance.sent_at = datetime.datetime.now()
message_list = []
# clone messages in recipients inboxes
for r in recipients:
if r == self.sender: # skip duplicates
continue
msg = self.create_recipient_message(r, instance)
message_list.append(msg)
instance.to = ','.join([r.username for r in recipients])
if commit:
instance.save()
for msg in message_list:
msg.save()
if notification:
notification.send([msg.recipient],
"messages_received", {'message': msg,})
return instance, message_list
class ComposeForm(MessageForm):
"""
A simple default form for private messages.
"""
class Meta:
model = Message
fields = ('recipients', 'subject', 'body',)
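# Usage sketch (hypothetical view code): ComposeForm takes the sender as its
# first positional argument, e.g.
#     form = ComposeForm(request.user, data=request.POST)
#     if form.is_valid():
#         new_message, copies = form.save()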
class ReplyForm(MessageForm):
"""
reply to form
"""
class Meta:
model = Message
fields = ('recipients', 'subject', 'body',)
def __init__(self, sender, message, *args, **kw):
self.parent_message = message
initial = kw.pop('initial', {})
initial['recipients'] = message.sender.username
initial['body'] = self.quote_message(message)
initial['subject'] = self.quote_subject(message.subject)
kw['initial'] = initial
super(ReplyForm, self).__init__(sender, *args, **kw)
def quote_message(self, original_message):
return format_quote(original_message.sender, original_message.body)
def quote_subject(self, subject):
return u'Re: %s' % subject
def create_recipient_message(self, recipient, message):
msg = super(ReplyForm, self).create_recipient_message(recipient, message)
msg.replied_at = datetime.datetime.now()
# find parent in recipient messages
try:
msg.parent_msg = Message.objects.get(
owner=recipient,
sender=message.recipient,
recipient=message.sender,
thread=message.thread)
except (Message.DoesNotExist, Message.MultipleObjectsReturned):
# message may be deleted
pass
return msg
def get_thread(self, message):
return self.parent_message.thread
def save(self, commit=True):
instance, message_list = super(ReplyForm, self).save(commit=False)
instance.replied_at = datetime.datetime.now()
instance.parent_msg = self.parent_message
if commit:
instance.save()
for msg in message_list:
msg.save()
if notification:
notification.send([msg.recipient],
"messages_reply_received", {
'message': msg,
'parent_msg': self.parent_message,
})
return instance, message_list
 | en | 0.729635 | 2.09474 | 2 |
Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py | Alpine-DAV/WarpX | 0 | 6632246 |
#!/usr/bin/env python3
from pywarpx import picmi
# Physical constants
c = picmi.constants.c
q_e = picmi.constants.q_e
# Number of time steps
max_steps = 100
# Number of cells
nx = 32
ny = 32
nz = 256
# Physical domain
xmin = -30e-06
xmax = 30e-06
ymin = -30e-06
ymax = 30e-06
zmin = -56e-06
zmax = 12e-06
# Domain decomposition
max_grid_size = 64
blocking_factor = 32
# Create grid
grid = picmi.Cartesian3DGrid(
number_of_cells = [nx, ny, nz],
lower_bound = [xmin, ymin, zmin],
upper_bound = [xmax, ymax, zmax],
lower_boundary_conditions = ['periodic', 'periodic', 'dirichlet'],
upper_boundary_conditions = ['periodic', 'periodic', 'dirichlet'],
lower_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'],
upper_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'],
moving_window_velocity = [0., 0., c],
warpx_max_grid_size = max_grid_size,
warpx_blocking_factor = blocking_factor)
# Particles: plasma electrons
plasma_density = 2e23
plasma_xmin = -20e-06
plasma_ymin = -20e-06
plasma_zmin = 0
plasma_xmax = 20e-06
plasma_ymax = 20e-06
plasma_zmax = None
uniform_distribution = picmi.UniformDistribution(
density = plasma_density,
lower_bound = [plasma_xmin, plasma_ymin, plasma_zmin],
upper_bound = [plasma_xmax, plasma_ymax, plasma_zmax],
fill_in = True)
electrons = picmi.Species(
particle_type = 'electron',
name = 'electrons',
initial_distribution = uniform_distribution)
# Particles: beam electrons
q_tot = 1e-12
x_m = 0.
y_m = 0.
z_m = -28e-06
x_rms = 0.5e-06
y_rms = 0.5e-06
z_rms = 0.5e-06
ux_m = 0.
uy_m = 0.
uz_m = 500.
ux_th = 2.
uy_th = 2.
uz_th = 50.
gaussian_bunch_distribution = picmi.GaussianBunchDistribution(
n_physical_particles = q_tot / q_e,
rms_bunch_size = [x_rms, y_rms, z_rms],
rms_velocity = [c*ux_th, c*uy_th, c*uz_th],
centroid_position = [x_m, y_m, z_m],
centroid_velocity = [c*ux_m, c*uy_m, c*uz_m])
beam = picmi.Species(
particle_type = 'electron',
name = 'beam',
initial_distribution = gaussian_bunch_distribution)
# Laser
e_max = 16e12
position_z = 9e-06
profile_t_peak = 30.e-15
profile_focal_distance = 100e-06
laser = picmi.GaussianLaser(
wavelength = 0.8e-06,
waist = 5e-06,
duration = 15e-15,
focal_position = [0, 0, profile_focal_distance + position_z],
centroid_position = [0, 0, position_z - c*profile_t_peak],
propagation_direction = [0, 0, 1],
polarization_direction = [0, 1, 0],
E0 = e_max,
fill_in = False)
laser_antenna = picmi.LaserAntenna(
position = [0., 0., position_z],
normal_vector = [0, 0, 1])
# Electromagnetic solver
solver = picmi.ElectromagneticSolver(
grid = grid,
method = 'Yee',
cfl = 1.,
divE_cleaning = 0)
# Diagnostics
diag_field_list = ['B', 'E', 'J', 'rho']
field_diag = picmi.FieldDiagnostic(
name = 'diag1',
grid = grid,
period = 100,
data_list = diag_field_list,
write_dir = '.',
warpx_file_prefix = 'Python_LaserAcceleration_plt')
# Set up simulation
sim = picmi.Simulation(
solver = solver,
max_steps = max_steps,
verbose = 1,
particle_shape = 'cubic',
warpx_use_filter = 1,
warpx_serialize_ics = 1,
warpx_do_dynamic_scheduling = 0)
# Add plasma electrons
sim.add_species(
electrons,
layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [1, 1, 1]))
# Add beam electrons
sim.add_species(
beam,
layout = picmi.PseudoRandomLayout(grid = grid, n_macroparticles = 100))
# Add laser
sim.add_laser(
laser,
injection_method = laser_antenna)
# Add diagnostics
sim.add_diagnostic(field_diag)
# Write input file that can be used to run with the compiled version
sim.write_input_file(file_name = 'inputs_3d_picmi')
# Initialize inputs and WarpX instance
sim.initialize_inputs()
sim.initialize_warpx()
# Advance simulation until last time step
sim.step(max_steps)
| #!/usr/bin/env python3
from pywarpx import picmi
# Physical constants
c = picmi.constants.c
q_e = picmi.constants.q_e
# Number of time steps
max_steps = 100
# Number of cells
nx = 32
ny = 32
nz = 256
# Physical domain
xmin = -30e-06
xmax = 30e-06
ymin = -30e-06
ymax = 30e-06
zmin = -56e-06
zmax = 12e-06
# Domain decomposition
max_grid_size = 64
blocking_factor = 32
# Create grid
grid = picmi.Cartesian3DGrid(
number_of_cells = [nx, ny, nz],
lower_bound = [xmin, ymin, zmin],
upper_bound = [xmax, ymax, zmax],
lower_boundary_conditions = ['periodic', 'periodic', 'dirichlet'],
upper_boundary_conditions = ['periodic', 'periodic', 'dirichlet'],
lower_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'],
upper_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'],
moving_window_velocity = [0., 0., c],
warpx_max_grid_size = max_grid_size,
warpx_blocking_factor = blocking_factor)
# Particles: plasma electrons
plasma_density = 2e23
plasma_xmin = -20e-06
plasma_ymin = -20e-06
plasma_zmin = 0
plasma_xmax = 20e-06
plasma_ymax = 20e-06
plasma_zmax = None
uniform_distribution = picmi.UniformDistribution(
density = plasma_density,
lower_bound = [plasma_xmin, plasma_ymin, plasma_zmin],
upper_bound = [plasma_xmax, plasma_ymax, plasma_zmax],
fill_in = True)
electrons = picmi.Species(
particle_type = 'electron',
name = 'electrons',
initial_distribution = uniform_distribution)
# Particles: beam electrons
q_tot = 1e-12
x_m = 0.
y_m = 0.
z_m = -28e-06
x_rms = 0.5e-06
y_rms = 0.5e-06
z_rms = 0.5e-06
ux_m = 0.
uy_m = 0.
uz_m = 500.
ux_th = 2.
uy_th = 2.
uz_th = 50.
gaussian_bunch_distribution = picmi.GaussianBunchDistribution(
n_physical_particles = q_tot / q_e,
rms_bunch_size = [x_rms, y_rms, z_rms],
rms_velocity = [c*ux_th, c*uy_th, c*uz_th],
centroid_position = [x_m, y_m, z_m],
centroid_velocity = [c*ux_m, c*uy_m, c*uz_m])
beam = picmi.Species(
particle_type = 'electron',
name = 'beam',
initial_distribution = gaussian_bunch_distribution)
# Laser
e_max = 16e12
position_z = 9e-06
profile_t_peak = 30.e-15
profile_focal_distance = 100e-06
laser = picmi.GaussianLaser(
wavelength = 0.8e-06,
waist = 5e-06,
duration = 15e-15,
focal_position = [0, 0, profile_focal_distance + position_z],
centroid_position = [0, 0, position_z - c*profile_t_peak],
propagation_direction = [0, 0, 1],
polarization_direction = [0, 1, 0],
E0 = e_max,
fill_in = False)
laser_antenna = picmi.LaserAntenna(
position = [0., 0., position_z],
normal_vector = [0, 0, 1])
# Electromagnetic solver
solver = picmi.ElectromagneticSolver(
grid = grid,
method = 'Yee',
cfl = 1.,
divE_cleaning = 0)
# Diagnostics
diag_field_list = ['B', 'E', 'J', 'rho']
field_diag = picmi.FieldDiagnostic(
name = 'diag1',
grid = grid,
period = 100,
data_list = diag_field_list,
write_dir = '.',
warpx_file_prefix = 'Python_LaserAcceleration_plt')
# Set up simulation
sim = picmi.Simulation(
solver = solver,
max_steps = max_steps,
verbose = 1,
particle_shape = 'cubic',
warpx_use_filter = 1,
warpx_serialize_ics = 1,
warpx_do_dynamic_scheduling = 0)
# Add plasma electrons
sim.add_species(
electrons,
layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [1, 1, 1]))
# Add beam electrons
sim.add_species(
beam,
layout = picmi.PseudoRandomLayout(grid = grid, n_macroparticles = 100))
# Add laser
sim.add_laser(
laser,
injection_method = laser_antenna)
# Add diagnostics
sim.add_diagnostic(field_diag)
# Write input file that can be used to run with the compiled version
sim.write_input_file(file_name = 'inputs_3d_picmi')
# Initialize inputs and WarpX instance
sim.initialize_inputs()
sim.initialize_warpx()
# Advance simulation until last time step
sim.step(max_steps) | en | 0.637696 | #!/usr/bin/env python3 # Physical constants # Number of time steps # Number of cells # Physical domain # Domain decomposition # Create grid # Particles: plasma electrons # Particles: beam electrons # Laser # Electromagnetic solver # Diagnostics # Set up simulation # Add plasma electrons # Add beam electrons # Add laser # Add diagnostics # Write input file that can be used to run with the compiled version # Initialize inputs and WarpX instance # Advance simulation until last time step | 1.855406 | 2 |
Examples/input_text_example.py | mmorandi/DearPyGui | 0 | 6632247 | <filename>Examples/input_text_example.py
from dearpygui.core import *
from dearpygui.simple import *
set_main_window_size(500, 500)
# callback
def retrieve_callback(sender, callback):
show_logger()
log_info(get_value("Regular##inputtext"))
log_info(get_value("With hint##inputtext"))
log_info(get_value("No spaces##inputtext"))
log_info(get_value("Uppercase##inputtext"))
log_info(get_value("Decimal##inputtext"))
log_info(get_value("Hexadecimal##inputtext"))
log_info(get_value("Read Only##inputtext"))
log_info(get_value("Password##inputtext"))
log_info(get_value("Multiline##inputtext"))
add_text("This example demonstrates the input text widget.", bullet=True)
add_text("Press the 'Retrieve' button to display the inputed values in the logger", wrap = 500, bullet=True)
add_input_text("Regular##inputtext")
add_input_text("With hint##inputtext", hint="A hint")
add_input_text("No spaces##inputtext", no_spaces=True)
add_input_text("Uppercase##inputtext", uppercase=True)
add_input_text("Decimal##inputtext", decimal=True)
add_input_text("Hexadecimal##inputtext", hexadecimal=True)
add_input_text("Read Only##inputtext", readonly=True, default_value="read only")
add_input_text("Password##inputtext", password=True)
add_input_text("Multiline##inputtext", multiline=True)
add_button("Retrieve", callback=retrieve_callback)
start_dearpygui()
| <filename>Examples/input_text_example.py
from dearpygui.core import *
from dearpygui.simple import *
set_main_window_size(500, 500)
# callback
def retrieve_callback(sender, callback):
show_logger()
log_info(get_value("Regular##inputtext"))
log_info(get_value("With hint##inputtext"))
log_info(get_value("No spaces##inputtext"))
log_info(get_value("Uppercase##inputtext"))
log_info(get_value("Decimal##inputtext"))
log_info(get_value("Hexadecimal##inputtext"))
log_info(get_value("Read Only##inputtext"))
log_info(get_value("Password##inputtext"))
log_info(get_value("Multiline##inputtext"))
add_text("This example demonstrates the input text widget.", bullet=True)
add_text("Press the 'Retrieve' button to display the inputed values in the logger", wrap = 500, bullet=True)
add_input_text("Regular##inputtext")
add_input_text("With hint##inputtext", hint="A hint")
add_input_text("No spaces##inputtext", no_spaces=True)
add_input_text("Uppercase##inputtext", uppercase=True)
add_input_text("Decimal##inputtext", decimal=True)
add_input_text("Hexadecimal##inputtext", hexadecimal=True)
add_input_text("Read Only##inputtext", readonly=True, default_value="read only")
add_input_text("Password##inputtext", password=True)
add_input_text("Multiline##inputtext", multiline=True)
add_button("Retrieve", callback=retrieve_callback)
start_dearpygui()
| ja | 0.37475 | # callback ##inputtext")) ##inputtext")) ##inputtext")) ##inputtext")) ##inputtext")) ##inputtext")) ##inputtext")) ##inputtext")) ##inputtext")) ##inputtext") ##inputtext", hint="A hint") ##inputtext", no_spaces=True) ##inputtext", uppercase=True) ##inputtext", decimal=True) ##inputtext", hexadecimal=True) ##inputtext", readonly=True, default_value="read only") ##inputtext", password=True) ##inputtext", multiline=True) | 3.228554 | 3 |
commands/setcannon.py | 1757WestwoodRobotics/mentorbot | 2 | 6632248 | <reponame>1757WestwoodRobotics/mentorbot<gh_stars>1-10
from enum import Enum, auto
from subsystems.cannonsubsystem import CannonSubsystem
from commands2 import CommandBase
class SetCannon(CommandBase):
class Mode(Enum):
Off = auto()
Fill = auto()
Launch = auto()
def __init__(self, cannon: CannonSubsystem, mode: Mode) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.cannon = cannon
self.mode = mode
self.addRequirements([self.cannon])
self.funcs = {
SetCannon.Mode.Off: self.cannon.close,
SetCannon.Mode.Fill: self.cannon.fill,
SetCannon.Mode.Launch: self.cannon.launch
}
self.isFinished = lambda: True
def execute(self) -> None:
self.funcs[self.mode]()
| from enum import Enum, auto
from subsystems.cannonsubsystem import CannonSubsystem
from commands2 import CommandBase
class SetCannon(CommandBase):
class Mode(Enum):
Off = auto()
Fill = auto()
Launch = auto()
def __init__(self, cannon: CannonSubsystem, mode: Mode) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.cannon = cannon
self.mode = mode
self.addRequirements([self.cannon])
self.funcs = {
SetCannon.Mode.Off: self.cannon.close,
SetCannon.Mode.Fill: self.cannon.fill,
SetCannon.Mode.Launch: self.cannon.launch
}
self.isFinished = lambda: True
def execute(self) -> None:
self.funcs[self.mode]() | none | 1 | 2.769027 | 3 |
python-lib/org_manager_bot.py | abrazite/sc-org | 0 | 6632249 | import discord
from discord.ext import commands, tasks
import org_manager_api
import theimc
import secrets
import string
import os
os.environ['REQUESTS_CA_BUNDLE'] = os.path.join(
'/etc/ssl/certs/',
'ca-certificates.crt')
# API Server
API_SERVER = 'https://api.org-manager.space/1.0.0'
api = org_manager_api.OrgManagerAPI(API_SERVER, secrets.ORGANIZATION_ID)
# Prefix for calling bot in discord
client = commands.Bot(command_prefix='.')
# Runs on start to show bot is online
@client.event
async def on_ready():
await client.change_presence(status=discord.Status.online, activity=discord.Game('.help'))
print("Bot is ready")
app_info = await client.application_info()
print(f'https://discord.com/oauth2/authorize?client_id={app_info.id}&bot')
@client.command(brief='superadmin')
async def create_theimc_from_discord(ctx):
if ctx.author.name == 'abrazite':
theimc.create_org(api, create_api_context(ctx))
@client.command(brief='superadmin')
async def sync_theimc_from_discord(ctx):
if ctx.author.name == 'abrazite':
theimc.create_org(api, create_api_context(ctx))
theimc.sync_org(api, create_api_context(ctx), ctx.message.channel.guild.members)
# Clear messages in chat
@client.command(brief='superadmin')
async def clear(ctx, amount=5):
if ctx.author.name == 'abrazite':
await ctx.channel.purge(limit=amount + 1)
await ctx.send(f'{amount} Posts have been cleared')
@client.command(brief='Reports when user joined the org')
async def is_member(ctx, personnel_str):
membership = api.membership(create_api_context(ctx), personnel_str)
if membership:
# todo(James): format date :)
await ctx.send(f'joined {membership["joinedDate"]}')
else:
await ctx.send('no membership found')
@client.command(brief='Shows basic personnel info')
async def whois(ctx, *, personnel_str=None):
if personnel_str:
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel_summary(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel:
message_str = f'{full_formatted_nick(personnel)}\r'
if personnel['username']:
message_str += f'\rdiscord:\t {personnel["username"]}#{personnel["discriminator"]}'
if personnel['citizenRecord']:
message_str += f'\rcitizenRecord:\t {personnel["citizenRecord"]}'
if personnel['citizenName']:
message_str += f'\rcitizenName:\t {personnel["citizenName"]}'
if personnel['handleName']:
message_str += f'\rhandleName:\t {personnel["handleName"]}'
message_str += '\r'
if personnel['rankDate']:
message_str += f'\rrankDate:\t {personnel["rankDate"]}'
await ctx.send(message_str)
else:
await ctx.send('error: record not found')
@client.command(brief='Lists all org branches')
async def list_branches(ctx, page=0):
LIMIT = 10
branches = api.branches(create_api_context(ctx), limit=LIMIT, page=page)
branches_strs = ''
if len(branches) == LIMIT or page > 0:
branches_strs += f'(page {page})\r\r'
for branch in branches:
branch_str = branch["abbreviation"]
if branch["branch"]:
branch_str += '\t' + branch["branch"] + '\r'
branches_strs += branch_str
await ctx.send(branches_strs)
@client.command(brief='Lists all org grades')
async def list_grades(ctx, page=0):
LIMIT = 10
grades = api.grades(create_api_context(ctx), limit=LIMIT, page=page)
grades_strs = ''
if len(grades) == LIMIT or page > 0:
grades_strs += f'(page {page})\r\r'
for grade in grades:
grade_str = grade["abbreviation"]
if grade["grade"]:
grade_str += '\t' + grade["grade"]
grades_strs += grade_str + '\r'
await ctx.send(grades_strs)
@client.command(brief='Lists all org ranks')
async def list_ranks(ctx, page=0):
LIMIT = 10
ranks = api.ranks(create_api_context(ctx), limit=LIMIT, page=page)
ranks_strs = ''
if len(ranks) == LIMIT or page > 0:
ranks_strs += f'(page {page})\r\r'
for rank in ranks:
ranks_str = ''
if rank["branchAbbreviation"]:
ranks_str += rank["branchAbbreviation"] + '-'
if rank["gradeAbbreviation"]:
ranks_str += rank["gradeAbbreviation"] + '-'
if rank["rankAbbreviation"]:
ranks_str += rank["rankAbbreviation"]
if rank["rankName"]:
ranks_str += '\t' + rank["rankName"]
ranks_strs += ranks_str + '\r'
await ctx.send(ranks_strs)
@client.command(brief='Lists all org certifications')
async def list_certifications(ctx, page: int = 0):
LIMIT = 10
certifications = api.certifications(create_api_context(ctx), limit=LIMIT, page=page)
certifications_strs = ''
if len(certifications) == LIMIT or page > 0:
certifications_strs += f'(page {page})\r\r'
for certification in certifications:
certification_str = certification["abbreviation"]
if certification["name"]:
certification_str += '\t' + certification["name"]
certifications_strs += certification_str + '\r'
await ctx.send(certifications_strs)
@client.command(brief='Lists all rank change records')
async def list_rank_records(ctx, personnel_str: str = None, page: int = 0):
LIMIT = 10
if personnel_str:
personnel = api.personnel(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel is None or 'rankChangeRecords' not in personnel:
await ctx.send('error: no records found')
return
records = personnel['rankChangeRecords']
record_str = ''
if len(records) == LIMIT or page > 0:
record_str += f'(page {page})\r\r'
for i, record in enumerate(records):
if page * LIMIT <= i < (page + 1) * LIMIT:
record_str += record['date']
if record['abbreviation']:
record_str += '\t' + record['abbreviation']
record_str += '\r'
await ctx.send(record_str)
@client.command(brief='Lists all certification records')
async def list_cert_records(ctx, personnel_str: str = None, page: int = 0):
LIMIT = 10
if personnel_str:
personnel = api.personnel(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel is None or 'certificationRecords' not in personnel:
await ctx.send('error: no records found')
return
records = personnel['certificationRecords']
record_str = ''
if len(records) == LIMIT or page > 0:
record_str += f'(page {page})\r\r'
for i, record in enumerate(records):
if page * LIMIT <= i < (page + 1) * LIMIT:
record_str += record['date']
if record['abbreviation']:
record_str += '\t' + record['abbreviation']
record_str += '\r'
await ctx.send(record_str)
@client.command(brief='Lists all ops attended by personnel')
async def list_op_records(ctx, personnel_str: str = None, page: int = 0):
LIMIT = 10
if personnel_str:
personnel = api.personnel(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel is None or 'operationAttendenceRecords' not in personnel:
await ctx.send('error: no records found')
return
records = personnel['operationAttendenceRecords']
record_str = ''
if len(records) == LIMIT or page > 0:
record_str += f'(page {page})\r\r'
for i, record in enumerate(records):
if page * LIMIT <= i < (page + 1) * LIMIT:
record_str += record['date']
if record['name']:
record_str += '\t' + record['name']
record_str += '\r'
await ctx.send(record_str)
@client.command(brief='Lists all notes for personnel')
async def list_note_records(ctx, personnel_str: str = None, page: int = 0):
LIMIT = 10
if personnel_str:
personnel = api.personnel(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel is None or 'noteRecords' not in personnel:
await ctx.send('error: no records found')
return
records = personnel['noteRecords']
record_str = ''
if len(records) == LIMIT or page > 0:
record_str += f'(page {page})\r\r'
for i, record in enumerate(records):
if page * LIMIT <= i < (page + 1) * LIMIT:
record_str += record['date']
if record['note']:
record_str += '\t' + record['note']
record_str += '\r'
await ctx.send(record_str)
@client.command(brief='Lists all personnel with filtering')
async def search_personnel(ctx, filter_str=None, page=0):
LIMIT = 10
summary = api.personnel_summary_all(create_api_context(ctx))
list_str = ''
summary = sorted(summary, key=lambda e: formatted_nick(e))
count = 0
total_count = 0
for personnel in summary:
include = filter_str is None
include = include or (filter_str == personnel["gradeAbbreviation"])
include = include or (filter_str == personnel["branchAbbreviation"])
include = include or (filter_str == personnel["rankAbbreviation"])
include = include or (filter_str == f'{personnel["branchAbbreviation"]}-{personnel["gradeAbbreviation"]}-{personnel["rankAbbreviation"]}')
include = include or (filter_str == f'{personnel["branchAbbreviation"]}-{personnel["rankAbbreviation"]}')
include = include or (filter_str == f'{personnel["citizenRecord"]}')
include = include or (filter_str == f'{personnel["citizenName"]}')
include = include or (filter_str == f'{personnel["handleName"]}')
include = include or (filter_str == f'{personnel["username"]}#{personnel["discriminator"]}')
if include:
total_count += 1
if include and count < LIMIT:
list_str += f'{formatted_nick(personnel)}\r'
count += 1
if total_count > LIMIT:
total_pages = int(total_count / LIMIT)
list_str = f'(page {page} of {total_pages})\r\r' + list_str
list_str += '\r'
await ctx.send(list_str)
@client.command(brief='Create a new org branch')
async def create_branch(ctx, abbreviation: str, branch: str = None):
record_id = api.create_branch(create_api_context(ctx), abbreviation, branch)
if record_id:
await ctx.send(f'created branch {abbreviation}')
else:
await ctx.send('error: no branch created')
@client.command(brief='Create a new org grade')
async def create_grade(ctx, abbreviation: str, grade: str = None):
record_id = api.create_grade(create_api_context(ctx), abbreviation, grade)
if record_id:
await ctx.send(f'created grade {abbreviation}')
else:
await ctx.send('error: no grade created')
@client.command(brief='Create a new org rank')
async def create_rank(ctx, abbreviation: str, rank: str = None, branch_str: str = None, grade_str: str = None):
record_id = api.create_rank(create_api_context(ctx), abbreviation, rank, branch_str, grade_str)
if record_id:
await ctx.send(f'created rank {abbreviation}')
else:
await ctx.send('error: no rank created')
@client.command(brief='Create a new certification')
async def create_certification(ctx, branch_str: str, abbreviation: str, name: str):
record_id = api.create_certification(create_api_context(ctx), branch_str, abbreviation, name)
if record_id:
await ctx.send(f'created certification {abbreviation}')
else:
await ctx.send('error: no certification created')
@client.command(brief='Records certification')
async def record_cert(ctx, personnel_or_channel_str: str, certification_str: str):
members = None
for channel in ctx.message.channel.guild.voice_channels:
if channel.name == personnel_or_channel_str:
members = []
for member in channel.members:
personnel_str = f'{member.name}#{member.discriminator}'
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
if personnel:
members.append(personnel)
if members is None:
personnel = api.personnel_summary(create_api_context(ctx), personnel_or_channel_str)
members = [personnel]
records_str = ''
for member in members:
personnel_str = f'{member["username"]}#{member["discriminator"]}'
record_id = api.record_cert(create_api_context(ctx), personnel_str, certification_str)
if record_id:
name = member["handleName"] if member["handleName"] else member["username"]
records_str += f'updated {name}\r'
if records_str != '':
await ctx.send(records_str)
else:
await ctx.send('warning: no records updated')
@client.command(brief='Records operation attendance')
async def record_op(ctx, personnel_or_channel_str: str, op_name: str = None):
members = None
for channel in ctx.message.channel.guild.voice_channels:
if channel.name == personnel_or_channel_str:
members = []
for member in channel.members:
personnel_str = f'{member.name}#{member.discriminator}'
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
if personnel:
members.append(personnel)
if members is None:
personnel = api.personnel_summary(create_api_context(ctx), personnel_or_channel_str)
members = [personnel]
records_str = ''
for member in members:
personnel_str = f'{member["username"]}#{member["discriminator"]}'
record_id = api.record_op(create_api_context(ctx), personnel_str, op_name)
if record_id:
name = member["handleName"] if member["handleName"] else member["username"]
records_str += f'updated {name}\r'
if records_str != '':
await ctx.send(records_str)
else:
await ctx.send('warning: no records updated')
@client.command(brief='Records note')
async def record_note(ctx, personnel_or_channel_str: str, *, note: str):
members = None
for channel in ctx.message.channel.guild.voice_channels:
if channel.name == personnel_or_channel_str:
members = []
for member in channel.members:
personnel_str = f'{member.name}#{member.discriminator}'
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
if personnel:
members.append(personnel)
if members is None:
personnel = api.personnel_summary(create_api_context(ctx), personnel_or_channel_str)
members = [personnel]
records_str = ''
for member in members:
personnel_str = f'{member["username"]}#{member["discriminator"]}'
record_id = api.record_note(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}', personnel_str, note)
if record_id:
name = member["handleName"] if member["handleName"] else member["username"]
records_str += f'updated {name}\r'
if records_str != '':
await ctx.send(records_str)
else:
await ctx.send('warning: no records updated')
@client.command(brief='Creates all records for a new member')
async def add_member(ctx, discord_handle, sc_handle_name, rank_str, recruited_by_str=None):
joined_date = ctx.message.channel.guild.get_member(discord_handle)
record_id = api.add_member(create_api_context(ctx), discord_handle, sc_handle_name, rank_str, recruited_by_str, joined_date)
if record_id:
await ctx.send('member added')
else:
await ctx.send('error: could not add member')
@client.command(brief='Records personnel joining org - do not use with add_member')
async def record_joined_org(ctx, personnel_str, recruited_by_str=None):
record_id = api.record_joined_org(create_api_context(ctx), personnel_str, recruited_by_str)
if record_id:
await ctx.send('joined org')
else:
await ctx.send('error: could not join org')
@client.command(brief='Records personnel leaving org')
async def record_left_org(ctx, personnel_str):
record_id = api.record_left_org(create_api_context(ctx), personnel_str)
if record_id:
await ctx.send('left org')
else:
await ctx.send('error: could not leave org')
@client.command(brief="Changes user's rank, grade, and branch")
async def change_rank(ctx, personnel_str, rank_str):
record_id = api.change_rank(create_api_context(ctx), personnel_str, rank_str)
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
member = ctx.message.channel.guild.get_member_named(f'{personnel["username"]}#{personnel["discriminator"]}')
if member is None:
await ctx.send('error: could not change rank, no discord member found in guild')
return
if record_id:
try:
await member.edit(nick=formatted_nick(personnel))
await ctx.send('rank changed')
except:
await ctx.send('rank changed, could not update nickname')
else:
await ctx.send('error: could not change rank')
@client.command(brief='Changes user nickname - testing')
async def change_nick(ctx, personnel_str, nick):
member = ctx.message.channel.guild.get_member_named(personnel_str)
if member is None:
await ctx.send('error: could not change nickname, no discord member found in guild')
return
await member.edit(nick=nick)
@client.command(brief='Checks discord tags against database')
async def check_tags(ctx, correct_tags: bool = False):
report_users = ''
all_personnel = api.personnel_summary_all(create_api_context(ctx))
if all_personnel is None:
await ctx.send('error: could not lookup personnel')
return
for personnel in all_personnel:
personnel_str = f'{personnel["username"]}#{personnel["discriminator"]}'
member = ctx.message.channel.guild.get_member_named(personnel_str)
if member:
member_nick = member.nick if member.nick else member.display_name
nick = formatted_nick(personnel)
if member_nick != nick:
report_users += f'{member_nick}\t -> \t {nick}'
if correct_tags:
try:
await member.edit(nick=nick)
except:
report_users += '\tE'
report_users += '\r'
if len(report_users) > 0:
await ctx.send(report_users)
else:
await ctx.send('all member tags are correct')
@client.command(brief='Validates user credentials')
async def validate(ctx):
response = api.validate(create_api_context(ctx))
await ctx.send(response)
def create_api_context(ctx) -> org_manager_api.APIContext:
printable = set(string.printable)
return org_manager_api.APIContext(
ctx.author.id,
''.join(filter(lambda x: x in printable, ctx.author.name)),
ctx.author.discriminator
)
def formatted_nick(personnel) -> str:
tag = personnel["rankAbbreviation"]
name = personnel["handleName"] if personnel["handleName"] else personnel["username"]
return f'[{tag}] {name}'
def full_formatted_nick(personnel) -> str:
tag = ''
if personnel["branchAbbreviation"]:
tag += personnel["branchAbbreviation"] + '-'
if personnel["gradeAbbreviation"]:
tag += personnel["gradeAbbreviation"] + '-'
if personnel["rankAbbreviation"]:
tag += personnel["rankAbbreviation"]
name = personnel["handleName"] if personnel["handleName"] else personnel["username"]
return f'[{tag}] {name}'
client.run(secrets.CLIENT_KEY)
| import discord
from discord.ext import commands, tasks
import org_manager_api
import theimc
import secrets
import string
import os
os.environ['REQUESTS_CA_BUNDLE'] = os.path.join(
'/etc/ssl/certs/',
'ca-certificates.crt')
# API Server
API_SERVER = 'https://api.org-manager.space/1.0.0'
api = org_manager_api.OrgManagerAPI(API_SERVER, secrets.ORGANIZATION_ID)
# Prefix for calling bot in discord
client = commands.Bot(command_prefix='.')
# Runs on start to show bot is online
@client.event
async def on_ready():
await client.change_presence(status=discord.Status.online, activity=discord.Game('.help'))
print("Bot is ready")
app_info = await client.application_info()
print(f'https://discord.com/oauth2/authorize?client_id={app_info.id}&bot')
@client.command(brief='superadmin')
async def create_theimc_from_discord(ctx):
if ctx.author.name == 'abrazite':
theimc.create_org(api, create_api_context(ctx))
@client.command(brief='superadmin')
async def sync_theimc_from_discord(ctx):
if ctx.author.name == 'abrazite':
theimc.create_org(api, create_api_context(ctx))
theimc.sync_org(api, create_api_context(ctx), ctx.message.channel.guild.members)
# Clear messages in chat
@client.command(brief='superadmin')
async def clear(ctx, amount=5):
if ctx.author.name == 'abrazite':
await ctx.channel.purge(limit=amount + 1)
await ctx.send(f'{amount} Posts have been cleared')
@client.command(brief='Reports when user joined the org')
async def is_member(ctx, personnel_str):
membership = api.membership(create_api_context(ctx), personnel_str)
if membership:
# todo(James): format date :)
await ctx.send(f'joined {membership["joinedDate"]}')
else:
await ctx.send('no membership found')
@client.command(brief='Shows basic personnel info')
async def whois(ctx, *, personnel_str=None):
if personnel_str:
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel_summary(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel:
message_str = f'{full_formatted_nick(personnel)}\r'
if personnel['username']:
message_str += f'\rdiscord:\t {personnel["username"]}#{personnel["discriminator"]}'
if personnel['citizenRecord']:
message_str += f'\rcitizenRecord:\t {personnel["citizenRecord"]}'
if personnel['citizenName']:
message_str += f'\rcitizenName:\t {personnel["citizenName"]}'
if personnel['handleName']:
message_str += f'\rhandleName:\t {personnel["handleName"]}'
message_str += '\r'
if personnel['rankDate']:
message_str += f'\rrankDate:\t {personnel["rankDate"]}'
await ctx.send(message_str)
else:
await ctx.send('error: record not found')
@client.command(brief='Lists all org branches')
async def list_branches(ctx, page=0):
LIMIT = 10
branches = api.branches(create_api_context(ctx), limit=LIMIT, page=page)
branches_strs = ''
if len(branches) == LIMIT or page > 0:
branches_strs += f'(page {page})\r\r'
for branch in branches:
branch_str = branch["abbreviation"]
if branch["branch"]:
branch_str += '\t' + branch["branch"] + '\r'
branches_strs += branch_str
await ctx.send(branches_strs)
@client.command(brief='Lists all org grades')
async def list_grades(ctx, page=0):
LIMIT = 10
grades = api.grades(create_api_context(ctx), limit=LIMIT, page=page)
grades_strs = ''
if len(grades) == LIMIT or page > 0:
grades_strs += f'(page {page})\r\r'
for grade in grades:
grade_str = grade["abbreviation"]
if grade["grade"]:
grade_str += '\t' + grade["grade"]
grades_strs += grade_str + '\r'
await ctx.send(grades_strs)
@client.command(brief='Lists all org ranks')
async def list_ranks(ctx, page=0):
LIMIT = 10
ranks = api.ranks(create_api_context(ctx), limit=LIMIT, page=page)
ranks_strs = ''
if len(ranks) == LIMIT or page > 0:
ranks_strs += f'(page {page})\r\r'
for rank in ranks:
ranks_str = ''
if rank["branchAbbreviation"]:
ranks_str += rank["branchAbbreviation"] + '-'
if rank["gradeAbbreviation"]:
ranks_str += rank["gradeAbbreviation"] + '-'
if rank["rankAbbreviation"]:
ranks_str += rank["rankAbbreviation"]
if rank["rankName"]:
ranks_str += '\t' + rank["rankName"]
ranks_strs += ranks_str + '\r'
await ctx.send(ranks_strs)
@client.command(brief='Lists all org certifications')
async def list_certifications(ctx, page: int = 0):
LIMIT = 10
certifications = api.certifications(create_api_context(ctx), limit=LIMIT, page=page)
certifications_strs = ''
if len(certifications) == LIMIT or page > 0:
certifications_strs += f'(page {page})\r\r'
for certification in certifications:
certification_str = certification["abbreviation"]
if certification["name"]:
certification_str += '\t' + certification["name"]
certifications_strs += certification_str + '\r'
await ctx.send(certifications_strs)
@client.command(brief='Lists all rank change records')
async def list_rank_records(ctx, personnel_str: str = None, page: int = 0):
LIMIT = 10
if personnel_str:
personnel = api.personnel(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel is None or 'rankChangeRecords' not in personnel:
await ctx.send('error: no records found')
return
records = personnel['rankChangeRecords']
record_str = ''
if len(records) == LIMIT or page > 0:
record_str += f'(page {page})\r\r'
for i, record in enumerate(records):
if page * LIMIT <= i < (page + 1) * LIMIT:
record_str += record['date']
if record['abbreviation']:
record_str += '\t' + record['abbreviation']
record_str += '\r'
await ctx.send(record_str)
@client.command(brief='Lists all certification records')
async def list_cert_records(ctx, personnel_str: str = None, page: int = 0):
LIMIT = 10
if personnel_str:
personnel = api.personnel(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel is None or 'certificationRecords' not in personnel:
await ctx.send('error: no records found')
return
records = personnel['certificationRecords']
record_str = ''
if len(records) == LIMIT or page > 0:
record_str += f'(page {page})\r\r'
for i, record in enumerate(records):
if page * LIMIT <= i < (page + 1) * LIMIT:
record_str += record['date']
if record['abbreviation']:
record_str += '\t' + record['abbreviation']
record_str += '\r'
await ctx.send(record_str)
@client.command(brief='Lists all ops attended by personnel')
async def list_op_records(ctx, personnel_str: str = None, page: int = 0):
LIMIT = 10
if personnel_str:
personnel = api.personnel(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel is None or 'operationAttendenceRecords' not in personnel:
await ctx.send('error: no records found')
return
records = personnel['operationAttendenceRecords']
record_str = ''
if len(records) == LIMIT or page > 0:
record_str += f'(page {page})\r\r'
for i, record in enumerate(records):
if page * LIMIT <= i < (page + 1) * LIMIT:
record_str += record['date']
if record['name']:
record_str += '\t' + record['name']
record_str += '\r'
await ctx.send(record_str)
@client.command(brief='Lists all notes for personnel')
async def list_note_records(ctx, personnel_str: str = None, page: int = 0):
LIMIT = 10
if personnel_str:
personnel = api.personnel(create_api_context(ctx), personnel_str)
else:
personnel = api.personnel(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}')
if personnel is None or 'noteRecords' not in personnel:
await ctx.send('error: no records found')
return
records = personnel['noteRecords']
record_str = ''
if len(records) == LIMIT or page > 0:
record_str += f'(page {page})\r\r'
for i, record in enumerate(records):
if page * LIMIT <= i < (page + 1) * LIMIT:
record_str += record['date']
if record['note']:
record_str += '\t' + record['note']
record_str += '\r'
await ctx.send(record_str)
@client.command(brief='Lists all personnel with filtering')
async def search_personnel(ctx, filter_str=None, page=0):
LIMIT = 10
summary = api.personnel_summary_all(create_api_context(ctx))
list_str = ''
summary = sorted(summary, key=lambda e: formatted_nick(e))
count = 0
total_count = 0
for personnel in summary:
include = filter_str is None
include = include or (filter_str == personnel["gradeAbbreviation"])
include = include or (filter_str == personnel["branchAbbreviation"])
include = include or (filter_str == personnel["rankAbbreviation"])
include = include or (filter_str == f'{personnel["branchAbbreviation"]}-{personnel["gradeAbbreviation"]}-{personnel["rankAbbreviation"]}')
include = include or (filter_str == f'{personnel["branchAbbreviation"]}-{personnel["rankAbbreviation"]}')
include = include or (filter_str == f'{personnel["citizenRecord"]}')
include = include or (filter_str == f'{personnel["citizenName"]}')
include = include or (filter_str == f'{personnel["handleName"]}')
include = include or (filter_str == f'{personnel["username"]}#{personnel["discriminator"]}')
if include:
total_count += 1
if include and count < LIMIT:
list_str += f'{formatted_nick(personnel)}\r'
count += 1
if total_count > LIMIT:
total_pages = int(total_count / LIMIT)
list_str = f'(page {page} of {total_pages})\r\r' + list_str
list_str += '\r'
await ctx.send(list_str)
@client.command(brief='Create a new org branch')
async def create_branch(ctx, abbreviation: str, branch: str = None):
record_id = api.create_branch(create_api_context(ctx), abbreviation, branch)
if record_id:
await ctx.send(f'created branch {abbreviation}')
else:
await ctx.send('error: no branch created')
@client.command(brief='Create a new org grade')
async def create_grade(ctx, abbreviation: str, grade: str = None):
record_id = api.create_grade(create_api_context(ctx), abbreviation, grade)
if record_id:
await ctx.send(f'created grade {abbreviation}')
else:
await ctx.send('error: no grade created')
@client.command(brief='Create a new org rank')
async def create_rank(ctx, abbreviation: str, rank: str = None, branch_str: str = None, grade_str: str = None):
record_id = api.create_rank(create_api_context(ctx), abbreviation, rank, branch_str, grade_str)
if record_id:
await ctx.send(f'created rank {abbreviation}')
else:
await ctx.send('error: no rank created')
@client.command(brief='Create a new certification')
async def create_certification(ctx, branch_str: str, abbreviation: str, name: str):
record_id = api.create_certification(create_api_context(ctx), branch_str, abbreviation, name)
if record_id:
await ctx.send(f'created certification {abbreviation}')
else:
await ctx.send('error: no certification created')
@client.command(brief='Records certification')
async def record_cert(ctx, personnel_or_channel_str: str, certification_str: str):
members = None
for channel in ctx.message.channel.guild.voice_channels:
if channel.name == personnel_or_channel_str:
members = []
for member in channel.members:
personnel_str = f'{member.name}#{member.discriminator}'
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
if personnel:
members.append(personnel)
if members is None:
personnel = api.personnel_summary(create_api_context(ctx), personnel_or_channel_str)
members = [personnel]
records_str = ''
for member in members:
personnel_str = f'{member["username"]}#{member["discriminator"]}'
record_id = api.record_cert(create_api_context(ctx), personnel_str, certification_str)
if record_id:
name = member["handleName"] if member["handleName"] else member["username"]
records_str += f'updated {name}\r'
if records_str != '':
await ctx.send(records_str)
else:
await ctx.send('warning: no records updated')
@client.command(brief='Records operation attendance')
async def record_op(ctx, personnel_or_channel_str: str, op_name: str = None):
members = None
for channel in ctx.message.channel.guild.voice_channels:
if channel.name == personnel_or_channel_str:
members = []
for member in channel.members:
personnel_str = f'{member.name}#{member.discriminator}'
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
if personnel:
members.append(personnel)
if members is None:
personnel = api.personnel_summary(create_api_context(ctx), personnel_or_channel_str)
members = [personnel]
records_str = ''
for member in members:
personnel_str = f'{member["username"]}#{member["discriminator"]}'
record_id = api.record_op(create_api_context(ctx), personnel_str, op_name)
if record_id:
name = member["handleName"] if member["handleName"] else member["username"]
records_str += f'updated {name}\r'
if records_str != '':
await ctx.send(records_str)
else:
await ctx.send('warning: no records updated')
@client.command(brief='Records note')
async def record_note(ctx, personnel_or_channel_str: str, *, note: str):
members = None
for channel in ctx.message.channel.guild.voice_channels:
if channel.name == personnel_or_channel_str:
members = []
for member in channel.members:
personnel_str = f'{member.name}#{member.discriminator}'
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
if personnel:
members.append(personnel)
if members is None:
personnel = api.personnel_summary(create_api_context(ctx), personnel_or_channel_str)
members = [personnel]
records_str = ''
for member in members:
personnel_str = f'{member["username"]}#{member["discriminator"]}'
record_id = api.record_note(create_api_context(ctx), f'{ctx.author.name}#{ctx.author.discriminator}', personnel_str, note)
if record_id:
name = member["handleName"] if member["handleName"] else member["username"]
records_str += f'updated {name}\r'
if records_str != '':
await ctx.send(records_str)
else:
await ctx.send('warning: no records updated')
@client.command(brief='Creates all records for a new member')
async def add_member(ctx, discord_handle, sc_handle_name, rank_str, recruited_by_str=None):
joined_date = ctx.message.channel.guild.get_member(discord_handle)
record_id = api.add_member(create_api_context(ctx), discord_handle, sc_handle_name, rank_str, recruited_by_str, joined_date)
if record_id:
await ctx.send('member added')
else:
await ctx.send('error: could not add member')
@client.command(brief='Records personnel joining org - do not use with add_member')
async def record_joined_org(ctx, personnel_str, recruited_by_str=None):
record_id = api.record_joined_org(create_api_context(ctx), personnel_str, recruited_by_str)
if record_id:
await ctx.send('joined org')
else:
await ctx.send('error: could not join org')
@client.command(brief='Records personnel leaving org')
async def record_left_org(ctx, personnel_str):
record_id = api.record_left_org(create_api_context(ctx), personnel_str)
if record_id:
await ctx.send('left org')
else:
await ctx.send('error: could not leave org')
@client.command(brief="Changes user's rank, grade, and branch")
async def change_rank(ctx, personnel_str, rank_str):
record_id = api.change_rank(create_api_context(ctx), personnel_str, rank_str)
personnel = api.personnel_summary(create_api_context(ctx), personnel_str)
member = ctx.message.channel.guild.get_member_named(f'{personnel["username"]}#{personnel["discriminator"]}')
if member is None:
await ctx.send('error: could not change rank, no discord member found in guild')
return
if record_id:
try:
await member.edit(nick=formatted_nick(personnel))
await ctx.send('rank changed')
except:
await ctx.send('rank changed, could not update nickname')
else:
await ctx.send('error: could not change rank')
@client.command(brief='Changes user nickname - testing')
async def change_nick(ctx, personnel_str, nick):
member = ctx.message.channel.guild.get_member_named(personnel_str)
if member is None:
await ctx.send('error: could not change nickname, no discord member found in guild')
return
await member.edit(nick=nick)
@client.command(brief='Checks discord tags against database')
async def check_tags(ctx, correct_tags: bool = False):
report_users = ''
all_personnel = api.personnel_summary_all(create_api_context(ctx))
if all_personnel is None:
await ctx.send('error: could not lookup personnel')
return
for personnel in all_personnel:
personnel_str = f'{personnel["username"]}#{personnel["discriminator"]}'
member = ctx.message.channel.guild.get_member_named(personnel_str)
if member:
member_nick = member.nick if member.nick else member.display_name
nick = formatted_nick(personnel)
if member_nick != nick:
report_users += f'{member_nick}\t -> \t {nick}'
if correct_tags:
try:
await member.edit(nick=nick)
except:
report_users += '\tE'
report_users += '\r'
if len(report_users) > 0:
await ctx.send(report_users)
else:
await ctx.send('all member tags are correct')
@client.command(brief='Validates user credentials')
async def validate(ctx):
response = api.validate(create_api_context(ctx))
await ctx.send(response)
def create_api_context(ctx) -> org_manager_api.APIContext:
printable = set(string.printable)
return org_manager_api.APIContext(
ctx.author.id,
''.join(filter(lambda x: x in printable, ctx.author.name)),
ctx.author.discriminator
)
def formatted_nick(personnel) -> str:
tag = personnel["rankAbbreviation"]
name = personnel["handleName"] if personnel["handleName"] else personnel["username"]
return f'[{tag}] {name}'
def full_formatted_nick(personnel) -> str:
tag = ''
if personnel["branchAbbreviation"]:
tag += personnel["branchAbbreviation"] + '-'
if personnel["gradeAbbreviation"]:
tag += personnel["gradeAbbreviation"] + '-'
if personnel["rankAbbreviation"]:
tag += personnel["rankAbbreviation"]
name = personnel["handleName"] if personnel["handleName"] else personnel["username"]
return f'[{tag}] {name}'
client.run(secrets.CLIENT_KEY)
| en | 0.39764 | # API Server # Prefix for calling bot in discord # Runs on start to show bot is online # Clear messages in chat # todo(James): format date :) #{ctx.author.discriminator}') #{personnel["discriminator"]}' #{ctx.author.discriminator}') #{ctx.author.discriminator}') #{ctx.author.discriminator}') #{ctx.author.discriminator}') #{personnel["discriminator"]}') #{member.discriminator}' #{member["discriminator"]}' #{member.discriminator}' #{member["discriminator"]}' #{member.discriminator}' #{member["discriminator"]}' #{ctx.author.discriminator}', personnel_str, note) #{personnel["discriminator"]}') #{personnel["discriminator"]}' | 2.419804 | 2 |
vk/types/responses/docs.py | Inzilkin/vk.py | 24 | 6632250 | <reponame>Inzilkin/vk.py
from .others import SimpleResponse
from ..base import BaseModel
from ..attachments import Document
import typing
class Add(SimpleResponse):
pass
class Delete(SimpleResponse):
pass
class Edit(SimpleResponse):
pass
class GetResponse(BaseModel):
count: int = None
items: typing.List[Document] = None
class Get(BaseModel):
response: GetResponse = None
class GetById(BaseModel):
response: typing.List[Document] = None
class GetMessagesUploadServerResponse(BaseModel):
upload_url: str = None
class GetMessagesUploadServer(BaseModel):
response: GetMessagesUploadServerResponse = None
class GetTypesItems(BaseModel):
id: int = None
name: str = None
count: int = None
class GetTypesResponse(BaseModel):
count: int = None
items: typing.List[GetTypesItems] = None
class GetTypes(BaseModel):
response: GetTypesResponse = None
class GetUploadServer(BaseModel):
response: GetMessagesUploadServerResponse = None
class GetWallUploadServer(BaseModel):
response: GetMessagesUploadServerResponse = None
class Save(BaseModel):
response: typing.List[Document] = None
class SearchResponse(BaseModel):
count: int = None
items: typing.List[Document] = None
class Search(BaseModel):
response: SearchResponse = None
| from .others import SimpleResponse
from ..base import BaseModel
from ..attachments import Document
import typing
class Add(SimpleResponse):
pass
class Delete(SimpleResponse):
pass
class Edit(SimpleResponse):
pass
class GetResponse(BaseModel):
count: int = None
items: typing.List[Document] = None
class Get(BaseModel):
response: GetResponse = None
class GetById(BaseModel):
response: typing.List[Document] = None
class GetMessagesUploadServerResponse(BaseModel):
upload_url: str = None
class GetMessagesUploadServer(BaseModel):
response: GetMessagesUploadServerResponse = None
class GetTypesItems(BaseModel):
id: int = None
name: str = None
count: int = None
class GetTypesResponse(BaseModel):
count: int = None
items: typing.List[GetTypesItems] = None
class GetTypes(BaseModel):
response: GetTypesResponse = None
class GetUploadServer(BaseModel):
response: GetMessagesUploadServerResponse = None
class GetWallUploadServer(BaseModel):
response: GetMessagesUploadServerResponse = None
class Save(BaseModel):
response: typing.List[Document] = None
class SearchResponse(BaseModel):
count: int = None
items: typing.List[Document] = None
class Search(BaseModel):
response: SearchResponse = None | none | 1 | 2.10805 | 2 |